max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
tests/api.py
|
edyan/python-anyconfig
| 0
|
6629651
|
#
# Copyright (C) 2012 - 2017 <NAME> <ssato at redhat.com>
# License: MIT
#
# pylint: disable=missing-docstring, invalid-name, no-member
from __future__ import absolute_import
import copy
import logging
import io
import os
import os.path
import unittest
import anyconfig.api as TT
import anyconfig.backends
import anyconfig.compat
import anyconfig.dicts
import anyconfig.template
import tests.common
from tests.common import CNF_0, SCM_0, CNF_1, dicts_equal, resdir
# suppress logging messages.
TT.LOGGER.setLevel(logging.CRITICAL)
CNF_TMPL_0 = """name: {{ name|default('a') }}
a: {{ a }}
b:
b:
{% for x in b.b -%}
- {{ x }}
{% endfor %}
c: {{ b.c }}
"""
CNF_TMPL_1 = """a: {{ a }}
b:
b:
{% for x in b.b -%}
- {{ x }}
{% endfor %}
c: {{ b.c }}
name: {{ name }}
"""
CNF_TMPL_2 = """a: {{ a }}
b:
b:
{% for x in b.b -%}
- {{ x }}
{% endfor %}
d: {{ b.d }}
e: 0
"""
CNF_XML_1 = {'config': {'@attrs': {'name': 'foo'},
'a': '0',
'b': {'@attrs': {'id': 'b0'}, '@text': 'bbb'},
'c': None,
'sect0': {'d': 'x, y, z'},
'list1': [{'item': '0'}, {'item': '1'},
{'item': '2'}],
'list2': {'@attrs': {'id': 'list2'},
'@children': [{'item': 'i'},
{'item': 'j'}]}}}
NULL_CNTNR = TT.anyconfig.dicts.convert_to({})
class MyODict(anyconfig.compat.OrderedDict):
    """OrderedDict subclass used to exercise the ac_dict load option."""
    pass
def _is_file_object(obj):
try:
return isinstance(obj, file)
except NameError: # python 3.x
return isinstance(obj, io.IOBase)
class Test_10_find_loader(unittest.TestCase):

    def _assert_isinstance(self, obj, cls, msg=False):
        """Assert that `obj` is an instance of `cls`."""
        self.assertTrue(isinstance(obj, cls), msg or repr(obj))

    def test_10_find_loader__w_parser_type_or_instance_or_by_file(self):
        cpath = "dummy.conf"
        for parser in anyconfig.backends.Parsers().list():
            # Lookup must work by type name, by instance and by extension.
            self._assert_isinstance(TT.find_loader(cpath, parser.type()),
                                    parser)
            self._assert_isinstance(TT.find_loader(cpath, parser()), parser)
            for ext in parser.extensions():
                self._assert_isinstance(TT.find_loader("dummy." + ext),
                                        parser,
                                        "ext=%s, psr=%r" % (ext, parser))

    def test_30_find_loader__unknown_parser_type(self):
        with self.assertRaises(TT.UnknownProcessorTypeError):
            TT.find_loader("a.cnf", "type_not_exist")

    def test_40_find_loader__unknown_file_type(self):
        with self.assertRaises(TT.UnknownFileTypeError):
            TT.find_loader("dummy.ext_not_found")
class TestBase(unittest.TestCase):
    # Base config, an update to be merged over it, and a shared assertion.
    cnf = dic = dict(a=1, b=dict(b=[0, 1], c="C"), name="a")
    upd = dict(a=2, b=dict(b=[1, 2, 3, 4, 5], d="D"), e=0)

    def assert_dicts_equal(self, dic, ref, ordered=False):
        """Assert the two mappings are equal per tests.common.dicts_equal."""
        msg = "%r%s vs.%s%r" % (dic, os.linesep, os.linesep, ref)
        self.assertTrue(dicts_equal(dic, ref, ordered=ordered), msg)
class Test_20_dumps_and_loads(TestBase):

    def test_30_dumps_and_loads(self):
        """Round-trip through a JSON string."""
        cnf = TT.loads(TT.dumps(self.cnf, "json"), "json")
        self.assert_dicts_equal(cnf, self.cnf)

    def test_30_dumps_and_loads__w_options(self):
        """Round-trip with backend-specific keyword options."""
        cnf = TT.loads(TT.dumps(self.cnf, "json", indent=2), "json",
                       ensure_ascii=False)
        self.assert_dicts_equal(cnf, self.cnf)

    def test_40_loads_wo_type(self):
        # Without a parser type the content cannot be parsed at all.
        self.assertTrue(TT.loads("requires:bash,zsh") is None)

    def test_42_loads_w_type_not_exist(self):
        self.assertRaises(TT.UnknownProcessorTypeError,
                          TT.loads, "requires:bash,zsh", "type_not_exist")

    def test_44_loads_w_type__template(self):
        if not anyconfig.template.SUPPORTED:
            return
        ctx = dict(requires=["bash", "zsh"])
        content = "requires: [{{ requires|join(', ') }}]"
        cnf = TT.loads(content, ac_parser="yaml", ac_template=True,
                       ac_context=ctx)
        self.assertEqual(cnf["requires"], ctx["requires"])

    def test_46_loads_w_type__broken_template(self):
        if not anyconfig.template.SUPPORTED:
            return
        # A broken template must fall back to the literal content.
        content = 'requires: "{% }}"'
        cnf = TT.loads(content, ac_parser="yaml", ac_template=True,
                       ac_context={})
        self.assertEqual(cnf["requires"], "{% }}")

    def test_48_loads_w_validation(self):
        cnf_s = TT.dumps(CNF_0, "json")
        scm_s = TT.dumps(SCM_0, "json")
        cnf = TT.loads(cnf_s, ac_parser="json", ac_context={},
                       ac_schema=scm_s)
        for key in ("name", "a"):
            self.assertEqual(cnf[key], CNF_0[key])
        for key in ("b", "c"):
            self.assertEqual(cnf["b"][key], CNF_0["b"][key])

    def test_49_loads_w_validation_error(self):
        scm_s = TT.dumps(SCM_0, "json")
        cnf = TT.loads('{"a": "aaa"}', ac_parser="json", ac_schema=scm_s)
        self.assertTrue(cnf is None, cnf)
class Test_22_single_load(TestBase):
    # Loads a pre-existing JSON resource file in several ways.
    a_path = os.path.join(resdir(), "00-cnf.json")
    cnf = CNF_1
    pathlib = anyconfig.compat.pathlib

    def test_10__single_load(self):
        res = TT.single_load(self.a_path)
        self.assert_dicts_equal(res, self.cnf)

    def test_12__single_load__ac_parser(self):
        res = TT.single_load(self.a_path, ac_parser="json")
        self.assert_dicts_equal(res, self.cnf)

    def test_20__single_load__stream(self):
        # Close the stream deterministically instead of leaking the handle
        # until GC (the original `open(...)` was never closed).
        with open(self.a_path) as strm:
            res = TT.single_load(strm, ac_parser="json")
        self.assert_dicts_equal(res, self.cnf)

    def test_30__single_load__pathlib(self):
        if self.pathlib:
            pobj = self.pathlib.Path(self.a_path)
            res = TT.single_load(pobj)
            self.assert_dicts_equal(res, self.cnf)
class TestBaseWithIO(TestBase):
    # Adds a per-test scratch directory and a default JSON config path.

    def setUp(self):
        """Create a working dir, a target path and the expected result."""
        self.workdir = tests.common.setup_workdir()
        self.a_path = os.path.join(self.workdir, "a.json")
        self.exp = copy.deepcopy(self.dic)

    def tearDown(self):
        """Remove the per-test working directory."""
        tests.common.cleanup_workdir(self.workdir)
class Test_30_single_load(TestBaseWithIO):
    """dump/single_load round-trips through files, streams and templates."""

    def test_10_dump_and_single_load(self):
        TT.dump(self.cnf, self.a_path)
        self.assertTrue(os.path.exists(self.a_path))
        res = TT.single_load(self.a_path)
        self.assert_dicts_equal(res, self.cnf)

    def test_11_dump_and_single_load__to_from_stream(self):
        # Close the streams deterministically; relying on GC to flush the
        # written data is fragile (the read below may see a partial file).
        with TT.open(self.a_path, mode='w') as out:
            TT.dump(self.cnf, out)
        self.assertTrue(os.path.exists(self.a_path))
        with TT.open(self.a_path) as inp:
            res = TT.single_load(inp)
        self.assert_dicts_equal(res, self.cnf)

    def test_12_dump_and_single_load__no_parser(self):
        self.assertRaises(TT.UnknownFileTypeError,
                          TT.single_load, "dummy.ext_not_exist")

    def test_14_single_load__ignore_missing(self):
        cpath = os.path.join(os.curdir, "conf_file_should_not_exist")
        assert not os.path.exists(cpath)
        self.assertEqual(TT.single_load(cpath, "ini", ac_ignore_missing=True),
                         NULL_CNTNR)

    def test_15_single_load__fail_to_render_template(self):
        if not anyconfig.template.SUPPORTED:
            return
        cnf_s = "name: '{{ name'"  # Should cause a template rendering error.
        cpath = os.path.join(self.workdir, "a.yaml")
        with TT.open(cpath, mode='w') as out:  # was leaked/unflushed before
            out.write(cnf_s)
        cnf = TT.single_load(cpath, ac_template=True, ac_context=dict(a=1))
        self.assertEqual(cnf["name"], "{{ name")

    def test_16_single_load__template(self):
        if not anyconfig.template.SUPPORTED:
            return
        cpath = os.path.join(self.workdir, "a.yaml")
        with TT.open(cpath, mode='w') as out:  # was leaked/unflushed before
            out.write(CNF_TMPL_0)
        cnf = TT.single_load(cpath, ac_template=True, ac_context=self.cnf)
        self.assert_dicts_equal(cnf, self.cnf)
        spath = os.path.join(self.workdir, "scm.json")
        TT.dump(dict(type="integer"), spath)  # Validation should fail.
        cnf2 = TT.single_load(cpath, ac_template=True, ac_context=self.cnf,
                              ac_schema=spath)
        self.assertTrue(cnf2 is None)

    def test_18_single_load__templates(self):
        if not anyconfig.template.SUPPORTED:
            return
        a_path = os.path.join(self.workdir, "a.yml")
        b_path = os.path.join(self.workdir, "b.yml")
        a2_path = os.path.join(self.workdir, "x/y/z", "a.yml")
        # Write all template files and close them before loading.
        with open(a_path, 'w') as out:
            out.write("{% include 'b.yml' %}")
        with open(b_path, 'w') as out:
            out.write(CNF_TMPL_0)
        os.makedirs(os.path.dirname(a2_path))
        with open(a2_path, 'w') as out:
            out.write("a: 'xyz'")
        cnf1 = TT.single_load(a_path, ac_template=True, ac_context=self.cnf)
        self.assertTrue(dicts_equal(self.cnf, cnf1), str(cnf1))
        cnf2 = TT.single_load(a2_path, ac_template=True)
        self.assertEqual(cnf2["a"], "xyz")

    def test_19_dump_and_single_load_with_validation(self):
        cnf = CNF_0
        scm = SCM_0
        cnf_path = os.path.join(self.workdir, "cnf_19.json")
        scm_path = os.path.join(self.workdir, "scm_19.json")
        TT.dump(cnf, cnf_path)
        TT.dump(scm, scm_path)
        self.assertTrue(os.path.exists(cnf_path))
        self.assertTrue(os.path.exists(scm_path))
        cnf_1 = TT.single_load(cnf_path, ac_schema=scm_path)
        self.assertFalse(cnf_1 is None)  # Validation should succeed.
        self.assertTrue(dicts_equal(cnf_1, cnf), cnf_1)
        cnf_2 = cnf.copy()
        cnf_2["a"] = "aaa"  # Its type should be integer, not string.
        cnf_2_path = os.path.join(self.workdir, "cnf_19_2.json")
        TT.dump(cnf_2, cnf_2_path)
        self.assertTrue(os.path.exists(cnf_2_path))
        cnf_3 = TT.single_load(cnf_2_path, ac_schema=scm_path)
        self.assertTrue(cnf_3 is None)  # Validation should fail.

    def test_20_dump_and_single_load__w_ordered_option(self):
        TT.dump(self.cnf, self.a_path)
        self.assertTrue(os.path.exists(self.a_path))
        # It works w/ the JSON backend but some backends cannot keep the
        # order of items and the tests might fail.
        res = TT.single_load(self.a_path, ac_ordered=True)
        self.assert_dicts_equal(res, self.cnf, ordered=True)
        self.assertTrue(isinstance(res, anyconfig.compat.OrderedDict))

    def test_22_dump_and_single_load__w_ac_dict_option(self):
        TT.dump(self.cnf, self.a_path)
        self.assertTrue(os.path.exists(self.a_path))
        res = TT.single_load(self.a_path, ac_dict=MyODict)
        self.assert_dicts_equal(res, self.cnf, ordered=True)
        self.assertTrue(isinstance(res, MyODict))
class Test_32_single_load(unittest.TestCase):
    # Round-trip CNF_XML_1 through opened file objects, per backend.
    cnf = CNF_XML_1

    def setUp(self):
        self.workdir = tests.common.setup_workdir()

    def tearDown(self):
        tests.common.cleanup_workdir(self.workdir)

    def _load_and_dump_with_opened_files(self, filename, rmode='r', wmode='w',
                                         **oopts):
        """Dump then load `self.cnf` through TT.open-ed file objects."""
        cpath = os.path.join(self.workdir, filename)
        with TT.open(cpath, 'w', **oopts) as out:
            TT.dump(self.cnf, out)
            self.assertTrue(_is_file_object(out))
            self.assertEqual(out.mode, wmode)
        with TT.open(cpath, 'rb', **oopts) as inp:
            loaded = TT.single_load(inp)
            self.assertTrue(_is_file_object(inp))
            self.assertEqual(inp.mode, rmode)
            self.assertTrue(dicts_equal(self.cnf, loaded),
                            "%r vs. %r" % (self.cnf, loaded))

    def test_10_open_json_file(self):
        self._load_and_dump_with_opened_files("a.json")

    def test_20_open_xml_file(self):
        if "xml" not in TT.list_types():
            return
        self._load_and_dump_with_opened_files("a.xml", 'rb', 'wb')

    def test_30_open_bson_file(self):
        if "bson" not in TT.list_types():
            return
        self._load_and_dump_with_opened_files("a.bson", 'rb', 'wb')

    def test_40_open_yaml_file(self):
        if "yaml" not in TT.list_types():
            return
        self._load_and_dump_with_opened_files("a.yaml")
        self._load_and_dump_with_opened_files("a.yml")
class Test_34_single_load(TestBaseWithIO):

    def test_10_single_load_w_validation(self):
        """Load with a JSON schema and spot-check the validated result."""
        cnf_path = os.path.join(self.workdir, "cnf.json")
        scm_path = os.path.join(self.workdir, "scm.json")
        TT.dump(CNF_0, cnf_path)
        TT.dump(SCM_0, scm_path)
        cnf = TT.single_load(cnf_path, ac_context={}, ac_schema=scm_path)
        for key in ("name", "a"):
            self.assertEqual(cnf[key], CNF_0[key])
        for key in ("b", "c"):
            self.assertEqual(cnf["b"][key], CNF_0["b"][key])

    def test_20_single_load_w_query(self):
        """Exercise the ac_query (JMESPath) option when available."""
        cpath = os.path.join(self.workdir, "cnf.json")
        TT.dump(CNF_0, cpath)
        try:
            if not TT.query.jmespath:
                return
            cases = [("a", 1), ("b.b", [1, 2]), ("b.b[1]", 2),
                     ("b.b[1:]", [2]), ("b.b[::-1]", [2, 1]),
                     ("length(b.b)", 2)]
            for query, expected in cases:
                self.assertEqual(TT.single_load(cpath, ac_query=query),
                                 expected)
        except (NameError, AttributeError):
            pass  # jmespath is not available.
class TestBaseWithIOMultiFiles(TestBaseWithIO):

    def setUp(self):
        """Add a second JSON path and a glob pattern matching both."""
        super(TestBaseWithIOMultiFiles, self).setUp()
        self.b_path = os.path.join(self.workdir, "b.json")
        self.g_path = os.path.join(self.workdir, "*.json")
        # Expected merge result, assuming the MS_DICTS strategy was used.
        expected = copy.deepcopy(self.upd)
        expected["b"]["c"] = self.dic["b"]["c"]
        expected["name"] = self.dic["name"]
        self.exp = expected
class Test_40_multi_load_with_strategies(TestBaseWithIOMultiFiles):

    def _check_multi_load_with_strategy(self, exp, merge=TT.MS_DICTS):
        """Dump dic/upd and verify multi_load results under `merge`."""
        TT.dump(self.dic, self.a_path)
        TT.dump(self.upd, self.b_path)
        self.assertTrue(os.path.exists(self.a_path))
        self.assertTrue(os.path.exists(self.b_path))
        res0 = TT.multi_load(self.g_path, ac_merge=merge)
        res1 = TT.multi_load([self.g_path, self.b_path], ac_merge=merge)
        self.assertTrue(res0)
        self.assertTrue(res1)
        self.assert_dicts_equal(res0, exp)
        self.assert_dicts_equal(res1, exp)

    def test_10_default_merge_strategy(self):
        # The default (merge=None) must behave like MS_DICTS.
        exp = copy.deepcopy(self.upd)
        exp["b"]["c"] = self.dic["b"]["c"]
        exp["name"] = self.dic["name"]
        self._check_multi_load_with_strategy(exp, merge=None)
        self._check_multi_load_with_strategy(exp)

    def test_20_merge_dicts_and_lists(self):
        exp = copy.deepcopy(self.upd)
        exp["b"]["b"] = [0] + self.upd["b"]["b"]
        exp["b"]["c"] = self.dic["b"]["c"]
        exp["name"] = self.dic["name"]
        self._check_multi_load_with_strategy(exp, merge=TT.MS_DICTS_AND_LISTS)

    def test_30_merge_with_replace(self):
        exp = copy.deepcopy(self.upd)
        exp["name"] = self.dic["name"]
        self._check_multi_load_with_strategy(exp, merge=TT.MS_REPLACE)

    def test_40_merge_wo_replace(self):
        exp = copy.deepcopy(self.dic)
        exp["e"] = self.upd["e"]
        self._check_multi_load_with_strategy(exp, merge=TT.MS_NO_REPLACE)

    def test_60_wrong_merge_strategy(self):
        cpath = os.path.join(self.workdir, "a.json")
        TT.dump(dict(a=1, b=2), cpath)
        # assertRaises replaces the manual try/raise-RuntimeError pattern
        # (and the no-op `assertTrue(1 == 1)`) from the original.
        self.assertRaises(ValueError, TT.multi_load, [cpath, cpath],
                          ac_merge="merge_st_not_exist")
class Test_42_multi_load(TestBaseWithIOMultiFiles):

    def test_10_multi_load__empty_path_list(self):
        self.assertEqual(TT.multi_load([]), NULL_CNTNR)

    def test_20_dump_and_multi_load__mixed_file_types(self):
        c_path = os.path.join(self.workdir, "c.yml")
        TT.dump(self.dic, self.a_path)  # JSON
        try:
            TT.dump(self.upd, c_path)  # YAML
        except (TT.UnknownProcessorTypeError, TT.UnknownFileTypeError):
            return  # YAML backend is not available in this env.
        self.assertTrue(os.path.exists(self.a_path))
        self.assertTrue(os.path.exists(c_path))
        res = TT.multi_load([self.a_path, c_path])
        self.assert_dicts_equal(res, self.exp)

    def test_30_dump_and_multi_load__to_from_stream(self):
        TT.dump(self.dic, self.a_path)
        TT.dump(self.upd, self.b_path)
        # Close the input streams deterministically instead of leaking them.
        with TT.open(self.a_path) as strm_a, TT.open(self.b_path) as strm_b:
            res = TT.multi_load([strm_a, strm_b])
        self.assert_dicts_equal(res, self.exp)

    def test_40_multi_load__ignore_missing(self):
        cpath = os.path.join(os.curdir, "conf_file_should_not_exist")
        assert not os.path.exists(cpath)
        self.assertEqual(TT.multi_load([cpath], ac_parser="ini",
                                       ac_ignore_missing=True),
                         NULL_CNTNR)
        # Will be removed after 'ignore_missing' is deprecated and removed.
        self.assertEqual(TT.multi_load([cpath], ac_parser="ini",
                                       ignore_missing=True),
                         NULL_CNTNR)

    def test_50_multi_load__templates(self):
        if not anyconfig.template.SUPPORTED:
            return
        ctx = self.dic.copy()
        TT.merge(ctx, self.upd, ac_merge=TT.MS_DICTS)
        a_path = self.a_path.replace(".json", ".yml")
        b_path = self.b_path.replace(".json", ".yml")
        g_path = self.g_path.replace(".json", ".yml")
        # Write the templates and flush/close them before loading back
        # (the original leaked both write handles).
        with TT.open(a_path, mode='w') as out:
            out.write(CNF_TMPL_1)
        with TT.open(b_path, mode='w') as out:
            out.write(CNF_TMPL_2)
        opts = dict(ac_merge=TT.MS_DICTS, ac_template=True, ac_context=ctx)
        try:
            res0 = TT.multi_load(g_path, **opts)
            res1 = TT.multi_load([g_path, b_path], **opts)
        except (TT.UnknownProcessorTypeError, TT.UnknownFileTypeError):
            return
        self.assert_dicts_equal(res0, self.exp)
        self.assert_dicts_equal(res1, self.exp)

    def test_60_multi_load__w_ac_dict_option(self):
        TT.dump(self.dic, self.a_path)
        TT.dump(self.upd, self.b_path)
        res = TT.multi_load(self.g_path, ac_dict=MyODict)
        self.assert_dicts_equal(res, self.exp)
        self.assertTrue(isinstance(res, MyODict))
class Test_44_multi_load(TestBase):

    def test_10_multi_load_w_validation_for_partial_single_config_files(self):
        """Schema validation across multiple partial config files."""
        names = ("00-00-cnf.json", "00-01-cnf.json", "00-02-cnf.json")
        cpaths = [os.path.join(resdir(), name) for name in names]
        spath = os.path.join(resdir(), "00-scm.json")
        cnf = TT.multi_load(cpaths, ac_schema=spath)
        ref = TT.multi_load(cpaths)
        self.assert_dicts_equal(cnf, ref, ordered=False)
class Test_50_load_and_dump(TestBaseWithIOMultiFiles):

    def test_30_dump_and_load(self):
        TT.dump(self.dic, self.a_path)
        TT.dump(self.upd, self.b_path)
        for path in (self.a_path, self.b_path):
            self.assertTrue(os.path.exists(path))
        self.assert_dicts_equal(TT.load(self.a_path), self.dic)
        self.assert_dicts_equal(TT.load(self.g_path), self.exp)
        self.assert_dicts_equal(TT.load([self.a_path, self.b_path]),
                                self.exp)

    def test_31_dump_and_load__to_from_stream(self):
        with TT.open(self.a_path, mode='w') as strm:
            TT.dump(self.dic, strm)
        self.assertTrue(os.path.exists(self.a_path))
        with TT.open(self.a_path) as strm:
            loaded = TT.load(strm, ac_parser="json")
        self.assert_dicts_equal(loaded, self.dic)

    def test_32_dump_and_load__w_options(self):
        TT.dump(self.dic, self.a_path, indent=2)
        self.assertTrue(os.path.exists(self.a_path))
        TT.dump(self.upd, self.b_path, indent=2)
        self.assertTrue(os.path.exists(self.b_path))
        self.assert_dicts_equal(TT.load(self.a_path, parse_int=int),
                                copy.deepcopy(self.dic))
        self.assert_dicts_equal(TT.load(self.g_path, parse_int=int),
                                copy.deepcopy(self.exp))
        self.assert_dicts_equal(TT.load([self.a_path, self.b_path],
                                        parse_int=int),
                                copy.deepcopy(self.exp))

    def test_34_load__ignore_missing(self):
        cpath = os.path.join(os.curdir, "conf_file_should_not_exist")
        assert not os.path.exists(cpath)
        cnf = TT.load([cpath], ac_parser="ini", ignore_missing=True)
        self.assertEqual(cnf, NULL_CNTNR)

    def _assert_load_w_validation(self, cnf_fn, scm_fn):
        """Dump CNF_0/SCM_0 and load back with schema validation."""
        cnf_path = os.path.join(self.workdir, cnf_fn)
        scm_path = os.path.join(self.workdir, scm_fn)
        TT.dump(CNF_0, cnf_path)
        TT.dump(SCM_0, scm_path)
        cnf = TT.load(cnf_path, ac_context={}, ac_schema=scm_path)
        for key in ("name", "a"):
            self.assertEqual(cnf[key], CNF_0[key])
        for key in ("b", "c"):
            self.assertEqual(cnf["b"][key], CNF_0["b"][key])

    def test_36_load_w_validation(self):
        self._assert_load_w_validation("cnf.json", "scm.json")

    def test_38_load_w_validation_yaml(self):
        self._assert_load_w_validation("cnf.yml", "scm.yml")

    def test_39_single_load__w_validation(self):
        cpath = os.path.join(self.workdir, "cnf.json")
        spath = os.path.join(self.workdir, "scm.json")
        TT.dump(CNF_0, cpath)
        TT.dump(SCM_0, spath)
        loaded = TT.single_load(cpath, ac_schema=spath)
        self.assert_dicts_equal(CNF_0, loaded)

    def test_40_load_w_query(self):
        cnf_path = os.path.join(self.workdir, "cnf.json")
        TT.dump(CNF_0, cnf_path)
        try:
            if not TT.query.jmespath:
                return
            cases = [("a", 1), ("b.b", [1, 2]), ("b.b[1]", 2),
                     ("b.b[1:]", [2]), ("b.b[::-1]", [2, 1]),
                     ("length(b.b)", 2)]
            for query, expected in cases:
                self.assertEqual(TT.load(cnf_path, ac_query=query), expected)
        except (NameError, AttributeError):
            pass  # jmespath is not available.
# vim:sw=4:ts=4:et:
|
#
# Copyright (C) 2012 - 2017 <NAME> <ssato at redhat.com>
# License: MIT
#
# pylint: disable=missing-docstring, invalid-name, no-member
from __future__ import absolute_import
import copy
import logging
import io
import os
import os.path
import unittest
import anyconfig.api as TT
import anyconfig.backends
import anyconfig.compat
import anyconfig.dicts
import anyconfig.template
import tests.common
from tests.common import CNF_0, SCM_0, CNF_1, dicts_equal, resdir
# suppress logging messages.
TT.LOGGER.setLevel(logging.CRITICAL)
CNF_TMPL_0 = """name: {{ name|default('a') }}
a: {{ a }}
b:
b:
{% for x in b.b -%}
- {{ x }}
{% endfor %}
c: {{ b.c }}
"""
CNF_TMPL_1 = """a: {{ a }}
b:
b:
{% for x in b.b -%}
- {{ x }}
{% endfor %}
c: {{ b.c }}
name: {{ name }}
"""
CNF_TMPL_2 = """a: {{ a }}
b:
b:
{% for x in b.b -%}
- {{ x }}
{% endfor %}
d: {{ b.d }}
e: 0
"""
CNF_XML_1 = {'config': {'@attrs': {'name': 'foo'},
'a': '0',
'b': {'@attrs': {'id': 'b0'}, '@text': 'bbb'},
'c': None,
'sect0': {'d': 'x, y, z'},
'list1': [{'item': '0'}, {'item': '1'},
{'item': '2'}],
'list2': {'@attrs': {'id': 'list2'},
'@children': [{'item': 'i'},
{'item': 'j'}]}}}
NULL_CNTNR = TT.anyconfig.dicts.convert_to({})
class MyODict(anyconfig.compat.OrderedDict):
    """OrderedDict subclass used to exercise the ac_dict load option."""
    pass
def _is_file_object(obj):
try:
return isinstance(obj, file)
except NameError: # python 3.x
return isinstance(obj, io.IOBase)
class Test_10_find_loader(unittest.TestCase):

    def _assert_isinstance(self, obj, cls, msg=False):
        """Assert that `obj` is an instance of `cls`."""
        self.assertTrue(isinstance(obj, cls), msg or repr(obj))

    def test_10_find_loader__w_parser_type_or_instance_or_by_file(self):
        cpath = "dummy.conf"
        for parser in anyconfig.backends.Parsers().list():
            # Lookup must work by type name, by instance and by extension.
            self._assert_isinstance(TT.find_loader(cpath, parser.type()),
                                    parser)
            self._assert_isinstance(TT.find_loader(cpath, parser()), parser)
            for ext in parser.extensions():
                self._assert_isinstance(TT.find_loader("dummy." + ext),
                                        parser,
                                        "ext=%s, psr=%r" % (ext, parser))

    def test_30_find_loader__unknown_parser_type(self):
        with self.assertRaises(TT.UnknownProcessorTypeError):
            TT.find_loader("a.cnf", "type_not_exist")

    def test_40_find_loader__unknown_file_type(self):
        with self.assertRaises(TT.UnknownFileTypeError):
            TT.find_loader("dummy.ext_not_found")
class TestBase(unittest.TestCase):
    # Base config, an update to be merged over it, and a shared assertion.
    cnf = dic = dict(a=1, b=dict(b=[0, 1], c="C"), name="a")
    upd = dict(a=2, b=dict(b=[1, 2, 3, 4, 5], d="D"), e=0)

    def assert_dicts_equal(self, dic, ref, ordered=False):
        """Assert the two mappings are equal per tests.common.dicts_equal."""
        msg = "%r%s vs.%s%r" % (dic, os.linesep, os.linesep, ref)
        self.assertTrue(dicts_equal(dic, ref, ordered=ordered), msg)
class Test_20_dumps_and_loads(TestBase):

    def test_30_dumps_and_loads(self):
        """Round-trip through a JSON string."""
        cnf = TT.loads(TT.dumps(self.cnf, "json"), "json")
        self.assert_dicts_equal(cnf, self.cnf)

    def test_30_dumps_and_loads__w_options(self):
        """Round-trip with backend-specific keyword options."""
        cnf = TT.loads(TT.dumps(self.cnf, "json", indent=2), "json",
                       ensure_ascii=False)
        self.assert_dicts_equal(cnf, self.cnf)

    def test_40_loads_wo_type(self):
        # Without a parser type the content cannot be parsed at all.
        self.assertTrue(TT.loads("requires:bash,zsh") is None)

    def test_42_loads_w_type_not_exist(self):
        self.assertRaises(TT.UnknownProcessorTypeError,
                          TT.loads, "requires:bash,zsh", "type_not_exist")

    def test_44_loads_w_type__template(self):
        if not anyconfig.template.SUPPORTED:
            return
        ctx = dict(requires=["bash", "zsh"])
        content = "requires: [{{ requires|join(', ') }}]"
        cnf = TT.loads(content, ac_parser="yaml", ac_template=True,
                       ac_context=ctx)
        self.assertEqual(cnf["requires"], ctx["requires"])

    def test_46_loads_w_type__broken_template(self):
        if not anyconfig.template.SUPPORTED:
            return
        # A broken template must fall back to the literal content.
        content = 'requires: "{% }}"'
        cnf = TT.loads(content, ac_parser="yaml", ac_template=True,
                       ac_context={})
        self.assertEqual(cnf["requires"], "{% }}")

    def test_48_loads_w_validation(self):
        cnf_s = TT.dumps(CNF_0, "json")
        scm_s = TT.dumps(SCM_0, "json")
        cnf = TT.loads(cnf_s, ac_parser="json", ac_context={},
                       ac_schema=scm_s)
        for key in ("name", "a"):
            self.assertEqual(cnf[key], CNF_0[key])
        for key in ("b", "c"):
            self.assertEqual(cnf["b"][key], CNF_0["b"][key])

    def test_49_loads_w_validation_error(self):
        scm_s = TT.dumps(SCM_0, "json")
        cnf = TT.loads('{"a": "aaa"}', ac_parser="json", ac_schema=scm_s)
        self.assertTrue(cnf is None, cnf)
class Test_22_single_load(TestBase):
    # Loads a pre-existing JSON resource file in several ways.
    a_path = os.path.join(resdir(), "00-cnf.json")
    cnf = CNF_1
    pathlib = anyconfig.compat.pathlib

    def test_10__single_load(self):
        res = TT.single_load(self.a_path)
        self.assert_dicts_equal(res, self.cnf)

    def test_12__single_load__ac_parser(self):
        res = TT.single_load(self.a_path, ac_parser="json")
        self.assert_dicts_equal(res, self.cnf)

    def test_20__single_load__stream(self):
        # Close the stream deterministically instead of leaking the handle
        # until GC (the original `open(...)` was never closed).
        with open(self.a_path) as strm:
            res = TT.single_load(strm, ac_parser="json")
        self.assert_dicts_equal(res, self.cnf)

    def test_30__single_load__pathlib(self):
        if self.pathlib:
            pobj = self.pathlib.Path(self.a_path)
            res = TT.single_load(pobj)
            self.assert_dicts_equal(res, self.cnf)
class TestBaseWithIO(TestBase):
    # Adds a per-test scratch directory and a default JSON config path.

    def setUp(self):
        """Create a working dir, a target path and the expected result."""
        self.workdir = tests.common.setup_workdir()
        self.a_path = os.path.join(self.workdir, "a.json")
        self.exp = copy.deepcopy(self.dic)

    def tearDown(self):
        """Remove the per-test working directory."""
        tests.common.cleanup_workdir(self.workdir)
class Test_30_single_load(TestBaseWithIO):
    """dump/single_load round-trips through files, streams and templates."""

    def test_10_dump_and_single_load(self):
        TT.dump(self.cnf, self.a_path)
        self.assertTrue(os.path.exists(self.a_path))
        res = TT.single_load(self.a_path)
        self.assert_dicts_equal(res, self.cnf)

    def test_11_dump_and_single_load__to_from_stream(self):
        # Close the streams deterministically; relying on GC to flush the
        # written data is fragile (the read below may see a partial file).
        with TT.open(self.a_path, mode='w') as out:
            TT.dump(self.cnf, out)
        self.assertTrue(os.path.exists(self.a_path))
        with TT.open(self.a_path) as inp:
            res = TT.single_load(inp)
        self.assert_dicts_equal(res, self.cnf)

    def test_12_dump_and_single_load__no_parser(self):
        self.assertRaises(TT.UnknownFileTypeError,
                          TT.single_load, "dummy.ext_not_exist")

    def test_14_single_load__ignore_missing(self):
        cpath = os.path.join(os.curdir, "conf_file_should_not_exist")
        assert not os.path.exists(cpath)
        self.assertEqual(TT.single_load(cpath, "ini", ac_ignore_missing=True),
                         NULL_CNTNR)

    def test_15_single_load__fail_to_render_template(self):
        if not anyconfig.template.SUPPORTED:
            return
        cnf_s = "name: '{{ name'"  # Should cause a template rendering error.
        cpath = os.path.join(self.workdir, "a.yaml")
        with TT.open(cpath, mode='w') as out:  # was leaked/unflushed before
            out.write(cnf_s)
        cnf = TT.single_load(cpath, ac_template=True, ac_context=dict(a=1))
        self.assertEqual(cnf["name"], "{{ name")

    def test_16_single_load__template(self):
        if not anyconfig.template.SUPPORTED:
            return
        cpath = os.path.join(self.workdir, "a.yaml")
        with TT.open(cpath, mode='w') as out:  # was leaked/unflushed before
            out.write(CNF_TMPL_0)
        cnf = TT.single_load(cpath, ac_template=True, ac_context=self.cnf)
        self.assert_dicts_equal(cnf, self.cnf)
        spath = os.path.join(self.workdir, "scm.json")
        TT.dump(dict(type="integer"), spath)  # Validation should fail.
        cnf2 = TT.single_load(cpath, ac_template=True, ac_context=self.cnf,
                              ac_schema=spath)
        self.assertTrue(cnf2 is None)

    def test_18_single_load__templates(self):
        if not anyconfig.template.SUPPORTED:
            return
        a_path = os.path.join(self.workdir, "a.yml")
        b_path = os.path.join(self.workdir, "b.yml")
        a2_path = os.path.join(self.workdir, "x/y/z", "a.yml")
        # Write all template files and close them before loading.
        with open(a_path, 'w') as out:
            out.write("{% include 'b.yml' %}")
        with open(b_path, 'w') as out:
            out.write(CNF_TMPL_0)
        os.makedirs(os.path.dirname(a2_path))
        with open(a2_path, 'w') as out:
            out.write("a: 'xyz'")
        cnf1 = TT.single_load(a_path, ac_template=True, ac_context=self.cnf)
        self.assertTrue(dicts_equal(self.cnf, cnf1), str(cnf1))
        cnf2 = TT.single_load(a2_path, ac_template=True)
        self.assertEqual(cnf2["a"], "xyz")

    def test_19_dump_and_single_load_with_validation(self):
        cnf = CNF_0
        scm = SCM_0
        cnf_path = os.path.join(self.workdir, "cnf_19.json")
        scm_path = os.path.join(self.workdir, "scm_19.json")
        TT.dump(cnf, cnf_path)
        TT.dump(scm, scm_path)
        self.assertTrue(os.path.exists(cnf_path))
        self.assertTrue(os.path.exists(scm_path))
        cnf_1 = TT.single_load(cnf_path, ac_schema=scm_path)
        self.assertFalse(cnf_1 is None)  # Validation should succeed.
        self.assertTrue(dicts_equal(cnf_1, cnf), cnf_1)
        cnf_2 = cnf.copy()
        cnf_2["a"] = "aaa"  # Its type should be integer, not string.
        cnf_2_path = os.path.join(self.workdir, "cnf_19_2.json")
        TT.dump(cnf_2, cnf_2_path)
        self.assertTrue(os.path.exists(cnf_2_path))
        cnf_3 = TT.single_load(cnf_2_path, ac_schema=scm_path)
        self.assertTrue(cnf_3 is None)  # Validation should fail.

    def test_20_dump_and_single_load__w_ordered_option(self):
        TT.dump(self.cnf, self.a_path)
        self.assertTrue(os.path.exists(self.a_path))
        # It works w/ the JSON backend but some backends cannot keep the
        # order of items and the tests might fail.
        res = TT.single_load(self.a_path, ac_ordered=True)
        self.assert_dicts_equal(res, self.cnf, ordered=True)
        self.assertTrue(isinstance(res, anyconfig.compat.OrderedDict))

    def test_22_dump_and_single_load__w_ac_dict_option(self):
        TT.dump(self.cnf, self.a_path)
        self.assertTrue(os.path.exists(self.a_path))
        res = TT.single_load(self.a_path, ac_dict=MyODict)
        self.assert_dicts_equal(res, self.cnf, ordered=True)
        self.assertTrue(isinstance(res, MyODict))
class Test_32_single_load(unittest.TestCase):
    # Round-trip CNF_XML_1 through opened file objects, per backend.
    cnf = CNF_XML_1

    def setUp(self):
        self.workdir = tests.common.setup_workdir()

    def tearDown(self):
        tests.common.cleanup_workdir(self.workdir)

    def _load_and_dump_with_opened_files(self, filename, rmode='r', wmode='w',
                                         **oopts):
        """Dump then load `self.cnf` through TT.open-ed file objects."""
        cpath = os.path.join(self.workdir, filename)
        with TT.open(cpath, 'w', **oopts) as out:
            TT.dump(self.cnf, out)
            self.assertTrue(_is_file_object(out))
            self.assertEqual(out.mode, wmode)
        with TT.open(cpath, 'rb', **oopts) as inp:
            loaded = TT.single_load(inp)
            self.assertTrue(_is_file_object(inp))
            self.assertEqual(inp.mode, rmode)
            self.assertTrue(dicts_equal(self.cnf, loaded),
                            "%r vs. %r" % (self.cnf, loaded))

    def test_10_open_json_file(self):
        self._load_and_dump_with_opened_files("a.json")

    def test_20_open_xml_file(self):
        if "xml" not in TT.list_types():
            return
        self._load_and_dump_with_opened_files("a.xml", 'rb', 'wb')

    def test_30_open_bson_file(self):
        if "bson" not in TT.list_types():
            return
        self._load_and_dump_with_opened_files("a.bson", 'rb', 'wb')

    def test_40_open_yaml_file(self):
        if "yaml" not in TT.list_types():
            return
        self._load_and_dump_with_opened_files("a.yaml")
        self._load_and_dump_with_opened_files("a.yml")
class Test_34_single_load(TestBaseWithIO):

    def test_10_single_load_w_validation(self):
        """Load with a JSON schema and spot-check the validated result."""
        cnf_path = os.path.join(self.workdir, "cnf.json")
        scm_path = os.path.join(self.workdir, "scm.json")
        TT.dump(CNF_0, cnf_path)
        TT.dump(SCM_0, scm_path)
        cnf = TT.single_load(cnf_path, ac_context={}, ac_schema=scm_path)
        for key in ("name", "a"):
            self.assertEqual(cnf[key], CNF_0[key])
        for key in ("b", "c"):
            self.assertEqual(cnf["b"][key], CNF_0["b"][key])

    def test_20_single_load_w_query(self):
        """Exercise the ac_query (JMESPath) option when available."""
        cpath = os.path.join(self.workdir, "cnf.json")
        TT.dump(CNF_0, cpath)
        try:
            if not TT.query.jmespath:
                return
            cases = [("a", 1), ("b.b", [1, 2]), ("b.b[1]", 2),
                     ("b.b[1:]", [2]), ("b.b[::-1]", [2, 1]),
                     ("length(b.b)", 2)]
            for query, expected in cases:
                self.assertEqual(TT.single_load(cpath, ac_query=query),
                                 expected)
        except (NameError, AttributeError):
            pass  # jmespath is not available.
class TestBaseWithIOMultiFiles(TestBaseWithIO):

    def setUp(self):
        """Add a second JSON path and a glob pattern matching both."""
        super(TestBaseWithIOMultiFiles, self).setUp()
        self.b_path = os.path.join(self.workdir, "b.json")
        self.g_path = os.path.join(self.workdir, "*.json")
        # Expected merge result, assuming the MS_DICTS strategy was used.
        expected = copy.deepcopy(self.upd)
        expected["b"]["c"] = self.dic["b"]["c"]
        expected["name"] = self.dic["name"]
        self.exp = expected
class Test_40_multi_load_with_strategies(TestBaseWithIOMultiFiles):

    def _check_multi_load_with_strategy(self, exp, merge=TT.MS_DICTS):
        """Dump dic/upd and verify multi_load results under `merge`."""
        TT.dump(self.dic, self.a_path)
        TT.dump(self.upd, self.b_path)
        self.assertTrue(os.path.exists(self.a_path))
        self.assertTrue(os.path.exists(self.b_path))
        res0 = TT.multi_load(self.g_path, ac_merge=merge)
        res1 = TT.multi_load([self.g_path, self.b_path], ac_merge=merge)
        self.assertTrue(res0)
        self.assertTrue(res1)
        self.assert_dicts_equal(res0, exp)
        self.assert_dicts_equal(res1, exp)

    def test_10_default_merge_strategy(self):
        # The default (merge=None) must behave like MS_DICTS.
        exp = copy.deepcopy(self.upd)
        exp["b"]["c"] = self.dic["b"]["c"]
        exp["name"] = self.dic["name"]
        self._check_multi_load_with_strategy(exp, merge=None)
        self._check_multi_load_with_strategy(exp)

    def test_20_merge_dicts_and_lists(self):
        exp = copy.deepcopy(self.upd)
        exp["b"]["b"] = [0] + self.upd["b"]["b"]
        exp["b"]["c"] = self.dic["b"]["c"]
        exp["name"] = self.dic["name"]
        self._check_multi_load_with_strategy(exp, merge=TT.MS_DICTS_AND_LISTS)

    def test_30_merge_with_replace(self):
        exp = copy.deepcopy(self.upd)
        exp["name"] = self.dic["name"]
        self._check_multi_load_with_strategy(exp, merge=TT.MS_REPLACE)

    def test_40_merge_wo_replace(self):
        exp = copy.deepcopy(self.dic)
        exp["e"] = self.upd["e"]
        self._check_multi_load_with_strategy(exp, merge=TT.MS_NO_REPLACE)

    def test_60_wrong_merge_strategy(self):
        cpath = os.path.join(self.workdir, "a.json")
        TT.dump(dict(a=1, b=2), cpath)
        # assertRaises replaces the manual try/raise-RuntimeError pattern
        # (and the no-op `assertTrue(1 == 1)`) from the original.
        self.assertRaises(ValueError, TT.multi_load, [cpath, cpath],
                          ac_merge="merge_st_not_exist")
class Test_42_multi_load(TestBaseWithIOMultiFiles):
    """Misc multi_load cases: streams, mixed types, templates, ac_dict."""

    def test_10_multi_load__empty_path_list(self):
        self.assertEqual(TT.multi_load([]), NULL_CNTNR)

    def test_20_dump_and_multi_load__mixed_file_types(self):
        c_path = os.path.join(self.workdir, "c.yml")
        TT.dump(self.dic, self.a_path)  # JSON
        try:
            TT.dump(self.upd, c_path)  # YAML
        except (TT.UnknownProcessorTypeError, TT.UnknownFileTypeError):
            return  # YAML backend is not available in this env.
        self.assertTrue(os.path.exists(self.a_path))
        self.assertTrue(os.path.exists(c_path))
        res = TT.multi_load([self.a_path, c_path])
        self.assert_dicts_equal(res, self.exp)

    def test_30_dump_and_multi_load__to_from_stream(self):
        TT.dump(self.dic, self.a_path)
        TT.dump(self.upd, self.b_path)
        # Close the input streams explicitly instead of leaking open handles.
        with TT.open(self.a_path) as strm_a, TT.open(self.b_path) as strm_b:
            res = TT.multi_load([strm_a, strm_b])
        self.assert_dicts_equal(res, self.exp)

    def test_40_multi_load__ignore_missing(self):
        cpath = os.path.join(os.curdir, "conf_file_should_not_exist")
        assert not os.path.exists(cpath)
        self.assertEqual(TT.multi_load([cpath], ac_parser="ini",
                                       ac_ignore_missing=True),
                         NULL_CNTNR)
        # This form will be removed once the deprecated 'ignore_missing'
        # option is dropped.
        self.assertEqual(TT.multi_load([cpath], ac_parser="ini",
                                       ignore_missing=True),
                         NULL_CNTNR)

    def test_50_multi_load__templates(self):
        if not anyconfig.template.SUPPORTED:
            return
        ctx = self.dic.copy()
        TT.merge(ctx, self.upd, ac_merge=TT.MS_DICTS)
        a_path = self.a_path.replace(".json", ".yml")
        b_path = self.b_path.replace(".json", ".yml")
        g_path = self.g_path.replace(".json", ".yml")
        # Write the templates and close the files before loading them back.
        with TT.open(a_path, mode='w') as strm:
            strm.write(CNF_TMPL_1)
        with TT.open(b_path, mode='w') as strm:
            strm.write(CNF_TMPL_2)
        opts = dict(ac_merge=TT.MS_DICTS, ac_template=True, ac_context=ctx)
        try:
            res0 = TT.multi_load(g_path, **opts)
            res1 = TT.multi_load([g_path, b_path], **opts)
        except (TT.UnknownProcessorTypeError, TT.UnknownFileTypeError):
            return
        self.assert_dicts_equal(res0, self.exp)
        self.assert_dicts_equal(res1, self.exp)

    def test_60_multi_load__w_ac_dict_option(self):
        TT.dump(self.dic, self.a_path)
        TT.dump(self.upd, self.b_path)
        res = TT.multi_load(self.g_path, ac_dict=MyODict)
        self.assert_dicts_equal(res, self.exp)
        self.assertTrue(isinstance(res, MyODict))
class Test_44_multi_load(TestBase):
    """Schema validation over a set of partial single config files."""

    def test_10_multi_load_w_validation_for_partial_single_config_files(self):
        names = ("00-00-cnf.json", "00-01-cnf.json", "00-02-cnf.json")
        cpaths = [os.path.join(resdir(), name) for name in names]
        spath = os.path.join(resdir(), "00-scm.json")
        # Loading with the schema must yield the same config as without it.
        cnf = TT.multi_load(cpaths, ac_schema=spath)
        ref = TT.multi_load(cpaths)
        self.assert_dicts_equal(cnf, ref, ordered=False)
class Test_50_load_and_dump(TestBaseWithIOMultiFiles):
    """Tests of anyconfig.api.load / anyconfig.api.dump round trips."""

    def test_30_dump_and_load(self):
        TT.dump(self.dic, self.a_path)
        TT.dump(self.upd, self.b_path)
        self.assertTrue(os.path.exists(self.a_path))
        self.assertTrue(os.path.exists(self.b_path))
        res = TT.load(self.a_path)
        self.assert_dicts_equal(res, self.dic)
        # A glob pattern and an explicit path list must both merge the files.
        res = TT.load(self.g_path)
        self.assert_dicts_equal(res, self.exp)
        res = TT.load([self.a_path, self.b_path])
        self.assert_dicts_equal(res, self.exp)

    def test_31_dump_and_load__to_from_stream(self):
        with TT.open(self.a_path, mode='w') as strm:
            TT.dump(self.dic, strm)
        self.assertTrue(os.path.exists(self.a_path))
        with TT.open(self.a_path) as strm:
            res = TT.load(strm, ac_parser="json")
        self.assert_dicts_equal(res, self.dic)

    def test_32_dump_and_load__w_options(self):
        TT.dump(self.dic, self.a_path, indent=2)
        self.assertTrue(os.path.exists(self.a_path))
        TT.dump(self.upd, self.b_path, indent=2)
        self.assertTrue(os.path.exists(self.b_path))
        # Backend-specific options such as parse_int are passed through and
        # must not change the loaded results.  (The expected values are
        # compared directly; no defensive copies are needed here.)
        res = TT.load(self.a_path, parse_int=int)
        self.assert_dicts_equal(res, self.dic)
        res = TT.load(self.g_path, parse_int=int)
        self.assert_dicts_equal(res, self.exp)
        res = TT.load([self.a_path, self.b_path], parse_int=int)
        self.assert_dicts_equal(res, self.exp)

    def test_34_load__ignore_missing(self):
        cpath = os.path.join(os.curdir, "conf_file_should_not_exist")
        assert not os.path.exists(cpath)
        self.assertEqual(TT.load([cpath], ac_parser="ini",
                                 ignore_missing=True),
                         NULL_CNTNR)

    def _check_load_w_validation(self, cnf_filename, scm_filename):
        """Dump CNF_0/SCM_0, load with schema validation and verify values."""
        cnf_path = os.path.join(self.workdir, cnf_filename)
        scm_path = os.path.join(self.workdir, scm_filename)
        TT.dump(CNF_0, cnf_path)
        TT.dump(SCM_0, scm_path)
        cnf_2 = TT.load(cnf_path, ac_context={}, ac_schema=scm_path)
        self.assertEqual(cnf_2["name"], CNF_0["name"])
        self.assertEqual(cnf_2["a"], CNF_0["a"])
        self.assertEqual(cnf_2["b"]["b"], CNF_0["b"]["b"])
        self.assertEqual(cnf_2["b"]["c"], CNF_0["b"]["c"])

    def test_36_load_w_validation(self):
        self._check_load_w_validation("cnf.json", "scm.json")

    def test_38_load_w_validation_yaml(self):
        self._check_load_w_validation("cnf.yml", "scm.yml")

    def test_39_single_load__w_validation(self):
        (cnf, scm) = (CNF_0, SCM_0)
        cpath = os.path.join(self.workdir, "cnf.json")
        spath = os.path.join(self.workdir, "scm.json")
        TT.dump(cnf, cpath)
        TT.dump(scm, spath)
        cnf1 = TT.single_load(cpath, ac_schema=spath)
        self.assert_dicts_equal(cnf, cnf1)

    def test_40_load_w_query(self):
        cnf_path = os.path.join(self.workdir, "cnf.json")
        TT.dump(CNF_0, cnf_path)
        try:
            if TT.query.jmespath:
                self.assertEqual(TT.load(cnf_path, ac_query="a"), 1)
                self.assertEqual(TT.load(cnf_path, ac_query="b.b"), [1, 2])
                self.assertEqual(TT.load(cnf_path, ac_query="b.b[1]"), 2)
                self.assertEqual(TT.load(cnf_path, ac_query="b.b[1:]"), [2])
                self.assertEqual(TT.load(cnf_path, ac_query="b.b[::-1]"),
                                 [2, 1])
                self.assertEqual(TT.load(cnf_path, ac_query="length(b.b)"), 2)
        except (NameError, AttributeError):
            pass  # jmespath is not available.
# vim:sw=4:ts=4:et:
|
en
| 0.603947
|
# # Copyright (C) 2012 - 2017 <NAME> <ssato at redhat.com> # License: MIT # # pylint: disable=missing-docstring, invalid-name, no-member # suppress logging messages. name: {{ name|default('a') }} a: {{ a }} b: b: {% for x in b.b -%} - {{ x }} {% endfor %} c: {{ b.c }} a: {{ a }} b: b: {% for x in b.b -%} - {{ x }} {% endfor %} c: {{ b.c }} name: {{ name }} a: {{ a }} b: b: {% for x in b.b -%} - {{ x }} {% endfor %} d: {{ b.d }} e: 0 # python 3.x {"a": "aaa"} # Should cause template renering error. # Validation should fail. # Validation should succeed. # It's type should be integer not string. # Validation should fail. # It works w/ JSON backend but some backend cannot keep the order of # items and the tests might fail. # jmespath is not available. # Assume MS_DICTS strategy was used. # To suppress warn of pylint. # JSON # YAML # YAML backend is not available in this env. # It will be remove after 'ignore_missing' was deprecated and removed. # jmespath is not available. # vim:sw=4:ts=4:et:
| 1.860104
| 2
|
QuickPotato/harness/measurements.py
|
afparsons/QuickPotato
| 130
|
6629652
|
<reponame>afparsons/QuickPotato
from QuickPotato.statistical.data import RawData
class Metrics(object):
    """Container for response-time metrics of one performance test run.

    Every metric attribute starts out as ``None`` and is populated from the
    raw test data by ``_collect_measurements``.
    """

    # Percentile levels tracked by this class: 5th, 10th, ..., 95th.
    _PERCENTILE_LEVELS = tuple(range(5, 100, 5))

    def __init__(self):
        self.metric_average = None
        self.metric_allowed_max_outlier = None
        self.metric_allowed_min_outlier = None
        for level in self._PERCENTILE_LEVELS:
            setattr(self, "metric_percentile_%dth" % level, None)

    def _collect_measurements(self, test_id, database_name):
        """Populate all metric attributes from the raw test data.

        Parameters
        ----------
        test_id
            Identifier of the performance test run to read.
        database_name
            Name of the database holding the raw measurements.

        Returns
        -------
        bool
            Always ``True`` once the attributes have been populated.
        """
        raw_data = RawData(test_id, database_name)
        self.metric_average = raw_data.average_response_time
        self.metric_allowed_max_outlier = raw_data.maximum_outlier_in_response_times
        self.metric_allowed_min_outlier = raw_data.minimum_outlier_in_response_times
        # Copy each percentile attribute over from the raw data object.
        for level in self._PERCENTILE_LEVELS:
            setattr(self, "metric_percentile_%dth" % level,
                    getattr(raw_data, "percentile_%dth" % level))
        return True

    @property
    def threshold_measurements(self):
        """Return a dict mapping every metric name to its current value."""
        measurements = {
            "metric_average": self.metric_average,
            "metric_allowed_max_outlier": self.metric_allowed_max_outlier,
            "metric_allowed_min_outlier": self.metric_allowed_min_outlier,
        }
        for level in self._PERCENTILE_LEVELS:
            key = "metric_percentile_%dth" % level
            measurements[key] = getattr(self, key)
        return measurements
|
from QuickPotato.statistical.data import RawData
class Metrics(object):
    """Container for response-time metrics of one performance test run.

    Every metric attribute starts out as ``None`` and is populated from the
    raw test data by ``_collect_measurements``.
    """

    def __init__(self):
        # Aggregate statistics over all response times.
        self.metric_average = None
        self.metric_allowed_max_outlier = None
        self.metric_allowed_min_outlier = None
        # Response-time percentiles, 5th through 95th in 5% steps.
        self.metric_percentile_5th = None
        self.metric_percentile_10th = None
        self.metric_percentile_15th = None
        self.metric_percentile_20th = None
        self.metric_percentile_25th = None
        self.metric_percentile_30th = None
        self.metric_percentile_35th = None
        self.metric_percentile_40th = None
        self.metric_percentile_45th = None
        self.metric_percentile_50th = None
        self.metric_percentile_55th = None
        self.metric_percentile_60th = None
        self.metric_percentile_65th = None
        self.metric_percentile_70th = None
        self.metric_percentile_75th = None
        self.metric_percentile_80th = None
        self.metric_percentile_85th = None
        self.metric_percentile_90th = None
        self.metric_percentile_95th = None

    def _collect_measurements(self, test_id, database_name):
        """Populate all metric attributes from the raw test data.

        Parameters
        ----------
        test_id
            Identifier of the performance test run to read.
        database_name
            Name of the database holding the raw measurements.

        Returns
        -------
        bool
            Always ``True`` once the attributes have been populated.
        """
        raw_data = RawData(test_id, database_name)
        self.metric_average = raw_data.average_response_time
        self.metric_allowed_max_outlier = raw_data.maximum_outlier_in_response_times
        self.metric_allowed_min_outlier = raw_data.minimum_outlier_in_response_times
        self.metric_percentile_5th = raw_data.percentile_5th
        self.metric_percentile_10th = raw_data.percentile_10th
        self.metric_percentile_15th = raw_data.percentile_15th
        self.metric_percentile_20th = raw_data.percentile_20th
        self.metric_percentile_25th = raw_data.percentile_25th
        self.metric_percentile_30th = raw_data.percentile_30th
        self.metric_percentile_35th = raw_data.percentile_35th
        self.metric_percentile_40th = raw_data.percentile_40th
        self.metric_percentile_45th = raw_data.percentile_45th
        self.metric_percentile_50th = raw_data.percentile_50th
        self.metric_percentile_55th = raw_data.percentile_55th
        self.metric_percentile_60th = raw_data.percentile_60th
        self.metric_percentile_65th = raw_data.percentile_65th
        self.metric_percentile_70th = raw_data.percentile_70th
        self.metric_percentile_75th = raw_data.percentile_75th
        self.metric_percentile_80th = raw_data.percentile_80th
        self.metric_percentile_85th = raw_data.percentile_85th
        self.metric_percentile_90th = raw_data.percentile_90th
        self.metric_percentile_95th = raw_data.percentile_95th
        return True

    @property
    def threshold_measurements(self):
        """Return a dict mapping every metric name to its current value."""
        return {
            "metric_average": self.metric_average,
            "metric_allowed_max_outlier": self.metric_allowed_max_outlier,
            "metric_allowed_min_outlier": self.metric_allowed_min_outlier,
            "metric_percentile_5th": self.metric_percentile_5th,
            "metric_percentile_10th": self.metric_percentile_10th,
            "metric_percentile_15th": self.metric_percentile_15th,
            "metric_percentile_20th": self.metric_percentile_20th,
            "metric_percentile_25th": self.metric_percentile_25th,
            "metric_percentile_30th": self.metric_percentile_30th,
            "metric_percentile_35th": self.metric_percentile_35th,
            "metric_percentile_40th": self.metric_percentile_40th,
            "metric_percentile_45th": self.metric_percentile_45th,
            "metric_percentile_50th": self.metric_percentile_50th,
            "metric_percentile_55th": self.metric_percentile_55th,
            "metric_percentile_60th": self.metric_percentile_60th,
            "metric_percentile_65th": self.metric_percentile_65th,
            "metric_percentile_70th": self.metric_percentile_70th,
            "metric_percentile_75th": self.metric_percentile_75th,
            "metric_percentile_80th": self.metric_percentile_80th,
            "metric_percentile_85th": self.metric_percentile_85th,
            "metric_percentile_90th": self.metric_percentile_90th,
            "metric_percentile_95th": self.metric_percentile_95th,
        }
|
en
| 0.095627
|
Parameters ---------- test_id database_name Returns -------
| 2.634385
| 3
|
wis2box/api/backend/__init__.py
|
webb-ben/wis2node
| 7
|
6629653
|
###############################################################################
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
###############################################################################
import logging
from typing import Any
from wis2box.env import API_BACKEND_TYPE, API_BACKEND_URL
from wis2box.plugin import load_plugin, PLUGINS
LOGGER = logging.getLogger(__name__)
def load_backend() -> Any:
    """
    Load wis2box backend

    :returns: plugin object
    """
    LOGGER.debug('Loading backend')
    # Resolve the plugin code path registered for the configured backend
    # type, then hand everything to the generic plugin loader.
    definitions = {
        'codepath': PLUGINS['api_backend'][API_BACKEND_TYPE],
        'url': API_BACKEND_URL,
    }
    return load_plugin('api_backend', definitions)
|
###############################################################################
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
###############################################################################
import logging
from typing import Any
from wis2box.env import API_BACKEND_TYPE, API_BACKEND_URL
from wis2box.plugin import load_plugin, PLUGINS
LOGGER = logging.getLogger(__name__)
def load_backend() -> Any:
    """
    Load wis2box backend

    :returns: plugin object
    """
    LOGGER.debug('Loading backend')
    # Look up the plugin code path registered for the configured backend type.
    codepath = PLUGINS['api_backend'][API_BACKEND_TYPE]
    defs = {
        'codepath': codepath,
        'url': API_BACKEND_URL
    }
    return load_plugin('api_backend', defs)
|
en
| 0.687931
|
############################################################################### # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # ############################################################################### Load wis2box backend :returns: plugin object
| 1.635516
| 2
|
bot/seasons/evergreen/game.py
|
DhruvDuseja/seasonalbot
| 0
|
6629654
|
<gh_stars>0
import difflib
import logging
import random
from datetime import datetime as dt
from enum import IntEnum
from typing import Any, Dict, List, Optional, Tuple
from aiohttp import ClientSession
from discord import Embed
from discord.ext import tasks
from discord.ext.commands import Cog, Context, group
from bot.bot import SeasonalBot
from bot.constants import STAFF_ROLES, Tokens
from bot.decorators import with_role
from bot.pagination import ImagePaginator, LinePaginator
# Base URL of IGDB API
BASE_URL = "https://api-v3.igdb.com"
HEADERS = {
    "user-key": Tokens.igdb,
    "Accept": "application/json"
}
logger = logging.getLogger(__name__)
# ---------
# TEMPLATES
# ---------
# Body templates
# All request bodies and page layouts below are filled in via str.format.
# Request body template for get_games_list
GAMES_LIST_BODY = (
    "fields cover.image_id, first_release_date, total_rating, name, storyline, url, platforms.name, status,"
    "involved_companies.company.name, summary, age_ratings.category, age_ratings.rating, total_rating_count;"
    "{sort} {limit} {offset} {genre} {additional}"
)
# Request body template for get_companies_list
COMPANIES_LIST_BODY = (
    "fields name, url, start_date, logo.image_id, developed.name, published.name, description;"
    "offset {offset}; limit {limit};"
)
# Request body template for games search
SEARCH_BODY = 'fields name, url, storyline, total_rating, total_rating_count; limit 50; search "{term}";'
# Pages templates
# Game embed layout
GAME_PAGE = (
    "**[{name}]({url})**\n"
    "{description}"
    "**Release Date:** {release_date}\n"
    "**Rating:** {rating}/100 :star: (based on {rating_count} ratings)\n"
    "**Platforms:** {platforms}\n"
    "**Status:** {status}\n"
    "**Age Ratings:** {age_ratings}\n"
    "**Made by:** {made_by}\n\n"
    "{storyline}"
)
# .games company command page layout
COMPANY_PAGE = (
    "**[{name}]({url})**\n"
    "{description}"
    "**Founded:** {founded}\n"
    "**Developed:** {developed}\n"
    "**Published:** {published}"
)
# For .games search command line layout
GAME_SEARCH_LINE = (
    "**[{name}]({url})**\n"
    "{rating}/100 :star: (based on {rating_count} ratings)\n"
)
# URL templates
# {image_id} is substituted in create_page / create_company_page.
COVER_URL = "https://images.igdb.com/igdb/image/upload/t_cover_big/{image_id}.jpg"
LOGO_URL = "https://images.igdb.com/igdb/image/upload/t_logo_med/{image_id}.png"
# Create aliases for complex genre names
ALIASES = {
    "Role-playing (rpg)": ["Role playing", "Rpg"],
    "Turn-based strategy (tbs)": ["Turn based strategy", "Tbs"],
    "Real time strategy (rts)": ["Real time strategy", "Rts"],
    "Hack and slash/beat 'em up": ["Hack and slash"]
}
class GameStatus(IntEnum):
    """Game statuses in IGDB API."""

    # Values are IGDB's numeric "status" field IDs; ``GameStatus(v).name``
    # is used to render a readable status on game pages.
    Released = 0
    Alpha = 2
    Beta = 3
    Early = 4
    Offline = 5
    Cancelled = 6
    Rumored = 7
class AgeRatingCategories(IntEnum):
    """IGDB API Age Rating categories IDs."""

    # Rating boards: ESRB (North America) and PEGI (Europe).
    ESRB = 1
    PEGI = 2
class AgeRatings(IntEnum):
    """PEGI/ESRB ratings IGDB API IDs."""

    # PEGI age levels (IDs 1-5).
    Three = 1
    Seven = 2
    Twelve = 3
    Sixteen = 4
    Eighteen = 5
    # ESRB categories (IDs 6-12): RP = Rating Pending, EC = Early Childhood,
    # E = Everyone, E10 = Everyone 10+, T = Teen, M = Mature, AO = Adults Only.
    RP = 6
    EC = 7
    E = 8
    E10 = 9
    T = 10
    M = 11
    AO = 12
class Games(Cog):
    """Games Cog contains commands that collect data from IGDB."""

    def __init__(self, bot: SeasonalBot):
        self.bot = bot
        self.http_session: ClientSession = bot.http_session
        # Genre name -> IGDB genre ID; (re)built hourly by refresh_genres_task.
        self.genres: Dict[str, int] = {}
        self.refresh_genres_task.start()

    @tasks.loop(hours=1.0)
    async def refresh_genres_task(self) -> None:
        """Refresh genres in every hour."""
        # Broad catch is deliberate: a failed refresh must not kill the loop.
        try:
            await self._get_genres()
        except Exception as e:
            logger.warning(f"There was error while refreshing genres: {e}")
            return
        logger.info("Successfully refreshed genres.")

    def cog_unload(self) -> None:
        """Cancel genres refreshing task when unloading Cog."""
        self.refresh_genres_task.cancel()
        logger.info("Successfully stopped Genres Refreshing task.")

    async def _get_genres(self) -> None:
        """Fetch all genres from IGDB and rebuild the genre lookup table."""
        body = "fields name; limit 100;"
        async with self.http_session.get(f"{BASE_URL}/genres", data=body, headers=HEADERS) as resp:
            result = await resp.json()
        genres = {genre["name"].capitalize(): genre["id"] for genre in result}
        # Replace complex names with names from ALIASES
        for genre_name, genre in genres.items():
            if genre_name in ALIASES:
                for alias in ALIASES[genre_name]:
                    self.genres[alias] = genre
            else:
                self.genres[genre_name] = genre

    @group(name="games", aliases=["game"], invoke_without_command=True)
    async def games(self, ctx: Context, amount: Optional[int] = 5, *, genre: Optional[str] = None) -> None:
        """
        Get random game(s) by genre from IGDB. Use .games genres command to get all available genres.
        Also support amount parameter, what max is 25 and min 1, default 5. Supported formats:
        - .games <genre>
        - .games <amount> <genre>
        """
        # When user didn't specify a genre, send the help message.
        if genre is None:
            await ctx.send_help("games")
            return
        # Capitalize genre to match the keys of self.genres.
        genre = genre.capitalize()
        # Check for amounts, max is 25 and min 1
        if not 1 <= amount <= 25:
            await ctx.send("Your provided amount is out of range. Our minimum is 1 and maximum 25.")
            return
        # Get games listing; if the genre doesn't exist, show an error message
        # with close-match suggestions.  Offset must be random, otherwise we
        # would always get the same result (offset is the position in the API
        # result set from which records are returned).
        try:
            games = await self.get_games_list(amount, self.genres[genre], offset=random.randint(0, 150))
        except KeyError:
            possibilities = "`, `".join(difflib.get_close_matches(genre, self.genres))
            await ctx.send(f"Invalid genre `{genre}`. {f'Maybe you meant `{possibilities}`?' if possibilities else ''}")
            return
        # Create pages and paginate
        pages = [await self.create_page(game) for game in games]
        await ImagePaginator.paginate(pages, ctx, Embed(title=f"Random {genre.title()} Games"))

    @games.command(name="top", aliases=["t"])
    async def top(self, ctx: Context, amount: int = 10) -> None:
        """
        Get current Top games in IGDB.
        Support amount parameter. Max is 25, min is 1.
        """
        if not 1 <= amount <= 25:
            await ctx.send("Your provided amount is out of range. Our minimum is 1 and maximum 25.")
            return
        games = await self.get_games_list(amount, sort="total_rating desc",
                                          additional_body="where total_rating >= 90; sort total_rating_count desc;")
        pages = [await self.create_page(game) for game in games]
        await ImagePaginator.paginate(pages, ctx, Embed(title=f"Top {amount} Games"))

    @games.command(name="genres", aliases=["genre", "g"])
    async def genres(self, ctx: Context) -> None:
        """Get all available genres."""
        await ctx.send(f"Currently available genres: {', '.join(f'`{genre}`' for genre in self.genres)}")

    @games.command(name="search", aliases=["s"])
    async def search(self, ctx: Context, *, search_term: str) -> None:
        """Find games by name."""
        lines = await self.search_games(search_term)
        await LinePaginator.paginate(lines, ctx, Embed(title=f"Game Search Results: {search_term}"), empty=False)

    @games.command(name="company", aliases=["companies"])
    async def company(self, ctx: Context, amount: int = 5) -> None:
        """
        Get random Game Companies companies from IGDB API.
        Support amount parameter. Max is 25, min is 1.
        """
        if not 1 <= amount <= 25:
            await ctx.send("Your provided amount is out of range. Our minimum is 1 and maximum 25.")
            return
        # Get companies listing. Provide limit for limiting how much companies will be returned. Get random offset to
        # get (almost) every time different companies (offset show in which position should API start returning result)
        companies = await self.get_companies_list(limit=amount, offset=random.randint(0, 150))
        pages = [await self.create_company_page(co) for co in companies]
        await ImagePaginator.paginate(pages, ctx, Embed(title="Random Game Companies"))

    @with_role(*STAFF_ROLES)
    @games.command(name="refresh", aliases=["r"])
    async def refresh_genres_command(self, ctx: Context) -> None:
        """Refresh .games command genres."""
        # Broad catch is deliberate: report the failure back to the invoker.
        try:
            await self._get_genres()
        except Exception as e:
            await ctx.send(f"There was error while refreshing genres: `{e}`")
            return
        await ctx.send("Successfully refreshed genres.")

    async def get_games_list(self,
                             amount: int,
                             genre: Optional[str] = None,
                             sort: Optional[str] = None,
                             additional_body: str = "",
                             offset: int = 0
                             ) -> List[Dict[str, Any]]:
        """
        Get list of games from IGDB API by parameters that is provided.
        Amount param show how much games this get, genre is genre ID and at least one genre in game must this when
        provided. Sort is sorting by specific field and direction, ex. total_rating desc/asc (total_rating is field,
        desc/asc is direction). Additional_body is field where you can pass extra search parameters. Offset show start
        position in API.
        """
        # Create body of IGDB API request, define fields, sorting, offset, limit and genre
        params = {
            "sort": f"sort {sort};" if sort else "",
            "limit": f"limit {amount};",
            "offset": f"offset {offset};" if offset else "",
            "genre": f"where genres = ({genre});" if genre else "",
            "additional": additional_body
        }
        body = GAMES_LIST_BODY.format(**params)
        # Do request to IGDB API, create headers, URL, define body, return result
        async with self.http_session.get(url=f"{BASE_URL}/games", data=body, headers=HEADERS) as resp:
            return await resp.json()

    async def create_page(self, data: Dict[str, Any]) -> Tuple[str, str]:
        """Create content of Game Page."""
        # Create cover image URL from template
        url = COVER_URL.format(**{"image_id": data["cover"]["image_id"] if "cover" in data else ""})
        # Get release date separately with checking
        release_date = dt.utcfromtimestamp(data["first_release_date"]).date() if "first_release_date" in data else "?"
        # Create Age Ratings value
        rating = ", ".join(f"{AgeRatingCategories(age['category']).name} {AgeRatings(age['rating']).name}"
                           for age in data["age_ratings"]) if "age_ratings" in data else "?"
        companies = [c["company"]["name"] for c in data["involved_companies"]] if "involved_companies" in data else "?"
        # Create formatting for template page
        formatting = {
            "name": data["name"],
            "url": data["url"],
            "description": f"{data['summary']}\n\n" if "summary" in data else "\n",
            "release_date": release_date,
            "rating": round(data["total_rating"] if "total_rating" in data else 0, 2),
            "rating_count": data["total_rating_count"] if "total_rating_count" in data else "?",
            "platforms": ", ".join(platform["name"] for platform in data["platforms"]) if "platforms" in data else "?",
            "status": GameStatus(data["status"]).name if "status" in data else "?",
            "age_ratings": rating,
            "made_by": ", ".join(companies),
            "storyline": data["storyline"] if "storyline" in data else ""
        }
        page = GAME_PAGE.format(**formatting)
        return page, url

    async def search_games(self, search_term: str) -> List[str]:
        """Search game from IGDB API by string, return listing of pages."""
        lines = []
        # Define request body of IGDB API request and do request
        body = SEARCH_BODY.format(**{"term": search_term})
        async with self.http_session.get(url=f"{BASE_URL}/games", data=body, headers=HEADERS) as resp:
            data = await resp.json()
        # Loop over games, format them to good format, make line and append this to total lines
        for game in data:
            formatting = {
                "name": game["name"],
                "url": game["url"],
                "rating": round(game["total_rating"] if "total_rating" in game else 0, 2),
                # Bug fix: guard on "total_rating_count" itself.  The old code
                # checked "total_rating" here and raised KeyError for games
                # that have a rating but no rating count.
                "rating_count": game["total_rating_count"] if "total_rating_count" in game else "?"
            }
            line = GAME_SEARCH_LINE.format(**formatting)
            lines.append(line)
        return lines

    async def get_companies_list(self, limit: int, offset: int = 0) -> List[Dict[str, Any]]:
        """
        Get random Game Companies from IGDB API.
        Limit is parameter, that show how much movies this should return, offset show in which position should API start
        returning results.
        """
        # Create request body from template
        body = COMPANIES_LIST_BODY.format(**{
            "limit": limit,
            "offset": offset
        })
        async with self.http_session.get(url=f"{BASE_URL}/companies", data=body, headers=HEADERS) as resp:
            return await resp.json()

    async def create_company_page(self, data: Dict[str, Any]) -> Tuple[str, str]:
        """Create good formatted Game Company page."""
        # Generate URL of company logo
        url = LOGO_URL.format(**{"image_id": data["logo"]["image_id"] if "logo" in data else ""})
        # Try to get found date of company
        founded = dt.utcfromtimestamp(data["start_date"]).date() if "start_date" in data else "?"
        # Generate list of games, that company have developed or published
        developed = ", ".join(game["name"] for game in data["developed"]) if "developed" in data else "?"
        published = ", ".join(game["name"] for game in data["published"]) if "published" in data else "?"
        formatting = {
            "name": data["name"],
            "url": data["url"],
            "description": f"{data['description']}\n\n" if "description" in data else "\n",
            "founded": founded,
            "developed": developed,
            "published": published
        }
        page = COMPANY_PAGE.format(**formatting)
        return page, url
def setup(bot: SeasonalBot) -> None:
    """Add/Load Games cog."""
    # The cog cannot talk to IGDB without an API key, so only load it
    # when a key is configured; otherwise log a warning and do nothing.
    if Tokens.igdb:
        bot.add_cog(Games(bot))
    else:
        logger.warning("No IGDB API key. Not loading Games cog.")
|
import difflib
import logging
import random
from datetime import datetime as dt
from enum import IntEnum
from typing import Any, Dict, List, Optional, Tuple
from aiohttp import ClientSession
from discord import Embed
from discord.ext import tasks
from discord.ext.commands import Cog, Context, group
from bot.bot import SeasonalBot
from bot.constants import STAFF_ROLES, Tokens
from bot.decorators import with_role
from bot.pagination import ImagePaginator, LinePaginator
# Base URL of IGDB API (v3)
BASE_URL = "https://api-v3.igdb.com"
# IGDB v3 authenticates via the `user-key` header.
HEADERS = {
"user-key": Tokens.igdb,
"Accept": "application/json"
}
logger = logging.getLogger(__name__)
# ---------
# TEMPLATES
# ---------
# Body templates (IGDB Apicalypse query syntax)
# Request body template for get_games_list
GAMES_LIST_BODY = (
"fields cover.image_id, first_release_date, total_rating, name, storyline, url, platforms.name, status,"
"involved_companies.company.name, summary, age_ratings.category, age_ratings.rating, total_rating_count;"
"{sort} {limit} {offset} {genre} {additional}"
)
# Request body template for get_companies_list
COMPANIES_LIST_BODY = (
"fields name, url, start_date, logo.image_id, developed.name, published.name, description;"
"offset {offset}; limit {limit};"
)
# Request body template for games search
SEARCH_BODY = 'fields name, url, storyline, total_rating, total_rating_count; limit 50; search "{term}";'
# Pages templates
# Game embed layout
GAME_PAGE = (
"**[{name}]({url})**\n"
"{description}"
"**Release Date:** {release_date}\n"
"**Rating:** {rating}/100 :star: (based on {rating_count} ratings)\n"
"**Platforms:** {platforms}\n"
"**Status:** {status}\n"
"**Age Ratings:** {age_ratings}\n"
"**Made by:** {made_by}\n\n"
"{storyline}"
)
# .games company command page layout
COMPANY_PAGE = (
"**[{name}]({url})**\n"
"{description}"
"**Founded:** {founded}\n"
"**Developed:** {developed}\n"
"**Published:** {published}"
)
# For .games search command line layout
GAME_SEARCH_LINE = (
"**[{name}]({url})**\n"
"{rating}/100 :star: (based on {rating_count} ratings)\n"
)
# URL templates for IGDB's image CDN
COVER_URL = "https://images.igdb.com/igdb/image/upload/t_cover_big/{image_id}.jpg"
LOGO_URL = "https://images.igdb.com/igdb/image/upload/t_logo_med/{image_id}.png"
# User-friendly aliases for awkward IGDB genre names (alias -> same genre id)
ALIASES = {
"Role-playing (rpg)": ["Role playing", "Rpg"],
"Turn-based strategy (tbs)": ["Turn based strategy", "Tbs"],
"Real time strategy (rts)": ["Real time strategy", "Rts"],
"Hack and slash/beat 'em up": ["Hack and slash"]
}
class GameStatus(IntEnum):
"""Game statuses in IGDB API (values mirror the API's `status` field; 1 is unused here)."""
Released = 0
Alpha = 2
Beta = 3
Early = 4
Offline = 5
Cancelled = 6
Rumored = 7
class AgeRatingCategories(IntEnum):
"""IGDB API Age Rating categories IDs (rating system the value belongs to)."""
ESRB = 1
PEGI = 2
class AgeRatings(IntEnum):
"""PEGI/ESRB ratings IGDB API IDs.
IDs 1-5 are PEGI age bands (3/7/12/16/18); 6-12 are ESRB labels (RP, EC, E, E10, T, M, AO).
"""
Three = 1
Seven = 2
Twelve = 3
Sixteen = 4
Eighteen = 5
RP = 6
EC = 7
E = 8
E10 = 9
T = 10
M = 11
AO = 12
class Games(Cog):
"""Games Cog contains commands that collect data from IGDB."""
def __init__(self, bot: SeasonalBot):
self.bot = bot
# Reuse the bot-wide aiohttp session instead of opening a new one per request.
self.http_session: ClientSession = bot.http_session
# Genre name -> IGDB genre id; populated by the hourly refresh task below.
self.genres: Dict[str, int] = {}
self.refresh_genres_task.start()
@tasks.loop(hours=1.0)
async def refresh_genres_task(self) -> None:
"""Refresh the cached genre mapping once an hour."""
try:
await self._get_genres()
except Exception as e:
# Broad catch is deliberate: a failed refresh must not kill the hourly loop.
logger.warning(f"There was error while refreshing genres: {e}")
return
logger.info("Successfully refreshed genres.")
def cog_unload(self) -> None:
"""Cancel the genre-refreshing task when unloading the cog."""
self.refresh_genres_task.cancel()
logger.info("Successfully stopped Genres Refreshing task.")
async def _get_genres(self) -> None:
    """Fetch IGDB genres and populate the name -> genre-id mapping used by `.games`."""
    body = "fields name; limit 100;"
    async with self.http_session.get(f"{BASE_URL}/genres", data=body, headers=HEADERS) as resp:
        fetched = await resp.json()
    # Normalise names to `Xxxx` capitalisation so user input can be matched directly.
    by_name = {entry["name"].capitalize(): entry["id"] for entry in fetched}
    # Store each genre either under its friendly aliases or under its own name.
    for name, genre_id in by_name.items():
        aliases = ALIASES.get(name)
        if aliases is None:
            self.genres[name] = genre_id
        else:
            for alias in aliases:
                self.genres[alias] = genre_id
@group(name="games", aliases=["game"], invoke_without_command=True)
async def games(self, ctx: Context, amount: Optional[int] = 5, *, genre: Optional[str] = None) -> None:
"""
Get random game(s) by genre from IGDB. Use the `.games genres` command to list available genres.
`amount` must be between 1 and 25 (default 5). Supported formats:
- .games <genre>
- .games <amount> <genre>
"""
# When the user didn't specify a genre, send the help message instead.
if genre is None:
await ctx.send_help("games")
return
# Capitalize the genre so it matches the keys stored by _get_genres.
genre = "".join(genre).capitalize()
# Enforce the 1..25 amount range.
if not 1 <= amount <= 25:
await ctx.send("Your provided amount is out of range. Our minimum is 1 and maximum 25.")
return
# Fetch the games list; an unknown genre raises KeyError and we suggest close matches.
# Offset is randomized so repeated invocations don't always return the same games.
try:
games = await self.get_games_list(amount, self.genres[genre], offset=random.randint(0, 150))
except KeyError:
possibilities = "`, `".join(difflib.get_close_matches(genre, self.genres))
await ctx.send(f"Invalid genre `{genre}`. {f'Maybe you meant `{possibilities}`?' if possibilities else ''}")
return
# Build one page per game and paginate them as images.
pages = [await self.create_page(game) for game in games]
await ImagePaginator.paginate(pages, ctx, Embed(title=f"Random {genre.title()} Games"))
@games.command(name="top", aliases=["t"])
async def top(self, ctx: Context, amount: int = 10) -> None:
"""
Get current Top games in IGDB.
`amount` must be between 1 and 25 (default 10).
"""
if not 1 <= amount <= 25:
await ctx.send("Your provided amount is out of range. Our minimum is 1 and maximum 25.")
return
# Only games rated >= 90, ordered by number of ratings as a tiebreaker.
games = await self.get_games_list(amount, sort="total_rating desc",
additional_body="where total_rating >= 90; sort total_rating_count desc;")
pages = [await self.create_page(game) for game in games]
await ImagePaginator.paginate(pages, ctx, Embed(title=f"Top {amount} Games"))
@games.command(name="genres", aliases=["genre", "g"])
async def genres(self, ctx: Context) -> None:
"""List all currently cached genres."""
await ctx.send(f"Currently available genres: {', '.join(f'`{genre}`' for genre in self.genres)}")
@games.command(name="search", aliases=["s"])
async def search(self, ctx: Context, *, search_term: str) -> None:
"""Find games by name."""
lines = await self.search_games(search_term)
await LinePaginator.paginate(lines, ctx, Embed(title=f"Game Search Results: {search_term}"), empty=False)
@games.command(name="company", aliases=["companies"])
async def company(self, ctx: Context, amount: int = 5) -> None:
"""
Get random Game Companies from the IGDB API.
`amount` must be between 1 and 25 (default 5).
"""
if not 1 <= amount <= 25:
await ctx.send("Your provided amount is out of range. Our minimum is 1 and maximum 25.")
return
# Randomized offset so repeated invocations return (mostly) different companies.
companies = await self.get_companies_list(limit=amount, offset=random.randint(0, 150))
pages = [await self.create_company_page(co) for co in companies]
await ImagePaginator.paginate(pages, ctx, Embed(title="Random Game Companies"))
@with_role(*STAFF_ROLES)
@games.command(name="refresh", aliases=["r"])
async def refresh_genres_command(self, ctx: Context) -> None:
"""Staff-only: refresh the `.games` genre cache on demand."""
try:
await self._get_genres()
except Exception as e:
# Report the failure to the invoker rather than raising inside the command.
await ctx.send(f"There was error while refreshing genres: `{e}`")
return
await ctx.send("Successfully refreshed genres.")
async def get_games_list(self,
amount: int,
genre: Optional[str] = None,
sort: Optional[str] = None,
additional_body: str = "",
offset: int = 0
) -> List[Dict[str, Any]]:
"""
Get a list of games from the IGDB API.
`amount` limits how many games are returned; `genre` is an IGDB genre id that at least one
of a game's genres must match when provided. `sort` is "<field> <direction>", e.g.
"total_rating desc". `additional_body` passes extra raw query clauses. `offset` is the
start position within the API's result set.
"""
# Assemble the Apicalypse query: fields, sorting, offset, limit and genre filter.
params = {
"sort": f"sort {sort};" if sort else "",
"limit": f"limit {amount};",
"offset": f"offset {offset};" if offset else "",
"genre": f"where genres = ({genre});" if genre else "",
"additional": additional_body
}
body = GAMES_LIST_BODY.format(**params)
# NOTE(review): IGDB v3 expects the query in the body of a GET request.
async with self.http_session.get(url=f"{BASE_URL}/games", data=body, headers=HEADERS) as resp:
return await resp.json()
async def create_page(self, data: Dict[str, Any]) -> Tuple[str, str]:
    """Build one (page text, cover URL) pair for the image paginator from an IGDB game record."""
    # Cover image URL; an absent cover yields an empty image id, as before.
    cover = COVER_URL.format(image_id=data["cover"]["image_id"] if "cover" in data else "")

    # Release date ("first_release_date" is a Unix timestamp when present).
    if "first_release_date" in data:
        released = dt.utcfromtimestamp(data["first_release_date"]).date()
    else:
        released = "?"

    # Age ratings rendered as "<system> <label>" pairs, e.g. "PEGI Sixteen".
    if "age_ratings" in data:
        ages = ", ".join(
            f"{AgeRatingCategories(entry['category']).name} {AgeRatings(entry['rating']).name}"
            for entry in data["age_ratings"]
        )
    else:
        ages = "?"

    # Involved company names; falls back to the single string "?" like the fields above.
    if "involved_companies" in data:
        makers = [entry["company"]["name"] for entry in data["involved_companies"]]
    else:
        makers = "?"

    text = GAME_PAGE.format(
        name=data["name"],
        url=data["url"],
        description=f"{data['summary']}\n\n" if "summary" in data else "\n",
        release_date=released,
        rating=round(data.get("total_rating", 0), 2),
        rating_count=data.get("total_rating_count", "?"),
        platforms=", ".join(p["name"] for p in data["platforms"]) if "platforms" in data else "?",
        status=GameStatus(data["status"]).name if "status" in data else "?",
        age_ratings=ages,
        made_by=", ".join(makers),
        storyline=data.get("storyline", ""),
    )
    return text, cover
async def search_games(self, search_term: str) -> List[str]:
    """Search IGDB for games by name and return formatted lines for the line paginator.

    Bug fix: `rating_count` was previously gated on the wrong key ("total_rating"
    instead of "total_rating_count"), so a game carrying a rating but no rating
    count raised KeyError while formatting.
    """
    lines = []
    # Build the Apicalypse search query and send it (IGDB v3 takes the query in a GET body).
    body = SEARCH_BODY.format(**{"term": search_term})
    async with self.http_session.get(url=f"{BASE_URL}/games", data=body, headers=HEADERS) as resp:
        data = await resp.json()
    # Format each result into one paginator line.
    for game in data:
        formatting = {
            "name": game["name"],
            "url": game["url"],
            "rating": round(game.get("total_rating", 0), 2),
            # Fixed key: check/fetch "total_rating_count", not "total_rating".
            "rating_count": game.get("total_rating_count", "?")
        }
        lines.append(GAME_SEARCH_LINE.format(**formatting))
    return lines
async def get_companies_list(self, limit: int, offset: int = 0) -> List[Dict[str, Any]]:
"""
Get random Game Companies from IGDB API.
`limit` caps how many companies are returned; `offset` is the position in the
API result set at which results start (this endpoint returns companies, not movies).
"""
# Create request body from the Apicalypse query template
body = COMPANIES_LIST_BODY.format(**{
"limit": limit,
"offset": offset
})
# NOTE(review): IGDB v3 expects the query in the body of a GET request — unusual but intended.
async with self.http_session.get(url=f"{BASE_URL}/companies", data=body, headers=HEADERS) as resp:
return await resp.json()
async def create_company_page(self, data: Dict[str, Any]) -> Tuple[str, str]:
    """Build one (page text, logo URL) pair for the image paginator from an IGDB company record."""
    # Logo image URL; an absent logo yields an empty image id, as before.
    logo = LOGO_URL.format(image_id=data["logo"]["image_id"] if "logo" in data else "")

    # Founding date ("start_date" is a Unix timestamp when present).
    if "start_date" in data:
        founded = dt.utcfromtimestamp(data["start_date"]).date()
    else:
        founded = "?"

    def joined_names(field: str) -> str:
        # Comma-separated game titles for `field`, or "?" when the field is absent.
        if field not in data:
            return "?"
        return ", ".join(game["name"] for game in data[field])

    text = COMPANY_PAGE.format(
        name=data["name"],
        url=data["url"],
        description=f"{data['description']}\n\n" if "description" in data else "\n",
        founded=founded,
        developed=joined_names("developed"),
        published=joined_names("published"),
    )
    return text, logo
def setup(bot: SeasonalBot) -> None:
    """Load the Games cog, skipping it when no IGDB API key is configured."""
    if Tokens.igdb:
        bot.add_cog(Games(bot))
    else:
        # Without an API key every command would fail, so don't load at all.
        logger.warning("No IGDB API key. Not loading Games cog.")
|
en
| 0.78681
|
# Base URL of IGDB API # --------- # TEMPLATES # --------- # Body templates # Request body template for get_games_list # Request body template for get_companies_list # Request body template for games search # Pages templates # Game embed layout # .games company command page layout # For .games search command line layout # URL templates # Create aliases for complex genre names Game statuses in IGDB API. IGDB API Age Rating categories IDs. PEGI/ESRB ratings IGDB API IDs. Games Cog contains commands that collect data from IGDB. Refresh genres in every hour. Cancel genres refreshing start when unloading Cog. Create genres variable for games command. # Replace complex names with names from ALIASES Get random game(s) by genre from IGDB. Use .games genres command to get all available genres. Also support amount parameter, what max is 25 and min 1, default 5. Supported formats: - .games <genre> - .games <amount> <genre> # When user didn't specified genre, send help message # Capitalize genre for check # Check for amounts, max is 25 and min 1 # Get games listing, if genre don't exist, show error message with possibilities. # Offset must be random, due otherwise we will get always same result (offset show in which position should # API start returning result) # Create pages and paginate Get current Top games in IGDB. Support amount parameter. Max is 25, min is 1. Get all available genres. Find games by name. Get random Game Companies companies from IGDB API. Support amount parameter. Max is 25, min is 1. # Get companies listing. Provide limit for limiting how much companies will be returned. Get random offset to # get (almost) every time different companies (offset show in which position should API start returning result) Refresh .games command genres. Get list of games from IGDB API by parameters that is provided. Amount param show how much games this get, genre is genre ID and at least one genre in game must this when provided. 
Sort is sorting by specific field and direction, ex. total_rating desc/asc (total_rating is field, desc/asc is direction). Additional_body is field where you can pass extra search parameters. Offset show start position in API. # Create body of IGDB API request, define fields, sorting, offset, limit and genre # Do request to IGDB API, create headers, URL, define body, return result Create content of Game Page. # Create cover image URL from template # Get release date separately with checking # Create Age Ratings value # Create formatting for template page Search game from IGDB API by string, return listing of pages. # Define request body of IGDB API request and do request # Loop over games, format them to good format, make line and append this to total lines Get random Game Companies from IGDB API. Limit is parameter, that show how much movies this should return, offset show in which position should API start returning results. # Create request body from template Create good formatted Game Company page. # Generate URL of company logo # Try to get found date of company # Generate list of games, that company have developed or published Add/Load Games cog. # Check does IGDB API key exist, if not, log warning and don't load cog
| 2.130162
| 2
|
leetcode/weekly164/3_search_suggestions.py
|
jan25/code_sorted
| 2
|
6629655
|
'''
https://leetcode.com/contest/weekly-contest-164/problems/search-suggestions-system/
'''
class Solution:
    def suggestedProducts(self, products: List[str], searchWord: str) -> List[List[str]]:
        """For each prefix of searchWord, return up to three lexicographically
        smallest products that start with that prefix (empty list when none match).
        """
        total = len(products)
        ordered = sorted(products)
        suggestions: List[List[str]] = []
        # [lo, hi] is the window of sorted products still matching the current prefix.
        lo, hi = 0, total - 1
        for pos, ch in enumerate(searchWord):
            # Advance lo past products that are too short or differ at `pos`.
            while lo < total and lo <= hi and (pos >= len(ordered[lo]) or ordered[lo][pos] != ch):
                lo += 1
            # Retreat hi likewise from the right edge.
            while hi >= 0 and lo <= hi and (pos >= len(ordered[hi]) or ordered[hi][pos] != ch):
                hi -= 1
            if lo > hi:
                suggestions.append([])
            else:
                # At most three suggestions from the left edge of the window.
                suggestions.append(ordered[lo:hi + 1][:3])
        return suggestions
|
'''
https://leetcode.com/contest/weekly-contest-164/problems/search-suggestions-system/
'''
class Solution:
def suggestedProducts(self, products: List[str], searchWord: str) -> List[List[str]]:
"""For each prefix of searchWord, return up to 3 lexicographically smallest matching products."""
n = len(products)
sp = sorted(products)
p = []
# Two pointers bound the window of sorted products that still match the current prefix.
i, j = 0, n - 1
for ic, c in enumerate(searchWord):
# Advance i past products that are too short or differ from c at position ic.
while i < n and i <= j and (ic >= len(sp[i]) or sp[i][ic] != c): i += 1
# Retreat j likewise from the right edge of the window.
while j >= 0 and i <= j and (ic >= len(sp[j]) or sp[j][ic] != c): j -= 1
# Non-empty window: take at most three products from its left edge.
if i <= j: p.append(sp[i:min(i + 3, j + 1, n)])
else: p.append([])
return p
|
en
| 0.644506
|
https://leetcode.com/contest/weekly-contest-164/problems/search-suggestions-system/
| 3.191074
| 3
|
soko/locations/models.py
|
njugunanduati/soko
| 1
|
6629656
|
# -*- coding: utf-8 -*-
"""location models."""
import datetime as dt
from soko.database import Column, Model, SurrogatePK, db, reference_col, relationship
class Locations(SurrogatePK, Model):
    """A (latitude, longitude) position reported by a user, stored as strings."""

    __tablename__ = 'locations'

    # Plain integer user id (no FK declared here).
    user = Column(db.Integer, nullable=False)
    latitude = Column(db.String(150), nullable=False)
    longitude = Column(db.String(150), nullable=False)
    created_at = Column(db.DateTime, nullable=False, default=dt.datetime.utcnow)

    def __init__(self, user, latitude, longitude):
        """Store the reporting user id and the coordinate pair."""
        self.user = user
        self.latitude = latitude
        self.longitude = longitude

    def __repr__(self):
        # Bug fix: previously only `user` was interpolated and the coordinates were
        # concatenated *after* the closing '>' ('<Location 5>1.23.4'), due to
        # `%` binding tighter than `+`. Format all three fields inside the brackets.
        return '<Location %r %s %s>' % (self.user, self.latitude, self.longitude)

    def serialize(self):
        """Return a JSON-serializable dict of this location."""
        return {"id": self.id, "user": self.user, "latitude": self.latitude, "longitude": self.longitude}
|
# -*- coding: utf-8 -*-
"""location models."""
import datetime as dt
from soko.database import Column, Model, SurrogatePK, db, reference_col, relationship
class Locations(SurrogatePK, Model):
"""A (latitude, longitude) position reported by a user, stored as strings."""
__tablename__ = 'locations'
# Plain integer user id (no FK declared here).
user = Column(db.Integer, nullable=False)
latitude = Column(db.String(150), nullable=False)
longitude = Column(db.String(150), nullable=False)
created_at = Column(db.DateTime, nullable=False, default=dt.datetime.utcnow)
def __init__(self, user, latitude, longitude):
# Store the reporting user id and the coordinate pair.
self.user = user
self.latitude = latitude
self.longitude = longitude
def __repr__(self):
# NOTE(review): `%` binds tighter than `+`, so this renders as
# '<Location 5>1.23.4' — the coordinates land after the closing '>'.
# Probably intended '<Location %r %s %s>' % (...); confirm and fix.
return '<Location %r>' % self.user + self.latitude + self.longitude
def serialize(self):
# JSON-serializable dict of this location.
return {"id": self.id, "user": self.user, "latitude": self.latitude, "longitude": self.longitude}
|
en
| 0.83139
|
# -*- coding: utf-8 -*- location models.
| 2.621102
| 3
|
manga_db/db/migrations/0003_id_onpage_text_add_2_extr.py
|
nilfoer/mangadb
| 3
|
6629657
|
import sqlite3
from typing import Dict, Union
# Migration date stamp used by the migration framework.
date = '2021-01-29'
# need to turn off foreign key constraints in order to rename ExternalInfo
requires_foreign_keys_off = True
# Bidirectional mapping: site id -> site name AND site name -> site id.
SUPPORTED_SITES: Dict[Union[int, str], Union[int, str]] = {
# site id, site name
1: "tsumino.com",
2: "nhentai.net",
3: "MangaDex",
4: "Manganelo",
5: "Toonily",
# site name, id
"tsumino.com": 1,
"nhentai.net": 2,
"MangaDex": 3,
"Manganelo": 4,
"Toonily": 5,
}
# Bidirectional mapping: language id -> name AND name -> id ("Unknown" is id 1).
LANG_IDS: Dict[Union[str, int], Union[str, int]] = {
1: "Unknown",
2: "English",
3: "Japanese",
4: "Chinese",
5: "Korean",
6: "Arabic",
7: "Bengali",
8: "Bulgarian",
9: "Burmese",
10: "Catalan",
11: "Czech",
12: "Danish",
13: "Dutch",
14: "Filipino",
15: "Finnish",
16: "French",
17: "German",
18: "Greek",
19: "Hungarian",
20: "Indonesian",
21: "Italian",
22: "Lithuanian",
23: "Malay",
24: "Mongolian",
25: "Persian",
26: "Polish",
27: "Portuguese",
28: "Romanian",
29: "Russian",
30: "Serbo-Croatian",
31: "Spanish",
32: "Swedish",
33: "Thai",
34: "Turkish",
35: "Ukrainian",
36: "Vietnamese",
"Unknown": 1,
"English": 2,
"Japanese": 3,
"Chinese": 4,
"Korean": 5,
"Arabic": 6,
"Bengali": 7,
"Bulgarian": 8,
"Burmese": 9,
"Catalan": 10,
"Czech": 11,
"Danish": 12,
"Dutch": 13,
"Filipino": 14,
"Finnish": 15,
"French": 16,
"German": 17,
"Greek": 18,
"Hungarian": 19,
"Indonesian": 20,
"Italian": 21,
"Lithuanian": 22,
"Malay": 23,
"Mongolian": 24,
"Persian": 25,
"Polish": 26,
"Portuguese": 27,
"Romanian": 28,
"Russian": 29,
"Serbo-Croatian": 30,
"Spanish": 31,
"Swedish": 32,
"Thai": 33,
"Turkish": 34,
"Ukrainian": 35,
"Vietnamese": 36,
}
def upgrade(db_con: sqlite3.Connection, db_filename: str):
"""Migrate ExternalInfo.id_onpage to TEXT, seed the fixed Languages table, and add new Sites."""
c = db_con.cursor()
c.execute("ALTER TABLE ExternalInfo RENAME TO temp_table")
# change id_onpage from INT to TEXT so we can store information for external
# pages that somehow don't use integers for their ids
# IMPORTANT don't change the order so we can use INSERT without specifying cols
c.execute("""
CREATE TABLE ExternalInfo(
id INTEGER PRIMARY KEY ASC,
book_id INTEGER NOT NULL,
id_onpage TEXT NOT NULL,
imported_from INTEGER NOT NULL,
upload_date DATE NOT NULL,
uploader TEXT,
censor_id INTEGER NOT NULL,
rating REAL,
ratings INTEGER, -- number of users that rated the book
favorites INTEGER,
downloaded INTEGER NOT NULL,
last_update DATE NOT NULL,
outdated INTEGER NOT NULL,
FOREIGN KEY (book_id) REFERENCES Books(id)
ON DELETE CASCADE,
FOREIGN KEY (imported_from) REFERENCES Sites(id)
ON DELETE RESTRICT,
FOREIGN KEY (censor_id) REFERENCES Censorship(id)
ON DELETE RESTRICT
)""")
c.execute("DROP INDEX idx_id_onpage_imported_from")
# recreate index on the rebuilt table
c.execute("CREATE INDEX idx_id_onpage_imported_from ON ExternalInfo (id_onpage, imported_from)")
# re-populate table (same column order, so no explicit column list needed)
c.execute("INSERT INTO ExternalInfo SELECT * FROM temp_table")
c.execute("DROP TABLE temp_table")
#
# add Unknown to Languages as id 1
#
id_lang = [(i, v) for i, v in LANG_IDS.items() if type(i) is int]
# Snapshot each book's language *name* before rewriting the Languages table.
book_id_lang_name = c.execute("""
SELECT Books.id, Languages.name FROM Books
JOIN Languages ON Languages.id = Books.language_id""").fetchall()
c.execute("DELETE FROM Languages")
c.executemany("INSERT INTO Languages(id, name) VALUES (?, ?)", id_lang)
lang_map = {k: v for k, v in LANG_IDS.items() if type(k) is str}
# update language_id based on name one by one, since updating them in bulk
# gets too complicated for a simple migration
for book_id, lang_name in book_id_lang_name:
# check for custom lang not present in the fixed LANG_IDS table
if lang_name not in lang_map:
c.execute("INSERT INTO Languages(name) VALUES (?)", (lang_name,))
lang_map[lang_name] = c.lastrowid
lang_id = lang_map[lang_name]
c.execute("UPDATE Books SET language_id = ? WHERE id = ?", (lang_id, book_id))
assert c.execute(
"SELECT * FROM Languages WHERE id <= ?", (len(id_lang),)).fetchall() == id_lang
#
# add manganelo to Sites
#
site_id_name = [(key, val) for key, val in SUPPORTED_SITES.items()
if type(key) is int]
prev_sites = c.execute("SELECT * FROM Sites").fetchall()
start = len(prev_sites)
# Only insert the sites that aren't already present (assumes prior rows match the list order).
c.executemany("INSERT INTO Sites(id, name) VALUES (?, ?)", site_id_name[start:])
assert c.execute("SELECT * FROM Sites").fetchall() == site_id_name
|
import sqlite3
from typing import Dict, Union
date = '2021-01-29'
# need to turn of foreign key constraints in order to rename ExternalInfo
requires_foreign_keys_off = True
SUPPORTED_SITES: Dict[Union[int, str], Union[int, str]] = {
# site id, site name
1: "tsumino.com",
2: "nhentai.net",
3: "MangaDex",
4: "Manganelo",
5: "Toonily",
# site name, id
"tsumino.com": 1,
"nhentai.net": 2,
"MangaDex": 3,
"Manganelo": 4,
"Toonily": 5,
}
LANG_IDS: Dict[Union[str, int], Union[str, int]] = {
1: "Unknown",
2: "English",
3: "Japanese",
4: "Chinese",
5: "Korean",
6: "Arabic",
7: "Bengali",
8: "Bulgarian",
9: "Burmese",
10: "Catalan",
11: "Czech",
12: "Danish",
13: "Dutch",
14: "Filipino",
15: "Finnish",
16: "French",
17: "German",
18: "Greek",
19: "Hungarian",
20: "Indonesian",
21: "Italian",
22: "Lithuanian",
23: "Malay",
24: "Mongolian",
25: "Persian",
26: "Polish",
27: "Portuguese",
28: "Romanian",
29: "Russian",
30: "Serbo-Croatian",
31: "Spanish",
32: "Swedish",
33: "Thai",
34: "Turkish",
35: "Ukrainian",
36: "Vietnamese",
"Unknown": 1,
"English": 2,
"Japanese": 3,
"Chinese": 4,
"Korean": 5,
"Arabic": 6,
"Bengali": 7,
"Bulgarian": 8,
"Burmese": 9,
"Catalan": 10,
"Czech": 11,
"Danish": 12,
"Dutch": 13,
"Filipino": 14,
"Finnish": 15,
"French": 16,
"German": 17,
"Greek": 18,
"Hungarian": 19,
"Indonesian": 20,
"Italian": 21,
"Lithuanian": 22,
"Malay": 23,
"Mongolian": 24,
"Persian": 25,
"Polish": 26,
"Portuguese": 27,
"Romanian": 28,
"Russian": 29,
"Serbo-Croatian": 30,
"Spanish": 31,
"Swedish": 32,
"Thai": 33,
"Turkish": 34,
"Ukrainian": 35,
"Vietnamese": 36,
}
def upgrade(db_con: sqlite3.Connection, db_filename: str):
c = db_con.cursor()
c.execute("ALTER TABLE ExternalInfo RENAME TO temp_table")
# change id_onpage from INT to TEXT so we can store information for external
# pages that somehow don't use integers for their ids
# IMPORTANT don't change the order so we can use INSERT without specifying cols
c.execute("""
CREATE TABLE ExternalInfo(
id INTEGER PRIMARY KEY ASC,
book_id INTEGER NOT NULL,
id_onpage TEXT NOT NULL,
imported_from INTEGER NOT NULL,
upload_date DATE NOT NULL,
uploader TEXT,
censor_id INTEGER NOT NULL,
rating REAL,
ratings INTEGER, -- number of users that rated the book
favorites INTEGER,
downloaded INTEGER NOT NULL,
last_update DATE NOT NULL,
outdated INTEGER NOT NULL,
FOREIGN KEY (book_id) REFERENCES Books(id)
ON DELETE CASCADE,
FOREIGN KEY (imported_from) REFERENCES Sites(id)
ON DELETE RESTRICT,
FOREIGN KEY (censor_id) REFERENCES Censorship(id)
ON DELETE RESTRICT
)""")
c.execute("DROP INDEX idx_id_onpage_imported_from")
# recreate index
c.execute("CREATE INDEX idx_id_onpage_imported_from ON ExternalInfo (id_onpage, imported_from)")
# re-populate table
c.execute("INSERT INTO ExternalInfo SELECT * FROM temp_table")
c.execute("DROP TABLE temp_table")
#
# add Unkown to Languages as id 1
#
id_lang = [(i, v) for i, v in LANG_IDS.items() if type(i) is int]
book_id_lang_name = c.execute("""
SELECT Books.id, Languages.name FROM Books
JOIN Languages ON Languages.id = Books.language_id""").fetchall()
c.execute("DELETE FROM Languages")
c.executemany("INSERT INTO Languages(id, name) VALUES (?, ?)", id_lang)
lang_map = {k: v for k, v in LANG_IDS.items() if type(k) is str}
# update language_id based on name one by one, since updating them in bulk
# gets too complicated for a simple migration
for book_id, lang_name in book_id_lang_name:
# check for custom lang
if lang_name not in lang_map:
c.execute("INSERT INTO Languages(name) VALUES (?)", (lang_name,))
lang_map[lang_name] = c.lastrowid
lang_id = lang_map[lang_name]
c.execute("UPDATE Books SET language_id = ? WHERE id = ?", (lang_id, book_id))
assert c.execute(
"SELECT * FROM Languages WHERE id <= ?", (len(id_lang),)).fetchall() == id_lang
#
# add manganelo to Sites
#
site_id_name = [(key, val) for key, val in SUPPORTED_SITES.items()
if type(key) is int]
prev_sites = c.execute("SELECT * FROM Sites").fetchall()
start = len(prev_sites)
c.executemany("INSERT INTO Sites(id, name) VALUES (?, ?)", site_id_name[start:])
assert c.execute("SELECT * FROM Sites").fetchall() == site_id_name
|
en
| 0.70641
|
# need to turn of foreign key constraints in order to rename ExternalInfo # site id, site name # site name, id # change id_onpage from INT to TEXT so we can store information for external # pages that somehow don't use integers for their ids # IMPORTANT don't change the order so we can use INSERT without specifying cols CREATE TABLE ExternalInfo( id INTEGER PRIMARY KEY ASC, book_id INTEGER NOT NULL, id_onpage TEXT NOT NULL, imported_from INTEGER NOT NULL, upload_date DATE NOT NULL, uploader TEXT, censor_id INTEGER NOT NULL, rating REAL, ratings INTEGER, -- number of users that rated the book favorites INTEGER, downloaded INTEGER NOT NULL, last_update DATE NOT NULL, outdated INTEGER NOT NULL, FOREIGN KEY (book_id) REFERENCES Books(id) ON DELETE CASCADE, FOREIGN KEY (imported_from) REFERENCES Sites(id) ON DELETE RESTRICT, FOREIGN KEY (censor_id) REFERENCES Censorship(id) ON DELETE RESTRICT ) # recreate index # re-populate table # # add Unkown to Languages as id 1 # SELECT Books.id, Languages.name FROM Books JOIN Languages ON Languages.id = Books.language_id # update language_id based on name one by one, since updating them in bulk # gets too complicated for a simple migration # check for custom lang # # add manganelo to Sites #
| 2.606292
| 3
|
contrib/HumanSeg/val.py
|
windstamp/PaddleSeg
| 56
|
6629658
|
<reponame>windstamp/PaddleSeg<filename>contrib/HumanSeg/val.py
# coding: utf8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from datasets.dataset import Dataset
import transforms
import models
def parse_args():
    """Build and parse the command-line arguments for HumanSeg evaluation."""
    parser = argparse.ArgumentParser(description='HumanSeg training')
    # Each option keeps its original dest, type and default; only the layout differs.
    parser.add_argument(
        '--model_dir', dest='model_dir', type=str,
        default='output/best_model', help='Model path for evaluating')
    parser.add_argument(
        '--data_dir', dest='data_dir', type=str,
        help='The root directory of dataset')
    parser.add_argument(
        '--val_list', dest='val_list', type=str,
        default=None, help='Val list file of dataset')
    parser.add_argument(
        '--batch_size', dest='batch_size', type=int,
        default=128, help='Mini batch size')
    parser.add_argument(
        "--image_shape", dest="image_shape", type=int, nargs=2,
        default=[192, 192], help="The image shape for net inputs.")
    return parser.parse_args()
def evaluate(args):
"""Evaluate the saved HumanSeg model on the validation list from `args`."""
# Resize + normalize only — no augmentation at evaluation time.
eval_transforms = transforms.Compose(
[transforms.Resize(args.image_shape),
transforms.Normalize()])
eval_dataset = Dataset(
data_dir=args.data_dir,
file_list=args.val_list,
transforms=eval_transforms,
num_workers='auto',
buffer_size=100,
parallel_method='thread',
shuffle=False)
# Load the trained model and run evaluation over the dataset.
model = models.load_model(args.model_dir)
model.evaluate(eval_dataset, args.batch_size)
if __name__ == '__main__':
args = parse_args()
evaluate(args)
|
# coding: utf8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from datasets.dataset import Dataset
import transforms
import models
def parse_args():
parser = argparse.ArgumentParser(description='HumanSeg training')
parser.add_argument(
'--model_dir',
dest='model_dir',
help='Model path for evaluating',
type=str,
default='output/best_model')
parser.add_argument(
'--data_dir',
dest='data_dir',
help='The root directory of dataset',
type=str)
parser.add_argument(
'--val_list',
dest='val_list',
help='Val list file of dataset',
type=str,
default=None)
parser.add_argument(
'--batch_size',
dest='batch_size',
help='Mini batch size',
type=int,
default=128)
parser.add_argument(
"--image_shape",
dest="image_shape",
help="The image shape for net inputs.",
nargs=2,
default=[192, 192],
type=int)
return parser.parse_args()
def evaluate(args):
eval_transforms = transforms.Compose(
[transforms.Resize(args.image_shape),
transforms.Normalize()])
eval_dataset = Dataset(
data_dir=args.data_dir,
file_list=args.val_list,
transforms=eval_transforms,
num_workers='auto',
buffer_size=100,
parallel_method='thread',
shuffle=False)
model = models.load_model(args.model_dir)
model.evaluate(eval_dataset, args.batch_size)
if __name__ == '__main__':
args = parse_args()
evaluate(args)
|
en
| 0.845867
|
# coding: utf8 # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
| 2.221617
| 2
|
sandbox/offer/admin.py
|
Bastilla123/shop2
| 0
|
6629659
|
from oscar.apps.offer.admin import * # noqa
|
from oscar.apps.offer.admin import * # noqa
|
none
| 1
| 1.02125
| 1
|
|
dj_tasks/settings.py
|
cfc603/dj-tasks
| 0
|
6629660
|
# -*- coding: utf-8 -*-
from django.conf import settings
DJTASKS_DELETE_INTERVAL = getattr(settings, "DJTASKS_DELETE_INTERVAL", 14)
DJTASKS_LOCK = getattr(settings, "DJTASKS_LOCK", True)
DJTASKS_LOCK_ID = getattr(settings, "DJTASKS_LOCK_ID", "")
DJTASKS_SLEEP_INTERVAL = getattr(settings, "DJTASKS_SLEEP_INTERVAL", 10)
DJTASKS_TASKS = getattr(settings, "DJTASKS_TASKS", [])
DJTASKS_TASKS += ["dj_tasks.tasks.DeleteOldTaskRunTask"]
|
# -*- coding: utf-8 -*-
from django.conf import settings
DJTASKS_DELETE_INTERVAL = getattr(settings, "DJTASKS_DELETE_INTERVAL", 14)
DJTASKS_LOCK = getattr(settings, "DJTASKS_LOCK", True)
DJTASKS_LOCK_ID = getattr(settings, "DJTASKS_LOCK_ID", "")
DJTASKS_SLEEP_INTERVAL = getattr(settings, "DJTASKS_SLEEP_INTERVAL", 10)
DJTASKS_TASKS = getattr(settings, "DJTASKS_TASKS", [])
DJTASKS_TASKS += ["dj_tasks.tasks.DeleteOldTaskRunTask"]
|
en
| 0.769321
|
# -*- coding: utf-8 -*-
| 1.718958
| 2
|
app/http/middleware/VerifyEmailMiddleware.py
|
nakano16180/pingcrm-masonite
| 95
|
6629661
|
<filename>app/http/middleware/VerifyEmailMiddleware.py
"""Verify Email Middleware."""
from masonite.request import Request
class VerifyEmailMiddleware:
"""Middleware To Check If The User Has Verified Their Email."""
def __init__(self, request: Request):
"""Inject Any Dependencies From The Service Container.
Arguments:
request {masonite.request.Request} -- The Masonite request class.
"""
self.request = request
def before(self):
"""Run This Middleware Before The Route Executes."""
user = self.request.user()
if user and user.verified_at is None:
self.request.redirect('/email/verify')
def after(self):
"""Run This Middleware After The Route Executes."""
pass
|
<filename>app/http/middleware/VerifyEmailMiddleware.py
"""Verify Email Middleware."""
from masonite.request import Request
class VerifyEmailMiddleware:
"""Middleware To Check If The User Has Verified Their Email."""
def __init__(self, request: Request):
"""Inject Any Dependencies From The Service Container.
Arguments:
request {masonite.request.Request} -- The Masonite request class.
"""
self.request = request
def before(self):
"""Run This Middleware Before The Route Executes."""
user = self.request.user()
if user and user.verified_at is None:
self.request.redirect('/email/verify')
def after(self):
"""Run This Middleware After The Route Executes."""
pass
|
en
| 0.646141
|
Verify Email Middleware. Middleware To Check If The User Has Verified Their Email. Inject Any Dependencies From The Service Container. Arguments: request {masonite.request.Request} -- The Masonite request class. Run This Middleware Before The Route Executes. Run This Middleware After The Route Executes.
| 3.3619
| 3
|
maintenancemanagement/apps.py
|
Open-CMMS/openCMMS_backend
| 3
|
6629662
|
<gh_stars>1-10
"""This files describes our app."""
from django.apps import AppConfig
class MaintenancemanagementConfig(AppConfig):
"""This is the app class."""
name = 'maintenancemanagement'
|
"""This files describes our app."""
from django.apps import AppConfig
class MaintenancemanagementConfig(AppConfig):
"""This is the app class."""
name = 'maintenancemanagement'
|
en
| 0.874027
|
This files describes our app. This is the app class.
| 1.537813
| 2
|
Exercises/047.py
|
GuilhermeRds1921/Python3-Guanabara
| 0
|
6629663
|
# Develop a program that shows on the screen
# all the even numbers that are in the range between 1 and 50.
for i in range(1,51):
if i % 2 == 0:
print(i, end = ' ')
|
# Develop a program that shows on the screen
# all the even numbers that are in the range between 1 and 50.
for i in range(1,51):
if i % 2 == 0:
print(i, end = ' ')
|
en
| 0.972855
|
# Develop a program that shows on the screen # all the even numbers that are in the range between 1 and 50.
| 4.052197
| 4
|
test.py
|
iAngLi/net
| 0
|
6629664
|
<reponame>iAngLi/net
import argparse
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from operator import itemgetter, attrgetter
from ResNet import ResNet18
from MyTestData import MyDataset
# 定义是否使用GPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# 参数设置,使得我们能够手动输入命令行参数,就是让风格变得和Linux命令行差不多
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Testing')
parser.add_argument('--outf', default='./model/', help='folder to output images and model checkpoints') # 输出结果保存路径
args = parser.parse_args()
# 超参数设置
EPOCH = 135 # 遍历数据集次数 135
pre_epoch = 76 # 定义已经遍历数据集的次数
BATCH_SIZE = 32 # 批处理尺寸(batch_size)
LR = 0.01 # 学习率
# 准备数据集并预处理
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4), # 先四周填充0,在把图像随机裁剪成32*32
transforms.RandomHorizontalFlip(), # 图像一半的概率翻转,一半的概率不翻转
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), # R,G,B每层的归一化用到的均值和方差
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
#testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=False, transform=transform_test)
testset = MyDataset('./data/atest', train = False, transform = transform_test);
testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE, shuffle=False, num_workers=0)
# Cifar-10的标签
classes = ('airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# 模型定义-ResNet
net = ResNet18().to(device)
net_path = os.path.join(args.outf, 'net_final.pth')
net.load_state_dict(torch.load(net_path))#, map_location='cpu'
# 训练
if __name__ == "__main__":
if not os.path.exists(args.outf):
os.makedirs(args.outf)
best_acc = 85 # 2 初始化best test accuracy
print("Start testing, Resnet-18!") # 定义遍历数据集的次数
result = []
num =0;
with open("A.txt", "w") as f:
# 全部训练完打印label
print("Waiting Test!")
with torch.no_grad():
for data in testloader:
net.eval()
images, labels = data
images = images.to(device)
outputs = net(images)
# 取得分最高的那个类 (outputs.data的索引号)
_, predicted = torch.max(outputs.data, 1)
for i in range(len(labels)):
b= labels[i].split('.')[0]#不带后缀的文件名
result.append((int(b),int(predicted[i].item())))
num = num +1;
if((num % 100) == 0):
print("%d \n" % (num))
result = sorted(result, key=itemgetter(0), reverse=False)
for i in range(len(result)):
f.write("%d.png %d\n" % (result[i][0], result[i][1]))
f.flush()
f.close()
print("Test Finished")
|
import argparse
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from operator import itemgetter, attrgetter
from ResNet import ResNet18
from MyTestData import MyDataset
# 定义是否使用GPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# 参数设置,使得我们能够手动输入命令行参数,就是让风格变得和Linux命令行差不多
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Testing')
parser.add_argument('--outf', default='./model/', help='folder to output images and model checkpoints') # 输出结果保存路径
args = parser.parse_args()
# 超参数设置
EPOCH = 135 # 遍历数据集次数 135
pre_epoch = 76 # 定义已经遍历数据集的次数
BATCH_SIZE = 32 # 批处理尺寸(batch_size)
LR = 0.01 # 学习率
# 准备数据集并预处理
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4), # 先四周填充0,在把图像随机裁剪成32*32
transforms.RandomHorizontalFlip(), # 图像一半的概率翻转,一半的概率不翻转
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), # R,G,B每层的归一化用到的均值和方差
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
#testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=False, transform=transform_test)
testset = MyDataset('./data/atest', train = False, transform = transform_test);
testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE, shuffle=False, num_workers=0)
# Cifar-10的标签
classes = ('airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# 模型定义-ResNet
net = ResNet18().to(device)
net_path = os.path.join(args.outf, 'net_final.pth')
net.load_state_dict(torch.load(net_path))#, map_location='cpu'
# 训练
if __name__ == "__main__":
if not os.path.exists(args.outf):
os.makedirs(args.outf)
best_acc = 85 # 2 初始化best test accuracy
print("Start testing, Resnet-18!") # 定义遍历数据集的次数
result = []
num =0;
with open("A.txt", "w") as f:
# 全部训练完打印label
print("Waiting Test!")
with torch.no_grad():
for data in testloader:
net.eval()
images, labels = data
images = images.to(device)
outputs = net(images)
# 取得分最高的那个类 (outputs.data的索引号)
_, predicted = torch.max(outputs.data, 1)
for i in range(len(labels)):
b= labels[i].split('.')[0]#不带后缀的文件名
result.append((int(b),int(predicted[i].item())))
num = num +1;
if((num % 100) == 0):
print("%d \n" % (num))
result = sorted(result, key=itemgetter(0), reverse=False)
for i in range(len(result)):
f.write("%d.png %d\n" % (result[i][0], result[i][1]))
f.flush()
f.close()
print("Test Finished")
|
zh
| 0.795893
|
# 定义是否使用GPU # 参数设置,使得我们能够手动输入命令行参数,就是让风格变得和Linux命令行差不多 # 输出结果保存路径 # 超参数设置 # 遍历数据集次数 135 # 定义已经遍历数据集的次数 # 批处理尺寸(batch_size) # 学习率 # 准备数据集并预处理 # 先四周填充0,在把图像随机裁剪成32*32 # 图像一半的概率翻转,一半的概率不翻转 # R,G,B每层的归一化用到的均值和方差 #testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=False, transform=transform_test) # Cifar-10的标签 # 模型定义-ResNet #, map_location='cpu' # 训练 # 2 初始化best test accuracy # 定义遍历数据集的次数 # 全部训练完打印label # 取得分最高的那个类 (outputs.data的索引号) #不带后缀的文件名
| 2.391907
| 2
|
src/cholesky_adaptive_gp.py
|
QianWanghhu/IES-FF
| 0
|
6629665
|
<gh_stars>0
#!/usr/bin/env python
from multiprocessing import Pool
import numpy as np
import os
import matplotlib.pyplot as plt
from functools import partial
import time
import copy
import pandas as pd
import pickle
from scipy import stats
# from scipy.optimize import root
from scipy.optimize import bisect
from sklearn.gaussian_process.kernels import RBF, \
Matern
from pyapprox.density import tensor_product_pdf
from pyapprox.gaussian_process import CholeskySampler, AdaptiveGaussianProcess
from pyapprox.low_discrepancy_sequences import transformed_halton_sequence
from pyapprox.utilities import compute_f_divergence, \
get_tensor_product_quadrature_rule
from pyapprox.probability_measure_sampling import generate_independent_random_samples_deprecated, rejection_sampling
from pyapprox.visualization import get_meshgrid_function_data
from pyapprox import generate_independent_random_samples
from pyapprox.variables import IndependentMultivariateRandomVariable
from pyapprox.variable_transformations import AffineRandomVariableTransformation
import matplotlib as mpl
from matplotlib import rc
import spotpy as sp
from funcs.read_data import variables_prep, file_settings
from funcs.modeling_funcs import vs_settings, \
modeling_settings, paralell_vs, obtain_initials, change_param_values
mpl.rcParams['font.size'] = 16
mpl.rcParams['lines.linewidth'] = 3
mpl.rcParams['text.usetex'] = False # use latex for all text handling
mpl.rcParams['savefig.bbox'] = 'tight'
mpl.rcParams['savefig.format'] = 'pdf' # gives best resolution plots
mpl.rcParams['axes.labelsize'] = 20
mpl.rcParams['axes.titlesize'] = 20
mpl.rcParams['xtick.labelsize'] = 20
mpl.rcParams['ytick.labelsize'] = 20
mpl.rcParams['legend.fontsize'] = 16
# print mpl.rcParams.keys()
mpl.rcParams['text.latex.preamble'] = \
r'\usepackage{siunitx}\usepackage{amsmath}\usepackage{amssymb}'
# Create the copy of models and veneer list
project_name = 'MW_BASE_RC10.rsproj'
veneer_name = 'vcmd45\\FlowMatters.Source.VeneerCmd.exe'
first_port=15000; num_copies = 1
_, things_to_record, _, _, _ = modeling_settings()
processes, ports = paralell_vs(first_port, num_copies, project_name, veneer_name)
vs_list = vs_settings(ports, things_to_record)
# obtain the initial values of parameters
initial_values = obtain_initials(vs_list[0])
def run_source_lsq(vars, vs_list=vs_list):
"""
Script used to run_source and return the output file.
The function is called by AdaptiveLejaPCE.
"""
from funcs.modeling_funcs import modeling_settings, generate_observation_ensemble
import spotpy as sp
print('Read Parameters')
parameters = pd.read_csv('../data/Parameters-PCE.csv', index_col='Index')
# Define objective functions
# Use annual or monthly loads
def timeseries_sum(df, temp_scale = 'annual'):
"""
Obtain the sum of timeseries of different temporal scale.
temp_scale: str, default is 'Y', monthly using 'M'
"""
assert temp_scale in ['monthly', 'annual'], 'The temporal scale given is not supported.'
if temp_scale == 'monthly':
sum_126001A = df.resample('M').sum()
else:
month_126001A = df.resample('M').sum()
sum_126001A = pd.DataFrame(index = np.arange(df.index[0].year, df.index[-1].year),
columns=df.columns)
for i in range(sum_126001A.shape[0]):
sum_126001A.iloc[i, :] = month_126001A.iloc[i*12: (i+1)*12, :].sum()
return sum_126001A
# End timeseries_sum()
# Define functions for the objective functions
def cal_obj(x_obs, x_mod, obj_type = 'nse'):
obj_map = {'nse': sp.objectivefunctions.nashsutcliffe,
'rmse': sp.objectivefunctions.rmse,
'pbias': sp.objectivefunctions.pbias
}
obj = []
assert x_obs.shape[0] == x_mod.shape[0], "Observation and simultion should be of the same length."
for k in range(x_mod.shape[1]):
obj.append(obj_map[obj_type](x_obs, x_mod[:, k].reshape(x_mod.shape[0], 1)))
if obj[0] == 0: obj[0] = 1e-8
obj = np.array(obj)
if obj_type =='nse':
obj = 1 - obj
obj = obj.reshape(obj.shape[0], 1)
print(obj)
return obj
# import observation if the output.txt requires the use of obs.
date_range = pd.to_datetime(['2009/07/01', '2018/06/30'])
observed_din = pd.read_csv(f'{file_settings()[1]}126001A.csv', index_col='Date')
observed_din.index = pd.to_datetime(observed_din.index)
observed_din = observed_din.loc[date_range[0]:date_range[1], :].filter(items=[observed_din.columns[0]]).apply(lambda x: 1000 * x)
# loop over the vars and try to use parallel
parameter_df = pd.DataFrame(index=np.arange(vars.shape[1]), columns=parameters.Name_short)
for i in range(vars.shape[1]):
parameter_df.iloc[i] = vars[:, i]
# set the time period of the results
retrieve_time = [pd.Timestamp('2009-07-01'), pd.Timestamp('2018-06-30')]
# define the modeling period and the recording variables
_, _, criteria, start_date, end_date = modeling_settings()
din = generate_observation_ensemble(vs_list,
criteria, start_date, end_date, parameter_df, retrieve_time)
# obtain the sum at a given temporal scale
# din_pbias = sp.objectivefunctions.pbias(observed_din[observed_din.columns[0]], din[column_names[0]])
din_126001A = timeseries_sum(din, temp_scale = 'annual')
obs_din = timeseries_sum(observed_din, temp_scale = 'annual')
din_126001A = pd.DataFrame(din_126001A,dtype='float').values
obs_din = pd.DataFrame(obs_din,dtype='float').values
# breakpoint()
obj = cal_obj(obs_din, din_126001A, obj_type = 'nse')
print(f'Finish {obj.shape[0]} run')
return obj
# END run_source_lsq()
def convergence_study(kernel, function, sampler,
num_vars, generate_samples, num_new_samples,
update_kernel_scale_num_samples,
noise_level=0, return_samples=False,
norm=np.linalg.norm, callback=None, gp_kernel=None):
# dirty hack to include two GP kernel types (for IVAR)
if hasattr(kernel, "__len__"):
# in this case, kernel is an array and we assume to have received
# two kernels
sampler_kernel = kernel[1]
kernel = kernel[0]
else:
sampler_kernel = kernel
# Instantiate a Gaussian Process model
if gp_kernel is None:
gp_kernel = kernel
gp = AdaptiveGaussianProcess(
gp_kernel, n_restarts_optimizer=10, alpha=1e-12)
gp.setup(function, sampler)
if hasattr(sampler, "set_gaussian_process"):
sampler.set_gaussian_process(gp)
print('sampler kernel', kernel, 'gp kernel', gp_kernel)
# Mesh the input space for evaluations of the real function,
# the prediction and its MSE
num_samples = np.cumsum(num_new_samples)
num_steps = num_new_samples.shape[0]
errors = np.empty(num_steps, dtype=float)
nsamples = np.empty(num_steps, dtype=int)
sample_step = 0
optimizer_step = 0
while sample_step < num_steps:
if hasattr(gp, 'kernel_'):
# if using const * rbf + noise kernel
# kernel.theta = gp.kernel_.k1.k2.theta
# if using const * rbf
# kernel.theta = gp.kernel_.k2.theta
# if using rbf
kernel.theta = gp.kernel_.theta
# Fit to data using Maximum Likelihood Estimation of the parameters
# if True:
if ((optimizer_step >= update_kernel_scale_num_samples.shape[0]) or
(sampler.ntraining_samples <
update_kernel_scale_num_samples[optimizer_step])):
gp.optimizer = None
else:
gp.optimizer = "fmin_l_bfgs_b"
optimizer_step += 1
flag = gp.refine(np.sum(num_new_samples[:sample_step+1]))
# allow points to be added to gp more often than gp is evaluated for
# validation
if sampler.ntraining_samples >= num_samples[sample_step]:
# validation_values = function(validation_samples).squeeze()
# Compute error
# assert pred_values.shape == validation_values.shape
# error = norm(pred_values-validation_values)/norm(validation_values)
if callback is not None:
callback(gp)
print(gp.kernel_)
if sample_step >=1:
# Compute error
gp_load = pickle.load(open(f'gp_{sample_step - 1}.pkl', "rb"))
validation_sub = sampler.training_samples[:, num_samples[sample_step - 1]:num_samples[sample_step]]
pred_values = gp_load(validation_sub, return_cov=False).squeeze()
values_sub = gp(validation_sub, return_cov=False).squeeze()
error_gp_comp = norm(pred_values-values_sub)/norm(values_sub)
print('-----------error_gp_comp---------', error_gp_comp)
print('N', sampler.ntraining_samples, 'Error', error_gp_comp)
if sample_step >= 1:
errors[sample_step -1] = error_gp_comp
nsamples[sample_step - 1] = num_samples[sample_step -1]
pickle.dump(gp, open(f'gp_{sample_step}.pkl', "wb"))
sample_step += 1
if flag > 0:
errors, nsamples = errors[:sample_step], nsamples[:sample_step]
print('Terminating study. Points are becoming ill conditioned')
break
if return_samples:
return errors, nsamples, sampler.training_samples[:, 0:num_samples[sample_step - 1]]
return errors, nsamples
def unnormalized_posterior(gp, prior_pdf, samples, temper_param=1):
prior_vals = prior_pdf(samples).squeeze()
gp_vals = gp.predict(samples.T).squeeze()
breakpoint()
unnormalized_posterior_vals = prior_vals*np.exp(-gp_vals)**temper_param
return unnormalized_posterior_vals
class BayesianInferenceCholeskySampler(CholeskySampler):
def __init__(self, prior_pdf, num_vars,
num_candidate_samples, variables,
max_num_samples=None, generate_random_samples=None,
temper=True, true_nll=None):
self.prior_pdf = prior_pdf
if not temper:
self.temper_param = 1
else:
self.temper_param = 0
self.true_nll = true_nll
self.gp = None
super().__init__(num_vars, num_candidate_samples, variables,
None, generate_random_samples)
def set_gaussian_process(self, gp):
self.gp = gp
# Qian: understand the purpose of function increment_temper_param()
def increment_temper_param(self, num_training_samples):
# samples = np.random.uniform(0, 1, (self.nvars, 1000))
samples = generate_independent_random_samples(self.variables, 1000)
density_vals_prev = self.weight_function(samples)
def objective(beta):
new_weight_function = partial(
unnormalized_posterior, self.gp, self.prior_pdf,
temper_param=beta)
density_vals = new_weight_function(samples)
II = np.where(density_vals_prev > 1e-15)[0]
JJ = np.where(density_vals_prev < 1e-15)[0]
assert len(np.where(density_vals[JJ] > 1e-15)[0]) == 0
ratio = np.zeros(samples.shape[1])
ratio[II] = density_vals[II]/density_vals_prev[II]
obj = ratio.std()/ratio.mean()
return obj
print('temper parameter', self.temper_param)
x0 = self.temper_param+1e-4
# result = root(lambda b: objective(b)-1, x0)
# x_opt = result.x
x_opt = bisect(lambda b: objective(b)-1, x0, 1)
self.temper_param = x_opt
def __call__(self, num_samples):
if self.gp is None:
raise ValueError("must call self.set_gaussian_process()")
if self.ntraining_samples > 0 and self.temper_param < 1:
self.increment_temper_param(self.training_samples)
assert self.temper_param <= 1
if self.ntraining_samples == 0:
weight_function = self.prior_pdf
else:
if self.true_nll is not None:
def weight_function(x): return self.prior_pdf(x)*np.exp(
-self.true_nll(x)[:, 0])**self.temper_param
else:
weight_function = partial(
unnormalized_posterior, self.gp, self.prior_pdf,
temper_param=self.temper_param)
self.set_weight_function(weight_function)
samples, flag = super().__call__(num_samples)
return samples, flag
def get_prior_samples(num_vars, variables, nsamples):
rosenbrock_samples = generate_independent_random_samples(variables, nsamples)
return rosenbrock_samples
def bayesian_inference_example():
# read parameter distributions
datapath = file_settings()[1]
para_info = pd.read_csv(datapath + 'Parameters-PCE.csv')
# define the variables for PCE
param_file = file_settings()[-1]
# Must set variables if not using uniform prior on [0,1]^D
# variables = None
ind_vars, variables = variables_prep(param_file, product_uniform='uniform', dummy=False)
var_trans = AffineRandomVariableTransformation(variables, enforce_bounds=True)
init_scale = 0.1 # used to define length_scale for the kernel
num_vars = variables.nvars
num_candidate_samples = 20000
num_new_samples = np.asarray([2]+[5]*6+[15]*6+[25]*8)
nvalidation_samples = 10000
from scipy import stats
# breakpoint()
prior_pdf = partial(tensor_product_pdf,
univariate_pdfs=[partial(stats.beta.pdf, a=1, b=1, scale=ind_vars[ii].args[1]) for ii in range(num_vars)])
# Get validation samples from prior
rosenbrock_samples = get_prior_samples(num_vars, variables, nvalidation_samples + num_candidate_samples)
def generate_random_samples(nsamples, idx=0):
assert idx+nsamples <= rosenbrock_samples.shape[1]
return rosenbrock_samples[:, idx:idx+nsamples]
generate_validation_samples = partial(
generate_random_samples, nvalidation_samples,
idx=num_candidate_samples)
def get_filename(method, fixed_scale):
filename = 'bayes-example-%s-d-%d-n-%d.npz' % (
method, num_vars, num_candidate_samples)
if not fixed_scale:
filename = filename[:-4]+'-opt.npz'
return filename
# defining kernel
length_scale = init_scale*np.ones(num_vars, dtype=float)
kernel = RBF(length_scale, (5e-2, 1))
# this is the one Qian should use. The others are for comparision only
adaptive_cholesky_sampler = BayesianInferenceCholeskySampler(
prior_pdf, num_vars, num_candidate_samples, variables,
max_num_samples=num_new_samples.sum(),
generate_random_samples=None)
adaptive_cholesky_sampler.set_kernel(copy.deepcopy(kernel))
samplers = [adaptive_cholesky_sampler]
methods = ['Learning-Weighted-Cholesky-b']
labels = [r'$\mathrm{Adapted\;Weighted\;Cholesky}$']
fixed_scales = [False]
for sampler, method, fixed_scale in zip(samplers, methods, fixed_scales):
filename = get_filename(method, fixed_scale)
print(filename)
if os.path.exists(filename):
continue
if fixed_scale:
update_kernel_scale_num_samples = np.empty(0)
else:
update_kernel_scale_num_samples = np.cumsum(num_new_samples)
cond_nums = []
temper_params = []
def callback(gp):
cond_nums.append(np.linalg.cond(gp.L_.dot(gp.L_.T)))
if hasattr(sampler, 'temper_param'):
temper_params.append(sampler.temper_param)
print(temper_params)
errors, nsamples, samples = convergence_study(
kernel, run_source_lsq, sampler, num_vars,
generate_validation_samples, num_new_samples,
update_kernel_scale_num_samples, callback=callback,
return_samples=True)
np.savez(filename, nsamples=nsamples, errors=errors,
cond_nums=np.asarray(cond_nums), samples=samples,
temper_params=np.asarray(temper_params))
fig, axs = plt.subplots(1, 3, figsize=(3*8, 6), sharey=False)
styles = ['-']
# styles = ['k-','r-.','b--','g:']
for method, label, ls, fixed_scale in zip(
methods, labels, styles, fixed_scales):
filename = get_filename(method, fixed_scale)
data = np.load(filename)
nsamples, errors = data['nsamples'][:-1], data['errors'][:-1]
temper_params, cond_nums = data['temper_params'][1:-1], data['cond_nums'][:-1]
axs[0].loglog(nsamples, errors, ls=ls, label=label)
axs[1].loglog(nsamples, cond_nums, ls=ls, label=label)
axs[2].semilogy(np.arange(1, nsamples.shape[0]),
temper_params, 'k-o')
axs[2].set_xlabel(r'$\mathrm{Iteration}$ $j$')
axs[2].set_ylabel(r'$\beta_j$')
for ii in range(2):
axs[ii].set_xlabel(r'$m$')
axs[ii].set_xlim(10, 1000)
axs[0].set_ylabel(r'$\tilde{\epsilon}_{\omega,2}$', rotation=90)
ylim0 = axs[0].get_ylim()
ylim1 = axs[1].get_ylim()
ylim = [min(ylim0[0], ylim1[0]), max(ylim0[1], ylim1[1])]
axs[0].set_ylim(ylim)
axs[1].set_ylim(ylim)
axs[1].set_ylabel(r'$\kappa$', rotation=90)
figname = 'bayes_example_comparison_%d.pdf' % num_vars
axs[0].legend()
plt.savefig(figname)
if __name__ == '__main__':
try:
import sklearn
except:
msg = 'Install sklearn using pip install sklearn'
raise Exception(msg)
bayesian_inference_example()
|
#!/usr/bin/env python
from multiprocessing import Pool
import numpy as np
import os
import matplotlib.pyplot as plt
from functools import partial
import time
import copy
import pandas as pd
import pickle
from scipy import stats
# from scipy.optimize import root
from scipy.optimize import bisect
from sklearn.gaussian_process.kernels import RBF, \
Matern
from pyapprox.density import tensor_product_pdf
from pyapprox.gaussian_process import CholeskySampler, AdaptiveGaussianProcess
from pyapprox.low_discrepancy_sequences import transformed_halton_sequence
from pyapprox.utilities import compute_f_divergence, \
get_tensor_product_quadrature_rule
from pyapprox.probability_measure_sampling import generate_independent_random_samples_deprecated, rejection_sampling
from pyapprox.visualization import get_meshgrid_function_data
from pyapprox import generate_independent_random_samples
from pyapprox.variables import IndependentMultivariateRandomVariable
from pyapprox.variable_transformations import AffineRandomVariableTransformation
import matplotlib as mpl
from matplotlib import rc
import spotpy as sp
from funcs.read_data import variables_prep, file_settings
from funcs.modeling_funcs import vs_settings, \
modeling_settings, paralell_vs, obtain_initials, change_param_values
mpl.rcParams['font.size'] = 16
mpl.rcParams['lines.linewidth'] = 3
mpl.rcParams['text.usetex'] = False # use latex for all text handling
mpl.rcParams['savefig.bbox'] = 'tight'
mpl.rcParams['savefig.format'] = 'pdf' # gives best resolution plots
mpl.rcParams['axes.labelsize'] = 20
mpl.rcParams['axes.titlesize'] = 20
mpl.rcParams['xtick.labelsize'] = 20
mpl.rcParams['ytick.labelsize'] = 20
mpl.rcParams['legend.fontsize'] = 16
# print mpl.rcParams.keys()
mpl.rcParams['text.latex.preamble'] = \
r'\usepackage{siunitx}\usepackage{amsmath}\usepackage{amssymb}'
# Create the copy of models and veneer list
project_name = 'MW_BASE_RC10.rsproj'
veneer_name = 'vcmd45\\FlowMatters.Source.VeneerCmd.exe'
first_port=15000; num_copies = 1
_, things_to_record, _, _, _ = modeling_settings()
processes, ports = paralell_vs(first_port, num_copies, project_name, veneer_name)
vs_list = vs_settings(ports, things_to_record)
# obtain the initial values of parameters
initial_values = obtain_initials(vs_list[0])
def run_source_lsq(vars, vs_list=vs_list):
"""
Script used to run_source and return the output file.
The function is called by AdaptiveLejaPCE.
"""
from funcs.modeling_funcs import modeling_settings, generate_observation_ensemble
import spotpy as sp
print('Read Parameters')
parameters = pd.read_csv('../data/Parameters-PCE.csv', index_col='Index')
# Define objective functions
# Use annual or monthly loads
def timeseries_sum(df, temp_scale = 'annual'):
"""
Obtain the sum of timeseries of different temporal scale.
temp_scale: str, default is 'Y', monthly using 'M'
"""
assert temp_scale in ['monthly', 'annual'], 'The temporal scale given is not supported.'
if temp_scale == 'monthly':
sum_126001A = df.resample('M').sum()
else:
month_126001A = df.resample('M').sum()
sum_126001A = pd.DataFrame(index = np.arange(df.index[0].year, df.index[-1].year),
columns=df.columns)
for i in range(sum_126001A.shape[0]):
sum_126001A.iloc[i, :] = month_126001A.iloc[i*12: (i+1)*12, :].sum()
return sum_126001A
# End timeseries_sum()
# Define functions for the objective functions
def cal_obj(x_obs, x_mod, obj_type = 'nse'):
obj_map = {'nse': sp.objectivefunctions.nashsutcliffe,
'rmse': sp.objectivefunctions.rmse,
'pbias': sp.objectivefunctions.pbias
}
obj = []
assert x_obs.shape[0] == x_mod.shape[0], "Observation and simultion should be of the same length."
for k in range(x_mod.shape[1]):
obj.append(obj_map[obj_type](x_obs, x_mod[:, k].reshape(x_mod.shape[0], 1)))
if obj[0] == 0: obj[0] = 1e-8
obj = np.array(obj)
if obj_type =='nse':
obj = 1 - obj
obj = obj.reshape(obj.shape[0], 1)
print(obj)
return obj
# import observation if the output.txt requires the use of obs.
date_range = pd.to_datetime(['2009/07/01', '2018/06/30'])
observed_din = pd.read_csv(f'{file_settings()[1]}126001A.csv', index_col='Date')
observed_din.index = pd.to_datetime(observed_din.index)
observed_din = observed_din.loc[date_range[0]:date_range[1], :].filter(items=[observed_din.columns[0]]).apply(lambda x: 1000 * x)
# loop over the vars and try to use parallel
parameter_df = pd.DataFrame(index=np.arange(vars.shape[1]), columns=parameters.Name_short)
for i in range(vars.shape[1]):
parameter_df.iloc[i] = vars[:, i]
# set the time period of the results
retrieve_time = [pd.Timestamp('2009-07-01'), pd.Timestamp('2018-06-30')]
# define the modeling period and the recording variables
_, _, criteria, start_date, end_date = modeling_settings()
din = generate_observation_ensemble(vs_list,
criteria, start_date, end_date, parameter_df, retrieve_time)
# obtain the sum at a given temporal scale
# din_pbias = sp.objectivefunctions.pbias(observed_din[observed_din.columns[0]], din[column_names[0]])
din_126001A = timeseries_sum(din, temp_scale = 'annual')
obs_din = timeseries_sum(observed_din, temp_scale = 'annual')
din_126001A = pd.DataFrame(din_126001A,dtype='float').values
obs_din = pd.DataFrame(obs_din,dtype='float').values
# breakpoint()
obj = cal_obj(obs_din, din_126001A, obj_type = 'nse')
print(f'Finish {obj.shape[0]} run')
return obj
# END run_source_lsq()
def convergence_study(kernel, function, sampler,
num_vars, generate_samples, num_new_samples,
update_kernel_scale_num_samples,
noise_level=0, return_samples=False,
norm=np.linalg.norm, callback=None, gp_kernel=None):
# dirty hack to include two GP kernel types (for IVAR)
if hasattr(kernel, "__len__"):
# in this case, kernel is an array and we assume to have received
# two kernels
sampler_kernel = kernel[1]
kernel = kernel[0]
else:
sampler_kernel = kernel
# Instantiate a Gaussian Process model
if gp_kernel is None:
gp_kernel = kernel
gp = AdaptiveGaussianProcess(
gp_kernel, n_restarts_optimizer=10, alpha=1e-12)
gp.setup(function, sampler)
if hasattr(sampler, "set_gaussian_process"):
sampler.set_gaussian_process(gp)
print('sampler kernel', kernel, 'gp kernel', gp_kernel)
# Mesh the input space for evaluations of the real function,
# the prediction and its MSE
num_samples = np.cumsum(num_new_samples)
num_steps = num_new_samples.shape[0]
errors = np.empty(num_steps, dtype=float)
nsamples = np.empty(num_steps, dtype=int)
sample_step = 0
optimizer_step = 0
while sample_step < num_steps:
if hasattr(gp, 'kernel_'):
# if using const * rbf + noise kernel
# kernel.theta = gp.kernel_.k1.k2.theta
# if using const * rbf
# kernel.theta = gp.kernel_.k2.theta
# if using rbf
kernel.theta = gp.kernel_.theta
# Fit to data using Maximum Likelihood Estimation of the parameters
# if True:
if ((optimizer_step >= update_kernel_scale_num_samples.shape[0]) or
(sampler.ntraining_samples <
update_kernel_scale_num_samples[optimizer_step])):
gp.optimizer = None
else:
gp.optimizer = "fmin_l_bfgs_b"
optimizer_step += 1
flag = gp.refine(np.sum(num_new_samples[:sample_step+1]))
# allow points to be added to gp more often than gp is evaluated for
# validation
if sampler.ntraining_samples >= num_samples[sample_step]:
# validation_values = function(validation_samples).squeeze()
# Compute error
# assert pred_values.shape == validation_values.shape
# error = norm(pred_values-validation_values)/norm(validation_values)
if callback is not None:
callback(gp)
print(gp.kernel_)
if sample_step >=1:
# Compute error
gp_load = pickle.load(open(f'gp_{sample_step - 1}.pkl', "rb"))
validation_sub = sampler.training_samples[:, num_samples[sample_step - 1]:num_samples[sample_step]]
pred_values = gp_load(validation_sub, return_cov=False).squeeze()
values_sub = gp(validation_sub, return_cov=False).squeeze()
error_gp_comp = norm(pred_values-values_sub)/norm(values_sub)
print('-----------error_gp_comp---------', error_gp_comp)
print('N', sampler.ntraining_samples, 'Error', error_gp_comp)
if sample_step >= 1:
errors[sample_step -1] = error_gp_comp
nsamples[sample_step - 1] = num_samples[sample_step -1]
pickle.dump(gp, open(f'gp_{sample_step}.pkl', "wb"))
sample_step += 1
if flag > 0:
errors, nsamples = errors[:sample_step], nsamples[:sample_step]
print('Terminating study. Points are becoming ill conditioned')
break
if return_samples:
return errors, nsamples, sampler.training_samples[:, 0:num_samples[sample_step - 1]]
return errors, nsamples
def unnormalized_posterior(gp, prior_pdf, samples, temper_param=1):
    """Evaluate the unnormalized (optionally tempered) posterior density.

    :param gp: surrogate of the negative log likelihood; must expose
        ``predict`` taking samples of shape (nsamples, nvars).
    :param prior_pdf: callable returning prior density values for ``samples``.
    :param samples: array of shape (nvars, nsamples).
    :param temper_param: tempering exponent applied to the likelihood;
        1 recovers the full posterior, 0 recovers the prior.
    :return: 1D array of unnormalized posterior values, one per sample.
    """
    prior_vals = prior_pdf(samples).squeeze()
    # gp emulates the negative log likelihood, so exp(-gp_vals) is the
    # (unnormalized) likelihood.
    gp_vals = gp.predict(samples.T).squeeze()
    # Bug fix: removed leftover breakpoint() that dropped every call into
    # the debugger.
    unnormalized_posterior_vals = prior_vals*np.exp(-gp_vals)**temper_param
    return unnormalized_posterior_vals
class BayesianInferenceCholeskySampler(CholeskySampler):
    """Cholesky sampler whose weights follow a (tempered) GP posterior.

    The sampling weight function is the unnormalized posterior induced by
    the current Gaussian-process surrogate of the negative log likelihood.
    With ``temper=True`` the likelihood exponent (``temper_param``) is
    raised gradually from 0 towards 1 between calls.
    """

    def __init__(self, prior_pdf, num_vars,
                 num_candidate_samples, variables,
                 max_num_samples=None, generate_random_samples=None,
                 temper=True, true_nll=None):
        self.prior_pdf = prior_pdf
        # Start from the prior (exponent 0) when tempering; otherwise use
        # the full posterior from the first iteration.
        self.temper_param = 0 if temper else 1
        self.true_nll = true_nll
        self.gp = None
        super().__init__(num_vars, num_candidate_samples, variables,
                         None, generate_random_samples)

    def set_gaussian_process(self, gp):
        """Attach the GP surrogate that defines the posterior weights."""
        self.gp = gp

    def increment_temper_param(self, num_training_samples):
        """Advance the tempering exponent via a root find.

        The new exponent is chosen so that the coefficient of variation of
        the density ratio (new tempered posterior over current weight
        function) equals 1, evaluated on 1000 probe samples.
        """
        probe = generate_independent_random_samples(self.variables, 1000)
        prev_density = self.weight_function(probe)

        def objective(beta):
            tempered = partial(
                unnormalized_posterior, self.gp, self.prior_pdf,
                temper_param=beta)
            density = tempered(probe)
            nonzero = np.where(prev_density > 1e-15)[0]
            zero = np.where(prev_density < 1e-15)[0]
            # Wherever the current density vanishes the tempered one must too.
            assert len(np.where(density[zero] > 1e-15)[0]) == 0
            ratio = np.zeros(probe.shape[1])
            ratio[nonzero] = density[nonzero]/prev_density[nonzero]
            return ratio.std()/ratio.mean()

        print('temper parameter', self.temper_param)
        lower = self.temper_param+1e-4
        self.temper_param = bisect(lambda b: objective(b)-1, lower, 1)

    def __call__(self, num_samples):
        if self.gp is None:
            raise ValueError("must call self.set_gaussian_process()")
        if self.ntraining_samples > 0 and self.temper_param < 1:
            self.increment_temper_param(self.training_samples)
        assert self.temper_param <= 1
        if self.ntraining_samples == 0:
            # No surrogate evidence yet: sample straight from the prior.
            weight_function = self.prior_pdf
        elif self.true_nll is not None:
            def weight_function(x):
                return self.prior_pdf(x)*np.exp(
                    -self.true_nll(x)[:, 0])**self.temper_param
        else:
            weight_function = partial(
                unnormalized_posterior, self.gp, self.prior_pdf,
                temper_param=self.temper_param)
        self.set_weight_function(weight_function)
        return super().__call__(num_samples)
def get_prior_samples(num_vars, variables, nsamples):
    """Draw ``nsamples`` independent prior samples for ``variables``.

    ``num_vars`` is accepted for interface compatibility; the dimension is
    implied by ``variables`` and the argument is not used directly.
    """
    return generate_independent_random_samples(variables, nsamples)
def bayesian_inference_example():
    """Run the adaptive weighted-Cholesky GP calibration study and plot it.

    Builds a tempered Bayesian-inference Cholesky sampler, runs
    ``convergence_study`` with ``run_source_lsq`` as the target, caches the
    results in an ``.npz`` file (skipped if it already exists) and plots the
    error, Gram-matrix condition number and tempering schedule.  All inputs
    and outputs are side effects (data files and a PDF figure); returns None.
    """
    # read parameter distributions
    datapath = file_settings()[1]
    para_info = pd.read_csv(datapath + 'Parameters-PCE.csv')
    # define the variables for PCE
    param_file = file_settings()[-1]
    # Must set variables if not using uniform prior on [0,1]^D
    # variables = None
    ind_vars, variables = variables_prep(param_file, product_uniform='uniform', dummy=False)
    var_trans = AffineRandomVariableTransformation(variables, enforce_bounds=True)
    init_scale = 0.1 # used to define length_scale for the kernel
    num_vars = variables.nvars
    num_candidate_samples = 20000
    # Number of new training points added per refinement step.
    num_new_samples = np.asarray([2]+[5]*6+[15]*6+[25]*8)
    nvalidation_samples = 10000
    from scipy import stats
    # breakpoint()
    # Prior: independent Beta(1, 1) (i.e. uniform) on each variable,
    # scaled to that variable's range via ind_vars[ii].args[1].
    prior_pdf = partial(tensor_product_pdf,
                        univariate_pdfs=[partial(stats.beta.pdf, a=1, b=1, scale=ind_vars[ii].args[1]) for ii in range(num_vars)])
    # Get validation samples from prior
    rosenbrock_samples = get_prior_samples(num_vars, variables, nvalidation_samples + num_candidate_samples)
    def generate_random_samples(nsamples, idx=0):
        # Slice pre-drawn prior samples; the idx offset keeps candidate and
        # validation sets disjoint.
        assert idx+nsamples <= rosenbrock_samples.shape[1]
        return rosenbrock_samples[:, idx:idx+nsamples]
    generate_validation_samples = partial(
        generate_random_samples, nvalidation_samples,
        idx=num_candidate_samples)
    def get_filename(method, fixed_scale):
        # Cache-file name; the '-opt' suffix marks runs where the kernel
        # scale is optimized rather than fixed.
        filename = 'bayes-example-%s-d-%d-n-%d.npz' % (
            method, num_vars, num_candidate_samples)
        if not fixed_scale:
            filename = filename[:-4]+'-opt.npz'
        return filename
    # defining kernel
    length_scale = init_scale*np.ones(num_vars, dtype=float)
    kernel = RBF(length_scale, (5e-2, 1))
    # this is the one Qian should use. The others are for comparision only
    adaptive_cholesky_sampler = BayesianInferenceCholeskySampler(
        prior_pdf, num_vars, num_candidate_samples, variables,
        max_num_samples=num_new_samples.sum(),
        generate_random_samples=None)
    adaptive_cholesky_sampler.set_kernel(copy.deepcopy(kernel))
    samplers = [adaptive_cholesky_sampler]
    methods = ['Learning-Weighted-Cholesky-b']
    labels = [r'$\mathrm{Adapted\;Weighted\;Cholesky}$']
    fixed_scales = [False]
    for sampler, method, fixed_scale in zip(samplers, methods, fixed_scales):
        filename = get_filename(method, fixed_scale)
        print(filename)
        # Skip the (expensive) study when cached results already exist.
        if os.path.exists(filename):
            continue
        if fixed_scale:
            update_kernel_scale_num_samples = np.empty(0)
        else:
            update_kernel_scale_num_samples = np.cumsum(num_new_samples)
        cond_nums = []
        temper_params = []
        def callback(gp):
            # Record conditioning of the GP Gram matrix (L L^T) and the
            # tempering schedule at each validation point.
            cond_nums.append(np.linalg.cond(gp.L_.dot(gp.L_.T)))
            if hasattr(sampler, 'temper_param'):
                temper_params.append(sampler.temper_param)
                print(temper_params)
        errors, nsamples, samples = convergence_study(
            kernel, run_source_lsq, sampler, num_vars,
            generate_validation_samples, num_new_samples,
            update_kernel_scale_num_samples, callback=callback,
            return_samples=True)
        np.savez(filename, nsamples=nsamples, errors=errors,
                 cond_nums=np.asarray(cond_nums), samples=samples,
                 temper_params=np.asarray(temper_params))
    fig, axs = plt.subplots(1, 3, figsize=(3*8, 6), sharey=False)
    styles = ['-']
    # styles = ['k-','r-.','b--','g:']
    for method, label, ls, fixed_scale in zip(
            methods, labels, styles, fixed_scales):
        filename = get_filename(method, fixed_scale)
        data = np.load(filename)
        # NOTE(review): trailing/leading entries are trimmed here --
        # presumably slots not filled by convergence_study; confirm against
        # its truncation logic.
        nsamples, errors = data['nsamples'][:-1], data['errors'][:-1]
        temper_params, cond_nums = data['temper_params'][1:-1], data['cond_nums'][:-1]
        axs[0].loglog(nsamples, errors, ls=ls, label=label)
        axs[1].loglog(nsamples, cond_nums, ls=ls, label=label)
        axs[2].semilogy(np.arange(1, nsamples.shape[0]),
                        temper_params, 'k-o')
    axs[2].set_xlabel(r'$\mathrm{Iteration}$ $j$')
    axs[2].set_ylabel(r'$\beta_j$')
    for ii in range(2):
        axs[ii].set_xlabel(r'$m$')
        axs[ii].set_xlim(10, 1000)
    axs[0].set_ylabel(r'$\tilde{\epsilon}_{\omega,2}$', rotation=90)
    # Give the error and condition-number panels a shared y-range.
    ylim0 = axs[0].get_ylim()
    ylim1 = axs[1].get_ylim()
    ylim = [min(ylim0[0], ylim1[0]), max(ylim0[1], ylim1[1])]
    axs[0].set_ylim(ylim)
    axs[1].set_ylim(ylim)
    axs[1].set_ylabel(r'$\kappa$', rotation=90)
    figname = 'bayes_example_comparison_%d.pdf' % num_vars
    axs[0].legend()
    plt.savefig(figname)
if __name__ == '__main__':
    try:
        # Only checking availability; the study imports sklearn internally.
        import sklearn  # noqa: F401
    except ImportError as err:
        # Narrowed from a bare `except:` that swallowed every error (even
        # KeyboardInterrupt).  The PyPI distribution is named scikit-learn;
        # `pip install sklearn` is a deprecated dummy package.
        raise ImportError(
            'Install scikit-learn using pip install scikit-learn') from err
    bayesian_inference_example()
|
en
| 0.629833
|
#!/usr/bin/env python # from scipy.optimize import root # use latex for all text handling # gives best resolution plots # print mpl.rcParams.keys() # Create the copy of models and veneer list # obtain the initial values of parameters Script used to run_source and return the output file. The function is called by AdaptiveLejaPCE. # Define objective functions # Use annual or monthly loads Obtain the sum of timeseries of different temporal scale. temp_scale: str, default is 'Y', monthly using 'M' # End timeseries_sum() # Define functions for the objective functions # import observation if the output.txt requires the use of obs. # loop over the vars and try to use parallel # set the time period of the results # define the modeling period and the recording variables # obtain the sum at a given temporal scale # din_pbias = sp.objectivefunctions.pbias(observed_din[observed_din.columns[0]], din[column_names[0]]) # breakpoint() # END run_source_lsq() # dirty hack to include two GP kernel types (for IVAR) # in this case, kernel is an array and we assume to have received # two kernels # Instantiate a Gaussian Process model # Mesh the input space for evaluations of the real function, # the prediction and its MSE # if using const * rbf + noise kernel # kernel.theta = gp.kernel_.k1.k2.theta # if using const * rbf # kernel.theta = gp.kernel_.k2.theta # if using rbf # Fit to data using Maximum Likelihood Estimation of the parameters # if True: # allow points to be added to gp more often than gp is evaluated for # validation # validation_values = function(validation_samples).squeeze() # Compute error # assert pred_values.shape == validation_values.shape # error = norm(pred_values-validation_values)/norm(validation_values) # Compute error # Qian: understand the purpose of function increment_temper_param() # samples = np.random.uniform(0, 1, (self.nvars, 1000)) # result = root(lambda b: objective(b)-1, x0) # x_opt = result.x # read parameter distributions # define the variables for 
PCE # Must set variables if not using uniform prior on [0,1]^D # variables = None # used to define length_scale for the kernel # breakpoint() # Get validation samples from prior # defining kernel # this is the one Qian should use. The others are for comparision only # styles = ['k-','r-.','b--','g:']
| 1.704809
| 2
|
airflow/contrib/operators/gcs_copy_operator.py
|
KarthikKothareddy/AirFlow
| 0
|
6629666
|
<reponame>KarthikKothareddy/AirFlow<filename>airflow/contrib/operators/gcs_copy_operator.py
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class GoogleCloudStorageCopyOperator(BaseOperator):
    """
    Copies objects (optionally from a directory) filtered by 'delimiter' (file extension for e.g .json) from a bucket
    to another bucket in a different directory, if required.

    :param source_bucket: The source Google cloud storage bucket where the object is.
    :type source_bucket: string
    :param source_object: The source name of the object to copy in the Google cloud
        storage bucket.
    :type source_object: string
    :param source_files_delimiter: The delimiter by which you want to filter the files to copy.
        For e.g to copy the CSV files from in a directory in GCS you would use source_files_delimiter='.csv'.
    :type source_files_delimiter: string
    :param destination_bucket: The destination Google cloud storage bucket where the object should be.
    :type destination_bucket: string
    :param destination_directory: The destination name of the directory in the destination Google cloud
        storage bucket.
    :type destination_directory: string
    :param google_cloud_storage_conn_id: The connection ID to use when
        connecting to Google cloud storage.
    :type google_cloud_storage_conn_id: string
    :param delegate_to: The account to impersonate, if any.
        For this to work, the service account making the request must have domain-wide delegation enabled.
    :type delegate_to: string

    **Example**:
        The following Operator would move all the CSV files from `sales/sales-2017` folder in
        `data` bucket to `sales` folder in `archive` bucket. ::

            move_file = GoogleCloudStorageCopyOperator(
                task_id='move_file',
                source_bucket='data',
                source_object='sales/sales-2017/',
                source_files_delimiter='.csv',
                destination_bucket='archive',
                destination_directory='sales',
                google_cloud_storage_conn_id='airflow-service-account'
            )
    """
    # These attributes are rendered with Jinja templating before execute().
    template_fields = ('source_bucket', 'source_object', 'source_files_delimiter',
                       'destination_bucket', 'destination_directory')
    ui_color = '#f0eee4'

    @apply_defaults
    def __init__(self,
                 source_bucket,
                 source_object,
                 source_files_delimiter=None,
                 destination_bucket=None,
                 destination_directory='',
                 google_cloud_storage_conn_id='google_cloud_storage_default',
                 delegate_to=None,
                 *args,
                 **kwargs):
        super(GoogleCloudStorageCopyOperator, self).__init__(*args, **kwargs)
        self.source_bucket = source_bucket
        self.source_object = source_object
        self.source_files_delimiter = source_files_delimiter
        # Populated in execute() with the object names returned by hook.list().
        self.files_to_copy = list()
        self.destination_bucket = destination_bucket
        self.destination_directory = destination_directory
        self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
        self.delegate_to = delegate_to

    def execute(self, context):
        """List every matching object in the source bucket and copy each one."""
        self.log.info('Executing copy - Source_Bucket: %s, Source_directory: %s, '
                      'Destination_bucket: %s, Destination_directory: %s',
                      self.source_bucket, self.source_object,
                      self.destination_bucket or self.source_bucket,
                      self.destination_directory or self.source_object)
        hook = GoogleCloudStorageHook(google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
                                      delegate_to=self.delegate_to)
        self.log.info('Getting list of the files to copy. Source Bucket: %s; Source Object: %s',
                      self.source_bucket, self.source_object)
        # Create a list of objects to copy from Source bucket. The function uses prefix keyword to pass the name of
        # the object to copy.
        self.files_to_copy = hook.list(bucket=self.source_bucket, prefix=self.source_object,
                                       delimiter=self.source_files_delimiter)
        # Log the names of all objects to be copied
        self.log.info('Files to copy: %s', self.files_to_copy)
        if self.files_to_copy is not None:
            for file_to_copy in self.files_to_copy:
                # NOTE(review): the destination path is plain string
                # concatenation -- no '/' is inserted between the directory
                # and the object name; confirm destination_directory is
                # passed with a trailing separator.
                self.log.info('Source_Bucket: %s, Source_Object: %s, '
                              'Destination_bucket: %s, Destination_Directory: %s',
                              self.source_bucket, file_to_copy,
                              self.destination_bucket or self.source_bucket,
                              self.destination_directory + file_to_copy)
                hook.copy(self.source_bucket, file_to_copy,
                          self.destination_bucket, self.destination_directory + file_to_copy)
        else:
            self.log.info('No Files to copy.')
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class GoogleCloudStorageCopyOperator(BaseOperator):
    """
    Copies objects (optionally from a directory) filtered by 'delimiter' (file extension for e.g .json) from a bucket
    to another bucket in a different directory, if required.

    :param source_bucket: The source Google cloud storage bucket where the object is.
    :type source_bucket: string
    :param source_object: The source name of the object to copy in the Google cloud
        storage bucket.
    :type source_object: string
    :param source_files_delimiter: The delimiter by which you want to filter the files to copy.
        For e.g to copy the CSV files from in a directory in GCS you would use source_files_delimiter='.csv'.
    :type source_files_delimiter: string
    :param destination_bucket: The destination Google cloud storage bucket where the object should be.
    :type destination_bucket: string
    :param destination_directory: The destination name of the directory in the destination Google cloud
        storage bucket.
    :type destination_directory: string
    :param google_cloud_storage_conn_id: The connection ID to use when
        connecting to Google cloud storage.
    :type google_cloud_storage_conn_id: string
    :param delegate_to: The account to impersonate, if any.
        For this to work, the service account making the request must have domain-wide delegation enabled.
    :type delegate_to: string

    **Example**:
        The following Operator would move all the CSV files from `sales/sales-2017` folder in
        `data` bucket to `sales` folder in `archive` bucket. ::

            move_file = GoogleCloudStorageCopyOperator(
                task_id='move_file',
                source_bucket='data',
                source_object='sales/sales-2017/',
                source_files_delimiter='.csv',
                destination_bucket='archive',
                destination_directory='sales',
                google_cloud_storage_conn_id='airflow-service-account'
            )
    """
    # These attributes are rendered with Jinja templating before execute().
    template_fields = ('source_bucket', 'source_object', 'source_files_delimiter',
                       'destination_bucket', 'destination_directory')
    ui_color = '#f0eee4'

    @apply_defaults
    def __init__(self,
                 source_bucket,
                 source_object,
                 source_files_delimiter=None,
                 destination_bucket=None,
                 destination_directory='',
                 google_cloud_storage_conn_id='google_cloud_storage_default',
                 delegate_to=None,
                 *args,
                 **kwargs):
        super(GoogleCloudStorageCopyOperator, self).__init__(*args, **kwargs)
        self.source_bucket = source_bucket
        self.source_object = source_object
        self.source_files_delimiter = source_files_delimiter
        # Populated in execute() with the object names returned by hook.list().
        self.files_to_copy = list()
        self.destination_bucket = destination_bucket
        self.destination_directory = destination_directory
        self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
        self.delegate_to = delegate_to

    def execute(self, context):
        """List every matching object in the source bucket and copy each one."""
        self.log.info('Executing copy - Source_Bucket: %s, Source_directory: %s, '
                      'Destination_bucket: %s, Destination_directory: %s',
                      self.source_bucket, self.source_object,
                      self.destination_bucket or self.source_bucket,
                      self.destination_directory or self.source_object)
        hook = GoogleCloudStorageHook(google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
                                      delegate_to=self.delegate_to)
        self.log.info('Getting list of the files to copy. Source Bucket: %s; Source Object: %s',
                      self.source_bucket, self.source_object)
        # Create a list of objects to copy from Source bucket. The function uses prefix keyword to pass the name of
        # the object to copy.
        self.files_to_copy = hook.list(bucket=self.source_bucket, prefix=self.source_object,
                                       delimiter=self.source_files_delimiter)
        # Log the names of all objects to be copied
        self.log.info('Files to copy: %s', self.files_to_copy)
        if self.files_to_copy is not None:
            for file_to_copy in self.files_to_copy:
                # NOTE(review): the destination path is plain string
                # concatenation -- no '/' is inserted between the directory
                # and the object name; confirm destination_directory is
                # passed with a trailing separator.
                self.log.info('Source_Bucket: %s, Source_Object: %s, '
                              'Destination_bucket: %s, Destination_Directory: %s',
                              self.source_bucket, file_to_copy,
                              self.destination_bucket or self.source_bucket,
                              self.destination_directory + file_to_copy)
                hook.copy(self.source_bucket, file_to_copy,
                          self.destination_bucket, self.destination_directory + file_to_copy)
        else:
            self.log.info('No Files to copy.')
|
en
| 0.743521
|
# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Copies objects (optionally from a directory) filtered by 'delimiter' (file extension for e.g .json) from a bucket to another bucket in a different directory, if required. :param source_bucket: The source Google cloud storage bucket where the object is. :type source_bucket: string :param source_object: The source name of the object to copy in the Google cloud storage bucket. :type source_object: string :param source_files_delimiter: The delimiter by which you want to filter the files to copy. For e.g to copy the CSV files from in a directory in GCS you would use source_files_delimiter='.csv'. :type source_files_delimiter: string :param destination_bucket: The destination Google cloud storage bucket where the object should be. :type destination_bucket: string :param destination_directory: The destination name of the directory in the destination Google cloud storage bucket. :type destination_directory: string :param google_cloud_storage_conn_id: The connection ID to use when connecting to Google cloud storage. :type google_cloud_storage_conn_id: string :param delegate_to: The account to impersonate, if any. For this to work, the service account making the request must have domain-wide delegation enabled. :type delegate_to: string **Example**: The following Operator would move all the CSV files from `sales/sales-2017` folder in `data` bucket to `sales` folder in `archive` bucket. 
:: move_file = GoogleCloudStorageCopyOperator( task_id='move_file', source_bucket='data', source_object='sales/sales-2017/', source_files_delimiter='.csv' destination_bucket='archive', destination_directory='sales', google_cloud_storage_conn_id='airflow-service-account' ) # Create a list of objects to copy from Source bucket. The function uses prefix keyword to pass the name of # the object to copy. # Log the names of all objects to be copied
| 2.248943
| 2
|
fargo/__init__.py
|
te-je/fargo
| 0
|
6629667
|
<gh_stars>0
import re
import sys
from codecs import open
from os import path
import chardet
import click
from dulwich.repo import Repo
__all__ = ['__version__', 'main', 'find_and_replace']
# Read the package version from the VERSION.txt file.
this_dir = path.dirname(__file__)
# Context manager ensures the handle is closed even if the read fails
# (the original leaked the handle on a read error).
with open(path.join(this_dir, 'VERSION.txt'), encoding='ascii') as version_file:
    __version__ = version_file.read().strip()
def show_version_and_exit():
    """Print the package version to stdout and terminate with exit code 0."""
    click.echo(__version__)
    raise SystemExit(0)
def _check_and_show_version(_, __, is_set):
    """Click option callback: print the version and exit when the flag is set."""
    if not is_set:
        return
    show_version_and_exit()
@click.command()
@click.option('--version', '-V', is_flag=True,
              help="show the version number and exit.",
              callback=_check_and_show_version)
@click.option('--use-regex', '-P', is_flag=True, default=False,
              help='Enable regex pattern matching (disabled by default).')
@click.option('--interactive', '-i', is_flag=True,
              help='run in interactive mode')
@click.argument('search', nargs=1)
@click.argument('replacement', required=False)
@click.argument('repo', nargs=1, required=False, default=u'.',
                type=click.Path(file_okay=False))
def main(**kwargs):
    """Command-line entry point: forward parsed options to find_and_replace.

    ``--version`` is handled by its callback during option processing, so the
    leftover keyword is popped before delegating.
    """
    kwargs.pop('version')
    find_and_replace(**kwargs)
def find_and_replace(search, replacement, repo='.', chardet_threshold=0.85,
                     fallback_encoding=None, interactive=False,
                     use_regex=False):
    """Find and replace items inside tracked files
    :param search: The text to search for. If ``use_regex`` is truthy, then
        this can be a regular expression.
    :param replacement: The substitution text for matches. If ``use_regex`` is
        truthy, this can be an expansion pattern (e.g. ``\\1`` to match the first
        capture group) similar to what is passed to ``re.sub``.
    :param repo: A git repository containing cached files to perform
        substitutions in
    :param chardet_threshold: When guessing a file encoding, guesses with
        confidence below this threshold will not be used
    :param fallback_encoding: The default file encoding to use when
        the encoding can't be confidently guessed. If ``None`` is given,
        then the file is skipped.
    :param interactive: If truthy, prompt on stdin before making substitutions
    :param use_regex: If truthy, ``search`` and ``replacement`` parameters
        have different meanings.
    """
    # Treat a missing replacement as deletion of the matched text.
    replacement = replacement or u""
    for filename in _iter_repo_files(repo):
        contents = _get_file_contents(
            filename,
            chardet_threshold,
            fallback_encoding
        )
        # Skip files whose encoding could not be determined or decoded.
        if contents is None:
            continue
        encoding, text = contents
        lines = text.splitlines(True)
        changed_lines = []
        line_iter = _iter_occurences_by_line(search, text, use_regex)
        for lineno, matches in line_iter:
            # Where does this line start in the text?
            cursor = len(u"".join(lines[:lineno - 1]))
            # Do the preliminary output for this line
            click.echo(u'{}:L{}:'.format(filename, lineno), nl=False)
            # Process this line
            chunks = _get_line_chunks(
                matches, cursor, replacement, text, use_regex
            )
            for i, (unchanged, old, new) in enumerate(chunks):
                click.echo(unchanged, nl=False)
                if old or new:
                    if interactive:
                        # Number each chunk so the user can pick by index.
                        click.secho(str(i), bg='white', fg='black', nl=False)
                    click.secho(old, bg='red', nl=False)
                    click.secho(new, bg='green', nl=False)
            # Special case for file that doesn't end in newline
            if unchanged[-1:] not in ('\n', '\r'):
                click.echo('')
            if interactive:
                # Ask which chunks to replace
                keep = _prompt_replace_items(len(chunks) - 1)
                # Rejected chunks keep their original text (new := old).
                chunks = [
                    item if i in keep
                    else [item[0], item[1], item[1]]
                    for i, item in enumerate(chunks)
                ]
            # Rebuild the line from the chunks
            changed_line = u''.join(
                u''.join((unchanged, new)) for unchanged, _, new in chunks
            )
            changed_lines.append((lineno, changed_line))
        # Compute the updated text
        for lineno, line in changed_lines:
            lines[lineno - 1] = line
        updated_text = u''.join(lines)
        # Only rewrite the file when something actually changed.
        if text != updated_text:
            _put_file_contents(filename, updated_text, encoding)
def _get_file_contents(filename, chardet_threshold, fallback_encoding):
    """Read *filename* and decode it, guessing the encoding with chardet.

    Returns ``(encoding, text)`` on success, or ``None`` when no usable
    encoding is available or decoding fails.
    """
    with open(filename, 'rb') as fh:
        raw = fh.read()

    detected = chardet.detect(raw)
    if detected['confidence'] > chardet_threshold:
        encoding = detected['encoding']
    else:
        encoding = fallback_encoding
        if encoding is None:
            return None
    try:
        return encoding, raw.decode(encoding)
    except UnicodeDecodeError:
        return None
def _put_file_contents(filename, text, encoding):
# Try to encode text with the encoding. If successful, write it out
# to the file. Avoids overwriting a file when the text can't be
# encoded
try:
data = text.encode(encoding)
except UnicodeEncodeError:
return
else:
with open(filename, 'wb') as fh:
fh.write(data)
def _get_line_chunks(matches, cursor, replacement, text, use_regex):
deltas = []
for match in matches:
# the unchanged part, before the match
unchanged = text[cursor: match.start()]
sub = match.expand(replacement) if use_regex else replacement
deltas.append([unchanged, match.group(), sub])
# advance the cursor
cursor = match.end()
# Finally, add the item after the last match
end = text.find(u"\n", cursor)
unchanged = text[cursor:] if end == -1 else text[cursor: end + 1]
deltas.append([unchanged, '', ''])
return deltas
def _prompt_replace_items(count):
    """Ask the user which replacement chunks to accept.

    Accepts any prefix of 'yes' (all chunks) or 'no' (none), or a
    whitespace-separated list of chunk indices in ``range(count)``.
    Re-prompts on invalid input.

    :returns: an iterable of chunk indices to replace.
    """
    ans = click.prompt(u"Accept replacements?", default=u'yes')
    if u'yes'.startswith(ans.lower()):
        return range(count)
    if u'no'.startswith(ans.lower()):
        return range(0)
    try:
        repl_only = []
        for num in ans.split():
            ind = int(num.strip())
            if ind not in range(count):
                # Out-of-range index: treated like a parse error below.
                raise IndexError
            repl_only.append(ind)
    except (ValueError, IndexError):
        # Narrowed from a bare `except:` that also swallowed
        # KeyboardInterrupt/SystemExit; re-prompt on bad input only.
        return _prompt_replace_items(count)
    return repl_only
def _iter_occurences_by_line(search, text, use_regex=False):
    """Yield ``(lineno, matches)`` pairs, grouping matches that share a line.

    Line numbers are 1-based; matches on the same line are buffered and
    emitted together once a match on a later line (or the end) is seen.
    """
    pending = []
    current_line = 0
    for match in _iter_occurences(search, text, use_regex=use_regex):
        lineno = text.count(u"\n", 0, match.start()) + 1
        if lineno != current_line and pending:
            yield current_line, pending
            pending = []
        current_line = lineno
        pending.append(match)
    if pending:
        yield current_line, pending
def _iter_repo_files(repo):
    """Yield normalized paths of every file tracked in *repo*'s git index."""
    fs_encoding = sys.getfilesystemencoding()
    for raw_name in Repo(repo).open_index():
        yield path.normpath(path.join(repo, raw_name.decode(fs_encoding)))
def _iter_occurences(search, text, use_regex=False):
pattern = re.compile(search if use_regex else re.escape(search))
match = pattern.search(text)
while match is not None:
yield match
match = pattern.search(text, match.end())
|
import re
import sys
from codecs import open
from os import path
import chardet
import click
from dulwich.repo import Repo
__all__ = ['__version__', 'main', 'find_and_replace']
# Read the package version from the VERSION.txt file.
this_dir = path.dirname(__file__)
# Context manager ensures the handle is closed even if the read fails
# (the original leaked the handle on a read error).
with open(path.join(this_dir, 'VERSION.txt'), encoding='ascii') as version_file:
    __version__ = version_file.read().strip()
def show_version_and_exit():
    """Print the package version to stdout and terminate with exit code 0."""
    click.echo(__version__)
    raise SystemExit(0)
def _check_and_show_version(_, __, is_set):
    """Click option callback: print the version and exit when the flag is set."""
    if not is_set:
        return
    show_version_and_exit()
@click.command()
@click.option('--version', '-V', is_flag=True,
              help="show the version number and exit.",
              callback=_check_and_show_version)
@click.option('--use-regex', '-P', is_flag=True, default=False,
              help='Enable regex pattern matching (disabled by default).')
@click.option('--interactive', '-i', is_flag=True,
              help='run in interactive mode')
@click.argument('search', nargs=1)
@click.argument('replacement', required=False)
@click.argument('repo', nargs=1, required=False, default=u'.',
                type=click.Path(file_okay=False))
def main(**kwargs):
    """Command-line entry point: forward parsed options to find_and_replace.

    ``--version`` is handled by its callback during option processing, so the
    leftover keyword is popped before delegating.
    """
    kwargs.pop('version')
    find_and_replace(**kwargs)
def find_and_replace(search, replacement, repo='.', chardet_threshold=0.85,
                     fallback_encoding=None, interactive=False,
                     use_regex=False):
    """Find and replace items inside tracked files
    :param search: The text to search for. If ``use_regex`` is truthy, then
        this can be a regular expression.
    :param replacement: The substitution text for matches. If ``use_regex`` is
        truthy, this can be an expansion pattern (e.g. ``\\1`` to match the first
        capture group) similar to what is passed to ``re.sub``.
    :param repo: A git repository containing cached files to perform
        substitutions in
    :param chardet_threshold: When guessing a file encoding, guesses with
        confidence below this threshold will not be used
    :param fallback_encoding: The default file encoding to use when
        the encoding can't be confidently guessed. If ``None`` is given,
        then the file is skipped.
    :param interactive: If truthy, prompt on stdin before making substitutions
    :param use_regex: If truthy, ``search`` and ``replacement`` parameters
        have different meanings.
    """
    # Treat a missing replacement as deletion of the matched text.
    replacement = replacement or u""
    for filename in _iter_repo_files(repo):
        contents = _get_file_contents(
            filename,
            chardet_threshold,
            fallback_encoding
        )
        # Skip files whose encoding could not be determined or decoded.
        if contents is None:
            continue
        encoding, text = contents
        lines = text.splitlines(True)
        changed_lines = []
        line_iter = _iter_occurences_by_line(search, text, use_regex)
        for lineno, matches in line_iter:
            # Where does this line start in the text?
            cursor = len(u"".join(lines[:lineno - 1]))
            # Do the preliminary output for this line
            click.echo(u'{}:L{}:'.format(filename, lineno), nl=False)
            # Process this line
            chunks = _get_line_chunks(
                matches, cursor, replacement, text, use_regex
            )
            for i, (unchanged, old, new) in enumerate(chunks):
                click.echo(unchanged, nl=False)
                if old or new:
                    if interactive:
                        # Number each chunk so the user can pick by index.
                        click.secho(str(i), bg='white', fg='black', nl=False)
                    click.secho(old, bg='red', nl=False)
                    click.secho(new, bg='green', nl=False)
            # Special case for file that doesn't end in newline
            if unchanged[-1:] not in ('\n', '\r'):
                click.echo('')
            if interactive:
                # Ask which chunks to replace
                keep = _prompt_replace_items(len(chunks) - 1)
                # Rejected chunks keep their original text (new := old).
                chunks = [
                    item if i in keep
                    else [item[0], item[1], item[1]]
                    for i, item in enumerate(chunks)
                ]
            # Rebuild the line from the chunks
            changed_line = u''.join(
                u''.join((unchanged, new)) for unchanged, _, new in chunks
            )
            changed_lines.append((lineno, changed_line))
        # Compute the updated text
        for lineno, line in changed_lines:
            lines[lineno - 1] = line
        updated_text = u''.join(lines)
        # Only rewrite the file when something actually changed.
        if text != updated_text:
            _put_file_contents(filename, updated_text, encoding)
def _get_file_contents(filename, chardet_threshold, fallback_encoding):
    """Read *filename* and decode it, guessing the encoding with chardet.

    Returns ``(encoding, text)`` on success, or ``None`` when no usable
    encoding is available or decoding fails.
    """
    with open(filename, 'rb') as fh:
        raw = fh.read()

    detected = chardet.detect(raw)
    if detected['confidence'] > chardet_threshold:
        encoding = detected['encoding']
    else:
        encoding = fallback_encoding
        if encoding is None:
            return None
    try:
        return encoding, raw.decode(encoding)
    except UnicodeDecodeError:
        return None
def _put_file_contents(filename, text, encoding):
# Try to encode text with the encoding. If successful, write it out
# to the file. Avoids overwriting a file when the text can't be
# encoded
try:
data = text.encode(encoding)
except UnicodeEncodeError:
return
else:
with open(filename, 'wb') as fh:
fh.write(data)
def _get_line_chunks(matches, cursor, replacement, text, use_regex):
deltas = []
for match in matches:
# the unchanged part, before the match
unchanged = text[cursor: match.start()]
sub = match.expand(replacement) if use_regex else replacement
deltas.append([unchanged, match.group(), sub])
# advance the cursor
cursor = match.end()
# Finally, add the item after the last match
end = text.find(u"\n", cursor)
unchanged = text[cursor:] if end == -1 else text[cursor: end + 1]
deltas.append([unchanged, '', ''])
return deltas
def _prompt_replace_items(count):
    """Ask which of *count* replacement chunks to accept.

    Accepts any prefix of "yes"/"no", or a whitespace-separated list of
    chunk indices.  Re-prompts on unparsable or out-of-range input.
    """
    ans = click.prompt(u"Accept replacements?", default=u'yes')
    if u'yes'.startswith(ans.lower()):
        return range(count)
    elif u'no'.startswith(ans.lower()):
        return range(0)
    else:
        try:
            repl_only = []
            for num in ans.split():
                ind = int(num.strip())
                if ind not in range(count):
                    raise IndexError
                repl_only.append(ind)
        # Only a bad int() or an out-of-range index should re-prompt;
        # the original bare ``except:`` also swallowed KeyboardInterrupt
        # and SystemExit.
        except (ValueError, IndexError):
            return _prompt_replace_items(count)
        else:
            return repl_only
def _iter_occurences_by_line(search, text, use_regex=False):
    """Yield ``(lineno, matches)`` pairs, one pair per line with matches.

    Matches falling on the same line are buffered and emitted together
    once the line number advances.
    """
    pending = []
    pending_line = 0
    for match in _iter_occurences(search, text, use_regex=use_regex):
        lineno = text.count(u"\n", 0, match.start()) + 1
        if pending and pending_line != lineno:
            yield pending_line, pending
            pending = []
        pending_line = lineno
        pending.append(match)
    if pending:
        yield pending_line, pending
def _iter_repo_files(repo):
    """Yield the normalized path of every file tracked in *repo*'s index."""
    index = Repo(repo).open_index()
    fs_encoding = sys.getfilesystemencoding()
    for raw_name in index:
        yield path.normpath(path.join(repo, raw_name.decode(fs_encoding)))
def _iter_occurences(search, text, use_regex=False):
pattern = re.compile(search if use_regex else re.escape(search))
match = pattern.search(text)
while match is not None:
yield match
match = pattern.search(text, match.end())
|
en
| 0.803925
|
# Read the package version from the VERSION.txt file. Find and replace items inside tracked files :param search: The text to search for. If ``use_regex`` is truthy, then this can be a regular expression. :param replacement: The substition text for matches. If ``use_regex`` is truthy, this can be an expansion pattern (e.g. \1 to match the first capture group) similar to what is passed to ``re.sub``. :param repo: A git repository containing cached files to perform substitutions in :param chardet_threshold: When guessing a file encoding, guesses with confidence below this threshold will not be used :param fallback_encoding: The default file encoding to use when the encoding can't be confidently guessed. If ``None`` is given, then the file is skipped. :param interactive: If truthy, prompt on stdin before making substitutions :param use_regex: If truthy, ``search`` and ``replacement`` parameters have have different meanings. # Where does this line start in the text? # Do the preliminary output for this line # Process this line # Special case for file that doesn't end in newline # Ask which chunks to replace # Rebuild the line from the chunks # Compute the updated text # Try to encode text with the encoding. If successful, write it out # to the file. Avoids overwriting a file when the text can't be # encoded # the unchanged part, before the match # advance the cursor # Finally, add the item after the last match # Caches all matches on the same line, yielding them all at once
| 2.649555
| 3
|
rideapp/views.py
|
ur6yr/UVA-rideshare
| 0
|
6629668
|
from .models import CustomUser, Ride, Rider, Feedback
from .forms import CustomUserCreationForm, NicknameChangeForm, PhoneChangeForm, PhotoChangeForm, NewRideForm, JoinRequestForm, FeedbackForm, NewRideForm2, ProfileSetupForm
from django.shortcuts import render, get_object_or_404, redirect
from django.views.generic import TemplateView
from django.views.generic.edit import CreateView
from django.urls import reverse_lazy, reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.conf import settings
from twilio.rest import Client
from datetime import datetime
import geopy.distance
from geopy.geocoders import Nominatim
from django.utils import timezone
import copy
# from stackoverflow.com/questions/48642075/position-between-two-lat-long-coordinates-in-python
def find_point(a, b, alpha=0.5):
    """Linearly interpolate between two 2-D points.

    :param a: (x, y) start point.
    :param b: (x, y) end point.
    :param alpha: interpolation factor in [0, 1]; 0 gives *a*, 1 gives *b*.
    :returns: ``[x, y]`` of the interpolated point.
    :raises ValueError: if *alpha* is outside [0, 1].
    """
    # ``assert`` is stripped under ``python -O``; validate explicitly.
    if not 0 <= alpha <= 1:
        raise ValueError("alpha must be within [0, 1], got %r" % (alpha,))
    return [(1 - alpha) * a[0] + alpha * b[0],
            (1 - alpha) * a[1] + alpha * b[1]]
def message(target, message):
    """Send *message* to *target* as an SMS via the Twilio REST API."""
    client = Client(settings.TWILIO_ACCOUNT_SID, settings.TWILIO_AUTH_TOKEN)
    # Twilio wants a bare +1XXXXXXXXXX number; strip display formatting.
    digits = str(target.phone_number)
    for ch in ('(', ')', '-', ' '):
        digits = digits.replace(ch, '')
    sms = client.messages.create(
        body="Hi " + target.first_name + ", " + message,
        from_='+12568530059',
        to='+1' + digits,
    )
    print(sms.sid)
def reverseSearch(coords):
    """Return the full street address for *coords* via Nominatim."""
    locator = Nominatim(user_agent="http://127.0.0.1:8000")
    return locator.reverse(coords, language='en').address
def generalLoc(coords):
    """Return a short human-readable place name for *coords*.

    Reverse-geocodes via Nominatim and returns the most specific locality
    available ("building", "hamlet", "suburb" or "city", falling back to
    "county") joined with the state name.
    """
    geolocator = Nominatim(user_agent="http://127.0.0.1:8000")
    location = geolocator.reverse(coords, language='en')
    add = location.raw['address']
    print(add)
    # Pick the most specific locality key present.  Replaces the original
    # four-deep nest of bare ``except:`` clauses, which also swallowed
    # KeyboardInterrupt/SystemExit.
    for key in ('building', 'hamlet', 'suburb', 'city'):
        if key in add:
            ret = add[key]
            break
    else:
        # As in the original, a missing 'county' (or 'state') raises KeyError.
        ret = add['county']
    return ret + ', ' + add['state']
# Create your views here.
class HomePageView(TemplateView):
    """Landing page; renders the static ``home.html`` template."""
    #Ride.objects.all().filter(rideEndLoc=None).delete()
    template_name = 'home.html'
class SignUpView(CreateView):
    """User registration; on success, redirect the new user to login."""
    #Ride.objects.all().filter(rideEndLoc=None).delete()
    form_class = CustomUserCreationForm
    success_url = reverse_lazy('login')
    template_name = 'signup.html'
@login_required
def NicknameChangeView(request):
    """Let the logged-in user update their Venmo ID."""
    current_user = request.user
    if request.method == 'POST':
        form = NicknameChangeForm(request.POST)
        if form.is_valid():
            current_user.venmo_id = form.cleaned_data['venmo_id']
            current_user.save()
            return HttpResponseRedirect(reverse('profile'))
        # On invalid POST the bound form (with errors) is re-rendered.
    else:
        form = NicknameChangeForm(initial={'last_name': ""})
    context = {
        'form': form,
        'current_user': current_user,
    }
    return render(request, 'change_form.html', context)
@login_required
def ProfileSetupView(request):
    """First-login step: collect the user's phone number."""
    current_user = request.user
    if request.method == 'POST':
        form = ProfileSetupForm(request.POST)
        if form.is_valid():
            current_user.phone_number = form.cleaned_data['phone_number']
            current_user.save()
            return HttpResponseRedirect(reverse('ride-list'))
    else:
        form = ProfileSetupForm(initial={'last_name': ""})
    context = {
        'form': form,
        'current_user': current_user,
    }
    return render(request, 'rideapp/profilesetup.html', context)
@login_required
def PhoneChangeView(request):
    """Let the logged-in user update their phone number."""
    current_user = request.user
    if request.method == 'POST':
        form = PhoneChangeForm(request.POST)
        if form.is_valid():
            current_user.phone_number = form.cleaned_data['phone_number']
            current_user.save()
            return HttpResponseRedirect(reverse('profile'))
    else:
        form = PhoneChangeForm(initial={'last_name': ""})
    context = {
        'form': form,
        'current_user': current_user,
    }
    return render(request, 'change_form.html', context)
@login_required
def PhotoChangeView(request):
    """Let the logged-in user upload a new profile photo.

    Bug fix: the form is now bound to ``request.FILES`` as well as
    ``request.POST`` — without it a required file field can never
    validate, so the upload silently failed.
    """
    current_user = request.user
    if request.method == 'POST':
        form = PhotoChangeForm(request.POST, request.FILES)
        if form.is_valid():
            current_user.photo = request.FILES['photo']
            current_user.save()
            return HttpResponseRedirect(reverse('profile'))
    else:
        form = PhotoChangeForm(initial={'photo': "default.jpg"})
    context = {
        'form': form,
        'current_user': current_user,
    }
    return render(request, 'change_form.html', context)
@login_required
def ridelistView(request):
    """List all rides, newest first; first-time users go to profile setup."""
    user = request.user
    if user.first_login == 0:
        # Mark the account as seen, then collect profile details once.
        user.first_login = 1
        user.save()
        return redirect('profilesetup')
    rides = Ride.objects.all().order_by('-postedTime')
    return render(request, 'rideapp/ridelist.html', {'posts': rides})
def aboutView(request):
    """Render the static about page."""
    return render(request, 'rideapp/about.html')
@login_required
def feedbackView(request,ride_id):
    """Collect star-rating feedback for the driver of ride *ride_id*.

    On a valid POST, saves the Feedback and folds the new rating into the
    driver's running average before redirecting to the ride list.
    """
    #Ride.objects.all().filter(rideEndLoc=None).delete()
    the_ride = get_object_or_404(Ride, pk=ride_id)
    if request.method == 'POST':
        form = FeedbackForm(request.POST)
        if form.is_valid():
            current_feedback = form.save(commit=False)
            current_feedback.driver = the_ride.driver
            current_feedback.ride = the_ride
            # Running average: (count * avg + new_rating) / (count + 1).
            # NOTE(review): the result is stored via str(); presumably the
            # model field coerces it back to a number -- confirm, since
            # other views call round() on avg_rating.
            current_feedback.driver.avg_rating = str(round(((current_feedback.driver.count)*(current_feedback.driver.avg_rating)+int(current_feedback.rating))/(current_feedback.driver.count+1),2))
            current_feedback.driver.count += 1
            #current_feedback.driver.feedback.add(current_feedback.feedback)
            current_feedback.postTime = datetime.now()
            current_feedback.driver.save()
            current_feedback.save()
            return HttpResponseRedirect(reverse('ride-list'))
    else:
        form = FeedbackForm()
    return render(request, 'rideapp/feedback.html', {'form': form})
@login_required
def profileView(request):
    """Show the logged-in user's profile with a rating histogram."""
    qs = Feedback.objects.filter(driver=request.user)
    has_feedback = qs.count() > 0
    feedbacks = qs if has_feedback else []
    # Star widgets only need lists of the right lengths.
    rat = round(request.user.avg_rating)
    filled = [rat] * rat
    not_filled = [rat] * (5 - rat)
    # total defaults to 1 so the percentage math never divides by zero.
    total = qs.count() if has_feedback else 1
    counts = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0}
    if has_feedback:
        for fb in qs:
            if fb.rating in counts:
                counts[fb.rating] += 1

    def pct(n):
        # CSS width strings such as "40%".
        return str(round(n / total * 100)) + "%"

    context = {
        'feedbacks': feedbacks,
        'filled': filled,
        'not_filled': not_filled,
        'len_5': pct(counts[5]),
        'len_4': pct(counts[4]),
        'len_3': pct(counts[3]),
        'len_2': pct(counts[2]),
        'len_1': pct(counts[1]),
        'fives': counts[5],
        'fours': counts[4],
        'threes': counts[3],
        'twos': counts[2],
        'ones': counts[1],
    }
    return render(request, 'rideapp/profile.html', context)
@login_required
def RideFormView(request):
    """Step 1 of posting a ride: collect the start location."""
    current_ride = None
    if request.method == 'POST':
        form = NewRideForm(request.POST)
        if form.is_valid():
            current_ride = form.save(commit=False)
            current_ride.postedTime = timezone.now()
            current_ride.startAddress = reverseSearch(current_ride.rideStartLoc)
            current_ride.driver = request.user
            current_ride.save()
            return HttpResponseRedirect(
                reverse('addride2', args=(current_ride.id,)))
        # Keep the validation errors but show a fresh, unbound form.
        errors = form._errors
        form = NewRideForm()
        form._errors = errors
    else:
        form = NewRideForm()
    context = {
        'form': form,
        'current_user': current_ride,
    }
    return render(request, 'rideapp/add_ride.html', context)
@login_required
def RideFormView2(request, ride_id):
    """Step 2 of posting a ride: destination, seats, cost and details."""
    current_ride = get_object_or_404(Ride, pk=ride_id)
    if request.method == 'POST':
        form = NewRideForm2(request.POST)
        if form.is_valid():
            helper = form.save(commit=False)
            current_ride.postedTime = timezone.now()
            current_ride.rideEndLoc = helper.rideEndLoc
            current_ride.endAddress = reverseSearch(helper.rideEndLoc)
            current_ride.spacesAvailable = helper.spacesAvailable
            current_ride.cost = helper.cost
            current_ride.details = helper.details
            current_ride.generalDest = generalLoc(helper.rideEndLoc)
            current_ride.save()
            return HttpResponseRedirect(reverse('ride-list'))
        # Keep the validation errors but show a fresh, unbound form.
        errors = form._errors
        form = NewRideForm2()
        form._errors = errors
    else:
        form = NewRideForm2(initial={'last_name': ""})
    context = {
        'form': form,
        'current_user': current_ride,
    }
    return render(request, 'rideapp/add_ride2.html', context)
@login_required
def ridepage(request, ride_id, message=''):
    """Detail page for a ride, with a Mapbox map of start and end points."""
    ride = get_object_or_404(Ride, pk=ride_id)
    alreadyIn = ride.riderList.filter(user=request.user).count()
    alreadyRequested = ride.riderRequests.all().filter(user=request.user).count()
    print(type(ride.rideStartLoc[0]))

    def to_lnglat(point):
        # "(lat, lng)" string -> [lng, lat] floats, the order Mapbox expects.
        parts = str(point).replace('(', '').replace(')', '').split(',')
        coords = [float(p) for p in parts]
        coords.reverse()
        return coords

    startLoc = to_lnglat(ride.rideStartLoc)
    endLoc = to_lnglat(ride.rideEndLoc)
    centerLoc = find_point(startLoc, endLoc)
    context = {
        'ride': ride,
        'alreadyIn': alreadyIn,
        'alreadyRequested': alreadyRequested,
        'startLoc': startLoc,
        'endLoc': endLoc,
        'mapKey': settings.MAPBOX_KEY,
        'centerLoc': centerLoc,
    }
    return render(request, 'rideapp/ridepage.html', context)
@login_required
def myDrivesView(request):
    """Rides the current user is driving, newest first."""
    drives = Ride.objects.filter(driver=request.user).order_by('-postedTime')
    return render(request, 'rideapp/mydrives.html', {'posts': drives})
@login_required
def myRidesView(request):
    """Rides the current user has joined as a passenger, newest first."""
    rides = Ride.objects.filter(
        riderList__user=request.user).order_by('-postedTime')
    return render(request, 'rideapp/myrides.html', {'posts': rides})
@login_required
def myOldRidesView(request):
    """Past rides (dated before today) that the user drove or rode in."""
    past = Ride.objects.filter(rideDate__lt=datetime.today())
    past = past.filter(
        Q(riderList__user=request.user) | Q(driver=request.user)
    ).order_by('-postedTime')
    return render(request, 'rideapp/myoldrides.html', {'posts': past})
@login_required
def ride_list(request):
    """Filterable ride list.

    NOTE(review): ``RideFilter`` is not imported in this module — confirm
    the import exists, otherwise this view raises NameError.
    """
    # Fixed ``Ride.object`` -> ``Ride.objects`` (AttributeError otherwise)
    # and stopped shadowing the ``filter`` builtin.
    ride_filter = RideFilter(request.GET, queryset=Ride.objects.all())
    return render(request, 'rideapp/user_list.html', {'filter': ride_filter})
@login_required
def userView(request, user_username):
    """Public profile page for *user_username* with a rating histogram."""
    user_requested = get_object_or_404(CustomUser, username=user_username)
    # Star widgets only need lists of the right lengths.
    rat = round(user_requested.avg_rating)
    filled = [rat] * rat
    not_filled = [rat] * (5 - rat)
    qs = Feedback.objects.filter(driver=user_requested)
    has_feedback = qs.count() > 0
    # total defaults to 1 so the percentage math never divides by zero.
    total = qs.count() if has_feedback else 1
    counts = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0}
    if has_feedback:
        for fb in qs:
            if fb.rating in counts:
                counts[fb.rating] += 1

    def pct(n):
        # CSS width strings such as "40%".
        return str(round(n / total * 100)) + "%"

    context = {
        'user_requested': user_requested,
        'filled': filled,
        'not_filled': not_filled,
        'len_5': pct(counts[5]),
        'len_4': pct(counts[4]),
        'len_3': pct(counts[3]),
        'len_2': pct(counts[2]),
        'len_1': pct(counts[1]),
        'fives': counts[5],
        'fours': counts[4],
        'threes': counts[3],
        'twos': counts[2],
        'ones': counts[1],
    }
    return render(request, 'rideapp/userpage2.html', context)
@login_required
def RideRequestView(request, ride_id):
    """Let the current user request to join ride *ride_id*."""
    current_rider = None
    ride_obj = get_object_or_404(Ride, id=ride_id)
    if ride_obj.riderList.filter(user=request.user).count():
        # Already a rider.  The original rendered profile.html with an
        # undefined ``context`` (NameError); send them to the ride instead.
        return HttpResponseRedirect(reverse('ridepage', args=[ride_id]))
    if request.method == 'POST':
        riderform = JoinRequestForm(request.POST)
        if riderform.is_valid():
            current_rider = riderform.save(commit=False)
            current_rider.user = request.user
            current_rider.save()
            RequestHelper(ride_id, current_rider)
            return HttpResponseRedirect(reverse('ridepage', args=[ride_id]))
    else:
        riderform = JoinRequestForm()
    context = {
        'riderform': riderform,
        'current_rider': current_rider,
    }
    return render(request, 'rideapp/join_ride.html', context)
#Helper method for ride request view
def RequestHelper(ride_id, rider):
    """Record *rider*'s join request on the ride and notify the driver."""
    ride_obj = get_object_or_404(Ride, id=ride_id)
    ride_obj.riderRequests.add(rider)
    ride_obj.save()
    m = str(rider.user.first_name)+" has requested to join your ride. Head over to your mydrives page to approve or reject them."
    # SMS delivery is best-effort; the original bare ``except:`` also
    # swallowed KeyboardInterrupt/SystemExit.
    try:
        message(ride_obj.driver, m)
    except Exception:
        pass
def ApproveRider(request,ride_id,rider_id):
    """Accept a pending join request, take one seat, and notify the rider."""
    ride_obj = get_object_or_404(Ride, id=ride_id)
    rider_obj = get_object_or_404(Rider, id=rider_id)
    ride_obj.riderList.add(rider_obj)
    ride_obj.riderRequests.remove(rider_obj)
    ride_obj.spacesAvailable -= 1
    ride_obj.save()
    m = str(ride_obj.driver.first_name)+" has approved you to join their ride. You can see your rides at your myrides page."
    # SMS is best-effort; narrowed from a bare ``except:`` that also
    # swallowed KeyboardInterrupt/SystemExit.
    try:
        message(rider_obj.user, m)
    except Exception:
        pass
    return redirect('/mydrives')
def DeclineRider(request,ride_id,rider_id):
    """Reject a pending join request and notify the rider."""
    ride_obj = get_object_or_404(Ride, id=ride_id)
    rider_obj = get_object_or_404(Rider, id=rider_id)
    ride_obj.riderRequests.remove(rider_obj)
    ride_obj.save()
    m = str(ride_obj.driver.first_name)+" has declined your request to join their ride. Sorry!"
    # SMS is best-effort; narrowed from a bare ``except:``.
    try:
        message(rider_obj.user, m)
    except Exception:
        pass
    return redirect('/mydrives')
def RemoveRider(request,ride_id,rider_id):
    """Remove a rider from the ride, free up the seat, and notify them."""
    ride_obj = get_object_or_404(Ride, id=ride_id)
    rider_obj = get_object_or_404(Rider, id=rider_id)
    ride_obj.riderList.remove(rider_obj)
    ride_obj.spacesAvailable += 1
    ride_obj.save()
    m = str(ride_obj.driver.first_name)+" has removed you from their ride. You can see your rides at your myrides page."
    # SMS is best-effort; narrowed from a bare ``except:``.
    try:
        message(rider_obj.user, m)
    except Exception:
        pass
    return redirect('/mydrives')
def LeaveRide(request,ride_id):
    """Remove the current user from a ride, freeing up one seat."""
    ride_obj = get_object_or_404(Ride, id=ride_id)
    member = ride_obj.riderList.get(user=request.user)
    ride_obj.riderList.remove(member)
    ride_obj.spacesAvailable += 1
    ride_obj.save()
    return redirect('/myrides')
def DeleteRide(request,ride_id):
    """Delete a ride and attempt to notify the driver by SMS."""
    ride_obj = get_object_or_404(Ride, id=ride_id)
    #Loop through riders and notify?
    ride_obj.delete()
    m = "You've deleted the ride. You can see your ride at your mydrives page."
    # ``ride_obj`` remains usable in memory after .delete().  SMS is
    # best-effort; narrowed from a bare ``except:``.
    try:
        message(ride_obj.driver, m)
    except Exception:
        pass
    return redirect('/mydrives')
|
from .models import CustomUser, Ride, Rider, Feedback
from .forms import CustomUserCreationForm, NicknameChangeForm, PhoneChangeForm, PhotoChangeForm, NewRideForm, JoinRequestForm, FeedbackForm, NewRideForm2, ProfileSetupForm
from django.shortcuts import render, get_object_or_404, redirect
from django.views.generic import TemplateView
from django.views.generic.edit import CreateView
from django.urls import reverse_lazy, reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.conf import settings
from twilio.rest import Client
from datetime import datetime
import geopy.distance
from geopy.geocoders import Nominatim
from django.utils import timezone
import copy
# from stackoverflow.com/questions/48642075/position-between-two-lat-long-coordinates-in-python
def find_point(a, b, alpha=0.5):
    """Linearly interpolate between two 2-D points.

    :param a: (x, y) start point.
    :param b: (x, y) end point.
    :param alpha: interpolation factor in [0, 1]; 0 gives *a*, 1 gives *b*.
    :returns: ``[x, y]`` of the interpolated point.
    :raises ValueError: if *alpha* is outside [0, 1].
    """
    # ``assert`` is stripped under ``python -O``; validate explicitly.
    if not 0 <= alpha <= 1:
        raise ValueError("alpha must be within [0, 1], got %r" % (alpha,))
    return [(1 - alpha) * a[0] + alpha * b[0],
            (1 - alpha) * a[1] + alpha * b[1]]
def message(target, message):
    """Send *message* to *target* as an SMS via the Twilio REST API."""
    client = Client(settings.TWILIO_ACCOUNT_SID, settings.TWILIO_AUTH_TOKEN)
    # Twilio wants a bare +1XXXXXXXXXX number; strip display formatting.
    digits = str(target.phone_number)
    for ch in ('(', ')', '-', ' '):
        digits = digits.replace(ch, '')
    sms = client.messages.create(
        body="Hi " + target.first_name + ", " + message,
        from_='+12568530059',
        to='+1' + digits,
    )
    print(sms.sid)
def reverseSearch(coords):
    """Return the full street address for *coords* via Nominatim."""
    locator = Nominatim(user_agent="http://127.0.0.1:8000")
    return locator.reverse(coords, language='en').address
def generalLoc(coords):
    """Return a short human-readable place name for *coords*.

    Reverse-geocodes via Nominatim and returns the most specific locality
    available ("building", "hamlet", "suburb" or "city", falling back to
    "county") joined with the state name.
    """
    geolocator = Nominatim(user_agent="http://127.0.0.1:8000")
    location = geolocator.reverse(coords, language='en')
    add = location.raw['address']
    print(add)
    # Pick the most specific locality key present.  Replaces the original
    # four-deep nest of bare ``except:`` clauses, which also swallowed
    # KeyboardInterrupt/SystemExit.
    for key in ('building', 'hamlet', 'suburb', 'city'):
        if key in add:
            ret = add[key]
            break
    else:
        # As in the original, a missing 'county' (or 'state') raises KeyError.
        ret = add['county']
    return ret + ', ' + add['state']
# Create your views here.
class HomePageView(TemplateView):
    """Landing page; renders the static ``home.html`` template."""
    #Ride.objects.all().filter(rideEndLoc=None).delete()
    template_name = 'home.html'
class SignUpView(CreateView):
    """User registration; on success, redirect the new user to login."""
    #Ride.objects.all().filter(rideEndLoc=None).delete()
    form_class = CustomUserCreationForm
    success_url = reverse_lazy('login')
    template_name = 'signup.html'
@login_required
def NicknameChangeView(request):
    """Let the logged-in user update their Venmo ID."""
    current_user = request.user
    if request.method == 'POST':
        form = NicknameChangeForm(request.POST)
        if form.is_valid():
            current_user.venmo_id = form.cleaned_data['venmo_id']
            current_user.save()
            return HttpResponseRedirect(reverse('profile'))
        # On invalid POST the bound form (with errors) is re-rendered.
    else:
        form = NicknameChangeForm(initial={'last_name': ""})
    context = {
        'form': form,
        'current_user': current_user,
    }
    return render(request, 'change_form.html', context)
@login_required
def ProfileSetupView(request):
    """First-login step: collect the user's phone number."""
    current_user = request.user
    if request.method == 'POST':
        form = ProfileSetupForm(request.POST)
        if form.is_valid():
            current_user.phone_number = form.cleaned_data['phone_number']
            current_user.save()
            return HttpResponseRedirect(reverse('ride-list'))
    else:
        form = ProfileSetupForm(initial={'last_name': ""})
    context = {
        'form': form,
        'current_user': current_user,
    }
    return render(request, 'rideapp/profilesetup.html', context)
@login_required
def PhoneChangeView(request):
    """Let the logged-in user update their phone number."""
    current_user = request.user
    if request.method == 'POST':
        form = PhoneChangeForm(request.POST)
        if form.is_valid():
            current_user.phone_number = form.cleaned_data['phone_number']
            current_user.save()
            return HttpResponseRedirect(reverse('profile'))
    else:
        form = PhoneChangeForm(initial={'last_name': ""})
    context = {
        'form': form,
        'current_user': current_user,
    }
    return render(request, 'change_form.html', context)
@login_required
def PhotoChangeView(request):
    """Let the logged-in user upload a new profile photo.

    Bug fix: the form is now bound to ``request.FILES`` as well as
    ``request.POST`` — without it a required file field can never
    validate, so the upload silently failed.
    """
    current_user = request.user
    if request.method == 'POST':
        form = PhotoChangeForm(request.POST, request.FILES)
        if form.is_valid():
            current_user.photo = request.FILES['photo']
            current_user.save()
            return HttpResponseRedirect(reverse('profile'))
    else:
        form = PhotoChangeForm(initial={'photo': "default.jpg"})
    context = {
        'form': form,
        'current_user': current_user,
    }
    return render(request, 'change_form.html', context)
@login_required
def ridelistView(request):
    """List all rides, newest first; first-time users go to profile setup."""
    user = request.user
    if user.first_login == 0:
        # Mark the account as seen, then collect profile details once.
        user.first_login = 1
        user.save()
        return redirect('profilesetup')
    rides = Ride.objects.all().order_by('-postedTime')
    return render(request, 'rideapp/ridelist.html', {'posts': rides})
def aboutView(request):
    """Render the static about page."""
    return render(request, 'rideapp/about.html')
@login_required
def feedbackView(request,ride_id):
    """Collect star-rating feedback for the driver of ride *ride_id*.

    On a valid POST, saves the Feedback and folds the new rating into the
    driver's running average before redirecting to the ride list.
    """
    #Ride.objects.all().filter(rideEndLoc=None).delete()
    the_ride = get_object_or_404(Ride, pk=ride_id)
    if request.method == 'POST':
        form = FeedbackForm(request.POST)
        if form.is_valid():
            current_feedback = form.save(commit=False)
            current_feedback.driver = the_ride.driver
            current_feedback.ride = the_ride
            # Running average: (count * avg + new_rating) / (count + 1).
            # NOTE(review): the result is stored via str(); presumably the
            # model field coerces it back to a number -- confirm, since
            # other views call round() on avg_rating.
            current_feedback.driver.avg_rating = str(round(((current_feedback.driver.count)*(current_feedback.driver.avg_rating)+int(current_feedback.rating))/(current_feedback.driver.count+1),2))
            current_feedback.driver.count += 1
            #current_feedback.driver.feedback.add(current_feedback.feedback)
            current_feedback.postTime = datetime.now()
            current_feedback.driver.save()
            current_feedback.save()
            return HttpResponseRedirect(reverse('ride-list'))
    else:
        form = FeedbackForm()
    return render(request, 'rideapp/feedback.html', {'form': form})
@login_required
def profileView(request):
    """Show the logged-in user's profile with a rating histogram."""
    qs = Feedback.objects.filter(driver=request.user)
    has_feedback = qs.count() > 0
    feedbacks = qs if has_feedback else []
    # Star widgets only need lists of the right lengths.
    rat = round(request.user.avg_rating)
    filled = [rat] * rat
    not_filled = [rat] * (5 - rat)
    # total defaults to 1 so the percentage math never divides by zero.
    total = qs.count() if has_feedback else 1
    counts = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0}
    if has_feedback:
        for fb in qs:
            if fb.rating in counts:
                counts[fb.rating] += 1

    def pct(n):
        # CSS width strings such as "40%".
        return str(round(n / total * 100)) + "%"

    context = {
        'feedbacks': feedbacks,
        'filled': filled,
        'not_filled': not_filled,
        'len_5': pct(counts[5]),
        'len_4': pct(counts[4]),
        'len_3': pct(counts[3]),
        'len_2': pct(counts[2]),
        'len_1': pct(counts[1]),
        'fives': counts[5],
        'fours': counts[4],
        'threes': counts[3],
        'twos': counts[2],
        'ones': counts[1],
    }
    return render(request, 'rideapp/profile.html', context)
@login_required
def RideFormView(request):
    """Step 1 of posting a ride: collect the start location."""
    current_ride = None
    if request.method == 'POST':
        form = NewRideForm(request.POST)
        if form.is_valid():
            current_ride = form.save(commit=False)
            current_ride.postedTime = timezone.now()
            current_ride.startAddress = reverseSearch(current_ride.rideStartLoc)
            current_ride.driver = request.user
            current_ride.save()
            return HttpResponseRedirect(
                reverse('addride2', args=(current_ride.id,)))
        # Keep the validation errors but show a fresh, unbound form.
        errors = form._errors
        form = NewRideForm()
        form._errors = errors
    else:
        form = NewRideForm()
    context = {
        'form': form,
        'current_user': current_ride,
    }
    return render(request, 'rideapp/add_ride.html', context)
@login_required
def RideFormView2(request, ride_id):
    """Step 2 of posting a ride: destination, seats, cost and details."""
    current_ride = get_object_or_404(Ride, pk=ride_id)
    if request.method == 'POST':
        form = NewRideForm2(request.POST)
        if form.is_valid():
            helper = form.save(commit=False)
            current_ride.postedTime = timezone.now()
            current_ride.rideEndLoc = helper.rideEndLoc
            current_ride.endAddress = reverseSearch(helper.rideEndLoc)
            current_ride.spacesAvailable = helper.spacesAvailable
            current_ride.cost = helper.cost
            current_ride.details = helper.details
            current_ride.generalDest = generalLoc(helper.rideEndLoc)
            current_ride.save()
            return HttpResponseRedirect(reverse('ride-list'))
        # Keep the validation errors but show a fresh, unbound form.
        errors = form._errors
        form = NewRideForm2()
        form._errors = errors
    else:
        form = NewRideForm2(initial={'last_name': ""})
    context = {
        'form': form,
        'current_user': current_ride,
    }
    return render(request, 'rideapp/add_ride2.html', context)
@login_required
def ridepage(request, ride_id, message=''):
    """Detail page for a ride, with a Mapbox map of start and end points."""
    ride = get_object_or_404(Ride, pk=ride_id)
    alreadyIn = ride.riderList.filter(user=request.user).count()
    alreadyRequested = ride.riderRequests.all().filter(user=request.user).count()
    print(type(ride.rideStartLoc[0]))

    def to_lnglat(point):
        # "(lat, lng)" string -> [lng, lat] floats, the order Mapbox expects.
        parts = str(point).replace('(', '').replace(')', '').split(',')
        coords = [float(p) for p in parts]
        coords.reverse()
        return coords

    startLoc = to_lnglat(ride.rideStartLoc)
    endLoc = to_lnglat(ride.rideEndLoc)
    centerLoc = find_point(startLoc, endLoc)
    context = {
        'ride': ride,
        'alreadyIn': alreadyIn,
        'alreadyRequested': alreadyRequested,
        'startLoc': startLoc,
        'endLoc': endLoc,
        'mapKey': settings.MAPBOX_KEY,
        'centerLoc': centerLoc,
    }
    return render(request, 'rideapp/ridepage.html', context)
@login_required
def myDrivesView(request):
    """Rides the current user is driving, newest first."""
    drives = Ride.objects.filter(driver=request.user).order_by('-postedTime')
    return render(request, 'rideapp/mydrives.html', {'posts': drives})
@login_required
def myRidesView(request):
    """Rides the current user has joined as a passenger, newest first."""
    rides = Ride.objects.filter(
        riderList__user=request.user).order_by('-postedTime')
    return render(request, 'rideapp/myrides.html', {'posts': rides})
@login_required
def myOldRidesView(request):
    """Past rides (dated before today) that the user drove or rode in."""
    past = Ride.objects.filter(rideDate__lt=datetime.today())
    past = past.filter(
        Q(riderList__user=request.user) | Q(driver=request.user)
    ).order_by('-postedTime')
    return render(request, 'rideapp/myoldrides.html', {'posts': past})
@login_required
def ride_list(request):
    """Filterable ride list.

    NOTE(review): ``RideFilter`` is not imported in this module — confirm
    the import exists, otherwise this view raises NameError.
    """
    # Fixed ``Ride.object`` -> ``Ride.objects`` (AttributeError otherwise)
    # and stopped shadowing the ``filter`` builtin.
    ride_filter = RideFilter(request.GET, queryset=Ride.objects.all())
    return render(request, 'rideapp/user_list.html', {'filter': ride_filter})
@login_required
def userView(request, user_username):
    """Public profile page for *user_username* with a rating histogram."""
    user_requested = get_object_or_404(CustomUser, username=user_username)
    # Star widgets only need lists of the right lengths.
    rat = round(user_requested.avg_rating)
    filled = [rat] * rat
    not_filled = [rat] * (5 - rat)
    qs = Feedback.objects.filter(driver=user_requested)
    has_feedback = qs.count() > 0
    # total defaults to 1 so the percentage math never divides by zero.
    total = qs.count() if has_feedback else 1
    counts = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0}
    if has_feedback:
        for fb in qs:
            if fb.rating in counts:
                counts[fb.rating] += 1

    def pct(n):
        # CSS width strings such as "40%".
        return str(round(n / total * 100)) + "%"

    context = {
        'user_requested': user_requested,
        'filled': filled,
        'not_filled': not_filled,
        'len_5': pct(counts[5]),
        'len_4': pct(counts[4]),
        'len_3': pct(counts[3]),
        'len_2': pct(counts[2]),
        'len_1': pct(counts[1]),
        'fives': counts[5],
        'fours': counts[4],
        'threes': counts[3],
        'twos': counts[2],
        'ones': counts[1],
    }
    return render(request, 'rideapp/userpage2.html', context)
@login_required
def RideRequestView(request, ride_id):
    """Let the current user request to join ride *ride_id*."""
    current_rider = None
    ride_obj = get_object_or_404(Ride, id=ride_id)
    if ride_obj.riderList.filter(user=request.user).count():
        # Already a rider.  The original rendered profile.html with an
        # undefined ``context`` (NameError); send them to the ride instead.
        return HttpResponseRedirect(reverse('ridepage', args=[ride_id]))
    if request.method == 'POST':
        riderform = JoinRequestForm(request.POST)
        if riderform.is_valid():
            current_rider = riderform.save(commit=False)
            current_rider.user = request.user
            current_rider.save()
            RequestHelper(ride_id, current_rider)
            return HttpResponseRedirect(reverse('ridepage', args=[ride_id]))
    else:
        riderform = JoinRequestForm()
    context = {
        'riderform': riderform,
        'current_rider': current_rider,
    }
    return render(request, 'rideapp/join_ride.html', context)
#Helper method for ride request view
def RequestHelper(ride_id, rider):
    """Record *rider*'s join request on the ride and notify the driver."""
    ride_obj = get_object_or_404(Ride, id=ride_id)
    ride_obj.riderRequests.add(rider)
    ride_obj.save()
    m = str(rider.user.first_name)+" has requested to join your ride. Head over to your mydrives page to approve or reject them."
    # SMS delivery is best-effort; the original bare ``except:`` also
    # swallowed KeyboardInterrupt/SystemExit.
    try:
        message(ride_obj.driver, m)
    except Exception:
        pass
def ApproveRider(request,ride_id,rider_id):
    """Accept a pending join request, take one seat, and notify the rider."""
    ride_obj = get_object_or_404(Ride, id=ride_id)
    rider_obj = get_object_or_404(Rider, id=rider_id)
    ride_obj.riderList.add(rider_obj)
    ride_obj.riderRequests.remove(rider_obj)
    ride_obj.spacesAvailable -= 1
    ride_obj.save()
    m = str(ride_obj.driver.first_name)+" has approved you to join their ride. You can see your rides at your myrides page."
    # SMS is best-effort; narrowed from a bare ``except:`` that also
    # swallowed KeyboardInterrupt/SystemExit.
    try:
        message(rider_obj.user, m)
    except Exception:
        pass
    return redirect('/mydrives')
def DeclineRider(request,ride_id,rider_id):
    """Reject a pending join request and notify the rider (best-effort)."""
    ride_obj = get_object_or_404(Ride, id=ride_id)
    rider_obj = get_object_or_404(Rider, id=rider_id)
    ride_obj.riderRequests.remove(rider_obj)
    ride_obj.save()
    m = str(ride_obj.driver.first_name)+" has declined your request to join their ride. Sorry!"
    try:
        message(rider_obj.user,m)
    except Exception:
        # Notification is best-effort; narrowed from a bare `except: None`.
        pass
    return redirect('/mydrives')
def RemoveRider(request,ride_id,rider_id):
    """Remove an approved rider from the ride, free the seat, and notify
    the removed rider (best-effort)."""
    ride_obj = get_object_or_404(Ride, id=ride_id)
    rider_obj = get_object_or_404(Rider, id=rider_id)
    ride_obj.riderList.remove(rider_obj)
    ride_obj.spacesAvailable += 1
    ride_obj.save()
    m = str(ride_obj.driver.first_name)+" has removed you from their ride. You can see your rides at your myrides page."
    try:
        message(rider_obj.user,m)
    except Exception:
        # Notification is best-effort; narrowed from a bare `except: None`.
        pass
    return redirect('/mydrives')
def LeaveRide(request, ride_id):
    """Remove the logged-in user from the ride's rider list and free a seat."""
    ride = get_object_or_404(Ride, id=ride_id)
    # riderList.get() raises if the user is not actually on the ride.
    departing = ride.riderList.get(user=request.user)
    ride.riderList.remove(departing)
    ride.spacesAvailable += 1
    ride.save()
    return redirect('/myrides')
def DeleteRide(request,ride_id):
    """Delete the ride and notify its driver (best-effort).

    NOTE(review): riders on the ride are not notified of the deletion --
    confirm whether that is intended.
    """
    ride_obj = get_object_or_404(Ride, id=ride_id)
    ride_obj.delete()
    m = "You've deleted the ride. You can see your ride at your mydrives page."
    try:
        # ride_obj is gone from the DB but its in-memory attributes (and the
        # driver FK) remain usable for the notification.
        message(ride_obj.driver,m)
    except Exception:
        # Notification is best-effort; narrowed from a bare `except: None`.
        pass
    return redirect('/mydrives')
|
en
| 0.30508
|
# from stackoverflow.com/questions/48642075/position-between-two-lat-long-coordinates-in-python #cod = coords.deconstruct()[2] #print(location.address) #print(type(location.address)) # Create your views here. #Ride.objects.all().filter(rideEndLoc=None).delete() #Ride.objects.all().filter(rideEndLoc=None).delete() #Ride.objects.all().filter(rideEndLoc=None).delete() #Ride.objects.all().filter(rideEndLoc=None).delete() #Ride.objects.all().filter(rideEndLoc=None).delete() #Ride.objects.all().filter(rideEndLoc=None).delete() #Ride.objects.all().delete() #Ride.objects.all().filter(rideEndLoc=None).delete() #Ride.objects.all().filter(rideEndLoc=None).delete() #current_feedback.driver.feedback.add(current_feedback.feedback) #Ride.objects.all().filter(rideEndLoc=None).delete() #Ride.objects.all().filter(rideEndLoc=None).delete() #print("\n") #print(form.errors) #print("\n") #message(current_ride.driver,"you posted a ride dummy") #Ride.objects.all().filter(rideEndLoc=None).delete() #[(startLoc[0]+endLoc[0])/2, (startLoc[1]+endLoc[1])/2] #print(centerLoc) #print("\n") #print(geopy.distance.vincenty(ride.rideStartLoc, ride.rideEndLoc).m) #print("\n") #generalLoc(ride.rideEndLoc) #generalLoc(ride.rideStartLoc) #print(startLoc) #print(endLoc) #Ride.objects.all().filter(rideEndLoc=None).delete() #Ride.objects.all().filter(rideEndLoc=None).delete() #Ride.objects.all().filter(rideEndLoc=None).delete() #Ride.objects.all().filter(rideEndLoc=None).delete() #Ride.objects.all().filter(rideEndLoc=None).delete() #Ride.objects.all().filter(rideEndLoc=None).delete() # console.log("exists") #Helper method for ride request view #Ride.objects.all().filter(rideEndLoc=None).delete() #Ride.objects.all().filter(rideEndLoc=None).delete() #Ride.objects.all().filter(rideEndLoc=None).delete() #Ride.objects.all().filter(rideEndLoc=None).delete() #Ride.objects.all().filter(rideEndLoc=None).delete() #Ride.objects.all().filter(rideEndLoc=None).delete() #Loop through riders and notify?
| 1.979916
| 2
|
alipay/aop/api/response/AlipayOpenInstantdeliveryAccountQueryResponse.py
|
antopen/alipay-sdk-python-all
| 213
|
6629669
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.LogisticsAccountStatusDTO import LogisticsAccountStatusDTO
class AlipayOpenInstantdeliveryAccountQueryResponse(AlipayResponse):
    """Response model for the alipay.open.instantdelivery.account.query API.

    Generated-SDK style: one private attribute plus a property getter/setter
    pair per response field, and parse_response_content() which copies the
    decoded response dict into the typed fields.
    """
    def __init__(self):
        super(AlipayOpenInstantdeliveryAccountQueryResponse, self).__init__()
        # Every field is None until parse_response_content() populates it.
        self._balance = None
        self._business_license = None
        self._business_scope = None
        self._credit_code = None
        self._email = None
        self._enterprise_address = None
        self._enterprise_city = None
        self._enterprise_district = None
        self._enterprise_name = None
        self._enterprise_province = None
        self._enterprise_type = None
        self._logistics_account_status = None
        self._phone = None
    @property
    def balance(self):
        return self._balance
    @balance.setter
    def balance(self, value):
        self._balance = value
    @property
    def business_license(self):
        return self._business_license
    @business_license.setter
    def business_license(self, value):
        self._business_license = value
    @property
    def business_scope(self):
        return self._business_scope
    @business_scope.setter
    def business_scope(self, value):
        self._business_scope = value
    @property
    def credit_code(self):
        return self._credit_code
    @credit_code.setter
    def credit_code(self, value):
        self._credit_code = value
    @property
    def email(self):
        return self._email
    @email.setter
    def email(self, value):
        self._email = value
    @property
    def enterprise_address(self):
        return self._enterprise_address
    @enterprise_address.setter
    def enterprise_address(self, value):
        self._enterprise_address = value
    @property
    def enterprise_city(self):
        return self._enterprise_city
    @enterprise_city.setter
    def enterprise_city(self, value):
        self._enterprise_city = value
    @property
    def enterprise_district(self):
        return self._enterprise_district
    @enterprise_district.setter
    def enterprise_district(self, value):
        self._enterprise_district = value
    @property
    def enterprise_name(self):
        return self._enterprise_name
    @enterprise_name.setter
    def enterprise_name(self, value):
        self._enterprise_name = value
    @property
    def enterprise_province(self):
        return self._enterprise_province
    @enterprise_province.setter
    def enterprise_province(self, value):
        self._enterprise_province = value
    @property
    def enterprise_type(self):
        return self._enterprise_type
    @enterprise_type.setter
    def enterprise_type(self, value):
        self._enterprise_type = value
    @property
    def logistics_account_status(self):
        return self._logistics_account_status
    @logistics_account_status.setter
    def logistics_account_status(self, value):
        # Coerces each list element to LogisticsAccountStatusDTO.
        # NOTE(review): non-list values are silently ignored (the field
        # stays None); preserved as generated -- confirm against the SDK
        # code generator before changing.
        if isinstance(value, list):
            self._logistics_account_status = list()
            for i in value:
                if isinstance(i, LogisticsAccountStatusDTO):
                    self._logistics_account_status.append(i)
                else:
                    self._logistics_account_status.append(LogisticsAccountStatusDTO.from_alipay_dict(i))
    @property
    def phone(self):
        return self._phone
    @phone.setter
    def phone(self, value):
        self._phone = value
    def parse_response_content(self, response_content):
        # Delegate raw parsing to AlipayResponse, then assign each present
        # key through its property setter (which performs any coercion).
        response = super(AlipayOpenInstantdeliveryAccountQueryResponse, self).parse_response_content(response_content)
        if 'balance' in response:
            self.balance = response['balance']
        if 'business_license' in response:
            self.business_license = response['business_license']
        if 'business_scope' in response:
            self.business_scope = response['business_scope']
        if 'credit_code' in response:
            self.credit_code = response['credit_code']
        if 'email' in response:
            self.email = response['email']
        if 'enterprise_address' in response:
            self.enterprise_address = response['enterprise_address']
        if 'enterprise_city' in response:
            self.enterprise_city = response['enterprise_city']
        if 'enterprise_district' in response:
            self.enterprise_district = response['enterprise_district']
        if 'enterprise_name' in response:
            self.enterprise_name = response['enterprise_name']
        if 'enterprise_province' in response:
            self.enterprise_province = response['enterprise_province']
        if 'enterprise_type' in response:
            self.enterprise_type = response['enterprise_type']
        if 'logistics_account_status' in response:
            self.logistics_account_status = response['logistics_account_status']
        if 'phone' in response:
            self.phone = response['phone']
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.LogisticsAccountStatusDTO import LogisticsAccountStatusDTO
class AlipayOpenInstantdeliveryAccountQueryResponse(AlipayResponse):
    """Response model for the alipay.open.instantdelivery.account.query API.

    Generated-SDK style: one private attribute plus a property getter/setter
    pair per response field, and parse_response_content() which copies the
    decoded response dict into the typed fields.
    """
    def __init__(self):
        super(AlipayOpenInstantdeliveryAccountQueryResponse, self).__init__()
        # Every field is None until parse_response_content() populates it.
        self._balance = None
        self._business_license = None
        self._business_scope = None
        self._credit_code = None
        self._email = None
        self._enterprise_address = None
        self._enterprise_city = None
        self._enterprise_district = None
        self._enterprise_name = None
        self._enterprise_province = None
        self._enterprise_type = None
        self._logistics_account_status = None
        self._phone = None
    @property
    def balance(self):
        return self._balance
    @balance.setter
    def balance(self, value):
        self._balance = value
    @property
    def business_license(self):
        return self._business_license
    @business_license.setter
    def business_license(self, value):
        self._business_license = value
    @property
    def business_scope(self):
        return self._business_scope
    @business_scope.setter
    def business_scope(self, value):
        self._business_scope = value
    @property
    def credit_code(self):
        return self._credit_code
    @credit_code.setter
    def credit_code(self, value):
        self._credit_code = value
    @property
    def email(self):
        return self._email
    @email.setter
    def email(self, value):
        self._email = value
    @property
    def enterprise_address(self):
        return self._enterprise_address
    @enterprise_address.setter
    def enterprise_address(self, value):
        self._enterprise_address = value
    @property
    def enterprise_city(self):
        return self._enterprise_city
    @enterprise_city.setter
    def enterprise_city(self, value):
        self._enterprise_city = value
    @property
    def enterprise_district(self):
        return self._enterprise_district
    @enterprise_district.setter
    def enterprise_district(self, value):
        self._enterprise_district = value
    @property
    def enterprise_name(self):
        return self._enterprise_name
    @enterprise_name.setter
    def enterprise_name(self, value):
        self._enterprise_name = value
    @property
    def enterprise_province(self):
        return self._enterprise_province
    @enterprise_province.setter
    def enterprise_province(self, value):
        self._enterprise_province = value
    @property
    def enterprise_type(self):
        return self._enterprise_type
    @enterprise_type.setter
    def enterprise_type(self, value):
        self._enterprise_type = value
    @property
    def logistics_account_status(self):
        return self._logistics_account_status
    @logistics_account_status.setter
    def logistics_account_status(self, value):
        # Coerces each list element to LogisticsAccountStatusDTO.
        # NOTE(review): non-list values are silently ignored (the field
        # stays None); preserved as generated -- confirm against the SDK
        # code generator before changing.
        if isinstance(value, list):
            self._logistics_account_status = list()
            for i in value:
                if isinstance(i, LogisticsAccountStatusDTO):
                    self._logistics_account_status.append(i)
                else:
                    self._logistics_account_status.append(LogisticsAccountStatusDTO.from_alipay_dict(i))
    @property
    def phone(self):
        return self._phone
    @phone.setter
    def phone(self, value):
        self._phone = value
    def parse_response_content(self, response_content):
        # Delegate raw parsing to AlipayResponse, then assign each present
        # key through its property setter (which performs any coercion).
        response = super(AlipayOpenInstantdeliveryAccountQueryResponse, self).parse_response_content(response_content)
        if 'balance' in response:
            self.balance = response['balance']
        if 'business_license' in response:
            self.business_license = response['business_license']
        if 'business_scope' in response:
            self.business_scope = response['business_scope']
        if 'credit_code' in response:
            self.credit_code = response['credit_code']
        if 'email' in response:
            self.email = response['email']
        if 'enterprise_address' in response:
            self.enterprise_address = response['enterprise_address']
        if 'enterprise_city' in response:
            self.enterprise_city = response['enterprise_city']
        if 'enterprise_district' in response:
            self.enterprise_district = response['enterprise_district']
        if 'enterprise_name' in response:
            self.enterprise_name = response['enterprise_name']
        if 'enterprise_province' in response:
            self.enterprise_province = response['enterprise_province']
        if 'enterprise_type' in response:
            self.enterprise_type = response['enterprise_type']
        if 'logistics_account_status' in response:
            self.logistics_account_status = response['logistics_account_status']
        if 'phone' in response:
            self.phone = response['phone']
|
en
| 0.352855
|
#!/usr/bin/env python # -*- coding: utf-8 -*-
| 2.020543
| 2
|
hello.py
|
aswinkumarrk/Python
| 0
|
6629670
|
<gh_stars>0
# Converted from Python 2 print statements (Python 2 is end-of-life) to the
# print() function. Each call takes a single string argument, so the output
# is byte-identical and the script now runs on both Python 2 and 3.
print("Hello!! AswinKumar!!")
print("Haha! Baby This is my first code in python,few of my python friends have said me python is a baby language,It is very easy to learn,just try it out.")
print(" I am now here learning python")
print("Are you joining me to learn python? you are always welcome!")
|
# Converted from Python 2 print statements (Python 2 is end-of-life) to the
# print() function. Each call takes a single string argument, so the output
# is byte-identical and the script now runs on both Python 2 and 3.
print("Hello!! AswinKumar!!")
print("Haha! Baby This is my first code in python,few of my python friends have said me python is a baby language,It is very easy to learn,just try it out.")
print(" I am now here learning python")
print("Are you joining me to learn python? you are always welcome!")
|
none
| 1
| 2.77879
| 3
|
|
mcarch/model/mod/logs.py
|
Scotsguy/MCArchive
| 0
|
6629671
|
"""
This module contains models for "logs" which are change submissions for mods.
"""
import datetime
from collections import OrderedDict
from sqlalchemy.orm import backref
from .base import *
from mcarch.model.user import User
from mcarch.app import db
def gen_diffs(mod):
    """
    Takes a mod and generates a list of diffs representing the changes made in each log entry.
    """
    # A blank LogMod serves as the baseline for the first entry; afterwards
    # each entry is diffed against the one before it.
    prev = LogMod()
    for entry in mod.logs:
        yield {
            'obj': entry,
            'user': entry.user,
            'date': entry.date,
            'diff': prev.diff(entry),
        }
        prev = entry
def slow_gen_diffs(logs):
    """
    Unlike regular `gen_diffs`, this takes a list of logs entries that aren't
    all for the same mod, and returns a list of diffs between each one and its
    previous version. This is probably much slower than `gen_diffs` since it
    queries the database for the entry previous to each one in the list.
    """
    for entry in logs:
        # Look up the entry immediately preceding this one for the same mod;
        # fall back to a blank baseline when there is none.
        prev = LogMod.query.filter_by(cur_id=entry.cur_id, index=entry.index - 1).first()
        if prev is None:
            prev = LogMod()
        yield {
            'obj': entry,
            'user': entry.user,
            'date': entry.date,
            'diff': prev.diff(entry),
        }
# Association tables linking log entries to mod authors and game versions.
authored_by_table = mk_authored_by_table('log_mod')
for_game_vsn_table = mk_for_game_vsn_table('log_mod_version')
class LogMod(ModBase, db.Model):
    """Represents a change made to a mod."""
    __tablename__ = "log_mod"
    # The user that made this change.
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=True)
    user = db.relationship('User', backref='changes')
    # Date this change was made.
    date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    cur_id = db.Column(db.Integer, db.ForeignKey('mod.id'), nullable=True)
    current = db.relationship("Mod", backref=backref("logs", order_by='LogMod.date'))
    # Index within this mod's list of versions.
    index = db.Column(db.Integer, nullable=False)
    authors = db.relationship(
        "ModAuthor",
        secondary=authored_by_table)
    mod_vsns = db.relationship("LogModVersion", back_populates="mod")
    def blank(self, **kwargs): return LogMod(**kwargs)
    def blank_child(self, **kwargs): return LogModVersion(**kwargs)
    def copy_from(self, other):
        # cur_id must reference the live Mod row: a log entry carries it in
        # its own cur_id, while a live Mod is identified by its primary key.
        # BUG FIX: the original unconditionally overwrote cur_id with
        # other.id after the hasattr() assignment, making that branch dead
        # code and pointing cur_id at a log row's id on log-to-log copies.
        if hasattr(other, 'cur_id'):
            self.cur_id = other.cur_id
        else:
            self.cur_id = other.id
        super(ModBase, self).copy_from(other)
class LogModVersion(ModVersionBase, db.Model):
    """Log (historical) snapshot of a mod version."""
    __tablename__ = "log_mod_version"
    mod_id = db.Column(db.Integer, db.ForeignKey('log_mod.id'))
    mod = db.relationship("LogMod", back_populates="mod_vsns")
    game_vsns = db.relationship(
        "GameVersion",
        secondary=for_game_vsn_table)
    files = db.relationship("LogModFile", back_populates="version")
    cur_id = db.Column(db.Integer, db.ForeignKey('mod_version.id'), nullable=True)
    current = db.relationship("ModVersion")
    def blank(self, **kwargs): return LogModVersion(**kwargs)
    def blank_child(self, **kwargs): return LogModFile(**kwargs)
    def copy_from(self, other):
        # cur_id must reference the live ModVersion row: reuse the source's
        # cur_id when copying from another log snapshot, otherwise use the
        # live object's primary key.
        # BUG FIX: the original unconditionally overwrote cur_id with
        # other.id, making the hasattr() branch dead code.
        if hasattr(other, 'cur_id'):
            self.cur_id = other.cur_id
        else:
            self.cur_id = other.id
        super(ModVersionBase, self).copy_from(other)
class LogModFile(ModFileBase, db.Model):
    """Log (historical) snapshot of a mod file."""
    __tablename__ = "log_mod_file"
    version_id = db.Column(db.Integer, db.ForeignKey('log_mod_version.id'))
    version = db.relationship("LogModVersion", back_populates="files")
    cur_id = db.Column(db.Integer, db.ForeignKey('mod_file.id'), nullable=True)
    current = db.relationship("ModFile")
    def blank(self, **kwargs): return LogModFile(**kwargs)
    def copy_from(self, other):
        # cur_id must reference the live ModFile row: reuse the source's
        # cur_id when copying from another log snapshot, otherwise use the
        # live object's primary key.
        # BUG FIX: the original unconditionally overwrote cur_id with
        # other.id, making the hasattr() branch dead code.
        if hasattr(other, 'cur_id'):
            self.cur_id = other.cur_id
        else:
            self.cur_id = other.id
        super(ModFileBase, self).copy_from(other)
|
"""
This module contains models for "logs" which are change submissions for mods.
"""
import datetime
from collections import OrderedDict
from sqlalchemy.orm import backref
from .base import *
from mcarch.model.user import User
from mcarch.app import db
def gen_diffs(mod):
    """
    Takes a mod and generates a list of diffs representing the changes made in each log entry.
    """
    # A blank LogMod serves as the baseline for the first entry; afterwards
    # each entry is diffed against the one before it.
    prev = LogMod()
    for entry in mod.logs:
        yield {
            'obj': entry,
            'user': entry.user,
            'date': entry.date,
            'diff': prev.diff(entry),
        }
        prev = entry
def slow_gen_diffs(logs):
    """
    Unlike regular `gen_diffs`, this takes a list of logs entries that aren't
    all for the same mod, and returns a list of diffs between each one and its
    previous version. This is probably much slower than `gen_diffs` since it
    queries the database for the entry previous to each one in the list.
    """
    for entry in logs:
        # Look up the entry immediately preceding this one for the same mod;
        # fall back to a blank baseline when there is none.
        prev = LogMod.query.filter_by(cur_id=entry.cur_id, index=entry.index - 1).first()
        if prev is None:
            prev = LogMod()
        yield {
            'obj': entry,
            'user': entry.user,
            'date': entry.date,
            'diff': prev.diff(entry),
        }
# Association tables linking log entries to mod authors and game versions.
authored_by_table = mk_authored_by_table('log_mod')
for_game_vsn_table = mk_for_game_vsn_table('log_mod_version')
class LogMod(ModBase, db.Model):
    """Represents a change made to a mod."""
    __tablename__ = "log_mod"
    # The user that made this change.
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=True)
    user = db.relationship('User', backref='changes')
    # Date this change was made.
    date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    cur_id = db.Column(db.Integer, db.ForeignKey('mod.id'), nullable=True)
    current = db.relationship("Mod", backref=backref("logs", order_by='LogMod.date'))
    # Index within this mod's list of versions.
    index = db.Column(db.Integer, nullable=False)
    authors = db.relationship(
        "ModAuthor",
        secondary=authored_by_table)
    mod_vsns = db.relationship("LogModVersion", back_populates="mod")
    def blank(self, **kwargs): return LogMod(**kwargs)
    def blank_child(self, **kwargs): return LogModVersion(**kwargs)
    def copy_from(self, other):
        # cur_id must reference the live Mod row: a log entry carries it in
        # its own cur_id, while a live Mod is identified by its primary key.
        # BUG FIX: the original unconditionally overwrote cur_id with
        # other.id after the hasattr() assignment, making that branch dead
        # code and pointing cur_id at a log row's id on log-to-log copies.
        if hasattr(other, 'cur_id'):
            self.cur_id = other.cur_id
        else:
            self.cur_id = other.id
        super(ModBase, self).copy_from(other)
class LogModVersion(ModVersionBase, db.Model):
    """Log (historical) snapshot of a mod version."""
    __tablename__ = "log_mod_version"
    mod_id = db.Column(db.Integer, db.ForeignKey('log_mod.id'))
    mod = db.relationship("LogMod", back_populates="mod_vsns")
    game_vsns = db.relationship(
        "GameVersion",
        secondary=for_game_vsn_table)
    files = db.relationship("LogModFile", back_populates="version")
    cur_id = db.Column(db.Integer, db.ForeignKey('mod_version.id'), nullable=True)
    current = db.relationship("ModVersion")
    def blank(self, **kwargs): return LogModVersion(**kwargs)
    def blank_child(self, **kwargs): return LogModFile(**kwargs)
    def copy_from(self, other):
        # cur_id must reference the live ModVersion row: reuse the source's
        # cur_id when copying from another log snapshot, otherwise use the
        # live object's primary key.
        # BUG FIX: the original unconditionally overwrote cur_id with
        # other.id, making the hasattr() branch dead code.
        if hasattr(other, 'cur_id'):
            self.cur_id = other.cur_id
        else:
            self.cur_id = other.id
        super(ModVersionBase, self).copy_from(other)
class LogModFile(ModFileBase, db.Model):
    """Log (historical) snapshot of a mod file."""
    __tablename__ = "log_mod_file"
    version_id = db.Column(db.Integer, db.ForeignKey('log_mod_version.id'))
    version = db.relationship("LogModVersion", back_populates="files")
    cur_id = db.Column(db.Integer, db.ForeignKey('mod_file.id'), nullable=True)
    current = db.relationship("ModFile")
    def blank(self, **kwargs): return LogModFile(**kwargs)
    def copy_from(self, other):
        # cur_id must reference the live ModFile row: reuse the source's
        # cur_id when copying from another log snapshot, otherwise use the
        # live object's primary key.
        # BUG FIX: the original unconditionally overwrote cur_id with
        # other.id, making the hasattr() branch dead code.
        if hasattr(other, 'cur_id'):
            self.cur_id = other.cur_id
        else:
            self.cur_id = other.id
        super(ModFileBase, self).copy_from(other)
|
en
| 0.962129
|
This module contains models for "logs" which are change submissions for mods. Takes a mod and generates a list of diffs representing the changes made in each log entry. Unlike regular `gen_diffs`, this takes a list of logs entries that aren't all for the same mod, and returns a list of diffs between each one and its previous version. This is probably much slower than `gen_diffs` since it queries the database for the entry previous to each one in the list. Represents a change made to a mod. # The user that made this change. # Date this change was made. # Index within this mod's list of versions.
| 2.629574
| 3
|
pytos/secureapp/helpers.py
|
geewrd/pytos
| 0
|
6629672
|
import logging
from requests.exceptions import RequestException
from pytos.common.definitions.Url_Params_Builder import URLParamBuilderDict
from pytos.common.exceptions import REST_Not_Found_Error, REST_Client_Error, REST_Unauthorized_Error, \
REST_Bad_Request_Error
from pytos.common.logging.definitions import HELPERS_LOGGER_NAME
from pytos.secureapp.xml_objects.rest import Connection_List, User_List, Applications_List, Services_List, Customers_List, \
Network_Objects_List, Application, User, Single_Service, Group_Service, Basic_Network_Object,\
Range_Network_Object, Host_Network_Object, Subnet_Network_Object, Group_Network_Object, \
Detailed_Application_Connection, Customer, Connections_To_Applications, Connection_To_Application,\
Application_Interfaces, Connection_To_Application_Packs, Detailed_Connection_To_Application_Pack, \
ConnectionExtendedList, VM_Instances
from pytos.securechange.helpers import Secure_Change_Helper
logger = logging.getLogger(HELPERS_LOGGER_NAME)
class Secure_App_Helper(Secure_Change_Helper):
"""
This class is used to interact via HTTP with SecureApp.
It also allows for easy sending of email messages and writing to the SecureChange Message Board.
"""
    def __init__(self, hostname, login_data, **kwargs):
        """
        :param hostname: The SecureApp hostname with which we will communicate via HTTP.
        :type hostname: str
        :param login_data: A tuple of (username,password) used for basic authentication with the specified hostname.
        :type login_data: tuple
        :param message_board_enabled: (Optional) If set to False, Message Board functionality will be disabled.
        :type message_board_enabled: bool
        """
        logger.debug("Setting up SecureApp Helper.")
        # Local caches of SecureApp repository objects; populated lazily by
        # the corresponding get_*_list() methods.
        self._app_list = Applications_List([])
        self._service_list = Services_List([])
        self._user_list = User_List([])
        self._customers_list = Customers_List([])
        super().__init__(hostname, login_data, **kwargs)
def get_user_list(self):
"""
Get the list of currently configured SecureApp users.
:return: The list of currently configured SecureApp users.
:rtype:User_List
:raise IOError: If there was a communication error.
"""
logger.info("Getting SecureApp users list.")
try:
response_string = self.get_uri("/securechangeworkflow/api/secureapp/repository/users",
expected_status_codes=200).response.content
except RequestException:
message = "Failed to GET SecureApp users list"
logger.critical(message)
raise IOError(message)
self._user_list = User_List.from_xml_string(response_string)
return self._user_list
def get_user_by_id(self, user_id):
"""
Get the SecureApp user whose ID matches the specified ID.
:param user_id: The ID for the user which will be returned.
:type user_id: int
:return: The user whose ID matches the specified ID.
:rtype:User
:raise ValueError: If an user with the specified ID is not found.
:raise IOError: If there was a communication error.
"""
logger.info("Getting SecureApp users with ID '%s'.", user_id)
try:
response_string = self.get_uri("/securechangeworkflow/api/secureapp/repository/users/{}".format(user_id),
expected_status_codes=200).response.content
except REST_Not_Found_Error:
message = "User with ID '{}' does not exist.".format(user_id)
logger.critical(message)
raise ValueError(message)
except RequestException:
message = "Failed to GET SecureApp users list."
logger.critical(message)
raise IOError(message)
return User.from_xml_string(response_string)
def get_user_by_name(self, user_name):
"""
Get the SecureApp user whose name matches the specified name.
:param user_name: The name for the user which will be returned.
:type user_name: name
:return: The user whose name matches the specified name.
:rtype:User
:raise ValueError: If an user with the specified name is not found.
:raise IOError: If there was a communication error.
"""
logger.info("Getting SecureApp user with name '%s'.", user_name)
self.get_user_list()
for user in self._user_list:
if user.name == user_name:
return user
message = "An user with the name '{}' does not exist.".format(user_name)
logger.critical(message)
raise ValueError(message)
def get_app_by_name(self, app_name, app_domain=None):
"""
Get the SecureApp application whose name matches the specified name.
:param app_name: The name of the application to be returned.
:type app_name: str
:param app_domain: The domain where app resides
:type app_domain: str
:return: The application whose name matches the specified name.
:rtype:Application
:raise ValueError: If an application with the specified name is not found.
"""
if app_domain:
log_msg = "Getting SecureApp application with name '{}' and domain name '{}'.".format(app_name, app_domain)
else:
log_msg = "Getting SecureApp application with name '{}'.".format(app_name)
logger.info(log_msg)
try:
response_string = self.get_uri("/securechangeworkflow/api/secureapp/repository/applications?name={}".format(app_name),
expected_status_codes=200).response.content
except RequestException:
message = "Failed to GET SecureApp application list"
logger.critical(message)
raise IOError(message)
except REST_Not_Found_Error:
message = "An application with the name '{}' does not exist.".format(app_name)
logger.critical(message)
raise ValueError(message)
found_apps = Applications_List.from_xml_string(response_string)
try:
if app_domain:
try:
return [app for app in found_apps if app.customer.name.lower() == app_domain.lower() and app.name == app_name][0]
except (KeyError, AttributeError):
logger.info("No domain found, assuming single domain mode")
return [app for app in found_apps if app.name == app_name][0]
else:
return [app for app in found_apps if app.name == app_name][0]
except IndexError:
message = "An application with the name '{}' does not exist.".format(app_name)
logger.critical(message)
raise ValueError(message)
def get_app_by_id(self, app_id):
"""
Get the SecureApp application whose ID matches the specified ID.
:param app_id: The ID of the application to be returned.
:type app_id: int|str
:return: The application whose ID matches the specified ID.
:rtype:Application
:raise ValueError: If an application with the specified ID is not found.
"""
logger.info("Getting SecureApp application with ID '%s'.", app_id)
try:
response_string = self.get_uri(
"/securechangeworkflow/api/secureapp/repository/applications/{}".format(app_id),
expected_status_codes=200).response.content
except REST_Not_Found_Error:
message = "Application with ID {} does nto exist".format(app_id)
logger.critical(message)
raise ValueError(message)
except RequestException:
message = "Failed to GET SecureApp with ID".format(app_id)
logger.critical(message)
raise IOError(message)
return Application.from_xml_string(response_string)
def get_application_list(self):
"""
Get the list of currently configured SecureApp applications.
:return: The currently configured SecureApp applications list.
:rtype:Applications_List
:raise IOError: If there was a communication error.
"""
logger.info("Getting SecureApp applications list.")
try:
response_string = self.get_uri("/securechangeworkflow/api/secureapp/repository/applications",
expected_status_codes=200).response.content
except RequestException:
message = "Failed to GET SecureApp application list"
logger.critical(message)
raise IOError(message)
self._app_list = Applications_List.from_xml_string(response_string)
return self._app_list
def get_application_list_by_user_permissions(self, owner=True, editor=True, user_id=None):
"""
Get the list of currently configured SecureApp applications.
:param owner: Applications where user is owner
:type owner: bool
:param editor: Applications where user is editor
:type editor: bool
:param user_id: The user ID who has permissions. If not user for API would be used for filter
:type user_id: str|int
:return: The currently configured SecureApp applications list.
:rtype:Applications_List
:raise IOError: If there was a communication error.
"""
if not user_id:
log_user_id = "used for API call"
else:
log_user_id = user_id
logger.info("Getting SecureApp applications list where user {} is owner({}) and editor({}).".format(
log_user_id, owner, editor
))
filter_params = []
if owner:
filter_params.append("app_owner")
if editor:
filter_params.append("app_editor")
if filter_params:
params = ",".join(filter_params)
query_filter = "?app_permissions={}".format(params)
if user_id:
query_filter += "&userId={}".format(user_id)
else:
query_filter = ""
try:
response_string = self.get_uri("/securechangeworkflow/api/secureapp/repository/applications{}".format(
query_filter), expected_status_codes=200).response.content
except RequestException:
message = "Failed to GET SecureApp application list"
logger.critical(message)
raise IOError(message)
return Applications_List.from_xml_string(response_string)
def get_services_list(self, param_builder=None):
"""
Get the list of currently configured SecureApp services.
:param param_builder: Filter parameters
:type param_builder: URLParamBuilderInterface
:return: The currently configured SecureApp services list.
:rtype:Services_List
:raise IOError: If there was a communication error.
"""
logger.info("Getting SecureApp services list")
url = "/securechangeworkflow/api/secureapp/repository/services"
if param_builder:
url = "{}{}".format(url, param_builder.build())
try:
response_string = self.get_uri(url, expected_status_codes=200).response.content
except RequestException:
raise IOError("Failed to GET SecureApp services list.")
return Services_List.from_xml_string(response_string)
def get_all_services(self, global_service_only=False):
"""
Get the list of currently configured SecureApp services.
:param global_service_only: Retrieve global services
:return: The currently configured SecureApp services list.
:rtype:Services_List
:raise IOError: If there was a communication error.
"""
if global_service_only:
param_dict = {'globals_only': True}
else:
param_dict = {}
param_builder = URLParamBuilderDict(param_dict)
self._service_list = self.get_services_list(param_builder)
return self._service_list
def get_service_list_available_for_app(self, app_id=None, app_name=None, include_global=False):
"""
Get the list of services that are available to be used in specific application:
services created locally in this application and services created globally for all applications
:param app_id: ID of an application
:type app_id: int
:param app_name: Name of the application
:type app_name: str
:param include_global: If to include or not global services
:type include_global: bool
:raise ValueError: If wrong parameters are used
:raise IOError: If there was problem in communication or API request
:return: The list of services
:rtype:Services_List
"""
if not app_id and not app_name:
msg = "Can't get the list of available services for" \
" application as no ID or name of application is provided"
logger.critical(msg)
raise ValueError(msg)
if not app_id:
app_id = self.get_app_by_name(app_name).id
logger.info("Getting all services available for application with ID %s", app_id)
all_services = self.get_all_services()
return [service for service in all_services
if (service.application_id is not None and service.application_id == app_id)
or (service.is_global() and include_global)]
def search_services_available_for_app(self, service_id=None, service_name=None,
app_id=None, app_name=None, include_global=False):
"""
Find services available for application with specified name or ID
:param service_name: The name of the service
:param service_id: The ID of the service
:param app_id: The ID of the application
:param app_name: The name of the application
:param include_global: If to include global services in search
:return: the list of services found with given id or name
"""
if not any((service_id, service_name)):
msg = "No service name or ID provided."
logger.critical(msg)
raise ValueError(msg)
if not app_id and not app_name:
msg = "No application name or ID provided."
logger.critical(msg)
raise ValueError(msg)
if not app_id:
app_id = self.get_app_by_name(app_name).id
if service_name:
service_info = "with name '{}'".format(service_name)
else:
service_info = "with ID {}".format(service_id)
logger.info("Searching for services {} for application with ID {}".format(service_info, app_id))
available_services = self.get_service_list_available_for_app(app_id, app_name, include_global)
if service_name:
return [service for service in available_services
if (service_name.lower() == service.name.lower())]
else:
return [service for service in available_services
if service.id == service_id]
def get_service_by_id(self, service_id):
"""
Get the SecureApp service by ID
:param service_id: The ID of the service to be returned.
:type service_id: str|int
:return: The service whose name matches the specified name.
:rtype:Single_Service|Group_Service
:raise ValueError: If a service with the specified name is not found.
"""
logger.debug("Getting SecureApp service with ID '%s'.", service_id)
# As object does not have attribute, this API will crash. Uncomment when fixed
# try:
# response_string = self.get_uri(
# "/securechangeworkflow/api/secureapp/repository/services/{}".format(service_id),
# expected_status_codes=200).response.content
# except REST_Not_Found_Error:
# message = "Service with ID {} does not exist.".format(service_id)
# logger.critical(message)
# raise ValueError(message)
# except RequestException:
# message = "Failed to get SecureApp service with ID {}.".format(service_id)
# logger.critical(message)
# raise IOError(message)
# return Service_Object.from_xml_string_auto_type(response_string)
try:
return [service for service in self.get_all_services() if str(service.id) == str(service_id)][0]
except IndexError:
message = "Service with ID {} does not exist.".format(service_id)
logger.critical(message)
raise ValueError(message)
def get_service_by_name(self, service_name, param_builder=None):
"""
Get the SecureApp service whose name matches the specified name.
:param service_name: The name of the service to be returned.
:type service_name: str
:param param_builder: The URI parameters builder
:type param_builder: T <= pytos.common.API_Defines.Url_Params_Builder.URLParamBuilderInterface
:return: The service whose name matches the specified name.
:rtype:Single_Service|Group_Service
:raise ValueError: If a service with the specified name is not found.
"""
logger.debug("Getting SecureApp service with name '%s'.", service_name)
if not param_builder:
param_builder = URLParamBuilderDict({'name': service_name})
else:
param_builder.set("name", service_name)
try:
return self.get_services_list(param_builder)[0]
except (IndexError, REST_Not_Found_Error):
message = "A service with the name '{}' does not exist.".format(service_name)
logger.critical(message)
raise ValueError(message)
def get_service_list_for_app_id(self, app_id):
"""
Get the list of services for Application by Application ID
:param app_id: The ID of Application in SecureApp to get services
:type app_id: int
:return: The list of services configured for Application
:rtype:Services_List
:raise IOError: If there was a communication error.
"""
logger.info("Getting SecureApp service list for application with ID %s", app_id)
try:
response_string = self.get_uri(
"/securechangeworkflow/api/secureapp/repository/applications/{}/services".format(app_id),
expected_status_codes=200).response.content
except RequestException:
message = "Failed to get SecureApp services list for application with ID {}".format(app_id)
logger.critical(message)
raise IOError(message)
return Services_List.from_xml_string(response_string)
def get_service_list_for_app_name(self, app_name):
"""
Get the list of services for Application by Application name
:param app_name: The Name of the application for provide services list for
:type app_name: str
:return: The list of services configured for Application
:rtype:Services_List
:raise IOError: If there was a communication error.
"""
logger.info("Getting SecureApp services list for application '%s'", app_name)
app_id = self.get_app_by_name(app_name).id
return self.get_service_list_for_app_id(app_id)
def get_connections_list_for_app_id(self, app_id):
"""
Get the SecureApp connections list for the application whose ID matches the specified ID.
:param app_id: The ID of the application whose connections will be returned.
:type app_id: int
:return: The connections list for the application whose ID matches the specified ID.
:rtype:Connection_List
:raise ValueError: If an application with the specified ID is not found.
:raise IOError: If there was a communication error.
"""
logger.info("Getting SecureApp connections list for application with ID '%s'.", app_id)
try:
response_string = self.get_uri(
"/securechangeworkflow/api/secureapp/repository/applications/{}/connections".format(app_id),
expected_status_codes=200).response.content
except REST_Not_Found_Error:
message = "Application with ID {0} does not exist.".format(app_id)
logger.critical(message)
raise ValueError(message)
except RequestException:
message = "Failed to get SecureApp connections list for application with ID {}.".format(app_id)
logger.critical(message)
raise IOError(message)
return Connection_List.from_xml_string(response_string)
def get_extended_connections_list_for_app_id(self, app_id):
"""
Get extended connections (with all information)
:return:
"""
logger.info("Getting SecureApp connections with details for application with ID {}".format(app_id))
try:
response_string = self.get_uri(
"/securechangeworkflow/api/secureapp/repository/applications/{}/connections_extended".format(app_id),
expected_status_codes=200).response.content
except REST_Not_Found_Error:
msg = "Application with ID does not exists".format(app_id)
logger.critical(msg)
raise ValueError(msg)
except RequestException:
msg = "Failed to get SecureApp connections list for application with ID {}".format(app_id)
logger.critical(msg)
raise IOError(msg)
return ConnectionExtendedList.from_xml_string(response_string)
def get_connections_list_for_app_name(self, app_name):
"""
Get the SecureApp connection list for the application whose name matches the specified name.
:param app_name: The name of the application whose connection list will be returned.
:type app_name: str
:return: The connections list for the application whose name matches the specified name.
:rtype:Connection_List
:raise ValueError: If an application with the specified name is not found.
:raise IOError: If there was a communication error.
"""
logger.info("Getting SecureApp connections list for application with name '%s'.", app_name)
app_id = self.get_app_by_name(app_name).id
return self.get_connections_list_for_app_id(app_id)
def get_connection_by_name_for_app_id(self, app_id, connection_name):
"""
Get the SecureApp connection by name for application with specified ID
:param app_id: The ID of application to search connection from
:type app_id: int
:param connection_name: The name of the connection to be returned.
:type connection_name: str
:return: The connection whose name matches the specified name
:rtype:Detailed_Application_Connection
:raise ValueError: If connection with the specified ID is not found.
"""
logger.debug("Getting SecureApp Connection with name '%s' "
"from application with ID %s.", connection_name, app_id)
connection_list = self.get_connections_list_for_app_id(app_id)
for connection in connection_list:
if connection.name.lower() == connection_name.lower():
return connection
message = "A connection with the name '{}' does not exist in application with ID {}.".format(
connection_name,
app_id)
logger.critical(message)
raise ValueError(message)
def get_connection_by_name_for_app_name(self, app_name, connection_name):
"""
Get the SecureApp connection by name for application with specified ID
:param app_name: The name of application to search connection from
:type app_name: str
:param connection_name: The name of the connection to be returned.
:type connection_name: str
:return: The connection whose name matches the specified name
:rtype:Detailed_Application_Connection
:raise ValueError: If connection with the specified ID is not found.
"""
logger.debug("Getting SecureApp Connection with name '%s' "
"from application '%s'.", connection_name, app_name)
app_id = self.get_app_by_name(app_name).id
return self.get_connection_by_name_for_app_id(app_id, connection_name)
def get_network_objects_list_for_app_by_id(self, app_id):
"""
Get the list of network objects for SecureApp application by application ID.
:param app_id: Application ID
:type app_id: int
:return: The list of network objects for the specified application.
:rtype:Network_Objects_List
:raise IOError: If there was a communication error.
"""
logger.info("Getting Network objects list for SecureApp application '%s'.", app_id)
try:
response_string = self.get_uri("/securechangeworkflow/api/secureapp/repository"
"/applications/{}/network_objects".format(app_id),
expected_status_codes=200).response.content
except RequestException:
message = "Failed to GET network objects list for SecureApp application with id '{}'".format(app_id)
logger.critical(message)
raise IOError(message)
try:
network_objects_list = Network_Objects_List.from_xml_string(response_string)
except (ValueError, AttributeError):
message = "Failed to get network objects list for application with id '{}'".format(app_id)
logger.critical(message)
raise ValueError(message)
return network_objects_list
def get_all_network_objects(self):
"""
Get the list of all network objects in SecureApp .
:return: The list of all network objects in SecureApp.
:rtype: Network_Objects_List
:raise IOError: If there was a communication error.
"""
logger.info("Getting network objects list for SecureApp.")
try:
response_string = self.get_uri("/securechangeworkflow/api/secureapp/repository"
"/network_objects", expected_status_codes=200).response.content
except RequestException:
message = "Failed to get network objects list for SecureApp."
logger.critical(message)
raise IOError(message)
try:
network_objects_list = Network_Objects_List.from_xml_string(response_string)
except (ValueError, AttributeError):
message = "Failed to get network objects list for SecureApp."
logger.critical(message)
raise ValueError(message)
return network_objects_list
def get_network_objects_list_for_app_name(self, app_name):
"""
Get the SecureApp network objects list for the application whose name matches the specified name.
:param app_name: The name of the application whose network object list will be returned.
:type app_name: str
:return: The network objects list for the application whose name matches the specified name.
:rtype:Network_Objects_List
:raise ValueError: If an application with the specified name is not found.
:raise IOError: If there was a communication error.
"""
logger.info("Getting network objects list for application with name '%s'.", app_name)
app_id = self.get_app_by_name(app_name).id
return self.get_network_objects_list_for_app_by_id(app_id)
def get_network_object_by_name_for_app_id(self, network_object_name, app_id):
"""
Get the SecureApp network object whose name matches the specified name for the application whose ID matches the
specified ID.
:param app_id: The ID of the application whose network objects will be returned.
:type app_id: int
:param network_object_name: The name of the network object which will be returned.
:type network_object_name: str
:return: The network object whose name matches the specified name for the application whose
ID matches the specified ID.
:rtype:Network_Object_DNS_Host|Network_Object_IP_Address
:raise ValueError: If an application with the specified ID is not found and/or a network object with
the specified name is not found.
:raise IOError: If there was a communication error.
"""
logger.info("Getting network object '%s'.", network_object_name)
try:
response_string = self.get_uri(
"/securechangeworkflow/api/secureapp/repository/applications/{}/network_objects".format(app_id),
expected_status_codes=200).response.content
except REST_Not_Found_Error:
message = "Application with ID '{}' does not exist.".format(app_id)
logger.critical(message)
raise ValueError(message)
except RequestException:
message = "Failed to GET SecureApp network objects list for application with ID {}.".format(app_id)
logger.critical(message)
raise IOError(message)
for network_object in Network_Objects_List.from_xml_string(response_string):
if network_object.name == network_object_name:
return network_object
message = "Could not find network object with name '{}' for application with ID {}.".format(network_object_name,
app_id)
logger.critical(message)
raise ValueError(message)
def get_network_object_by_id_for_app_id(self, network_object_id, app_id):
"""
Get the SecureApp network object whose id matches the specified id for the application whose ID matches the
specified ID.
:param app_id: The ID of the application whose network objects will be returned.
:type app_id: int
:param network_object_id: The id of the network object which will be returned.
:type network_object_id: int
:return: The network object whose id matches the specified id for the application whose
ID matches the specified ID.
:rtype:Network_Object_DNS_Host|Network_Object_IP_Address
:raise ValueError: If an application with the specified ID is not found and/or a network object with
the specified name is not found.
:raise IOError: If there was a communication error.
"""
logger.info("Getting network object with id '%s'.", network_object_id)
try:
response_string = self.get_uri(
"/securechangeworkflow/api/secureapp/repository/applications/{}/network_objects".format(app_id),
expected_status_codes=200).response.content
except REST_Not_Found_Error:
message = "Application with ID '{}' does not exist.".format(app_id)
logger.critical(message)
raise ValueError(message)
except RequestException:
message = "Failed to GET SecureApp network objects list for application with ID {}.".format(app_id)
logger.critical(message)
raise IOError(message)
for network_object in Network_Objects_List.from_xml_string(response_string):
if network_object.id == network_object_id:
return network_object
message = "Could not find network object with id '{}' for application with ID {}.".format(network_object_id,
app_id)
logger.critical(message)
raise ValueError(message)
def post_apps(self, apps):
"""
Create the specified SecureApp application object/objects in SecureApp.
:param apps: The application object/objects to create in SecureApp.
:type apps:Application or list of Application
:rtype: bool
:raise ValueError: If there was a problem with the parameters.
:raise IOError: If there was a communication error.
:return: The ID of the created application.
If more than one object is created, (True, None) is returned.
"""
logger.info("Creating SecureApp applications.")
app_list = Applications_List([])
# Handle a list of apps
if isinstance(apps, list):
app_list.extend(apps)
expected_status_code = [200, 201]
if len(apps) == 0:
message = "The list of applications to create is empty."
logger.critical(message)
raise ValueError(message)
elif isinstance(apps, Applications_List):
app_list.extend(apps)
expected_status_code = [200, 201]
if len(apps) == 0:
message = "The list of applications to create is empty."
logger.critical(message)
raise ValueError(message)
elif isinstance(apps, Application):
app_list.append(apps)
expected_status_code = 201
else:
message = "The provided parameter must be a list of applications, " \
"Secure_App.XML_Objects.REST.Applications_List, or Application"
logger.critical(message)
raise ValueError(message)
try:
response = self.post_uri("/securechangeworkflow/api/secureapp/repository/applications/",
app_list.to_xml_string().encode(), expected_status_codes=expected_status_code)
if expected_status_code == 201:
app_id = response.get_created_item_id()
return app_id
return True
except RequestException as error:
message = "Could not create the following applications: '{}', error was '{}'.".format(
[app.name for app in app_list], error)
logger.critical(message)
raise IOError(message)
except REST_Client_Error as error:
message = "Could not create the following applications: '{}', error was '{}'.".format(
[app.name for app in app_list], error)
logger.critical(message)
raise ValueError(message)
def update_app(self, apps, customer_name=None):
"""
Update the specified SecureApp application object/objects in SecureApp.
:param apps: The application object/objects to be updated in SecureApp.
:type apps:Application or list of Application
:return: Returns True or False if updated/not updated
:rtype: bool
:raise ValueError: If there was a problem with the parameters.
:raise IOError: If there was a communication error.
If more than one object is created, (True, None) is returned.
"""
logger.info("Creating SecureApp applications.")
app_list = Applications_List([])
expected_status_code = [200, 201]
# Handle a list of apps
if isinstance(apps, list):
app_list.extend(apps)
if len(apps) == 0:
message = "The list of applications to update is empty."
logger.critical(message)
raise ValueError(message)
elif isinstance(apps, Applications_List):
app_list.extend(apps)
if len(apps) == 0:
message = "The list of applications to update is empty."
logger.critical(message)
raise ValueError(message)
elif isinstance(apps, Application):
app_list.append(apps)
else:
message = "The provided parameter must be a list of applications, " \
"Secure_App.XML_Objects.REST.Applications_List, or Application"
logger.critical(message)
raise ValueError(message)
# BUG: around for current bug that will return 200 if id is not specified but application will not be updated
for app in app_list:
if not app.id:
try:
app.id = self.get_app_by_name(app.name, customer_name).id
except (ValueError, AttributeError, IOError):
message = "Failed to get id for application '{}'.".format(app.name)
logger.critical(message)
raise ValueError(message)
try:
self.put_uri("/securechangeworkflow/api/secureapp/repository/applications/",
app_list.to_xml_string().encode(), expected_status_codes=expected_status_code)
return True
except RequestException as error:
message = "Could not update the following applications: '{}', error was '{}'.".format(
[app.name for app in app_list], error)
logger.critical(message)
raise IOError(message)
except REST_Client_Error as error:
message = "Could not update the following applications: '{}', error was '{}'.".format(
[app.name for app in app_list], error)
logger.critical(message)
raise ValueError(message)
def delete_apps(self, apps):
"""
Delete the specified SecureApp application object/objects in SecureApp.
:param apps: The application object/objects to create in SecureApp.
:type apps:Application|Applications_List|list[Application]
:return: True if the application creation was successful.
:rtype: bool
:raise ValueError: If the specified application does not exist or there was another problem with the parameters.
:raise IOError: If there was a communication error.
"""
logger.info("Deleting applications from SecureApp.")
# Handle a list of apps
if isinstance(apps, list):
if len(apps) == 0:
raise ValueError("The list of applications to delete is empty.")
else:
for app in apps:
try:
self.delete_uri("/securechangeworkflow/api/secureapp/repository/applications/{}".format(app.id),
expected_status_codes=200)
except REST_Client_Error as error:
message = "Could not delete the following applications: '{}', error was '{}'.".format(
[app.name for app in apps], error)
logger.critical(message)
raise ValueError(message)
except RequestException as error:
message = "Could not delete the following applications: '{}', error was '{}'.".format(
[app.name for app in apps], error)
logger.critical(message)
raise IOError(message)
return True
# Handle Applications_List
elif isinstance(apps, Applications_List):
if len(apps) == 0:
raise ValueError("The applications list to delete is empty.")
else:
for app in apps:
try:
self.delete_uri("/securechangeworkflow/api/secureapp/repository/applications/{}".format(app.id),
expected_status_codes=200)
except REST_Client_Error as error:
message = "Could not delete the following applications: '{}', error was '{}'.".format(
[app.name for app in apps], error)
logger.critical(message)
raise ValueError(message)
except RequestException as error:
message = "Could not delete the following applications: '{}', error was '{}'.".format(
[app.name for app in apps], error)
logger.critical(message)
raise IOError(message)
return True
# Handle Application
elif isinstance(apps, Application):
try:
self.delete_uri("/securechangeworkflow/api/secureapp/repository/applications/{}".format(apps.id),
expected_status_codes=200)
return True
except REST_Client_Error as error:
message = "Could not delete the following application: '{}', error was '{}'.".format(
apps.name, error)
logger.critical(message)
raise ValueError(message)
except RequestException as error:
message = "Could not delete the following application: '{}', error was '{}'.".format(
apps.name, error)
logger.critical(message)
raise IOError(message)
else:
raise ValueError(
'The provided parameter must be a list of applications,Applications_List,'
' or Application')
def delete_app_by_id(self, app_id):
"""
Delete the SecureApp application with the specified ID.
:param app_id: The ID of the application to be deleted.
:type app_id: int
:return: True if successful.
:rtype: bool
:raise ValueError: If an application with the specified ID is not found.
:raise IOError: If there was a communication error.
"""
logger.info("Deleting application with ID '%s' from SecureApp.", app_id)
try:
self.delete_uri("/securechangeworkflow/api/secureapp/repository/applications/{}".format(app_id),
expected_status_codes=200)
except REST_Client_Error as error:
message = "Could not delete application with ID : '{}', error was '{}'.".format(app_id, error)
logger.critical(message)
raise ValueError(message)
except RequestException as error:
message = "Could not delete application with ID : '{}', error was '{}'.".format(app_id, error)
logger.critical(message)
raise IOError(message)
return True
def delete_app_by_name(self, app_name):
"""
Delete the SecureApp application with the specified name.
:param app_name: The name of the application to be deleted.
:type app_name: str
"""
app_id = self.get_app_by_name(app_name).id
return self.delete_app_by_id(app_id)
def delete_all_apps(self):
"""
Delete all configured SecureApp applications.
:return: True if successful.
:rtype: bool
:raise IOError: If there was a communication error.
"""
logger.info("Deleting all existing applications from SecureApp.")
self._app_list = self.get_application_list()
for app in self._app_list:
try:
self.delete_app_by_id(app.id)
except (RequestException, ValueError) as delete_error:
message = "Could not delete application with ID : '{}', error was '{}'.".format(app.id, delete_error)
logger.critical(message)
raise IOError(message)
return True
def create_users(self, users):
"""
Create the specified SecureApp user object/objects in SecureApp.
:param users: The user object/objects to create in SecureApp.
:type users:User_List|User|list[User]
:return: The ID of the created user.
If more than one object is created, True is returned.
:rtype: bool
:raise ValueError: If there was a problem with the parameters.
:raise IOError: If there was a communication error.
"""
logger.info("Creating SecureApp users.")
users_list = User_List([])
# Handle a list of users
if isinstance(users, list):
if len(users) == 0:
message = "The list of users to create is empty."
logger.critical(message)
raise ValueError(message)
else:
users_list.extend(users_list)
if len(users) == 1:
expected_status_code = 201
else:
expected_status_code = 200
elif isinstance(users, User_List):
if len(users) == 0:
message = "The list of users to create is empty."
logger.critical(message)
raise ValueError(message)
else:
users_list.extend(users)
if len(users) == 1:
expected_status_code = 201
else:
expected_status_code = 200
elif isinstance(users, User):
users_list.append(users)
expected_status_code = 201
else:
raise ValueError(
'The provided parameter must be a list of users,User_List, '
'or User')
try:
response = self.post_uri("/securechangeworkflow/api/secureapp/repository/users/",
users_list.to_xml_string().encode(), expected_status_codes=expected_status_code)
if expected_status_code == 201:
user_id = response.get_created_item_id()
return user_id
return None
except RequestException as error:
message = "Could not create the following users: '{}', error was '{}'.".format(
[user.name for user in users_list], error)
logger.critical(message)
raise IOError(message)
except REST_Client_Error as error:
message = "Could not create the following users: '{}', error was '{}'.".format(
[user.name for user in users_list], error)
logger.critical(message)
raise ValueError(message)
def delete_user_by_id(self, user_id):
"""
Delete the SecureApp user with the specified ID.
:param user_id: The ID of the user to be deleted.
:type user_id: int
:return: True if successful.
:rtype: bool
:raise ValueError: If a user with the specified ID is not found.
:raise IOError: If there was a communication error.
"""
logger.info("Deleting user with ID '%s' from SecureApp.", user_id)
try:
self.delete_uri("/securechangeworkflow/api/secureapp/repository/users/{}".format(user_id),
expected_status_codes=200)
except REST_Client_Error as error:
message = "Could not delete user with ID {}, error was '{}'.".format(user_id, error)
logger.critical(message)
raise ValueError(message)
except RequestException as error:
message = "Could not delete user with ID {}, error was '{}'.".format(user_id, error)
logger.critical(message)
raise IOError(message)
return True
def delete_user_by_name(self, user_name):
"""
Delete the SecureApp user with the specified name.
:param user_name: The name of the user to be deleted.
:type user_name: string
:return: True if successful.
:rtype: bool
:raise ValueError: If a user with the specified ID is not found.
:raise IOError: If there was a communication error.
"""
logger.info("Deleting user with name '%s' from SecureApp.", user_name)
user_id = self.get_user_by_name(user_name).id
return self.delete_user_by_id(user_id)
def post_services(self, services, app_id=None, app_name=None):
"""
Create the specified SecureApp services in SecureApp,
if application id or name are specified then services will be posted to this application
:param services: The services object/objects to create in SecureApp.
:type services:Single_Service|Group_Service|list[Single_Service]|list[Group_Service]|Services_List
:param app_id: The ID of application.
:type app_id: int
:param app_name: The name of the application.
:type app_name: str
:return: If the object creation was successful and only object was created,
return the ID of the created service.
If more than one object is created, (True, None) is returned.
:rtype: int
:raise ValueError: If there was a problem with the parameters.
:raise IOError: If there was a communication error.
"""
info = "Creating SecureApp services"
if app_id:
info += " for application with ID {}".format(app_id)
url = "/securechangeworkflow/api/secureapp/repository/applications/{}/services".format(app_id)
elif app_name:
info += " for application '{}'".format(app_name)
app_id = self.get_app_by_name(app_name).id
url = "/securechangeworkflow/api/secureapp/repository/applications/{}/services".format(app_id)
else:
info += "."
url = "/securechangeworkflow/api/secureapp/repository/services/"
logger.info(info)
services_list = Services_List([])
# Handle a list of services
if isinstance(services, list):
if len(services) == 0:
message = "The list of services to create is empty."
logger.critical(message)
raise ValueError(message)
else:
services_list.extend(services)
if len(services) == 1:
expected_status_code = 201
else:
expected_status_code = 200
elif isinstance(services, Services_List):
if len(services) == 0:
message = "The list of services to create is empty."
logger.critical(message)
raise ValueError(message)
else:
services_list.extend(services)
if len(services) == 1:
expected_status_code = 201
else:
expected_status_code = 200
elif isinstance(services, (Single_Service, Group_Service)):
services_list.append(services)
expected_status_code = 201
else:
raise ValueError(
"The provided parameter must be a list of services,Services_List, "
"Secure_App.XML_Objects.REST.Single_Service or Group_Service")
try:
response = self.post_uri(url,
services_list.to_xml_string().encode(),
expected_status_codes=expected_status_code)
if expected_status_code == 201:
service_id = response.get_created_item_id()
return service_id
return True
except RequestException as error:
message = "Could not create the following services: '{}', error was '{}'".format(
[service.name for service in services_list], error)
logger.critical(message)
raise IOError(message)
except REST_Client_Error as error:
message = "Could not create the following services: '{}', error was '{}'".format(
[service.name for service in services_list], error)
logger.critical(message)
raise ValueError(message)
def update_services(self, services, app_id=None, app_name=None):
"""
Update the specified SecureApp services in SecureApp,
if application ID or name are specified then services will be updated for this application
:param services: The services object/objects to update in SecureApp for application.
:type services:Single_Service|Group_Service|[Single_Service]|[Group_Service]|Services_List
:param app_id: The Application ID.
:type app_id: int
:param app_name: The Application name
:type app_name: str
:raise ValueError: If there was a problem with the parameters.
:raise IOError: If there was a communication error.
"""
info = "Updating services for SecureApp"
if app_id:
info += " for application with ID {}".format(app_id)
url = "/securechangeworkflow/api/secureapp/repository/applications/{}/services".format(app_id)
elif app_name:
info += " for application '{}'".format(app_name)
app_id = self.get_app_by_name(app_name).id
url = "/securechangeworkflow/api/secureapp/repository/applications/{}/services".format(app_id)
else:
info += "."
url = "/securechangeworkflow/api/secureapp/repository/services/"
logger.info(info)
services_list = Services_List([])
# Handle a list of services
if isinstance(services, list):
if len(services) == 0:
message = "The list of services to update is empty."
logger.critical(message)
raise ValueError(message)
else:
services_list.extend(services)
elif isinstance(services, Services_List):
if len(services) == 0:
message = "The list of services to update is empty."
logger.critical(message)
raise ValueError(message)
else:
services_list.extend(services)
elif isinstance(services, (Single_Service,
Group_Service)):
services_list.append(services)
else:
raise ValueError(
"The provided parameter must be a list of services,Services_List, "
"Secure_App.XML_Objects.REST.Single_Service or Group_Service")
try:
self.put_uri(url,
services_list.to_xml_string().encode(),
expected_status_codes=200)
except RequestException as error:
message = "Could not update the following services: '{}', error was '{}'".format(
[service.name for service in services_list], error)
logger.critical(message)
raise IOError(message)
except REST_Client_Error as error:
message = "Could not update the following services: '{}', error was '{}'".format(
[service.name for service in services_list], error)
logger.critical(message)
raise ValueError(message)
def delete_service_by_name(self, service_name):
"""
Delete the SecureApp service with the specified name.
:param service_name: The name of the SecureApp service that will be deleted.
:type service_name: str
:return: True if the service deletion was successful.
:rtype: bool
:raise ValueError: If the specified service does not exist or there was another problem with the parameters.
:raise IOError: If there was a communication error.
"""
logger.info("Deleting service with name '%s' from SecureApp.", service_name)
try:
self.delete_uri("/securechangeworkflow/api/secureapp/repository/services?name={}".format(service_name),
expected_status_codes=200)
except RequestException as error:
message = "Could not delete the service: '{}', error was '{}'".format(service_name, error)
logger.critical(message)
raise IOError(message)
except REST_Client_Error as error:
message = "Could not delete the service: '{}', error was '{}'".format(service_name, error)
logger.critical(message)
raise ValueError(message)
return True
def delete_local_service(self, app_id, service_id):
"""Delete local service with in application in SecureApp.
:param app_id: The id of the application
:param service_id: The local service id
:return: True if the service deletion was successful.
:raise ValueError: If the specified service does not exist or there was another problem with the parameters.
:raise IOError: If there was a communication error.
"""
logger.info("Deleting local service id '{}' for application id '{}'".format(app_id, service_id))
url = "/securechangeworkflow/api/secureapp/repository/applications/{}/services/{}".format(app_id, service_id)
try:
self.delete_uri(url, expected_status_codes=200)
except RequestException as error:
message = "Could not delete service with ID: '{}', error was '{}'".format(service_id, error)
logger.critical(message)
raise IOError(message)
except REST_Client_Error as error:
message = "Could not delete service with ID: '{}', error was '{}'".format(service_id, error)
logger.critical(message)
raise ValueError(message)
return True
def delete_service_by_id(self, service_id):
"""
Delete the SecureApp service with the specified ID.
:param service_id: The ID of the service to be deleted.
:type service_id: int
:return: True if successful.
:rtype: bool
:raise ValueError: If an service with the specified ID is not found.
:raise IOError: If there was a communication error.
"""
logger.info("Deleting service with ID '%s' from SecureApp.", service_id)
try:
self.delete_uri("/securechangeworkflow/api/secureapp/repository/services/{}".format(service_id),
expected_status_codes=200)
except REST_Client_Error as error:
message = "Could not delete the service with ID '{}', error was '{}'".format(service_id, error)
logger.critical(message)
raise ValueError(message)
except RequestException as error:
message = "Could not delete the service with ID '{}', error was '{}'".format(service_id, error)
logger.critical(message)
raise IOError(message)
return True
def create_network_objects_for_app_id(self, app_id, network_objects):
"""
Create the specified network objects for the application with the specified ID.
:param app_id: The ID of the application that network objects will be created for.
:type app_id: int
:raise ValueError: If an application with the specified ID is not found.
:raise IOError: If there was a communication error.
:return: The ID of the created network object.
If more than one object is created, True is returned.
"""
logger.info("Creating network objects for application with ID '%s'.", app_id)
network_objects_list = Network_Objects_List([])
# Handle a list of network objects
if isinstance(network_objects, list):
if len(network_objects) == 0:
message = "The list of network objects to create is empty."
logger.critical(message)
raise ValueError(message)
else:
network_objects_list.extend(network_objects)
if len(network_objects) == 1:
expected_status_code = 201
else:
expected_status_code = 200
elif isinstance(network_objects, Network_Objects_List):
if len(network_objects) == 0:
message = "The list of network objects to create is empty."
logger.critical(message)
raise ValueError(message)
else:
network_objects_list.extend(network_objects)
if len(network_objects_list) == 1:
expected_status_code = 201
else:
expected_status_code = 200
elif isinstance(network_objects, (
Basic_Network_Object, Range_Network_Object,
Host_Network_Object, Subnet_Network_Object,
Group_Network_Object)):
network_objects_list.append(network_objects)
expected_status_code = 201
else:
raise ValueError(
"The provided parameter must be a list of network objects, "
"Secure_App.XML_Objects.REST.Network_Objects_List,Basic_Network_Object, "
"Secure_App.XML_Objects.REST.Range_Network_Object,Host_Network_Object, "
"Secure_App.XML_Objects.REST.Subnet_Network_Object or Group_Network_Object")
try:
response = self.post_uri(
"/securechangeworkflow/api/secureapp/repository/applications/{0}/network_objects".format(app_id),
network_objects_list.to_xml_string().encode(), expected_status_codes=expected_status_code)
if expected_status_code == 201:
network_object_id = response.get_created_item_id()
return network_object_id
return True
except RequestException as error:
message = "Could not create the following network objects: '{}', error was '{}'".format(
[network_object.name for network_object in network_objects_list], error)
logger.critical(message)
raise IOError(message)
except REST_Client_Error as error:
message = "Could not create the following network objects: '{}', error was '{}'".format(
[network_object.name for network_object in network_objects_list], error)
logger.critical(message)
raise ValueError(message)
def update_network_objects_for_app_id(self, app_id, network_objects):
"""
Update the specified network objects for the application with the specified ID.
:param app_id: The ID of the application that network objects will be updated for.
:type app_id: int
:raise ValueError: If an application with the specified ID is not found.
:raise IOError: If there was a communication error.
:return: If success true is returned.
:rtype: bool
"""
logger.info("Updating network objects for application with ID '%s'.", app_id)
network_objects_list = Network_Objects_List([])
expected_status_code = 200
# Handle a list of network objects
if isinstance(network_objects, list):
if len(network_objects) == 0:
message = "The list of network objects to update is empty."
logger.critical(message)
raise ValueError(message)
else:
network_objects_list.extend(network_objects)
elif isinstance(network_objects, Network_Objects_List):
if len(network_objects) == 0:
message = "The list of network objects to update is empty."
logger.critical(message)
raise ValueError(message)
else:
network_objects_list.extend(network_objects)
elif isinstance(network_objects, (
Basic_Network_Object, Range_Network_Object,
Host_Network_Object, Subnet_Network_Object,
Group_Network_Object)):
network_objects_list.append(network_objects)
else:
raise ValueError(
"The provided parameter must be a list of network objects, "
"Secure_App.XML_Objects.REST.Network_Objects_List,Basic_Network_Object, "
"Secure_App.XML_Objects.REST.Range_Network_Object,Host_Network_Object, "
"Secure_App.XML_Objects.REST.Subnet_Network_Object or Group_Network_Object")
for network_object in network_objects_list:
if not any((network_object.id, network_object.name)):
message = "One of the network objects does not have neither name nor id"
raise ValueError(message)
elif not network_object.id:
try:
network_object.id = self.get_network_object_by_name_for_app_id(network_object.name, app_id)
except (ValueError, AttributeError, IOError):
message = "Failed to get id for a network object '{}'".format(network_object.name)
logger.critical(message)
raise ValueError(message)
elif not network_object.name:
try:
network_object.name = self.get_network_object_by_id_for_app_id(network_object.id, app_id)
except (ValueError, AttributeError, IOError):
message = "Failed to get name for a network object with id '{}'".format(network_object.id)
logger.critical(message)
raise ValueError(message)
else:
continue
try:
self.put_uri(
"/securechangeworkflow/api/secureapp/repository/applications/{}/network_objects".format(app_id),
network_objects_list.to_xml_string().encode(), expected_status_codes=expected_status_code)
return True
except RequestException as error:
message = "Could not create the following network objects: '{}', error was '{}'".format(
[network_object.name for network_object in network_objects_list], error)
logger.critical(message)
raise IOError(message)
except REST_Client_Error as error:
message = "Could not create the following network objects: '{}', error was '{}'".format(
[network_object.name for network_object in network_objects_list], error)
logger.critical(message)
raise ValueError(message)
def create_network_objects_for_app_name(self, app_name, network_objects):
"""
Create the specified network objects for the application with the specified name.
:param app_name: The ID of the application that network objects will be created for.
:type app_name: str
:return: If the object creation was successful and only object was created, return is (True, object_id), where
object_id is the ID of the created object that is extracted from the Location header.
If more than one object is created, (True, None) is returned.
"""
app_id = self.get_app_by_name(app_name).id
return self.create_network_objects_for_app_id(app_id, network_objects)
def update_network_objects_for_app_name(self, app_name, network_objects):
"""
Update the specified network objects for the application with the specified name.
:param app_name: The ID of the application that network objects will be updated for.
:type app_name: str
:return: If the object update was successful True is returned
"""
app_id = self.get_app_by_name(app_name).id
return self.update_network_objects_for_app_id(app_id, network_objects)
def create_connections_for_app_id(self, app_id, connections):
"""
Create the specified network objects for the application with the specified ID.
:param app_id: The ID of the application that connections will be created for.
:type app_id: int
:raise ValueError: If an application with the specified ID is not found.
:raise IOError: If there was a communication error.
:return: The ID of the created connection.
If more than one object is created, True is returned.
"""
logger.info("Creating network objects for application with ID '%s'.", app_id)
connection_list = Connection_List([])
# Handle a list of services
if isinstance(connections, list):
if len(connections) == 0:
message = "The list of network objects to create is empty."
logger.critical(message)
raise ValueError(message)
else:
connection_list.extend(connections)
if len(connection_list) == 1:
expected_status_code = 201
else:
expected_status_code = 200
elif isinstance(connections, Connection_List):
if len(connections) == 0:
message = "The list of network objects to create is empty."
logger.critical(message)
raise ValueError(message)
else:
connection_list.extend(connections)
if len(connection_list) == 1:
expected_status_code = 201
else:
expected_status_code = 200
elif isinstance(connections, Detailed_Application_Connection):
connection_list.append(connections)
expected_status_code = 201
else:
raise ValueError(
"The provided parameter must be a list of connections objects, "
"Secure_App.XML_Objects.REST.Connection_List,"
"Detailed_Application_Connection")
try:
response = self.post_uri(
"/securechangeworkflow/api/secureapp/repository/applications/{}/connections".format(app_id),
connection_list.to_xml_string().encode(), expected_status_codes=expected_status_code)
if expected_status_code == 201:
connection_id = response.get_created_item_id()
return connection_id
return True
except RequestException as error:
message = "Could not create the following connections: '{}', error was '{}'.".format(
[connection.name for connection in connection_list], error)
logger.critical(message)
raise IOError(message)
except REST_Client_Error as error:
message = "Could not create the following connections: '{}', error was '{}'.".format(
[connection.name for connection in connection_list], error)
logger.critical(message)
raise ValueError(message)
def create_connections_for_app_name(self, app_name, connections):
"""
Create the specified connections objects for the application with the specified name.
:param app_name: The ID of the application that connections will be created for.
:type app_name: str
:return: If the object creation was successful and only object was created, return is (True, object_id), where
object_id is the ID of the created object that is extracted from the Location header.
If more than one object is created, (True, None) is returned.
"""
logger.info("Creating connections for application '%s'", app_name)
app_id = self.get_app_by_name(app_name).id
return self.create_connections_for_app_id(app_id, connections)
    def update_connection_for_app_id(self, connection, app_id=None, app_name=None):
        """
        Update an existing connection with a new one. Either the ID or the name of the
        application the connection belongs to must be provided.

        :param connection: The new connection to update with
        :type connection:Detailed_Application_Connection
        :param app_id: The Id of application to update connection for
        :type app_id: int
        :param app_name: The name of application to update connection for
        :type app_name: str
        :raise IOError: If there is communication or API error
        :raise ValueError: if one of the parameters is wrong one
        """
        # Resolve the application ID, preferring an explicit app_id over app_name.
        if not (app_id or app_name):
            msg = "No ID or name of application of connection to update is provided"
            logger.critical(msg)
            raise ValueError(msg)
        elif not app_id:
            app_id = self.get_app_by_name(app_name).id
        # Resolve the connection ID: look it up by name when only the name is set.
        if not connection.id and not connection.name:
            msg = "No ID or name of connection to update is provided"
            logger.critical(msg)
            raise ValueError(msg)
        elif not connection.id:
            logger.info("Updating connection '{}' "
                        "for application with ID {}".format(connection.name,
                                                            app_id))
            connection.id = self.get_connection_by_name_for_app_id(app_id, connection.name).id
        else:
            logger.info("Updating connection with ID {} "
                        "for application with ID {}".format(connection.id,
                                                            app_id))
        try:
            self.put_uri(
                "/securechangeworkflow/api/secureapp/repository/applications/{}/connections/{}".format(
                    app_id,
                    connection.id),
                connection.to_xml_string().encode(),
                expected_status_codes=200)
        except RequestException:
            message = "Failed to update connection with ID {}" \
                      " for application with ID {}".format(connection.id,
                                                           app_id)
            logger.critical(message)
            raise IOError(message)
def update_connections_for_app(self, connections, app_id=None, app_name=None):
"""
Update the specified network objects for the application with the specified ID.
:param app_id: The ID of the application that connections will be created for.
:type app_id: int
:param app_name: The name of the application that connections will be created for.
:type app_name: str
:raise ValueError: If an application with the specified ID is not found.
:raise IOError: If there was a communication error.
If more than one object is created, True is returned.
"""
if not app_id and not app_name:
msg = "ID or name of application to update connections for is not provided"
logger.critical(msg)
raise ValueError(msg)
elif not app_id:
app_id = self.get_app_by_name(app_name).id
logger.info("Updating network objects for application with ID '%s'.", app_id)
connection_list = Connection_List([])
# Handle a list of services
if isinstance(connections, list):
if len(connections) == 0:
message = "The list of network objects to create is empty."
logger.critical(message)
raise ValueError(message)
else:
connection_list.extend(connections)
elif isinstance(connections, Connection_List):
if len(connections) == 0:
message = "The list of network objects to create is empty."
logger.critical(message)
raise ValueError(message)
else:
connection_list.extend(connections)
elif isinstance(connections, Detailed_Application_Connection):
connection_list.append(connections)
else:
raise ValueError(
"The provided parameter must be a list of connections objects, "
"Secure_App.XML_Objects.REST.Connection_List,"
"Detailed_Application_Connection")
try:
self.put_uri(
"/securechangeworkflow/api/secureapp/repository/applications/{}/connections".format(app_id),
connection_list.to_xml_string().encode(), expected_status_codes=200)
return True
except RequestException as error:
message = "Could not update the following connections: '{}', error was '{}'.".format(
[connection.name for connection in connection_list], error)
logger.critical(message)
raise IOError(message)
except REST_Client_Error as error:
message = "Could not update the following connections: '{}', error was '{}'.".format(
[connection.name for connection in connection_list], error)
logger.critical(message)
raise ValueError(message)
def delete_connection_by_id_for_app_id(self, app_id=None, app_name=None, connection_id=None, connection_name=None):
"""
Delete connection by it's ID from application of provided application ID
:param app_id: Application ID to delete connection from
:type app_id: int
:param app_name: Application name
:type app_name: str
:param connection_id: Connection ID to delete from Application
:type connection_id: int
:param connection_name: Connection name
:type connection_name: str
:raise IOError: If there was communication error.
"""
if not app_id and not app_name:
ValueError("Failed to delete connection, as no application ID or name specified")
elif not app_id:
app_id = self.get_app_by_name(app_name).id
if not connection_id and not connection_name:
ValueError("Failed to delete connection, no connection ID or name specified")
elif not connection_id:
connection_id = self.get_connection_by_name_for_app_id(app_id, connection_name).id
logger.info("Deleting Connection with ID %s from application with ID %s", connection_id, app_id)
try:
self.delete_uri(
"/securechangeworkflow/api/secureapp/repository/applications/{}/connections/{}".format(
app_id,
connection_id),
expected_status_codes=200)
except RequestException:
message = "Failed to delete connection with ID {} from SecureApp for Application with ID {}".format(
connection_id,
app_id)
logger.critical(message)
raise IOError(message)
def delete_all_connections_for_app(self, app_id=None, app_name=None):
"""
Delete all connections of specified application(by ID or name)
:param app_id:
:param app_name:
:raise IOError: If there were communication problems
:raise ValueError: If no app was found or wrong paramaters passed
"""
if not app_id and not app_name:
raise ValueError("Can't delete connections as no application name or id specified")
if not app_id:
app_id = self.get_app_by_name(app_name).id
logger.info("Deleting all connection from application with ID %s", app_id)
connections = [connection for connection in self.get_connections_list_for_app_id(app_id)]
if connections:
deleted_connections = []
for connection in connections:
try:
self.delete_uri(
"/securechangeworkflow/api/secureapp/repository/applications/{}/connections/{}".format(
app_id,
connection.id),
expected_status_codes=200)
except RequestException as error:
connections_names = (con.name for con in connections)
if deleted_connections:
message = "Failed to delete all connections. Deleted '{}' out of '{}'." \
" Got error on connection '{}': {}".format(
deleted_connections,
connections_names,
connection.name,
error)
else:
message = "Failed to delete connections '{}'. Got error on connection '{}': {}".format(
connections_names,
connection.name,
connections_names)
logger.critical(message)
raise IOError(message)
else:
deleted_connections.append(connection.name)
def get_connections_to_applications(self, app_id):
"""
Get connections to application for application
:param app_id: Application ID
:type app_id: str|int
:return: Connections_To_Applications
"""
logger.info("Getting Connections to application for Application with ID {}".format(app_id))
try:
response_string = self.get_uri("/securechangeworkflow/api/secureapp/"
"repository/applications/{}/connections_to_applications".format(
app_id)).response.content
except RequestException:
message = "Failed to get connections to application for app with ID {}".format(app_id)
logger.critical(message)
raise IOError(message)
return Connections_To_Applications.from_xml_string(response_string)
def get_connection_to_application(self, app_id, conn_to_app_id):
"""
Get connection to application by ID for application
:param app_id: Application ID
:type app_id: str|int
:param conn_to_app_id: Id of the connection to application
:type conn_to_app_id: str|int
:return: Connections_To_Applications
"""
logger.info("Getting Connection to Application with ID {} for Application with ID {}".format(
conn_to_app_id, app_id))
try:
response_string = self.get_uri("securechangeworkflow/api/secureapp/"
"repository/applications/{}/connections_to_applications/{}".format(
app_id, conn_to_app_id)).response.content
except REST_Not_Found_Error:
message = "Connection to Application with ID '{}' does not exist in Application with ID {}.".format(
conn_to_app_id, app_id)
logger.critical(message)
raise ValueError(message)
except RequestException:
message = "Failed to get connection to application with ID {} for app with ID {}".format(
conn_to_app_id, app_id)
logger.critical(message)
raise IOError(message)
return Connection_To_Application.from_xml_string(response_string)
def get_application_interfaces(self, app_id):
"""
Get application interfaces for application
:param app_id: Application ID
:type app_id: str|int
:return: Application_Interfaces
"""
logger.info("Getting appplication interfaces for Application with ID {}".format(app_id))
try:
response_string = self.get_uri("/securechangeworkflow/api/secureapp/"
"repository/applications/{}/application_interfaces".format(
app_id)).response.content
except RequestException:
message = "Failed to get connections to application for app with ID {}".format(app_id)
logger.critical(message)
raise IOError(message)
return Application_Interfaces.from_xml_string(response_string)
def get_application_interface(self, app_id, app_interface_id):
"""
Get application interfaces by ID for application
:param app_id: Application ID
:type app_id: str|int
:param app_interface_id: Application Interface ID
:type app_interface_id: int|str
:return: Application_Interface
"""
logger.info("Getting Application Interface with ID {} for Application with ID {}".format(
app_interface_id, app_id))
try:
response_string = self.get_uri("/securechangeworkflow/api/secureapp/"
"repository/applications/{}/application_interfaces/{}".format(
app_id, app_interface_id)).response.content
except REST_Not_Found_Error:
message = "Application Interface with ID '{}' does not exist in Application with ID {}.".format(
app_interface_id, app_id)
logger.critical(message)
raise ValueError(message)
except RequestException:
message = "Failed to get Application Interface with ID {} for app with ID {}".format(
app_interface_id, app_id)
logger.critical(message)
raise IOError(message)
return Connection_To_Application.from_xml_string(response_string)
def get_connection_to_application_pack(self, app_id, con_to_app_pack_id):
"""
Get connection to application packs for application
:param app_id: Application ID
:type app_id: str|int
:param con_to_app_pack_id: ID of the connection to Application Pack
:type con_to_app_pack_id: int|str
:return: Connection_To_Application_Pack
"""
logger.info("Getting connection to applicaton pack {} for Application with ID {}".format(
con_to_app_pack_id, app_id))
try:
response_string = self.get_uri("/securechangeworkflow/api/secureapp/"
"repository/applications/{}/connection_to_application_packs/{}".format(
app_id, con_to_app_pack_id)).response.content
except REST_Not_Found_Error:
message = "Connection to application pack with ID '{}' does not exist in Application with ID {}.".format(
con_to_app_pack_id, app_id)
logger.critical(message)
raise ValueError(message)
except RequestException:
message = "Failed to get connection to applicaton pack {} for app with ID {}".format(
con_to_app_pack_id, app_id)
logger.critical(message)
raise IOError(message)
return Detailed_Connection_To_Application_Pack.from_xml_string(response_string)
def get_connection_to_application_packs(self, app_id):
"""
Get connection to application packs for application
:param app_id: Application ID
:type app_id: str|int
:return: Connection_To_Application_Packs
"""
logger.info("Getting connection to applicaton packs for Application with ID {}".format(app_id))
try:
response_string = self.get_uri("/securechangeworkflow/api/secureapp/"
"repository/applications/{}/connection_to_application_packs".format(
app_id)).response.content
except RequestException:
message = "Failed to get connection to applicaton packs for app with ID {}".format(app_id)
logger.critical(message)
raise IOError(message)
return Connection_To_Application_Packs.from_xml_string(response_string)
def get_customers(self):
"""
Get the list of currently configured SecureApp customers.
:return: The list of currently configured SecureApp customers.
:rtype:Customers_List
:raise IOError: If there was a communication error.
"""
logger.info("Getting SecureApp customers list.")
try:
response_string = self.get_uri("/securechangeworkflow/api/secureapp/customers",
expected_status_codes=200).response.content
except RequestException:
message = "Failed to GET SecureApp customers list"
logger.critical(message)
raise IOError(message)
self._customers_list = Customers_List.from_xml_string(response_string)
return self._customers_list
def get_customer_by_id(self, customer_id):
"""
Get the SecureApp customer whose ID matches the specified ID.
:param customer_id: The ID for the customer which will be returned.
:type customer_id: int
:return: The customer whose ID matches the specified ID.
:rtype:Customer
:raise ValueError: If an customer with the specified ID is not found.
:raise IOError: If there was a communication error.
"""
logger.info("Getting SecureApp customer with ID '%s'.", customer_id)
try:
response_string = self.get_uri(
"/securechangeworkflow/api/secureapp/customers/{}".format(customer_id),
expected_status_codes=200).response.content
except REST_Not_Found_Error:
message = "Customer with ID '{}' does not exist.".format(customer_id)
logger.critical(message)
raise ValueError(message)
except REST_Bad_Request_Error:
message = "Failed to GET SecureApp customer. Check if you are not in a single mode".format(customer_id)
logger.critical(message)
raise ValueError(message)
except RequestException:
message = "Failed to GET SecureApp customer."
logger.critical(message)
raise IOError(message)
return Customer.from_xml_string(response_string)
def get_customer_by_name(self, customer_name):
"""
Get the SecureApp customer whose name matches the specified name.
:param customer_name: The name for the customer which will be returned.
:type customer_name: str
:return: The customer whose name matches the specified name.
:rtype:Customer
:raise ValueError: If the customer with the specified name is not found.
:raise IOError: If there was a communication error.
"""
logger.info("Getting SecureApp customer with name '%s'.", customer_name)
self.get_customers()
for customer in self._customers_list:
if customer.name == customer_name:
return customer
message = "The customer with the name '{}' does not exist.".format(customer_name)
logger.critical(message)
raise ValueError(message)
def get_applications_of_customer_by_id(self, customer_id):
"""
Get the SecureApp applications of the customer by his id
:param customer_id: An id of the customer whose applications we return
:type customer_id: int
:return: Applications of the customer specified by id
:rtype: Secure_App_Helper.XML_Objects.REST.Applications_List
:raise ValueError: If failed to get customer's applications
:raise IOError: If there was a communication error.
"""
logger.info("Getting SecureApp applications of customer with id '%s'.", customer_id)
try:
response_string = self.get_uri(
"/securechangeworkflow/api/secureapp/customers/{}/applications".format(customer_id),
expected_status_codes=200).response.content
except REST_Not_Found_Error:
message = "Customer with ID '{}' does not exist.".format(customer_id)
logger.critical(message)
raise ValueError(message)
except REST_Bad_Request_Error:
message = "Failed to get applications of customer with ID '{}'. Bad request.".format(customer_id)
logger.critical(message)
raise ValueError(message)
except REST_Unauthorized_Error:
message = "Failed to get applications of customer with ID '{}'. Access is denied.".format(customer_id)
logger.critical(message)
raise ValueError(message)
except RequestException:
message = "Failed to GET SecureApp customer's applications."
logger.critical(message)
raise IOError(message)
return Applications_List.from_xml_string(response_string)
def get_applications_of_customer_by_name(self, customer_name):
"""
Get applications of customer by his name
:param customer_name: the name of the customer
:type customer_name: str
:return: Applications of the customer specified by id
:rtype: Secure_App_Helper.XML_Objects.REST.Applications_List
:raise ValueError: If failed to get customer's applications
:raise IOError: If there was a communication error.
"""
logger.info("Getting SecureApp applications of customer with name '%s'.", customer_name)
customer = self.get_customer_by_name(customer_name)
return self.get_applications_of_customer_by_id(customer.id)
    def get_member_network_objects_for_group_network_object(self, group_network_object,
                                                            get_members_for_nested_groups=True,
                                                            all_network_objects=None):
        """
        Resolve the member network objects of a group network object.

        :param group_network_object: The group whose members will be resolved.
        :type group_network_object: Group_Network_Object
        :param get_members_for_nested_groups: If True, nested groups (members that
            themselves have a ``members`` attribute) are expanded recursively so only
            their members are returned; otherwise nested groups are returned as-is.
        :type get_members_for_nested_groups: bool
        :param all_network_objects: Optional mapping of object ID to network object;
            fetched once via get_all_network_objects() when None and passed down
            recursive calls so the full object list is requested only once.
        :type all_network_objects: dict|None
        :return: The member network objects of the group.
        :rtype: list
        """
        logger.info("Getting member network objects for network object with ID %s.", group_network_object.id)
        if all_network_objects is None:
            all_network_objects = {network_object.id: network_object for network_object in
                                   self.get_all_network_objects()}
        network_objects = []
        for member in group_network_object.members:
            # Group members only carry IDs; swap each for the full object from the cache.
            member = all_network_objects[member.id]
            if get_members_for_nested_groups:
                logger.debug("Getting nested member objects for network object with ID '%s'.", member.id)
                if hasattr(member, "members"):
                    sub_member_objects = self.get_member_network_objects_for_group_network_object(member,
                                                                                                  get_members_for_nested_groups,
                                                                                                  all_network_objects)
                    network_objects.extend(sub_member_objects)
                else:
                    network_objects.append(member)
            else:
                network_objects.append(member)
        return network_objects
    def get_member_services_for_group_service(self, group_service, get_members_for_nested_groups=True,
                                              all_services=None):
        """
        Resolve the member services of a group service.

        :param group_service: The group service whose members will be resolved.
        :type group_service: Group_Service
        :param get_members_for_nested_groups: If True, nested group services (members
            that themselves have a ``members`` attribute) are expanded recursively;
            otherwise nested groups are returned as-is.
        :type get_members_for_nested_groups: bool
        :param all_services: Optional mapping of service ID to service; fetched once
            via get_all_services() when None and passed down recursive calls so the
            full service list is requested only once.
        :type all_services: dict|None
        :return: The member services of the group.
        :rtype: list
        """
        logger.info("Getting member services for service with ID %s.", group_service.id)
        if all_services is None:
            all_services = {service.id: service for service in self.get_all_services()}
        services = []
        for member in group_service.members:
            # Group members only carry IDs; swap each for the full service from the cache.
            member = all_services[member.id]
            if get_members_for_nested_groups:
                logger.debug("Getting nested services for service with ID '%s'.", member.id)
                if hasattr(member, "members"):
                    sub_member_objects = self.get_member_services_for_group_service(member,
                                                                                    get_members_for_nested_groups,
                                                                                    all_services)
                    services.extend(sub_member_objects)
                else:
                    services.append(member)
            else:
                services.append(member)
        return services
    def get_details_from_reference(self, Reference_Object, Object_Class=None,
                                   xml_from_string_func=None):
        """
        Fetch and parse the full object that a reference (link) object points to.

        :param Reference_Object: The Reference Object
        :type Reference_Object: Base_Link_Target
        :param Object_Class: Class of the object to return to; its from_xml_string()
            parses the response when xml_from_string_func is not given.
        :param xml_from_string_func: Optional parser callable taking the raw XML
            string; takes precedence over Object_Class when provided.
        :return: Detailed information from referenced object
        :raise ValueError: If the reference could not be resolved or parsed.
        """
        logger.info("Getting details for reference")
        try:
            link = Reference_Object.get_reference_link()
            # TODO: check function that represents uri as an object.
            # Re-root the referenced link on this helper's connection by keeping
            # only the part after "securechangeworkflow/api/".
            api_part = link.split("securechangeworkflow/api/")[1]
            logger.info("Reference call is /securechangeworkflow/api/{}".format(api_part))
            try:
                response_string = self.get_uri(
                    "/securechangeworkflow/api/{}".format(api_part),
                    expected_status_codes=200).response.content
            except RequestException:
                message = "Failed to GET SecureApp customer's applications."
                logger.critical(message)
                raise IOError(message)
            if xml_from_string_func:
                return xml_from_string_func(response_string)
            return Object_Class.from_xml_string(response_string)
        except (KeyError, AttributeError, IOError, TypeError, IndexError) as error:
            # NOTE(review): the IOError raised just above is caught here and re-raised
            # as ValueError, so communication failures surface as ValueError - confirm
            # whether that is intentional.
            logger.critical("Failed to get details about the reference. Error: {}".format(error))
            raise ValueError("Failed to get details about the reference.")
def create_connection_repair_ticket(self, app_id, connection_id, ticket):
"""
:param app_id: The ID of the application whose connections will be repaired.
:type app_id: int
:param connection_id: The ID of the connection to be repaired.
:type connection_id: int
:param ticket: The ticket that will be created to repair the connection.
:type ticket: Secure_Change.XML_Objects.REST.Ticket
:return: The ID of the ticket that was created to repair the connection.
:rtype int
:raise ValueError: If the ticket parameters were incorrect.
:raise IOError: If there was a communication error.
"""
logger.info("Creating connection repair ticket for application with ID %s, connection ID %s ", app_id,
connection_id)
ticket_xml = ticket.to_xml_string().encode()
logger.debug("Ticket data: '%s'", ticket_xml)
try:
response = self.post_uri(
"/securechangeworkflow/api/secureapp/repository/applications/{}/connections/{}/repair".format(app_id,
connection_id),
ticket_xml, expected_status_codes=201)
ticket_id = response.get_created_item_id()
return ticket_id
except RequestException as error:
message = "Could not create a connection repair ticket for for application with ID '{}', connection ID '{}' , error was '{}'.".format(
app_id, connection_id, error)
logger.critical(message)
raise IOError(message)
except REST_Client_Error as error:
message = "Could not create a connection repair ticket for for application with ID '{}', connection ID '{}' , error was '{}'.".format(
app_id, connection_id, error)
logger.critical(message)
raise ValueError(message)
def get_cloud_console_servers(self, vendor, search_string):
""" Get the list of cloud network objects in SecureApp .
:return: The list of cloud network objects in SecureApp.
:rtype: VM_Instances
:raise IOError: If there was a communication error.
"""
logger.info("Getting cloud network objects for SecureApp.")
uri = "/securechangeworkflow/api/secureapp/cloud_console/servers?vendor={}&search_string={}".format(vendor,
search_string)
try:
response_string = self.get_uri(uri, expected_status_codes=200).response.content
except RequestException:
message = "Failed to get cloud console servers for SecureApp."
logger.critical(message)
raise IOError(message)
try:
cloud_console_servers = VM_Instances.from_xml_string(response_string)
except (ValueError, AttributeError):
message = "Failed to get cloud console servers for SecureApp."
logger.critical(message)
raise ValueError(message)
return cloud_console_servers
|
import logging
from requests.exceptions import RequestException
from pytos.common.definitions.Url_Params_Builder import URLParamBuilderDict
from pytos.common.exceptions import REST_Not_Found_Error, REST_Client_Error, REST_Unauthorized_Error, \
REST_Bad_Request_Error
from pytos.common.logging.definitions import HELPERS_LOGGER_NAME
from pytos.secureapp.xml_objects.rest import Connection_List, User_List, Applications_List, Services_List, Customers_List, \
Network_Objects_List, Application, User, Single_Service, Group_Service, Basic_Network_Object,\
Range_Network_Object, Host_Network_Object, Subnet_Network_Object, Group_Network_Object, \
Detailed_Application_Connection, Customer, Connections_To_Applications, Connection_To_Application,\
Application_Interfaces, Connection_To_Application_Packs, Detailed_Connection_To_Application_Pack, \
ConnectionExtendedList, VM_Instances
from pytos.securechange.helpers import Secure_Change_Helper
logger = logging.getLogger(HELPERS_LOGGER_NAME)
class Secure_App_Helper(Secure_Change_Helper):
    """
    This class is used to interact via HTTP with SecureApp.
    It also allows for easy sending of email messages and writing to the SecureChange Message Board.
    """

    def __init__(self, hostname, login_data, **kwargs):
        """
        :param hostname: The SecureApp hostname with which we will communicate via HTTP.
        :type hostname: str
        :param login_data: A tuple of (username,password) used for basic authentication with the specified hostname.
        :type login_data: tuple
        :param message_board_enabled: (Optional) If set to False, Message Board functionality will be disabled.
        :type message_board_enabled: bool
        """
        logger.debug("Setting up SecureApp Helper.")
        # Local caches of the most recently fetched collections; refreshed by the
        # corresponding get_* methods (get_application_list, get_all_services, ...).
        self._app_list = Applications_List([])
        self._service_list = Services_List([])
        self._user_list = User_List([])
        self._customers_list = Customers_List([])
        super().__init__(hostname, login_data, **kwargs)
def get_user_list(self):
"""
Get the list of currently configured SecureApp users.
:return: The list of currently configured SecureApp users.
:rtype:User_List
:raise IOError: If there was a communication error.
"""
logger.info("Getting SecureApp users list.")
try:
response_string = self.get_uri("/securechangeworkflow/api/secureapp/repository/users",
expected_status_codes=200).response.content
except RequestException:
message = "Failed to GET SecureApp users list"
logger.critical(message)
raise IOError(message)
self._user_list = User_List.from_xml_string(response_string)
return self._user_list
def get_user_by_id(self, user_id):
"""
Get the SecureApp user whose ID matches the specified ID.
:param user_id: The ID for the user which will be returned.
:type user_id: int
:return: The user whose ID matches the specified ID.
:rtype:User
:raise ValueError: If an user with the specified ID is not found.
:raise IOError: If there was a communication error.
"""
logger.info("Getting SecureApp users with ID '%s'.", user_id)
try:
response_string = self.get_uri("/securechangeworkflow/api/secureapp/repository/users/{}".format(user_id),
expected_status_codes=200).response.content
except REST_Not_Found_Error:
message = "User with ID '{}' does not exist.".format(user_id)
logger.critical(message)
raise ValueError(message)
except RequestException:
message = "Failed to GET SecureApp users list."
logger.critical(message)
raise IOError(message)
return User.from_xml_string(response_string)
def get_user_by_name(self, user_name):
"""
Get the SecureApp user whose name matches the specified name.
:param user_name: The name for the user which will be returned.
:type user_name: name
:return: The user whose name matches the specified name.
:rtype:User
:raise ValueError: If an user with the specified name is not found.
:raise IOError: If there was a communication error.
"""
logger.info("Getting SecureApp user with name '%s'.", user_name)
self.get_user_list()
for user in self._user_list:
if user.name == user_name:
return user
message = "An user with the name '{}' does not exist.".format(user_name)
logger.critical(message)
raise ValueError(message)
def get_app_by_name(self, app_name, app_domain=None):
"""
Get the SecureApp application whose name matches the specified name.
:param app_name: The name of the application to be returned.
:type app_name: str
:param app_domain: The domain where app resides
:type app_domain: str
:return: The application whose name matches the specified name.
:rtype:Application
:raise ValueError: If an application with the specified name is not found.
"""
if app_domain:
log_msg = "Getting SecureApp application with name '{}' and domain name '{}'.".format(app_name, app_domain)
else:
log_msg = "Getting SecureApp application with name '{}'.".format(app_name)
logger.info(log_msg)
try:
response_string = self.get_uri("/securechangeworkflow/api/secureapp/repository/applications?name={}".format(app_name),
expected_status_codes=200).response.content
except RequestException:
message = "Failed to GET SecureApp application list"
logger.critical(message)
raise IOError(message)
except REST_Not_Found_Error:
message = "An application with the name '{}' does not exist.".format(app_name)
logger.critical(message)
raise ValueError(message)
found_apps = Applications_List.from_xml_string(response_string)
try:
if app_domain:
try:
return [app for app in found_apps if app.customer.name.lower() == app_domain.lower() and app.name == app_name][0]
except (KeyError, AttributeError):
logger.info("No domain found, assuming single domain mode")
return [app for app in found_apps if app.name == app_name][0]
else:
return [app for app in found_apps if app.name == app_name][0]
except IndexError:
message = "An application with the name '{}' does not exist.".format(app_name)
logger.critical(message)
raise ValueError(message)
def get_app_by_id(self, app_id):
"""
Get the SecureApp application whose ID matches the specified ID.
:param app_id: The ID of the application to be returned.
:type app_id: int|str
:return: The application whose ID matches the specified ID.
:rtype:Application
:raise ValueError: If an application with the specified ID is not found.
"""
logger.info("Getting SecureApp application with ID '%s'.", app_id)
try:
response_string = self.get_uri(
"/securechangeworkflow/api/secureapp/repository/applications/{}".format(app_id),
expected_status_codes=200).response.content
except REST_Not_Found_Error:
message = "Application with ID {} does nto exist".format(app_id)
logger.critical(message)
raise ValueError(message)
except RequestException:
message = "Failed to GET SecureApp with ID".format(app_id)
logger.critical(message)
raise IOError(message)
return Application.from_xml_string(response_string)
def get_application_list(self):
"""
Get the list of currently configured SecureApp applications.
:return: The currently configured SecureApp applications list.
:rtype:Applications_List
:raise IOError: If there was a communication error.
"""
logger.info("Getting SecureApp applications list.")
try:
response_string = self.get_uri("/securechangeworkflow/api/secureapp/repository/applications",
expected_status_codes=200).response.content
except RequestException:
message = "Failed to GET SecureApp application list"
logger.critical(message)
raise IOError(message)
self._app_list = Applications_List.from_xml_string(response_string)
return self._app_list
def get_application_list_by_user_permissions(self, owner=True, editor=True, user_id=None):
"""
Get the list of currently configured SecureApp applications.
:param owner: Applications where user is owner
:type owner: bool
:param editor: Applications where user is editor
:type editor: bool
:param user_id: The user ID who has permissions. If not user for API would be used for filter
:type user_id: str|int
:return: The currently configured SecureApp applications list.
:rtype:Applications_List
:raise IOError: If there was a communication error.
"""
if not user_id:
log_user_id = "used for API call"
else:
log_user_id = user_id
logger.info("Getting SecureApp applications list where user {} is owner({}) and editor({}).".format(
log_user_id, owner, editor
))
filter_params = []
if owner:
filter_params.append("app_owner")
if editor:
filter_params.append("app_editor")
if filter_params:
params = ",".join(filter_params)
query_filter = "?app_permissions={}".format(params)
if user_id:
query_filter += "&userId={}".format(user_id)
else:
query_filter = ""
try:
response_string = self.get_uri("/securechangeworkflow/api/secureapp/repository/applications{}".format(
query_filter), expected_status_codes=200).response.content
except RequestException:
message = "Failed to GET SecureApp application list"
logger.critical(message)
raise IOError(message)
return Applications_List.from_xml_string(response_string)
def get_services_list(self, param_builder=None):
"""
Get the list of currently configured SecureApp services.
:param param_builder: Filter parameters
:type param_builder: URLParamBuilderInterface
:return: The currently configured SecureApp services list.
:rtype:Services_List
:raise IOError: If there was a communication error.
"""
logger.info("Getting SecureApp services list")
url = "/securechangeworkflow/api/secureapp/repository/services"
if param_builder:
url = "{}{}".format(url, param_builder.build())
try:
response_string = self.get_uri(url, expected_status_codes=200).response.content
except RequestException:
raise IOError("Failed to GET SecureApp services list.")
return Services_List.from_xml_string(response_string)
def get_all_services(self, global_service_only=False):
"""
Get the list of currently configured SecureApp services.
:param global_service_only: Retrieve global services
:return: The currently configured SecureApp services list.
:rtype:Services_List
:raise IOError: If there was a communication error.
"""
if global_service_only:
param_dict = {'globals_only': True}
else:
param_dict = {}
param_builder = URLParamBuilderDict(param_dict)
self._service_list = self.get_services_list(param_builder)
return self._service_list
def get_service_list_available_for_app(self, app_id=None, app_name=None, include_global=False):
"""
Get the list of services that are available to be used in specific application:
services created locally in this application and services created globally for all applications
:param app_id: ID of an application
:type app_id: int
:param app_name: Name of the application
:type app_name: str
:param include_global: If to include or not global services
:type include_global: bool
:raise ValueError: If wrong parameters are used
:raise IOError: If there was problem in communication or API request
:return: The list of services
:rtype:Services_List
"""
if not app_id and not app_name:
msg = "Can't get the list of available services for" \
" application as no ID or name of application is provided"
logger.critical(msg)
raise ValueError(msg)
if not app_id:
app_id = self.get_app_by_name(app_name).id
logger.info("Getting all services available for application with ID %s", app_id)
all_services = self.get_all_services()
return [service for service in all_services
if (service.application_id is not None and service.application_id == app_id)
or (service.is_global() and include_global)]
def search_services_available_for_app(self, service_id=None, service_name=None,
app_id=None, app_name=None, include_global=False):
"""
Find services available for application with specified name or ID
:param service_name: The name of the service
:param service_id: The ID of the service
:param app_id: The ID of the application
:param app_name: The name of the application
:param include_global: If to include global services in search
:return: the list of services found with given id or name
"""
if not any((service_id, service_name)):
msg = "No service name or ID provided."
logger.critical(msg)
raise ValueError(msg)
if not app_id and not app_name:
msg = "No application name or ID provided."
logger.critical(msg)
raise ValueError(msg)
if not app_id:
app_id = self.get_app_by_name(app_name).id
if service_name:
service_info = "with name '{}'".format(service_name)
else:
service_info = "with ID {}".format(service_id)
logger.info("Searching for services {} for application with ID {}".format(service_info, app_id))
available_services = self.get_service_list_available_for_app(app_id, app_name, include_global)
if service_name:
return [service for service in available_services
if (service_name.lower() == service.name.lower())]
else:
return [service for service in available_services
if service.id == service_id]
def get_service_by_id(self, service_id):
"""
Get the SecureApp service by ID
:param service_id: The ID of the service to be returned.
:type service_id: str|int
:return: The service whose name matches the specified name.
:rtype:Single_Service|Group_Service
:raise ValueError: If a service with the specified name is not found.
"""
logger.debug("Getting SecureApp service with ID '%s'.", service_id)
# As object does not have attribute, this API will crash. Uncomment when fixed
# try:
# response_string = self.get_uri(
# "/securechangeworkflow/api/secureapp/repository/services/{}".format(service_id),
# expected_status_codes=200).response.content
# except REST_Not_Found_Error:
# message = "Service with ID {} does not exist.".format(service_id)
# logger.critical(message)
# raise ValueError(message)
# except RequestException:
# message = "Failed to get SecureApp service with ID {}.".format(service_id)
# logger.critical(message)
# raise IOError(message)
# return Service_Object.from_xml_string_auto_type(response_string)
try:
return [service for service in self.get_all_services() if str(service.id) == str(service_id)][0]
except IndexError:
message = "Service with ID {} does not exist.".format(service_id)
logger.critical(message)
raise ValueError(message)
def get_service_by_name(self, service_name, param_builder=None):
"""
Get the SecureApp service whose name matches the specified name.
:param service_name: The name of the service to be returned.
:type service_name: str
:param param_builder: The URI parameters builder
:type param_builder: T <= pytos.common.API_Defines.Url_Params_Builder.URLParamBuilderInterface
:return: The service whose name matches the specified name.
:rtype:Single_Service|Group_Service
:raise ValueError: If a service with the specified name is not found.
"""
logger.debug("Getting SecureApp service with name '%s'.", service_name)
if not param_builder:
param_builder = URLParamBuilderDict({'name': service_name})
else:
param_builder.set("name", service_name)
try:
return self.get_services_list(param_builder)[0]
except (IndexError, REST_Not_Found_Error):
message = "A service with the name '{}' does not exist.".format(service_name)
logger.critical(message)
raise ValueError(message)
def get_service_list_for_app_id(self, app_id):
"""
Get the list of services for Application by Application ID
:param app_id: The ID of Application in SecureApp to get services
:type app_id: int
:return: The list of services configured for Application
:rtype:Services_List
:raise IOError: If there was a communication error.
"""
logger.info("Getting SecureApp service list for application with ID %s", app_id)
try:
response_string = self.get_uri(
"/securechangeworkflow/api/secureapp/repository/applications/{}/services".format(app_id),
expected_status_codes=200).response.content
except RequestException:
message = "Failed to get SecureApp services list for application with ID {}".format(app_id)
logger.critical(message)
raise IOError(message)
return Services_List.from_xml_string(response_string)
def get_service_list_for_app_name(self, app_name):
"""
Get the list of services for Application by Application name
:param app_name: The Name of the application for provide services list for
:type app_name: str
:return: The list of services configured for Application
:rtype:Services_List
:raise IOError: If there was a communication error.
"""
logger.info("Getting SecureApp services list for application '%s'", app_name)
app_id = self.get_app_by_name(app_name).id
return self.get_service_list_for_app_id(app_id)
def get_connections_list_for_app_id(self, app_id):
"""
Get the SecureApp connections list for the application whose ID matches the specified ID.
:param app_id: The ID of the application whose connections will be returned.
:type app_id: int
:return: The connections list for the application whose ID matches the specified ID.
:rtype:Connection_List
:raise ValueError: If an application with the specified ID is not found.
:raise IOError: If there was a communication error.
"""
logger.info("Getting SecureApp connections list for application with ID '%s'.", app_id)
try:
response_string = self.get_uri(
"/securechangeworkflow/api/secureapp/repository/applications/{}/connections".format(app_id),
expected_status_codes=200).response.content
except REST_Not_Found_Error:
message = "Application with ID {0} does not exist.".format(app_id)
logger.critical(message)
raise ValueError(message)
except RequestException:
message = "Failed to get SecureApp connections list for application with ID {}.".format(app_id)
logger.critical(message)
raise IOError(message)
return Connection_List.from_xml_string(response_string)
def get_extended_connections_list_for_app_id(self, app_id):
"""
Get extended connections (with all information)
:return:
"""
logger.info("Getting SecureApp connections with details for application with ID {}".format(app_id))
try:
response_string = self.get_uri(
"/securechangeworkflow/api/secureapp/repository/applications/{}/connections_extended".format(app_id),
expected_status_codes=200).response.content
except REST_Not_Found_Error:
msg = "Application with ID does not exists".format(app_id)
logger.critical(msg)
raise ValueError(msg)
except RequestException:
msg = "Failed to get SecureApp connections list for application with ID {}".format(app_id)
logger.critical(msg)
raise IOError(msg)
return ConnectionExtendedList.from_xml_string(response_string)
def get_connections_list_for_app_name(self, app_name):
"""
Get the SecureApp connection list for the application whose name matches the specified name.
:param app_name: The name of the application whose connection list will be returned.
:type app_name: str
:return: The connections list for the application whose name matches the specified name.
:rtype:Connection_List
:raise ValueError: If an application with the specified name is not found.
:raise IOError: If there was a communication error.
"""
logger.info("Getting SecureApp connections list for application with name '%s'.", app_name)
app_id = self.get_app_by_name(app_name).id
return self.get_connections_list_for_app_id(app_id)
def get_connection_by_name_for_app_id(self, app_id, connection_name):
"""
Get the SecureApp connection by name for application with specified ID
:param app_id: The ID of application to search connection from
:type app_id: int
:param connection_name: The name of the connection to be returned.
:type connection_name: str
:return: The connection whose name matches the specified name
:rtype:Detailed_Application_Connection
:raise ValueError: If connection with the specified ID is not found.
"""
logger.debug("Getting SecureApp Connection with name '%s' "
"from application with ID %s.", connection_name, app_id)
connection_list = self.get_connections_list_for_app_id(app_id)
for connection in connection_list:
if connection.name.lower() == connection_name.lower():
return connection
message = "A connection with the name '{}' does not exist in application with ID {}.".format(
connection_name,
app_id)
logger.critical(message)
raise ValueError(message)
def get_connection_by_name_for_app_name(self, app_name, connection_name):
"""
Get the SecureApp connection by name for application with specified ID
:param app_name: The name of application to search connection from
:type app_name: str
:param connection_name: The name of the connection to be returned.
:type connection_name: str
:return: The connection whose name matches the specified name
:rtype:Detailed_Application_Connection
:raise ValueError: If connection with the specified ID is not found.
"""
logger.debug("Getting SecureApp Connection with name '%s' "
"from application '%s'.", connection_name, app_name)
app_id = self.get_app_by_name(app_name).id
return self.get_connection_by_name_for_app_id(app_id, connection_name)
def get_network_objects_list_for_app_by_id(self, app_id):
"""
Get the list of network objects for SecureApp application by application ID.
:param app_id: Application ID
:type app_id: int
:return: The list of network objects for the specified application.
:rtype:Network_Objects_List
:raise IOError: If there was a communication error.
"""
logger.info("Getting Network objects list for SecureApp application '%s'.", app_id)
try:
response_string = self.get_uri("/securechangeworkflow/api/secureapp/repository"
"/applications/{}/network_objects".format(app_id),
expected_status_codes=200).response.content
except RequestException:
message = "Failed to GET network objects list for SecureApp application with id '{}'".format(app_id)
logger.critical(message)
raise IOError(message)
try:
network_objects_list = Network_Objects_List.from_xml_string(response_string)
except (ValueError, AttributeError):
message = "Failed to get network objects list for application with id '{}'".format(app_id)
logger.critical(message)
raise ValueError(message)
return network_objects_list
def get_all_network_objects(self):
"""
Get the list of all network objects in SecureApp .
:return: The list of all network objects in SecureApp.
:rtype: Network_Objects_List
:raise IOError: If there was a communication error.
"""
logger.info("Getting network objects list for SecureApp.")
try:
response_string = self.get_uri("/securechangeworkflow/api/secureapp/repository"
"/network_objects", expected_status_codes=200).response.content
except RequestException:
message = "Failed to get network objects list for SecureApp."
logger.critical(message)
raise IOError(message)
try:
network_objects_list = Network_Objects_List.from_xml_string(response_string)
except (ValueError, AttributeError):
message = "Failed to get network objects list for SecureApp."
logger.critical(message)
raise ValueError(message)
return network_objects_list
def get_network_objects_list_for_app_name(self, app_name):
"""
Get the SecureApp network objects list for the application whose name matches the specified name.
:param app_name: The name of the application whose network object list will be returned.
:type app_name: str
:return: The network objects list for the application whose name matches the specified name.
:rtype:Network_Objects_List
:raise ValueError: If an application with the specified name is not found.
:raise IOError: If there was a communication error.
"""
logger.info("Getting network objects list for application with name '%s'.", app_name)
app_id = self.get_app_by_name(app_name).id
return self.get_network_objects_list_for_app_by_id(app_id)
def get_network_object_by_name_for_app_id(self, network_object_name, app_id):
"""
Get the SecureApp network object whose name matches the specified name for the application whose ID matches the
specified ID.
:param app_id: The ID of the application whose network objects will be returned.
:type app_id: int
:param network_object_name: The name of the network object which will be returned.
:type network_object_name: str
:return: The network object whose name matches the specified name for the application whose
ID matches the specified ID.
:rtype:Network_Object_DNS_Host|Network_Object_IP_Address
:raise ValueError: If an application with the specified ID is not found and/or a network object with
the specified name is not found.
:raise IOError: If there was a communication error.
"""
logger.info("Getting network object '%s'.", network_object_name)
try:
response_string = self.get_uri(
"/securechangeworkflow/api/secureapp/repository/applications/{}/network_objects".format(app_id),
expected_status_codes=200).response.content
except REST_Not_Found_Error:
message = "Application with ID '{}' does not exist.".format(app_id)
logger.critical(message)
raise ValueError(message)
except RequestException:
message = "Failed to GET SecureApp network objects list for application with ID {}.".format(app_id)
logger.critical(message)
raise IOError(message)
for network_object in Network_Objects_List.from_xml_string(response_string):
if network_object.name == network_object_name:
return network_object
message = "Could not find network object with name '{}' for application with ID {}.".format(network_object_name,
app_id)
logger.critical(message)
raise ValueError(message)
def get_network_object_by_id_for_app_id(self, network_object_id, app_id):
"""
Get the SecureApp network object whose id matches the specified id for the application whose ID matches the
specified ID.
:param app_id: The ID of the application whose network objects will be returned.
:type app_id: int
:param network_object_id: The id of the network object which will be returned.
:type network_object_id: int
:return: The network object whose id matches the specified id for the application whose
ID matches the specified ID.
:rtype:Network_Object_DNS_Host|Network_Object_IP_Address
:raise ValueError: If an application with the specified ID is not found and/or a network object with
the specified name is not found.
:raise IOError: If there was a communication error.
"""
logger.info("Getting network object with id '%s'.", network_object_id)
try:
response_string = self.get_uri(
"/securechangeworkflow/api/secureapp/repository/applications/{}/network_objects".format(app_id),
expected_status_codes=200).response.content
except REST_Not_Found_Error:
message = "Application with ID '{}' does not exist.".format(app_id)
logger.critical(message)
raise ValueError(message)
except RequestException:
message = "Failed to GET SecureApp network objects list for application with ID {}.".format(app_id)
logger.critical(message)
raise IOError(message)
for network_object in Network_Objects_List.from_xml_string(response_string):
if network_object.id == network_object_id:
return network_object
message = "Could not find network object with id '{}' for application with ID {}.".format(network_object_id,
app_id)
logger.critical(message)
raise ValueError(message)
def post_apps(self, apps):
"""
Create the specified SecureApp application object/objects in SecureApp.
:param apps: The application object/objects to create in SecureApp.
:type apps:Application or list of Application
:rtype: bool
:raise ValueError: If there was a problem with the parameters.
:raise IOError: If there was a communication error.
:return: The ID of the created application.
If more than one object is created, (True, None) is returned.
"""
logger.info("Creating SecureApp applications.")
app_list = Applications_List([])
# Handle a list of apps
if isinstance(apps, list):
app_list.extend(apps)
expected_status_code = [200, 201]
if len(apps) == 0:
message = "The list of applications to create is empty."
logger.critical(message)
raise ValueError(message)
elif isinstance(apps, Applications_List):
app_list.extend(apps)
expected_status_code = [200, 201]
if len(apps) == 0:
message = "The list of applications to create is empty."
logger.critical(message)
raise ValueError(message)
elif isinstance(apps, Application):
app_list.append(apps)
expected_status_code = 201
else:
message = "The provided parameter must be a list of applications, " \
"Secure_App.XML_Objects.REST.Applications_List, or Application"
logger.critical(message)
raise ValueError(message)
try:
response = self.post_uri("/securechangeworkflow/api/secureapp/repository/applications/",
app_list.to_xml_string().encode(), expected_status_codes=expected_status_code)
if expected_status_code == 201:
app_id = response.get_created_item_id()
return app_id
return True
except RequestException as error:
message = "Could not create the following applications: '{}', error was '{}'.".format(
[app.name for app in app_list], error)
logger.critical(message)
raise IOError(message)
except REST_Client_Error as error:
message = "Could not create the following applications: '{}', error was '{}'.".format(
[app.name for app in app_list], error)
logger.critical(message)
raise ValueError(message)
def update_app(self, apps, customer_name=None):
"""
Update the specified SecureApp application object/objects in SecureApp.
:param apps: The application object/objects to be updated in SecureApp.
:type apps:Application or list of Application
:return: Returns True or False if updated/not updated
:rtype: bool
:raise ValueError: If there was a problem with the parameters.
:raise IOError: If there was a communication error.
If more than one object is created, (True, None) is returned.
"""
logger.info("Creating SecureApp applications.")
app_list = Applications_List([])
expected_status_code = [200, 201]
# Handle a list of apps
if isinstance(apps, list):
app_list.extend(apps)
if len(apps) == 0:
message = "The list of applications to update is empty."
logger.critical(message)
raise ValueError(message)
elif isinstance(apps, Applications_List):
app_list.extend(apps)
if len(apps) == 0:
message = "The list of applications to update is empty."
logger.critical(message)
raise ValueError(message)
elif isinstance(apps, Application):
app_list.append(apps)
else:
message = "The provided parameter must be a list of applications, " \
"Secure_App.XML_Objects.REST.Applications_List, or Application"
logger.critical(message)
raise ValueError(message)
# BUG: around for current bug that will return 200 if id is not specified but application will not be updated
for app in app_list:
if not app.id:
try:
app.id = self.get_app_by_name(app.name, customer_name).id
except (ValueError, AttributeError, IOError):
message = "Failed to get id for application '{}'.".format(app.name)
logger.critical(message)
raise ValueError(message)
try:
self.put_uri("/securechangeworkflow/api/secureapp/repository/applications/",
app_list.to_xml_string().encode(), expected_status_codes=expected_status_code)
return True
except RequestException as error:
message = "Could not update the following applications: '{}', error was '{}'.".format(
[app.name for app in app_list], error)
logger.critical(message)
raise IOError(message)
except REST_Client_Error as error:
message = "Could not update the following applications: '{}', error was '{}'.".format(
[app.name for app in app_list], error)
logger.critical(message)
raise ValueError(message)
def delete_apps(self, apps):
"""
Delete the specified SecureApp application object/objects in SecureApp.
:param apps: The application object/objects to create in SecureApp.
:type apps:Application|Applications_List|list[Application]
:return: True if the application creation was successful.
:rtype: bool
:raise ValueError: If the specified application does not exist or there was another problem with the parameters.
:raise IOError: If there was a communication error.
"""
logger.info("Deleting applications from SecureApp.")
# Handle a list of apps
if isinstance(apps, list):
if len(apps) == 0:
raise ValueError("The list of applications to delete is empty.")
else:
for app in apps:
try:
self.delete_uri("/securechangeworkflow/api/secureapp/repository/applications/{}".format(app.id),
expected_status_codes=200)
except REST_Client_Error as error:
message = "Could not delete the following applications: '{}', error was '{}'.".format(
[app.name for app in apps], error)
logger.critical(message)
raise ValueError(message)
except RequestException as error:
message = "Could not delete the following applications: '{}', error was '{}'.".format(
[app.name for app in apps], error)
logger.critical(message)
raise IOError(message)
return True
# Handle Applications_List
elif isinstance(apps, Applications_List):
if len(apps) == 0:
raise ValueError("The applications list to delete is empty.")
else:
for app in apps:
try:
self.delete_uri("/securechangeworkflow/api/secureapp/repository/applications/{}".format(app.id),
expected_status_codes=200)
except REST_Client_Error as error:
message = "Could not delete the following applications: '{}', error was '{}'.".format(
[app.name for app in apps], error)
logger.critical(message)
raise ValueError(message)
except RequestException as error:
message = "Could not delete the following applications: '{}', error was '{}'.".format(
[app.name for app in apps], error)
logger.critical(message)
raise IOError(message)
return True
# Handle Application
elif isinstance(apps, Application):
try:
self.delete_uri("/securechangeworkflow/api/secureapp/repository/applications/{}".format(apps.id),
expected_status_codes=200)
return True
except REST_Client_Error as error:
message = "Could not delete the following application: '{}', error was '{}'.".format(
apps.name, error)
logger.critical(message)
raise ValueError(message)
except RequestException as error:
message = "Could not delete the following application: '{}', error was '{}'.".format(
apps.name, error)
logger.critical(message)
raise IOError(message)
else:
raise ValueError(
'The provided parameter must be a list of applications,Applications_List,'
' or Application')
def delete_app_by_id(self, app_id):
"""
Delete the SecureApp application with the specified ID.
:param app_id: The ID of the application to be deleted.
:type app_id: int
:return: True if successful.
:rtype: bool
:raise ValueError: If an application with the specified ID is not found.
:raise IOError: If there was a communication error.
"""
logger.info("Deleting application with ID '%s' from SecureApp.", app_id)
try:
self.delete_uri("/securechangeworkflow/api/secureapp/repository/applications/{}".format(app_id),
expected_status_codes=200)
except REST_Client_Error as error:
message = "Could not delete application with ID : '{}', error was '{}'.".format(app_id, error)
logger.critical(message)
raise ValueError(message)
except RequestException as error:
message = "Could not delete application with ID : '{}', error was '{}'.".format(app_id, error)
logger.critical(message)
raise IOError(message)
return True
def delete_app_by_name(self, app_name):
"""
Delete the SecureApp application with the specified name.
:param app_name: The name of the application to be deleted.
:type app_name: str
"""
app_id = self.get_app_by_name(app_name).id
return self.delete_app_by_id(app_id)
def delete_all_apps(self):
"""
Delete all configured SecureApp applications.
:return: True if successful.
:rtype: bool
:raise IOError: If there was a communication error.
"""
logger.info("Deleting all existing applications from SecureApp.")
self._app_list = self.get_application_list()
for app in self._app_list:
try:
self.delete_app_by_id(app.id)
except (RequestException, ValueError) as delete_error:
message = "Could not delete application with ID : '{}', error was '{}'.".format(app.id, delete_error)
logger.critical(message)
raise IOError(message)
return True
def create_users(self, users):
"""
Create the specified SecureApp user object/objects in SecureApp.
:param users: The user object/objects to create in SecureApp.
:type users:User_List|User|list[User]
:return: The ID of the created user.
If more than one object is created, True is returned.
:rtype: bool
:raise ValueError: If there was a problem with the parameters.
:raise IOError: If there was a communication error.
"""
logger.info("Creating SecureApp users.")
users_list = User_List([])
# Handle a list of users
if isinstance(users, list):
if len(users) == 0:
message = "The list of users to create is empty."
logger.critical(message)
raise ValueError(message)
else:
users_list.extend(users_list)
if len(users) == 1:
expected_status_code = 201
else:
expected_status_code = 200
elif isinstance(users, User_List):
if len(users) == 0:
message = "The list of users to create is empty."
logger.critical(message)
raise ValueError(message)
else:
users_list.extend(users)
if len(users) == 1:
expected_status_code = 201
else:
expected_status_code = 200
elif isinstance(users, User):
users_list.append(users)
expected_status_code = 201
else:
raise ValueError(
'The provided parameter must be a list of users,User_List, '
'or User')
try:
response = self.post_uri("/securechangeworkflow/api/secureapp/repository/users/",
users_list.to_xml_string().encode(), expected_status_codes=expected_status_code)
if expected_status_code == 201:
user_id = response.get_created_item_id()
return user_id
return None
except RequestException as error:
message = "Could not create the following users: '{}', error was '{}'.".format(
[user.name for user in users_list], error)
logger.critical(message)
raise IOError(message)
except REST_Client_Error as error:
message = "Could not create the following users: '{}', error was '{}'.".format(
[user.name for user in users_list], error)
logger.critical(message)
raise ValueError(message)
def delete_user_by_id(self, user_id):
"""
Delete the SecureApp user with the specified ID.
:param user_id: The ID of the user to be deleted.
:type user_id: int
:return: True if successful.
:rtype: bool
:raise ValueError: If a user with the specified ID is not found.
:raise IOError: If there was a communication error.
"""
logger.info("Deleting user with ID '%s' from SecureApp.", user_id)
try:
self.delete_uri("/securechangeworkflow/api/secureapp/repository/users/{}".format(user_id),
expected_status_codes=200)
except REST_Client_Error as error:
message = "Could not delete user with ID {}, error was '{}'.".format(user_id, error)
logger.critical(message)
raise ValueError(message)
except RequestException as error:
message = "Could not delete user with ID {}, error was '{}'.".format(user_id, error)
logger.critical(message)
raise IOError(message)
return True
def delete_user_by_name(self, user_name):
"""
Delete the SecureApp user with the specified name.
:param user_name: The name of the user to be deleted.
:type user_name: string
:return: True if successful.
:rtype: bool
:raise ValueError: If a user with the specified ID is not found.
:raise IOError: If there was a communication error.
"""
logger.info("Deleting user with name '%s' from SecureApp.", user_name)
user_id = self.get_user_by_name(user_name).id
return self.delete_user_by_id(user_id)
def post_services(self, services, app_id=None, app_name=None):
"""
Create the specified SecureApp services in SecureApp,
if application id or name are specified then services will be posted to this application
:param services: The services object/objects to create in SecureApp.
:type services:Single_Service|Group_Service|list[Single_Service]|list[Group_Service]|Services_List
:param app_id: The ID of application.
:type app_id: int
:param app_name: The name of the application.
:type app_name: str
:return: If the object creation was successful and only object was created,
return the ID of the created service.
If more than one object is created, (True, None) is returned.
:rtype: int
:raise ValueError: If there was a problem with the parameters.
:raise IOError: If there was a communication error.
"""
info = "Creating SecureApp services"
if app_id:
info += " for application with ID {}".format(app_id)
url = "/securechangeworkflow/api/secureapp/repository/applications/{}/services".format(app_id)
elif app_name:
info += " for application '{}'".format(app_name)
app_id = self.get_app_by_name(app_name).id
url = "/securechangeworkflow/api/secureapp/repository/applications/{}/services".format(app_id)
else:
info += "."
url = "/securechangeworkflow/api/secureapp/repository/services/"
logger.info(info)
services_list = Services_List([])
# Handle a list of services
if isinstance(services, list):
if len(services) == 0:
message = "The list of services to create is empty."
logger.critical(message)
raise ValueError(message)
else:
services_list.extend(services)
if len(services) == 1:
expected_status_code = 201
else:
expected_status_code = 200
elif isinstance(services, Services_List):
if len(services) == 0:
message = "The list of services to create is empty."
logger.critical(message)
raise ValueError(message)
else:
services_list.extend(services)
if len(services) == 1:
expected_status_code = 201
else:
expected_status_code = 200
elif isinstance(services, (Single_Service, Group_Service)):
services_list.append(services)
expected_status_code = 201
else:
raise ValueError(
"The provided parameter must be a list of services,Services_List, "
"Secure_App.XML_Objects.REST.Single_Service or Group_Service")
try:
response = self.post_uri(url,
services_list.to_xml_string().encode(),
expected_status_codes=expected_status_code)
if expected_status_code == 201:
service_id = response.get_created_item_id()
return service_id
return True
except RequestException as error:
message = "Could not create the following services: '{}', error was '{}'".format(
[service.name for service in services_list], error)
logger.critical(message)
raise IOError(message)
except REST_Client_Error as error:
message = "Could not create the following services: '{}', error was '{}'".format(
[service.name for service in services_list], error)
logger.critical(message)
raise ValueError(message)
def update_services(self, services, app_id=None, app_name=None):
"""
Update the specified SecureApp services in SecureApp,
if application ID or name are specified then services will be updated for this application
:param services: The services object/objects to update in SecureApp for application.
:type services:Single_Service|Group_Service|[Single_Service]|[Group_Service]|Services_List
:param app_id: The Application ID.
:type app_id: int
:param app_name: The Application name
:type app_name: str
:raise ValueError: If there was a problem with the parameters.
:raise IOError: If there was a communication error.
"""
info = "Updating services for SecureApp"
if app_id:
info += " for application with ID {}".format(app_id)
url = "/securechangeworkflow/api/secureapp/repository/applications/{}/services".format(app_id)
elif app_name:
info += " for application '{}'".format(app_name)
app_id = self.get_app_by_name(app_name).id
url = "/securechangeworkflow/api/secureapp/repository/applications/{}/services".format(app_id)
else:
info += "."
url = "/securechangeworkflow/api/secureapp/repository/services/"
logger.info(info)
services_list = Services_List([])
# Handle a list of services
if isinstance(services, list):
if len(services) == 0:
message = "The list of services to update is empty."
logger.critical(message)
raise ValueError(message)
else:
services_list.extend(services)
elif isinstance(services, Services_List):
if len(services) == 0:
message = "The list of services to update is empty."
logger.critical(message)
raise ValueError(message)
else:
services_list.extend(services)
elif isinstance(services, (Single_Service,
Group_Service)):
services_list.append(services)
else:
raise ValueError(
"The provided parameter must be a list of services,Services_List, "
"Secure_App.XML_Objects.REST.Single_Service or Group_Service")
try:
self.put_uri(url,
services_list.to_xml_string().encode(),
expected_status_codes=200)
except RequestException as error:
message = "Could not update the following services: '{}', error was '{}'".format(
[service.name for service in services_list], error)
logger.critical(message)
raise IOError(message)
except REST_Client_Error as error:
message = "Could not update the following services: '{}', error was '{}'".format(
[service.name for service in services_list], error)
logger.critical(message)
raise ValueError(message)
def delete_service_by_name(self, service_name):
"""
Delete the SecureApp service with the specified name.
:param service_name: The name of the SecureApp service that will be deleted.
:type service_name: str
:return: True if the service deletion was successful.
:rtype: bool
:raise ValueError: If the specified service does not exist or there was another problem with the parameters.
:raise IOError: If there was a communication error.
"""
logger.info("Deleting service with name '%s' from SecureApp.", service_name)
try:
self.delete_uri("/securechangeworkflow/api/secureapp/repository/services?name={}".format(service_name),
expected_status_codes=200)
except RequestException as error:
message = "Could not delete the service: '{}', error was '{}'".format(service_name, error)
logger.critical(message)
raise IOError(message)
except REST_Client_Error as error:
message = "Could not delete the service: '{}', error was '{}'".format(service_name, error)
logger.critical(message)
raise ValueError(message)
return True
def delete_local_service(self, app_id, service_id):
"""Delete local service with in application in SecureApp.
:param app_id: The id of the application
:param service_id: The local service id
:return: True if the service deletion was successful.
:raise ValueError: If the specified service does not exist or there was another problem with the parameters.
:raise IOError: If there was a communication error.
"""
logger.info("Deleting local service id '{}' for application id '{}'".format(app_id, service_id))
url = "/securechangeworkflow/api/secureapp/repository/applications/{}/services/{}".format(app_id, service_id)
try:
self.delete_uri(url, expected_status_codes=200)
except RequestException as error:
message = "Could not delete service with ID: '{}', error was '{}'".format(service_id, error)
logger.critical(message)
raise IOError(message)
except REST_Client_Error as error:
message = "Could not delete service with ID: '{}', error was '{}'".format(service_id, error)
logger.critical(message)
raise ValueError(message)
return True
def delete_service_by_id(self, service_id):
"""
Delete the SecureApp service with the specified ID.
:param service_id: The ID of the service to be deleted.
:type service_id: int
:return: True if successful.
:rtype: bool
:raise ValueError: If an service with the specified ID is not found.
:raise IOError: If there was a communication error.
"""
logger.info("Deleting service with ID '%s' from SecureApp.", service_id)
try:
self.delete_uri("/securechangeworkflow/api/secureapp/repository/services/{}".format(service_id),
expected_status_codes=200)
except REST_Client_Error as error:
message = "Could not delete the service with ID '{}', error was '{}'".format(service_id, error)
logger.critical(message)
raise ValueError(message)
except RequestException as error:
message = "Could not delete the service with ID '{}', error was '{}'".format(service_id, error)
logger.critical(message)
raise IOError(message)
return True
def create_network_objects_for_app_id(self, app_id, network_objects):
"""
Create the specified network objects for the application with the specified ID.
:param app_id: The ID of the application that network objects will be created for.
:type app_id: int
:raise ValueError: If an application with the specified ID is not found.
:raise IOError: If there was a communication error.
:return: The ID of the created network object.
If more than one object is created, True is returned.
"""
logger.info("Creating network objects for application with ID '%s'.", app_id)
network_objects_list = Network_Objects_List([])
# Handle a list of network objects
if isinstance(network_objects, list):
if len(network_objects) == 0:
message = "The list of network objects to create is empty."
logger.critical(message)
raise ValueError(message)
else:
network_objects_list.extend(network_objects)
if len(network_objects) == 1:
expected_status_code = 201
else:
expected_status_code = 200
elif isinstance(network_objects, Network_Objects_List):
if len(network_objects) == 0:
message = "The list of network objects to create is empty."
logger.critical(message)
raise ValueError(message)
else:
network_objects_list.extend(network_objects)
if len(network_objects_list) == 1:
expected_status_code = 201
else:
expected_status_code = 200
elif isinstance(network_objects, (
Basic_Network_Object, Range_Network_Object,
Host_Network_Object, Subnet_Network_Object,
Group_Network_Object)):
network_objects_list.append(network_objects)
expected_status_code = 201
else:
raise ValueError(
"The provided parameter must be a list of network objects, "
"Secure_App.XML_Objects.REST.Network_Objects_List,Basic_Network_Object, "
"Secure_App.XML_Objects.REST.Range_Network_Object,Host_Network_Object, "
"Secure_App.XML_Objects.REST.Subnet_Network_Object or Group_Network_Object")
try:
response = self.post_uri(
"/securechangeworkflow/api/secureapp/repository/applications/{0}/network_objects".format(app_id),
network_objects_list.to_xml_string().encode(), expected_status_codes=expected_status_code)
if expected_status_code == 201:
network_object_id = response.get_created_item_id()
return network_object_id
return True
except RequestException as error:
message = "Could not create the following network objects: '{}', error was '{}'".format(
[network_object.name for network_object in network_objects_list], error)
logger.critical(message)
raise IOError(message)
except REST_Client_Error as error:
message = "Could not create the following network objects: '{}', error was '{}'".format(
[network_object.name for network_object in network_objects_list], error)
logger.critical(message)
raise ValueError(message)
def update_network_objects_for_app_id(self, app_id, network_objects):
"""
Update the specified network objects for the application with the specified ID.
:param app_id: The ID of the application that network objects will be updated for.
:type app_id: int
:raise ValueError: If an application with the specified ID is not found.
:raise IOError: If there was a communication error.
:return: If success true is returned.
:rtype: bool
"""
logger.info("Updating network objects for application with ID '%s'.", app_id)
network_objects_list = Network_Objects_List([])
expected_status_code = 200
# Handle a list of network objects
if isinstance(network_objects, list):
if len(network_objects) == 0:
message = "The list of network objects to update is empty."
logger.critical(message)
raise ValueError(message)
else:
network_objects_list.extend(network_objects)
elif isinstance(network_objects, Network_Objects_List):
if len(network_objects) == 0:
message = "The list of network objects to update is empty."
logger.critical(message)
raise ValueError(message)
else:
network_objects_list.extend(network_objects)
elif isinstance(network_objects, (
Basic_Network_Object, Range_Network_Object,
Host_Network_Object, Subnet_Network_Object,
Group_Network_Object)):
network_objects_list.append(network_objects)
else:
raise ValueError(
"The provided parameter must be a list of network objects, "
"Secure_App.XML_Objects.REST.Network_Objects_List,Basic_Network_Object, "
"Secure_App.XML_Objects.REST.Range_Network_Object,Host_Network_Object, "
"Secure_App.XML_Objects.REST.Subnet_Network_Object or Group_Network_Object")
for network_object in network_objects_list:
if not any((network_object.id, network_object.name)):
message = "One of the network objects does not have neither name nor id"
raise ValueError(message)
elif not network_object.id:
try:
network_object.id = self.get_network_object_by_name_for_app_id(network_object.name, app_id)
except (ValueError, AttributeError, IOError):
message = "Failed to get id for a network object '{}'".format(network_object.name)
logger.critical(message)
raise ValueError(message)
elif not network_object.name:
try:
network_object.name = self.get_network_object_by_id_for_app_id(network_object.id, app_id)
except (ValueError, AttributeError, IOError):
message = "Failed to get name for a network object with id '{}'".format(network_object.id)
logger.critical(message)
raise ValueError(message)
else:
continue
try:
self.put_uri(
"/securechangeworkflow/api/secureapp/repository/applications/{}/network_objects".format(app_id),
network_objects_list.to_xml_string().encode(), expected_status_codes=expected_status_code)
return True
except RequestException as error:
message = "Could not create the following network objects: '{}', error was '{}'".format(
[network_object.name for network_object in network_objects_list], error)
logger.critical(message)
raise IOError(message)
except REST_Client_Error as error:
message = "Could not create the following network objects: '{}', error was '{}'".format(
[network_object.name for network_object in network_objects_list], error)
logger.critical(message)
raise ValueError(message)
def create_network_objects_for_app_name(self, app_name, network_objects):
"""
Create the specified network objects for the application with the specified name.
:param app_name: The ID of the application that network objects will be created for.
:type app_name: str
:return: If the object creation was successful and only object was created, return is (True, object_id), where
object_id is the ID of the created object that is extracted from the Location header.
If more than one object is created, (True, None) is returned.
"""
app_id = self.get_app_by_name(app_name).id
return self.create_network_objects_for_app_id(app_id, network_objects)
def update_network_objects_for_app_name(self, app_name, network_objects):
"""
Update the specified network objects for the application with the specified name.
:param app_name: The ID of the application that network objects will be updated for.
:type app_name: str
:return: If the object update was successful True is returned
"""
app_id = self.get_app_by_name(app_name).id
return self.update_network_objects_for_app_id(app_id, network_objects)
def create_connections_for_app_id(self, app_id, connections):
"""
Create the specified network objects for the application with the specified ID.
:param app_id: The ID of the application that connections will be created for.
:type app_id: int
:raise ValueError: If an application with the specified ID is not found.
:raise IOError: If there was a communication error.
:return: The ID of the created connection.
If more than one object is created, True is returned.
"""
logger.info("Creating network objects for application with ID '%s'.", app_id)
connection_list = Connection_List([])
# Handle a list of services
if isinstance(connections, list):
if len(connections) == 0:
message = "The list of network objects to create is empty."
logger.critical(message)
raise ValueError(message)
else:
connection_list.extend(connections)
if len(connection_list) == 1:
expected_status_code = 201
else:
expected_status_code = 200
elif isinstance(connections, Connection_List):
if len(connections) == 0:
message = "The list of network objects to create is empty."
logger.critical(message)
raise ValueError(message)
else:
connection_list.extend(connections)
if len(connection_list) == 1:
expected_status_code = 201
else:
expected_status_code = 200
elif isinstance(connections, Detailed_Application_Connection):
connection_list.append(connections)
expected_status_code = 201
else:
raise ValueError(
"The provided parameter must be a list of connections objects, "
"Secure_App.XML_Objects.REST.Connection_List,"
"Detailed_Application_Connection")
try:
response = self.post_uri(
"/securechangeworkflow/api/secureapp/repository/applications/{}/connections".format(app_id),
connection_list.to_xml_string().encode(), expected_status_codes=expected_status_code)
if expected_status_code == 201:
connection_id = response.get_created_item_id()
return connection_id
return True
except RequestException as error:
message = "Could not create the following connections: '{}', error was '{}'.".format(
[connection.name for connection in connection_list], error)
logger.critical(message)
raise IOError(message)
except REST_Client_Error as error:
message = "Could not create the following connections: '{}', error was '{}'.".format(
[connection.name for connection in connection_list], error)
logger.critical(message)
raise ValueError(message)
def create_connections_for_app_name(self, app_name, connections):
"""
Create the specified connections objects for the application with the specified name.
:param app_name: The ID of the application that connections will be created for.
:type app_name: str
:return: If the object creation was successful and only object was created, return is (True, object_id), where
object_id is the ID of the created object that is extracted from the Location header.
If more than one object is created, (True, None) is returned.
"""
logger.info("Creating connections for application '%s'", app_name)
app_id = self.get_app_by_name(app_name).id
return self.create_connections_for_app_id(app_id, connections)
def update_connection_for_app_id(self, connection, app_id=None, app_name=None):
"""
Update existing connection with new one. Or ID or name of
application the connection belongs to should be provided
:param connection: The new connection to update with
:type connection:Detailed_Application_Connection
:param app_id: The Id of application to update connection for
:type app_id: int
:param app_name: The name of application to update connection for
:type app_name: str
:raise IOError: If there is communication or API error
:raise ValueError: if one of the parameters is wrong one
"""
if not (app_id or app_name):
msg = "No ID or name of application of connection to update is provided"
logger.critical(msg)
raise ValueError(msg)
elif not app_id:
app_id = self.get_app_by_name(app_name).id
if not connection.id and not connection.name:
msg = "No ID or name of connection to update is provided"
logger.critical(msg)
raise ValueError(msg)
elif not connection.id:
logger.info("Updating connection '{}' "
"for application with ID {}".format(connection.name,
app_id))
connection.id = self.get_connection_by_name_for_app_id(app_id, connection.name).id
else:
logger.info("Updating connection with ID {} "
"for application with ID {}".format(connection.id,
app_id))
try:
self.put_uri(
"/securechangeworkflow/api/secureapp/repository/applications/{}/connections/{}".format(
app_id,
connection.id),
connection.to_xml_string().encode(),
expected_status_codes=200)
except RequestException:
message = "Failed to update connection with ID {}" \
" for application with ID {}".format(connection.id,
app_id)
logger.critical(message)
raise IOError(message)
def update_connections_for_app(self, connections, app_id=None, app_name=None):
"""
Update the specified network objects for the application with the specified ID.
:param app_id: The ID of the application that connections will be created for.
:type app_id: int
:param app_name: The name of the application that connections will be created for.
:type app_name: str
:raise ValueError: If an application with the specified ID is not found.
:raise IOError: If there was a communication error.
If more than one object is created, True is returned.
"""
if not app_id and not app_name:
msg = "ID or name of application to update connections for is not provided"
logger.critical(msg)
raise ValueError(msg)
elif not app_id:
app_id = self.get_app_by_name(app_name).id
logger.info("Updating network objects for application with ID '%s'.", app_id)
connection_list = Connection_List([])
# Handle a list of services
if isinstance(connections, list):
if len(connections) == 0:
message = "The list of network objects to create is empty."
logger.critical(message)
raise ValueError(message)
else:
connection_list.extend(connections)
elif isinstance(connections, Connection_List):
if len(connections) == 0:
message = "The list of network objects to create is empty."
logger.critical(message)
raise ValueError(message)
else:
connection_list.extend(connections)
elif isinstance(connections, Detailed_Application_Connection):
connection_list.append(connections)
else:
raise ValueError(
"The provided parameter must be a list of connections objects, "
"Secure_App.XML_Objects.REST.Connection_List,"
"Detailed_Application_Connection")
try:
self.put_uri(
"/securechangeworkflow/api/secureapp/repository/applications/{}/connections".format(app_id),
connection_list.to_xml_string().encode(), expected_status_codes=200)
return True
except RequestException as error:
message = "Could not update the following connections: '{}', error was '{}'.".format(
[connection.name for connection in connection_list], error)
logger.critical(message)
raise IOError(message)
except REST_Client_Error as error:
message = "Could not update the following connections: '{}', error was '{}'.".format(
[connection.name for connection in connection_list], error)
logger.critical(message)
raise ValueError(message)
def delete_connection_by_id_for_app_id(self, app_id=None, app_name=None, connection_id=None, connection_name=None):
"""
Delete connection by it's ID from application of provided application ID
:param app_id: Application ID to delete connection from
:type app_id: int
:param app_name: Application name
:type app_name: str
:param connection_id: Connection ID to delete from Application
:type connection_id: int
:param connection_name: Connection name
:type connection_name: str
:raise IOError: If there was communication error.
"""
if not app_id and not app_name:
ValueError("Failed to delete connection, as no application ID or name specified")
elif not app_id:
app_id = self.get_app_by_name(app_name).id
if not connection_id and not connection_name:
ValueError("Failed to delete connection, no connection ID or name specified")
elif not connection_id:
connection_id = self.get_connection_by_name_for_app_id(app_id, connection_name).id
logger.info("Deleting Connection with ID %s from application with ID %s", connection_id, app_id)
try:
self.delete_uri(
"/securechangeworkflow/api/secureapp/repository/applications/{}/connections/{}".format(
app_id,
connection_id),
expected_status_codes=200)
except RequestException:
message = "Failed to delete connection with ID {} from SecureApp for Application with ID {}".format(
connection_id,
app_id)
logger.critical(message)
raise IOError(message)
def delete_all_connections_for_app(self, app_id=None, app_name=None):
"""
Delete all connections of specified application(by ID or name)
:param app_id:
:param app_name:
:raise IOError: If there were communication problems
:raise ValueError: If no app was found or wrong paramaters passed
"""
if not app_id and not app_name:
raise ValueError("Can't delete connections as no application name or id specified")
if not app_id:
app_id = self.get_app_by_name(app_name).id
logger.info("Deleting all connection from application with ID %s", app_id)
connections = [connection for connection in self.get_connections_list_for_app_id(app_id)]
if connections:
deleted_connections = []
for connection in connections:
try:
self.delete_uri(
"/securechangeworkflow/api/secureapp/repository/applications/{}/connections/{}".format(
app_id,
connection.id),
expected_status_codes=200)
except RequestException as error:
connections_names = (con.name for con in connections)
if deleted_connections:
message = "Failed to delete all connections. Deleted '{}' out of '{}'." \
" Got error on connection '{}': {}".format(
deleted_connections,
connections_names,
connection.name,
error)
else:
message = "Failed to delete connections '{}'. Got error on connection '{}': {}".format(
connections_names,
connection.name,
connections_names)
logger.critical(message)
raise IOError(message)
else:
deleted_connections.append(connection.name)
def get_connections_to_applications(self, app_id):
"""
Get connections to application for application
:param app_id: Application ID
:type app_id: str|int
:return: Connections_To_Applications
"""
logger.info("Getting Connections to application for Application with ID {}".format(app_id))
try:
response_string = self.get_uri("/securechangeworkflow/api/secureapp/"
"repository/applications/{}/connections_to_applications".format(
app_id)).response.content
except RequestException:
message = "Failed to get connections to application for app with ID {}".format(app_id)
logger.critical(message)
raise IOError(message)
return Connections_To_Applications.from_xml_string(response_string)
def get_connection_to_application(self, app_id, conn_to_app_id):
"""
Get connection to application by ID for application
:param app_id: Application ID
:type app_id: str|int
:param conn_to_app_id: Id of the connection to application
:type conn_to_app_id: str|int
:return: Connections_To_Applications
"""
logger.info("Getting Connection to Application with ID {} for Application with ID {}".format(
conn_to_app_id, app_id))
try:
response_string = self.get_uri("securechangeworkflow/api/secureapp/"
"repository/applications/{}/connections_to_applications/{}".format(
app_id, conn_to_app_id)).response.content
except REST_Not_Found_Error:
message = "Connection to Application with ID '{}' does not exist in Application with ID {}.".format(
conn_to_app_id, app_id)
logger.critical(message)
raise ValueError(message)
except RequestException:
message = "Failed to get connection to application with ID {} for app with ID {}".format(
conn_to_app_id, app_id)
logger.critical(message)
raise IOError(message)
return Connection_To_Application.from_xml_string(response_string)
def get_application_interfaces(self, app_id):
"""
Get application interfaces for application
:param app_id: Application ID
:type app_id: str|int
:return: Application_Interfaces
"""
logger.info("Getting appplication interfaces for Application with ID {}".format(app_id))
try:
response_string = self.get_uri("/securechangeworkflow/api/secureapp/"
"repository/applications/{}/application_interfaces".format(
app_id)).response.content
except RequestException:
message = "Failed to get connections to application for app with ID {}".format(app_id)
logger.critical(message)
raise IOError(message)
return Application_Interfaces.from_xml_string(response_string)
def get_application_interface(self, app_id, app_interface_id):
"""
Get application interfaces by ID for application
:param app_id: Application ID
:type app_id: str|int
:param app_interface_id: Application Interface ID
:type app_interface_id: int|str
:return: Application_Interface
"""
logger.info("Getting Application Interface with ID {} for Application with ID {}".format(
app_interface_id, app_id))
try:
response_string = self.get_uri("/securechangeworkflow/api/secureapp/"
"repository/applications/{}/application_interfaces/{}".format(
app_id, app_interface_id)).response.content
except REST_Not_Found_Error:
message = "Application Interface with ID '{}' does not exist in Application with ID {}.".format(
app_interface_id, app_id)
logger.critical(message)
raise ValueError(message)
except RequestException:
message = "Failed to get Application Interface with ID {} for app with ID {}".format(
app_interface_id, app_id)
logger.critical(message)
raise IOError(message)
return Connection_To_Application.from_xml_string(response_string)
def get_connection_to_application_pack(self, app_id, con_to_app_pack_id):
"""
Get connection to application packs for application
:param app_id: Application ID
:type app_id: str|int
:param con_to_app_pack_id: ID of the connection to Application Pack
:type con_to_app_pack_id: int|str
:return: Connection_To_Application_Pack
"""
logger.info("Getting connection to applicaton pack {} for Application with ID {}".format(
con_to_app_pack_id, app_id))
try:
response_string = self.get_uri("/securechangeworkflow/api/secureapp/"
"repository/applications/{}/connection_to_application_packs/{}".format(
app_id, con_to_app_pack_id)).response.content
except REST_Not_Found_Error:
message = "Connection to application pack with ID '{}' does not exist in Application with ID {}.".format(
con_to_app_pack_id, app_id)
logger.critical(message)
raise ValueError(message)
except RequestException:
message = "Failed to get connection to applicaton pack {} for app with ID {}".format(
con_to_app_pack_id, app_id)
logger.critical(message)
raise IOError(message)
return Detailed_Connection_To_Application_Pack.from_xml_string(response_string)
def get_connection_to_application_packs(self, app_id):
"""
Get connection to application packs for application
:param app_id: Application ID
:type app_id: str|int
:return: Connection_To_Application_Packs
"""
logger.info("Getting connection to applicaton packs for Application with ID {}".format(app_id))
try:
response_string = self.get_uri("/securechangeworkflow/api/secureapp/"
"repository/applications/{}/connection_to_application_packs".format(
app_id)).response.content
except RequestException:
message = "Failed to get connection to applicaton packs for app with ID {}".format(app_id)
logger.critical(message)
raise IOError(message)
return Connection_To_Application_Packs.from_xml_string(response_string)
def get_customers(self):
"""
Get the list of currently configured SecureApp customers.
:return: The list of currently configured SecureApp customers.
:rtype:Customers_List
:raise IOError: If there was a communication error.
"""
logger.info("Getting SecureApp customers list.")
try:
response_string = self.get_uri("/securechangeworkflow/api/secureapp/customers",
expected_status_codes=200).response.content
except RequestException:
message = "Failed to GET SecureApp customers list"
logger.critical(message)
raise IOError(message)
self._customers_list = Customers_List.from_xml_string(response_string)
return self._customers_list
def get_customer_by_id(self, customer_id):
"""
Get the SecureApp customer whose ID matches the specified ID.
:param customer_id: The ID for the customer which will be returned.
:type customer_id: int
:return: The customer whose ID matches the specified ID.
:rtype:Customer
:raise ValueError: If an customer with the specified ID is not found.
:raise IOError: If there was a communication error.
"""
logger.info("Getting SecureApp customer with ID '%s'.", customer_id)
try:
response_string = self.get_uri(
"/securechangeworkflow/api/secureapp/customers/{}".format(customer_id),
expected_status_codes=200).response.content
except REST_Not_Found_Error:
message = "Customer with ID '{}' does not exist.".format(customer_id)
logger.critical(message)
raise ValueError(message)
except REST_Bad_Request_Error:
message = "Failed to GET SecureApp customer. Check if you are not in a single mode".format(customer_id)
logger.critical(message)
raise ValueError(message)
except RequestException:
message = "Failed to GET SecureApp customer."
logger.critical(message)
raise IOError(message)
return Customer.from_xml_string(response_string)
def get_customer_by_name(self, customer_name):
"""
Get the SecureApp customer whose name matches the specified name.
:param customer_name: The name for the customer which will be returned.
:type customer_name: str
:return: The customer whose name matches the specified name.
:rtype:Customer
:raise ValueError: If the customer with the specified name is not found.
:raise IOError: If there was a communication error.
"""
logger.info("Getting SecureApp customer with name '%s'.", customer_name)
self.get_customers()
for customer in self._customers_list:
if customer.name == customer_name:
return customer
message = "The customer with the name '{}' does not exist.".format(customer_name)
logger.critical(message)
raise ValueError(message)
def get_applications_of_customer_by_id(self, customer_id):
"""
Get the SecureApp applications of the customer by his id
:param customer_id: An id of the customer whose applications we return
:type customer_id: int
:return: Applications of the customer specified by id
:rtype: Secure_App_Helper.XML_Objects.REST.Applications_List
:raise ValueError: If failed to get customer's applications
:raise IOError: If there was a communication error.
"""
logger.info("Getting SecureApp applications of customer with id '%s'.", customer_id)
try:
response_string = self.get_uri(
"/securechangeworkflow/api/secureapp/customers/{}/applications".format(customer_id),
expected_status_codes=200).response.content
except REST_Not_Found_Error:
message = "Customer with ID '{}' does not exist.".format(customer_id)
logger.critical(message)
raise ValueError(message)
except REST_Bad_Request_Error:
message = "Failed to get applications of customer with ID '{}'. Bad request.".format(customer_id)
logger.critical(message)
raise ValueError(message)
except REST_Unauthorized_Error:
message = "Failed to get applications of customer with ID '{}'. Access is denied.".format(customer_id)
logger.critical(message)
raise ValueError(message)
except RequestException:
message = "Failed to GET SecureApp customer's applications."
logger.critical(message)
raise IOError(message)
return Applications_List.from_xml_string(response_string)
def get_applications_of_customer_by_name(self, customer_name):
"""
Get applications of customer by his name
:param customer_name: the name of the customer
:type customer_name: str
:return: Applications of the customer specified by id
:rtype: Secure_App_Helper.XML_Objects.REST.Applications_List
:raise ValueError: If failed to get customer's applications
:raise IOError: If there was a communication error.
"""
logger.info("Getting SecureApp applications of customer with name '%s'.", customer_name)
customer = self.get_customer_by_name(customer_name)
return self.get_applications_of_customer_by_id(customer.id)
def get_member_network_objects_for_group_network_object(self, group_network_object,
get_members_for_nested_groups=True,
all_network_objects=None):
"""
:param group_network_object:
:type group_network_object: Group_Network_Object
:return:
"""
logger.info("Getting member network objects for network object with ID %s.", group_network_object.id)
if all_network_objects is None:
all_network_objects = {network_object.id: network_object for network_object in
self.get_all_network_objects()}
network_objects = []
for member in group_network_object.members:
member = all_network_objects[member.id]
if get_members_for_nested_groups:
logger.debug("Getting nested member objects for network object with ID '%s'.", member.id)
if hasattr(member, "members"):
sub_member_objects = self.get_member_network_objects_for_group_network_object(member,
get_members_for_nested_groups,
all_network_objects)
network_objects.extend(sub_member_objects)
else:
network_objects.append(member)
else:
network_objects.append(member)
return network_objects
def get_member_services_for_group_service(self, group_service, get_members_for_nested_groups=True,
all_services=None):
"""
:param group_service:
:type group_service: Group_Service
:return:
"""
logger.info("Getting member services for service with ID %s.", group_service.id)
if all_services is None:
all_services = {service.id: service for service in self.get_all_services()}
services = []
for member in group_service.members:
member = all_services[member.id]
if get_members_for_nested_groups:
logger.debug("Getting nested services for service with ID '%s'.", member.id)
if hasattr(member, "members"):
sub_member_objects = self.get_member_services_for_group_service(member,
get_members_for_nested_groups,
all_services)
services.extend(sub_member_objects)
else:
services.append(member)
else:
services.append(member)
return services
    def get_details_from_reference(self, Reference_Object, Object_Class=None,
                                   xml_from_string_func=None):
        """
        Follow a reference link and return the detailed referenced object.

        :param Reference_Object: The Reference Object
        :type Reference_Object: Base_Link_Target
        :param Object_Class: Class of the object to return to
        :param xml_from_string_func: Optional parser callable used instead of
            Object_Class.from_xml_string when provided.
        :return: Detailed information from referenced object
        :raise ValueError: On any failure to resolve or parse the reference.
        """
        logger.info("Getting details for reference")
        try:
            link = Reference_Object.get_reference_link()
            # TODO: check function that represents uri as an object.
            # Splitting on the API prefix keeps only the resource path; raises
            # IndexError if the link does not contain the prefix (caught below).
            api_part = link.split("securechangeworkflow/api/")[1]
            logger.info("Reference call is /securechangeworkflow/api/{}".format(api_part))
            try:
                response_string = self.get_uri(
                    "/securechangeworkflow/api/{}".format(api_part),
                    expected_status_codes=200).response.content
            except RequestException:
                # NOTE(review): message looks copy-pasted from the customer's
                # applications getter — it does not describe this operation.
                message = "Failed to GET SecureApp customer's applications."
                logger.critical(message)
                raise IOError(message)
            if xml_from_string_func:
                return xml_from_string_func(response_string)
            return Object_Class.from_xml_string(response_string)
        except (KeyError, AttributeError, IOError, TypeError, IndexError) as error:
            # NOTE(review): IOError is in this tuple, so the IOError raised just
            # above is immediately swallowed and re-raised as ValueError — the
            # effective contract of this method is "raises ValueError on any
            # failure". Confirm before removing IOError from the tuple.
            logger.critical("Failed to get details about the reference. Error: {}".format(error))
            raise ValueError("Failed to get details about the reference.")
def create_connection_repair_ticket(self, app_id, connection_id, ticket):
"""
:param app_id: The ID of the application whose connections will be repaired.
:type app_id: int
:param connection_id: The ID of the connection to be repaired.
:type connection_id: int
:param ticket: The ticket that will be created to repair the connection.
:type ticket: Secure_Change.XML_Objects.REST.Ticket
:return: The ID of the ticket that was created to repair the connection.
:rtype int
:raise ValueError: If the ticket parameters were incorrect.
:raise IOError: If there was a communication error.
"""
logger.info("Creating connection repair ticket for application with ID %s, connection ID %s ", app_id,
connection_id)
ticket_xml = ticket.to_xml_string().encode()
logger.debug("Ticket data: '%s'", ticket_xml)
try:
response = self.post_uri(
"/securechangeworkflow/api/secureapp/repository/applications/{}/connections/{}/repair".format(app_id,
connection_id),
ticket_xml, expected_status_codes=201)
ticket_id = response.get_created_item_id()
return ticket_id
except RequestException as error:
message = "Could not create a connection repair ticket for for application with ID '{}', connection ID '{}' , error was '{}'.".format(
app_id, connection_id, error)
logger.critical(message)
raise IOError(message)
except REST_Client_Error as error:
message = "Could not create a connection repair ticket for for application with ID '{}', connection ID '{}' , error was '{}'.".format(
app_id, connection_id, error)
logger.critical(message)
raise ValueError(message)
def get_cloud_console_servers(self, vendor, search_string):
""" Get the list of cloud network objects in SecureApp .
:return: The list of cloud network objects in SecureApp.
:rtype: VM_Instances
:raise IOError: If there was a communication error.
"""
logger.info("Getting cloud network objects for SecureApp.")
uri = "/securechangeworkflow/api/secureapp/cloud_console/servers?vendor={}&search_string={}".format(vendor,
search_string)
try:
response_string = self.get_uri(uri, expected_status_codes=200).response.content
except RequestException:
message = "Failed to get cloud console servers for SecureApp."
logger.critical(message)
raise IOError(message)
try:
cloud_console_servers = VM_Instances.from_xml_string(response_string)
except (ValueError, AttributeError):
message = "Failed to get cloud console servers for SecureApp."
logger.critical(message)
raise ValueError(message)
return cloud_console_servers
|
en
| 0.792407
|
This class is used to interact via HTTP with SecureApp. It also allows for easy sending of email messages and writing to the SecureChange Message Board. :param hostname: The SecureApp hostname with which we will communicate via HTTP. :type hostname: str :param login_data: A tuple of (username,password) used for basic authentication with the specified hostname. :type login_data: tuple :param message_board_enabled: (Optional) If set to False, Message Board functionality will be disabled. :type message_board_enabled: bool Get the list of currently configured SecureApp users. :return: The list of currently configured SecureApp users. :rtype:User_List :raise IOError: If there was a communication error. Get the SecureApp user whose ID matches the specified ID. :param user_id: The ID for the user which will be returned. :type user_id: int :return: The user whose ID matches the specified ID. :rtype:User :raise ValueError: If an user with the specified ID is not found. :raise IOError: If there was a communication error. Get the SecureApp user whose name matches the specified name. :param user_name: The name for the user which will be returned. :type user_name: name :return: The user whose name matches the specified name. :rtype:User :raise ValueError: If an user with the specified name is not found. :raise IOError: If there was a communication error. Get the SecureApp application whose name matches the specified name. :param app_name: The name of the application to be returned. :type app_name: str :param app_domain: The domain where app resides :type app_domain: str :return: The application whose name matches the specified name. :rtype:Application :raise ValueError: If an application with the specified name is not found. Get the SecureApp application whose ID matches the specified ID. :param app_id: The ID of the application to be returned. :type app_id: int|str :return: The application whose ID matches the specified ID. 
:rtype:Application :raise ValueError: If an application with the specified ID is not found. Get the list of currently configured SecureApp applications. :return: The currently configured SecureApp applications list. :rtype:Applications_List :raise IOError: If there was a communication error. Get the list of currently configured SecureApp applications. :param owner: Applications where user is owner :type owner: bool :param editor: Applications where user is editor :type editor: bool :param user_id: The user ID who has permissions. If not user for API would be used for filter :type user_id: str|int :return: The currently configured SecureApp applications list. :rtype:Applications_List :raise IOError: If there was a communication error. Get the list of currently configured SecureApp services. :param param_builder: Filter parameters :type param_builder: URLParamBuilderInterface :return: The currently configured SecureApp services list. :rtype:Services_List :raise IOError: If there was a communication error. Get the list of currently configured SecureApp services. :param global_service_only: Retrieve global services :return: The currently configured SecureApp services list. :rtype:Services_List :raise IOError: If there was a communication error. 
Get the list of services that are available to be used in specific application: services created locally in this application and services created globally for all applications :param app_id: ID of an application :type app_id: int :param app_name: Name of the application :type app_name: str :param include_global: If to include or not global services :type include_global: bool :raise ValueError: If wrong parameters are used :raise IOError: If there was problem in communication or API request :return: The list of services :rtype:Services_List Find services available for application with specified name or ID :param service_name: The name of the service :param service_id: The ID of the service :param app_id: The ID of the application :param app_name: The name of the application :param include_global: If to include global services in search :return: the list of services found with given id or name Get the SecureApp service by ID :param service_id: The ID of the service to be returned. :type service_id: str|int :return: The service whose name matches the specified name. :rtype:Single_Service|Group_Service :raise ValueError: If a service with the specified name is not found. # As object does not have attribute, this API will crash. Uncomment when fixed # try: # response_string = self.get_uri( # "/securechangeworkflow/api/secureapp/repository/services/{}".format(service_id), # expected_status_codes=200).response.content # except REST_Not_Found_Error: # message = "Service with ID {} does not exist.".format(service_id) # logger.critical(message) # raise ValueError(message) # except RequestException: # message = "Failed to get SecureApp service with ID {}.".format(service_id) # logger.critical(message) # raise IOError(message) # return Service_Object.from_xml_string_auto_type(response_string) Get the SecureApp service whose name matches the specified name. :param service_name: The name of the service to be returned. 
:type service_name: str :param param_builder: The URI parameters builder :type param_builder: T <= pytos.common.API_Defines.Url_Params_Builder.URLParamBuilderInterface :return: The service whose name matches the specified name. :rtype:Single_Service|Group_Service :raise ValueError: If a service with the specified name is not found. Get the list of services for Application by Application ID :param app_id: The ID of Application in SecureApp to get services :type app_id: int :return: The list of services configured for Application :rtype:Services_List :raise IOError: If there was a communication error. Get the list of services for Application by Application name :param app_name: The Name of the application for provide services list for :type app_name: str :return: The list of services configured for Application :rtype:Services_List :raise IOError: If there was a communication error. Get the SecureApp connections list for the application whose ID matches the specified ID. :param app_id: The ID of the application whose connections will be returned. :type app_id: int :return: The connections list for the application whose ID matches the specified ID. :rtype:Connection_List :raise ValueError: If an application with the specified ID is not found. :raise IOError: If there was a communication error. Get extended connections (with all information) :return: Get the SecureApp connection list for the application whose name matches the specified name. :param app_name: The name of the application whose connection list will be returned. :type app_name: str :return: The connections list for the application whose name matches the specified name. :rtype:Connection_List :raise ValueError: If an application with the specified name is not found. :raise IOError: If there was a communication error. 
Get the SecureApp connection by name for application with specified ID :param app_id: The ID of application to search connection from :type app_id: int :param connection_name: The name of the connection to be returned. :type connection_name: str :return: The connection whose name matches the specified name :rtype:Detailed_Application_Connection :raise ValueError: If connection with the specified ID is not found. Get the SecureApp connection by name for application with specified ID :param app_name: The name of application to search connection from :type app_name: str :param connection_name: The name of the connection to be returned. :type connection_name: str :return: The connection whose name matches the specified name :rtype:Detailed_Application_Connection :raise ValueError: If connection with the specified ID is not found. Get the list of network objects for SecureApp application by application ID. :param app_id: Application ID :type app_id: int :return: The list of network objects for the specified application. :rtype:Network_Objects_List :raise IOError: If there was a communication error. Get the list of all network objects in SecureApp . :return: The list of all network objects in SecureApp. :rtype: Network_Objects_List :raise IOError: If there was a communication error. Get the SecureApp network objects list for the application whose name matches the specified name. :param app_name: The name of the application whose network object list will be returned. :type app_name: str :return: The network objects list for the application whose name matches the specified name. :rtype:Network_Objects_List :raise ValueError: If an application with the specified name is not found. :raise IOError: If there was a communication error. Get the SecureApp network object whose name matches the specified name for the application whose ID matches the specified ID. :param app_id: The ID of the application whose network objects will be returned. 
:type app_id: int :param network_object_name: The name of the network object which will be returned. :type network_object_name: str :return: The network object whose name matches the specified name for the application whose ID matches the specified ID. :rtype:Network_Object_DNS_Host|Network_Object_IP_Address :raise ValueError: If an application with the specified ID is not found and/or a network object with the specified name is not found. :raise IOError: If there was a communication error. Get the SecureApp network object whose id matches the specified id for the application whose ID matches the specified ID. :param app_id: The ID of the application whose network objects will be returned. :type app_id: int :param network_object_id: The id of the network object which will be returned. :type network_object_id: int :return: The network object whose id matches the specified id for the application whose ID matches the specified ID. :rtype:Network_Object_DNS_Host|Network_Object_IP_Address :raise ValueError: If an application with the specified ID is not found and/or a network object with the specified name is not found. :raise IOError: If there was a communication error. Create the specified SecureApp application object/objects in SecureApp. :param apps: The application object/objects to create in SecureApp. :type apps:Application or list of Application :rtype: bool :raise ValueError: If there was a problem with the parameters. :raise IOError: If there was a communication error. :return: The ID of the created application. If more than one object is created, (True, None) is returned. # Handle a list of apps Update the specified SecureApp application object/objects in SecureApp. :param apps: The application object/objects to be updated in SecureApp. :type apps:Application or list of Application :return: Returns True or False if updated/not updated :rtype: bool :raise ValueError: If there was a problem with the parameters. 
:raise IOError: If there was a communication error. If more than one object is created, (True, None) is returned. # Handle a list of apps # BUG: around for current bug that will return 200 if id is not specified but application will not be updated Delete the specified SecureApp application object/objects in SecureApp. :param apps: The application object/objects to create in SecureApp. :type apps:Application|Applications_List|list[Application] :return: True if the application creation was successful. :rtype: bool :raise ValueError: If the specified application does not exist or there was another problem with the parameters. :raise IOError: If there was a communication error. # Handle a list of apps # Handle Applications_List # Handle Application Delete the SecureApp application with the specified ID. :param app_id: The ID of the application to be deleted. :type app_id: int :return: True if successful. :rtype: bool :raise ValueError: If an application with the specified ID is not found. :raise IOError: If there was a communication error. Delete the SecureApp application with the specified name. :param app_name: The name of the application to be deleted. :type app_name: str Delete all configured SecureApp applications. :return: True if successful. :rtype: bool :raise IOError: If there was a communication error. Create the specified SecureApp user object/objects in SecureApp. :param users: The user object/objects to create in SecureApp. :type users:User_List|User|list[User] :return: The ID of the created user. If more than one object is created, True is returned. :rtype: bool :raise ValueError: If there was a problem with the parameters. :raise IOError: If there was a communication error. # Handle a list of users Delete the SecureApp user with the specified ID. :param user_id: The ID of the user to be deleted. :type user_id: int :return: True if successful. :rtype: bool :raise ValueError: If a user with the specified ID is not found. 
:raise IOError: If there was a communication error. Delete the SecureApp user with the specified name. :param user_name: The name of the user to be deleted. :type user_name: string :return: True if successful. :rtype: bool :raise ValueError: If a user with the specified ID is not found. :raise IOError: If there was a communication error. Create the specified SecureApp services in SecureApp, if application id or name are specified then services will be posted to this application :param services: The services object/objects to create in SecureApp. :type services:Single_Service|Group_Service|list[Single_Service]|list[Group_Service]|Services_List :param app_id: The ID of application. :type app_id: int :param app_name: The name of the application. :type app_name: str :return: If the object creation was successful and only object was created, return the ID of the created service. If more than one object is created, (True, None) is returned. :rtype: int :raise ValueError: If there was a problem with the parameters. :raise IOError: If there was a communication error. # Handle a list of services Update the specified SecureApp services in SecureApp, if application ID or name are specified then services will be updated for this application :param services: The services object/objects to update in SecureApp for application. :type services:Single_Service|Group_Service|[Single_Service]|[Group_Service]|Services_List :param app_id: The Application ID. :type app_id: int :param app_name: The Application name :type app_name: str :raise ValueError: If there was a problem with the parameters. :raise IOError: If there was a communication error. # Handle a list of services Delete the SecureApp service with the specified name. :param service_name: The name of the SecureApp service that will be deleted. :type service_name: str :return: True if the service deletion was successful. 
:rtype: bool :raise ValueError: If the specified service does not exist or there was another problem with the parameters. :raise IOError: If there was a communication error. Delete local service with in application in SecureApp. :param app_id: The id of the application :param service_id: The local service id :return: True if the service deletion was successful. :raise ValueError: If the specified service does not exist or there was another problem with the parameters. :raise IOError: If there was a communication error. Delete the SecureApp service with the specified ID. :param service_id: The ID of the service to be deleted. :type service_id: int :return: True if successful. :rtype: bool :raise ValueError: If an service with the specified ID is not found. :raise IOError: If there was a communication error. Create the specified network objects for the application with the specified ID. :param app_id: The ID of the application that network objects will be created for. :type app_id: int :raise ValueError: If an application with the specified ID is not found. :raise IOError: If there was a communication error. :return: The ID of the created network object. If more than one object is created, True is returned. # Handle a list of network objects Update the specified network objects for the application with the specified ID. :param app_id: The ID of the application that network objects will be updated for. :type app_id: int :raise ValueError: If an application with the specified ID is not found. :raise IOError: If there was a communication error. :return: If success true is returned. :rtype: bool # Handle a list of network objects Create the specified network objects for the application with the specified name. :param app_name: The ID of the application that network objects will be created for. 
:type app_name: str :return: If the object creation was successful and only object was created, return is (True, object_id), where object_id is the ID of the created object that is extracted from the Location header. If more than one object is created, (True, None) is returned. Update the specified network objects for the application with the specified name. :param app_name: The ID of the application that network objects will be updated for. :type app_name: str :return: If the object update was successful True is returned Create the specified network objects for the application with the specified ID. :param app_id: The ID of the application that connections will be created for. :type app_id: int :raise ValueError: If an application with the specified ID is not found. :raise IOError: If there was a communication error. :return: The ID of the created connection. If more than one object is created, True is returned. # Handle a list of services Create the specified connections objects for the application with the specified name. :param app_name: The ID of the application that connections will be created for. :type app_name: str :return: If the object creation was successful and only object was created, return is (True, object_id), where object_id is the ID of the created object that is extracted from the Location header. If more than one object is created, (True, None) is returned. Update existing connection with new one. Or ID or name of application the connection belongs to should be provided :param connection: The new connection to update with :type connection:Detailed_Application_Connection :param app_id: The Id of application to update connection for :type app_id: int :param app_name: The name of application to update connection for :type app_name: str :raise IOError: If there is communication or API error :raise ValueError: if one of the parameters is wrong one Update the specified network objects for the application with the specified ID. 
:param app_id: The ID of the application that connections will be created for. :type app_id: int :param app_name: The name of the application that connections will be created for. :type app_name: str :raise ValueError: If an application with the specified ID is not found. :raise IOError: If there was a communication error. If more than one object is created, True is returned. # Handle a list of services Delete connection by it's ID from application of provided application ID :param app_id: Application ID to delete connection from :type app_id: int :param app_name: Application name :type app_name: str :param connection_id: Connection ID to delete from Application :type connection_id: int :param connection_name: Connection name :type connection_name: str :raise IOError: If there was communication error. Delete all connections of specified application(by ID or name) :param app_id: :param app_name: :raise IOError: If there were communication problems :raise ValueError: If no app was found or wrong paramaters passed Get connections to application for application :param app_id: Application ID :type app_id: str|int :return: Connections_To_Applications Get connection to application by ID for application :param app_id: Application ID :type app_id: str|int :param conn_to_app_id: Id of the connection to application :type conn_to_app_id: str|int :return: Connections_To_Applications Get application interfaces for application :param app_id: Application ID :type app_id: str|int :return: Application_Interfaces Get application interfaces by ID for application :param app_id: Application ID :type app_id: str|int :param app_interface_id: Application Interface ID :type app_interface_id: int|str :return: Application_Interface Get connection to application packs for application :param app_id: Application ID :type app_id: str|int :param con_to_app_pack_id: ID of the connection to Application Pack :type con_to_app_pack_id: int|str :return: Connection_To_Application_Pack Get connection to 
application packs for application :param app_id: Application ID :type app_id: str|int :return: Connection_To_Application_Packs Get the list of currently configured SecureApp customers. :return: The list of currently configured SecureApp customers. :rtype:Customers_List :raise IOError: If there was a communication error. Get the SecureApp customer whose ID matches the specified ID. :param customer_id: The ID for the customer which will be returned. :type customer_id: int :return: The customer whose ID matches the specified ID. :rtype:Customer :raise ValueError: If an customer with the specified ID is not found. :raise IOError: If there was a communication error. Get the SecureApp customer whose name matches the specified name. :param customer_name: The name for the customer which will be returned. :type customer_name: str :return: The customer whose name matches the specified name. :rtype:Customer :raise ValueError: If the customer with the specified name is not found. :raise IOError: If there was a communication error. Get the SecureApp applications of the customer by his id :param customer_id: An id of the customer whose applications we return :type customer_id: int :return: Applications of the customer specified by id :rtype: Secure_App_Helper.XML_Objects.REST.Applications_List :raise ValueError: If failed to get customer's applications :raise IOError: If there was a communication error. Get applications of customer by his name :param customer_name: the name of the customer :type customer_name: str :return: Applications of the customer specified by id :rtype: Secure_App_Helper.XML_Objects.REST.Applications_List :raise ValueError: If failed to get customer's applications :raise IOError: If there was a communication error. 
:param group_network_object: :type group_network_object: Group_Network_Object :return: :param group_service: :type group_service: Group_Service :return: :param Reference_Object: The Reference Object :type Reference_Object: Base_Link_Target :param Object_Class: Class of the object to return to :return: Detailed information from referenced object # TODO: check function that represents uri as an object. :param app_id: The ID of the application whose connections will be repaired. :type app_id: int :param connection_id: The ID of the connection to be repaired. :type connection_id: int :param ticket: The ticket that will be created to repair the connection. :type ticket: Secure_Change.XML_Objects.REST.Ticket :return: The ID of the ticket that was created to repair the connection. :rtype int :raise ValueError: If the ticket parameters were incorrect. :raise IOError: If there was a communication error. Get the list of cloud network objects in SecureApp . :return: The list of cloud network objects in SecureApp. :rtype: VM_Instances :raise IOError: If there was a communication error.
| 2.328582
| 2
|
LinkedListMiddleNode.py
|
sprasadhpy/Python_Data_structures
| 0
|
6629673
|
class Node:
    """One element of a singly linked list."""

    def __init__(self, data):
        self.data = data        # payload held by this node
        self.next_node = None   # successor; None marks the tail
class LinkedList:
    """Singly linked list with O(1) head insertion.

    Attributes:
        head: First node of the list, or None when the list is empty.
        size: Number of nodes currently stored.
    """

    def __init__(self):
        self.head = None
        self.size = 0

    # O(N) linear running time complexity
    def get_middle_node(self):
        """Return the middle node using the fast/slow two-pointer technique.

        The fast pointer advances two nodes per step while the slow pointer
        advances one, so when the fast pointer reaches the end the slow
        pointer sits on the middle node (the first of the two middle nodes
        for even-length lists).

        Returns:
            The middle Node, or None if the list is empty.
        """
        if self.head is None:
            # Guard: the original dereferenced head.next_node here and
            # raised AttributeError on an empty list.
            return None
        fast_pointer = self.head
        slow_pointer = self.head
        while fast_pointer.next_node and fast_pointer.next_node.next_node:
            fast_pointer = fast_pointer.next_node.next_node
            slow_pointer = slow_pointer.next_node
        return slow_pointer

    def insert(self, data):
        """Prepend a new node holding `data` in O(1)."""
        self.size = self.size + 1
        new_node = Node(data)
        if not self.head:
            self.head = new_node
        else:
            new_node.next_node = self.head
            self.head = new_node

    def traverse_list(self):
        """Print each node's data from head to tail, one per line."""
        actual_node = self.head
        while actual_node is not None:
            print("%d" % actual_node.data)
            actual_node = actual_node.next_node
if __name__ == '__main__':
    # Demo: insert prepends, so the list becomes 40 -> 30 -> 20 -> 10,
    # whose middle node (first of the two middles) holds 30.
    linked_list = LinkedList()
    for value in (10, 20, 30, 40):
        linked_list.insert(value)
    print(linked_list.get_middle_node().data)
|
class Node:
def __init__(self, data):
self.data = data
self.next_node = None
class LinkedList:
def __init__(self):
self.head = None
self.size = 0
# O(N) linear running time complexity
def get_middle_node(self):
fast_pointer = self.head
slow_pointer = self.head
while fast_pointer.next_node and fast_pointer.next_node.next_node:
fast_pointer = fast_pointer.next_node.next_node
slow_pointer = slow_pointer.next_node
return slow_pointer
def insert(self, data):
self.size = self.size + 1
new_node = Node(data)
if not self.head:
self.head = new_node
else:
new_node.next_node = self.head
self.head = new_node
def traverse_list(self):
actual_node = self.head
while actual_node is not None:
print("%d" % actual_node.data)
actual_node = actual_node.next_node
if __name__ == '__main__':
linked_list = LinkedList()
linked_list.insert(10)
linked_list.insert(20)
linked_list.insert(30)
linked_list.insert(40)
print(linked_list.get_middle_node().data)
|
en
| 0.829293
|
# O(N) linear running time complexity
| 4.313182
| 4
|
src/pyramid_authsanity/interfaces.py
|
usingnamespace/pyramid_authsanity
| 19
|
6629674
|
<reponame>usingnamespace/pyramid_authsanity
from zope.interface import Attribute, Interface
# NOTE: zope.interface declarations -- methods are declared without `self`;
# concrete implementations take the usual instance argument.
class IAuthSourceService(Interface):
    """Represents an authentication source.

    The source is the storage/transport layer for the opaque auth value
    (for example a cookie or a server-side session entry): it reads the
    value back and emits the response headers that store or clear it.
    """
    vary = Attribute("List of HTTP headers to Vary the response by.")
    def get_value():
        """ Returns the opaque value that was stored. """
    def headers_remember(value):
        """Returns any and all headers for remembering the value, as a list.
        Value is a standard Python type that shall be serializable using
        JSON."""
    def headers_forget():
        """Returns any and all headers for forgetting the current requests
        value."""
# NOTE: zope.interface declaration -- methods are declared without `self`.
class IAuthService(Interface):
    """Represents an authentication service. This service verifies that the
    users authentication ticket is valid and returns groups the user is a
    member of."""
    def userid():
        """Return the current user id, None, or raise an error. Raising an
        error is used when no attempt to verify a ticket has been made yet and
        signifies that the authentication policy should attempt to call
        ``verify_ticket``"""
    def groups():
        """Returns the groups for the current user, as a list. Including the
        current userid in this list is not required, as it will be implicitly
        added by the authentication policy."""
    def verify_ticket(principal, ticket):
        """ Verify that the principal matches the ticket given. """
    def add_ticket(principal, ticket):
        """Add a new ticket for the principal. If there is a failure, due to a
        missing/non-existent principal, or failure to add ticket for principal,
        should raise an error"""
    def remove_ticket(ticket):
        """ Remove a ticket for the current user. Upon success return True """
|
from zope.interface import Attribute, Interface
class IAuthSourceService(Interface):
""" Represents an authentication source. """
vary = Attribute("List of HTTP headers to Vary the response by.")
def get_value():
""" Returns the opaque value that was stored. """
def headers_remember(value):
"""Returns any and all headers for remembering the value, as a list.
Value is a standard Python type that shall be serializable using
JSON."""
def headers_forget():
"""Returns any and all headers for forgetting the current requests
value."""
class IAuthService(Interface):
"""Represents an authentication service. This service verifies that the
users authentication ticket is valid and returns groups the user is a
member of."""
def userid():
"""Return the current user id, None, or raise an error. Raising an
error is used when no attempt to verify a ticket has been made yet and
signifies that the authentication policy should attempt to call
``verify_ticket``"""
def groups():
"""Returns the groups for the current user, as a list. Including the
current userid in this list is not required, as it will be implicitly
added by the authentication policy."""
def verify_ticket(principal, ticket):
""" Verify that the principal matches the ticket given. """
def add_ticket(principal, ticket):
"""Add a new ticket for the principal. If there is a failure, due to a
missing/non-existent principal, or failure to add ticket for principal,
should raise an error"""
def remove_ticket(ticket):
""" Remove a ticket for the current user. Upon success return True """
|
en
| 0.870244
|
Represents an authentication source. Returns the opaque value that was stored. Returns any and all headers for remembering the value, as a list. Value is a standard Python type that shall be serializable using JSON. Returns any and all headers for forgetting the current requests value. Represents an authentication service. This service verifies that the users authentication ticket is valid and returns groups the user is a member of. Return the current user id, None, or raise an error. Raising an error is used when no attempt to verify a ticket has been made yet and signifies that the authentication policy should attempt to call ``verify_ticket`` Returns the groups for the current user, as a list. Including the current userid in this list is not required, as it will be implicitly added by the authentication policy. Verify that the principal matches the ticket given. Add a new ticket for the principal. If there is a failure, due to a missing/non-existent principal, or failure to add ticket for principal, should raise an error Remove a ticket for the current user. Upon success return True
| 2.637848
| 3
|
src/admin_toolbelt/tasks.py
|
Elemnir/admin_toolbelt
| 0
|
6629675
|
<reponame>Elemnir/admin_toolbelt
import dramatiq
from .storage import create_path
def set_actor_queue(actor, queue_name):
    """Declare `queue_name` on the actor's broker and route the actor to it."""
    broker = actor.broker
    broker.declare_queue(queue_name)
    actor.queue_name = queue_name
create_path = dramatiq.actor(create_path)
|
import dramatiq
from .storage import create_path
def set_actor_queue(actor, queue_name):
actor.broker.declare_queue(queue_name)
actor.queue_name = queue_name
create_path = dramatiq.actor(create_path)
|
none
| 1
| 1.767555
| 2
|
|
objects/CSCG/_3d/spaces/base/visualize/main.py
|
mathischeap/mifem
| 1
|
6629676
|
<reponame>mathischeap/mifem
from screws.freeze.main import FrozenOnly
from objects.CSCG._3d.spaces.base.visualize.matplot import _3dCSC_Space_Visualize_Matplot
class _3dCSC_Space_Visualize(FrozenOnly):
    """Visualization entry point for a 3d CSCG space.

    Wraps the matplotlib-based visualizer and forwards calls to it.
    """
    def __init__(self, space):
        # The space this visualizer belongs to.
        self._space_ = space
        # Matplotlib backend; currently the only (and default) visualizer.
        self._matplot_ = _3dCSC_Space_Visualize_Matplot(space)
        # presumably locks the instance against new attributes (FrozenOnly) -- confirm
        self._freeze_self_()
    def __call__(self, *args, **kwargs):
        """Call the default visualizer."""
        return self.matplot(*args, **kwargs)
    @property
    def matplot(self):
        # Read-only access to the matplotlib visualizer.
        return self._matplot_
|
from screws.freeze.main import FrozenOnly
from objects.CSCG._3d.spaces.base.visualize.matplot import _3dCSC_Space_Visualize_Matplot
class _3dCSC_Space_Visualize(FrozenOnly):
""""""
def __init__(self, space):
self._space_ = space
self._matplot_ = _3dCSC_Space_Visualize_Matplot(space)
self._freeze_self_()
def __call__(self, *args, **kwargs):
"""Call the default visualizer."""
return self.matplot(*args, **kwargs)
@property
def matplot(self):
return self._matplot_
|
en
| 0.079961
|
Call the default visualizer.
| 2.435537
| 2
|
nanopores/tools/__init__.py
|
jhwnkim/nanopores
| 0
|
6629677
|
<filename>nanopores/tools/__init__.py
# Package namespace for nanopores.tools: re-export the public API of every
# submodule so callers can simply `from nanopores.tools import <name>`.
# NOTE: with star imports, later modules may shadow names from earlier ones.
from .illposed import *
from .errorest import *
from .geometry import *
from .pdesystem import *
from .utilities import *
from .coupled import *
from .physicsclass import *
from .protocol import *
from .mpipool import *
from .transientpde import *
from .box import *
from .axisym import *
from . import solvermethods
from .solvers import *
|
<filename>nanopores/tools/__init__.py
from .illposed import *
from .errorest import *
from .geometry import *
from .pdesystem import *
from .utilities import *
from .coupled import *
from .physicsclass import *
from .protocol import *
from .mpipool import *
from .transientpde import *
from .box import *
from .axisym import *
from . import solvermethods
from .solvers import *
|
none
| 1
| 0.983909
| 1
|
|
requests/httpclient.py
|
liugangabc/site-packages
| 0
|
6629678
|
<reponame>liugangabc/site-packages<gh_stars>0
#!/usr/bin/python
# -*- coding: utf-8 -*-
import copy
import requests
import pprint
def log(**data):
    """Pretty-print the given keyword arguments as a debug log record."""
    # The original body had a stray `pass` before the pprint call; since
    # `pass` is a no-op (not a `return`), printing always happened anyway.
    # The dead statement is removed -- behavior is unchanged.
    pprint.pprint(data)
LOG = log  # module-wide logging hook; rebind to silence or redirect logging
class HTTPClient(object):
    """Thin wrapper around a persistent `requests` session bound to one base URL.

    :param host: server host name or address
    :param port: server port (string or int)
    :param protocol: URL scheme, e.g. "http" or "https"
    :param timeout: per-request timeout in seconds
    :param allow_redirects: whether redirects are followed automatically
    """
    def __init__(self, host="127.0.0.1",
                 port='80', protocol="http", timeout=3600,
                 allow_redirects=False):
        self.url = "{protocol}://{host}:{port}".format(
            **{"protocol": protocol, "host": host, "port": port})
        self.session = requests.session()
        # Defaults applied to every request. Per-call kwargs are merged into a
        # copy (see _options): the original `self.option.update(kwargs)`
        # mutated this dict, so one call's kwargs leaked into all later calls.
        self.option = dict(timeout=timeout, allow_redirects=allow_redirects)
    def _options(self, kwargs):
        """Return the default options merged with per-call overrides."""
        merged = dict(self.option)
        merged.update(kwargs)
        return merged
    def get(self, url="", data=None, **kwargs):
        """Issue a GET against `base_url + url`."""
        url = self.url + url
        option = self._options(kwargs)
        LOG(type="GET", url=url, data=data, option=option)
        return self.session.get(url=url, data=data, **option)
    def post(self, url="", data=None, json=None, **kwargs):
        """Issue a POST with an optional form `data` and/or `json` body."""
        url = self.url + url
        option = self._options(kwargs)
        LOG(type="POST", url=url, data=data, json=json, option=option)
        return self.session.post(url=url, json=json, data=data, **option)
    def request(self, method, url, data, json, **kwargs):
        """Issue a request with an arbitrary HTTP `method`."""
        url = self.url + url
        option = self._options(kwargs)
        LOG(type=method, url=url, data=data, json=json, option=option)
        return self.session.request(method, url=url, json=json, data=data,
                                    **option)
    @property
    def cookies(self):
        """Cookies currently held by the underlying session, as a plain dict."""
        return self.session.cookies.get_dict()
# test
# http = HTTPClient(port=8000)
# re = http.get(url="/api/user/")
class DjangoClient(object):
    """HTTPClient wrapper that performs Django session + CSRF authentication.

    On construction it fetches the login page once to obtain a ``csrftoken``
    cookie, then posts the credentials with the ``X-CSRFToken`` header set.
    """
    def __init__(self, host="", port="80", auth_url="", auth_data=None):
        self.http = HTTPClient(host=host, port=port)
        self.http.get(url=auth_url)  # prime the session with a csrftoken cookie
        csrftoken = self.http.cookies["csrftoken"]
        self.headers = {
            "Content-Type": "application/x-www-form-urlencoded",
            "X-CSRFToken": csrftoken
        }
        self.http.post(url=auth_url, data=auth_data, headers=self.headers)
    @staticmethod
    def _headers_for(json):
        """Content-Type header set: JSON when a json payload is given, form otherwise."""
        if json:
            return {"headers": {"Content-Type": "application/json"}}
        return {"headers": {"Content-Type": "application/x-www-form-urlencoded"}}
    def get(self, url="", data=None, **kwargs):
        """GET `url`; always advertises a JSON content type (as the original did)."""
        options = {
            "headers": {
                "Content-Type": "application/json",
            }
        }
        options.update(kwargs)
        return self.http.get(url=url, data=data, **options)
    def post(self, url="", data=None, json=None, **kwargs):
        """POST `data` (form-encoded) or `json` to `url`."""
        options = self._headers_for(json)
        options.update(kwargs)
        return self.http.post(url=url, json=json, data=data, **options)
    def request(self, method, url, data=None, json=None, **kwargs):
        """Send an arbitrary-method request with the appropriate content type."""
        options = self._headers_for(json)
        options.update(kwargs)
        return self.http.request(method, url=url, json=json, data=data,
                                 **options)
if __name__ == "__main__":
    # Smoke-test the REST API through a DjangoClient instance.  Guarded by
    # __main__ so merely importing this module no longer triggers a live
    # network login.  The original also used the Python-2-only statement
    # `print r.content`; the parenthesized form below works on both 2 and 3.
    # NOTE(review): credentials are hard-coded placeholders; supply real ones
    # via configuration/environment instead of committing them.
    django = DjangoClient(
        host="172.16.25.10",
        auth_url="/auth/login/",
        auth_data={
            "username": "admin",
            "password": "<PASSWORD>"
        }
    )
    r = django.get(url="/restapi/")
    print(r.content)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import copy
import requests
import pprint
def log(**data):
pass
pprint.pprint(data)
LOG = log
class HTTPClient(object):
def __init__(self, host="127.0.0.1",
port='80', protocol="http", timeout=3600,
allow_redirects=False):
self.url = "{protocol}://{host}:{port}".format(
**{"protocol": protocol, "host": host, "port": port})
self.session = requests.session()
self.option = dict(timeout=timeout, allow_redirects=allow_redirects)
def get(self, url="", data=None, **kwargs):
url = self.url + url
self.option.update(kwargs)
LOG(type="GET", url=url, data=data, option=self.option)
return self.session.get(url=url, data=data, **self.option)
def post(self, url="", data=None, json=None, **kwargs):
url = self.url + url
self.option.update(kwargs)
LOG(type="POST", url=url, data=data, json=json, option=self.option)
return self.session.post(url=url, json=json, data=data, **self.option)
def request(self, method, url, data, json, **kwargs):
url = self.url + url
self.option.update(kwargs)
LOG(type=method, url=url, data=data, json=json, option=self.option)
return self.session.request(method, url=url, json=json, data=data,
**self.option)
@property
def cookies(self):
return self.session.cookies.get_dict()
# test
# http = HTTPClient(port=8000)
# re = http.get(url="/api/user/")
class DjangoClient(object):
def __init__(self, host="", port="80", auth_url="", auth_data=None):
self.http = HTTPClient(host=host, port=port)
self.http.get(url=auth_url)
csrftoken = self.http.cookies["csrftoken"]
self.headers = {
"Content-Type": "application/x-www-form-urlencoded",
"X-CSRFToken": csrftoken
}
self.http.post(url=auth_url, data=auth_data, headers=self.headers)
def get(self, url="", data=None, **kwargs):
options = {
"headers": {
"Content-Type": "application/json",
}
}
options.update(kwargs)
return self.http.get(url=url, data=data, **options)
def post(self, url="", data=None, json=None, **kwargs):
if json:
options = {
"headers": {
"Content-Type": "application/json",
}
}
else:
options = {
"headers": {
"Content-Type": "application/x-www-form-urlencoded",
}
}
options.update(kwargs)
return self.http.post(url=url, json=json, data=data, **options)
def request(self, method, url, data=None, json=None, **kwargs):
if json:
options = {
"headers": {
"Content-Type": "application/json",
}
}
else:
options = {
"headers": {
"Content-Type": "application/x-www-form-urlencoded",
}
}
options.update(kwargs)
return self.http.request(method, url=url, json=json, data=data,
**options)
django = DjangoClient(
host="172.16.25.10",
auth_url="/auth/login/",
auth_data={
"username": "admin",
"password": "<PASSWORD>"
}
)
r = django.get(url="/restapi/")
print r.content
# 通过 Djangoclient实例测试 restful接口
|
en
| 0.280846
|
#!/usr/bin/python # -*- coding: utf-8 -*- # test # http = HTTPClient(port=8000) # re = http.get(url="/api/user/") # 通过 Djangoclient实例测试 restful接口
| 2.775044
| 3
|
references/classification/train.py
|
frgfm/PyroNear
| 9
|
6629679
|
# Copyright (C) 2019-2022, Pyronear.
# This program is licensed under the Apache License version 2.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
import datetime
import os
import time
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import wandb
from holocron.models.presets import IMAGENET
from holocron.optim import AdamP
from holocron.trainer import BinaryClassificationTrainer
from torch.utils.data import RandomSampler, SequentialSampler
from torchvision.datasets import ImageFolder
from torchvision.transforms import transforms
from torchvision.transforms.functional import InterpolationMode, to_pil_image
import pyrovision
from pyrovision.datasets import OpenFire
def target_transform(target):
    """Convert a scalar class label into a float32 tensor of shape (1,)."""
    return torch.tensor(target, dtype=torch.float32).unsqueeze(dim=0)
def plot_samples(images, targets, num_samples=4):
    """Display up to `num_samples` images from a batch with their targets.

    Args:
        images: batch tensor of normalized images (assumed NCHW with ImageNet
            normalization -- TODO confirm against the caller's transforms)
        targets: 1-D tensor of class indices, or 2-D tensor of per-class scores
        num_samples: maximum number of images to show
    """
    # Unnormalize image
    nb_samples = min(num_samples, images.shape[0])
    _, axes = plt.subplots(1, nb_samples, figsize=(20, 5))
    for idx in range(nb_samples):
        img = images[idx]
        # Undo the IMAGENET mean/std normalization so the image displays correctly
        img *= torch.tensor(IMAGENET['std']).view(-1, 1, 1)
        img += torch.tensor(IMAGENET['mean']).view(-1, 1, 1)
        img = to_pil_image(img)
        axes[idx].imshow(img)
        axes[idx].axis('off')
        if targets.ndim == 1:
            # Hard labels: title is the class name
            axes[idx].set_title(IMAGENET['classes'][targets[idx].item()])
        else:
            # Soft/multi-label targets: list each positive class with its score
            class_idcs = torch.where(targets[idx] > 0)[0]
            _info = [f"{IMAGENET['classes'][_idx.item()]} ({targets[idx, _idx]:.2f})" for _idx in class_idcs]
            axes[idx].set_title(" ".join(_info))
    plt.show()
def main(args):
    """Run the training/evaluation pipeline described by `args`.

    Depending on the flags this shows data samples, runs the LR finder,
    evaluates a checkpoint, or trains the model (optionally logging to
    Weights & Biases).
    """
    print(args)
    torch.backends.cudnn.benchmark = True
    # Data loading code
    # BUG FIX: the std lookup used the malformed key 'std]' and raised a
    # KeyError at runtime; it now reads the intended IMAGENET['std'].
    normalize = transforms.Normalize(mean=IMAGENET['mean'],
                                     std=IMAGENET['std'])
    interpolation = InterpolationMode.BILINEAR
    train_transforms = transforms.Compose([
        transforms.RandomResizedCrop(size=args.img_size, scale=(0.8, 1.0), interpolation=interpolation),
        transforms.RandomRotation(degrees=5, interpolation=interpolation),
        transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.1),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    val_transforms = transforms.Compose([
        transforms.Resize(size=args.img_size, interpolation=interpolation),
        transforms.CenterCrop(size=args.img_size),
        transforms.ToTensor(),
        normalize,
    ])
    print("Loading data")
    if args.dataset == "openfire":
        train_set = OpenFire(root=args.data_path, train=True, download=True,
                             transform=train_transforms)
        val_set = OpenFire(root=args.data_path, train=False, download=True,
                           transform=val_transforms)
    else:
        # Generic ImageFolder layout: <data_path>/train and <data_path>/val
        train_dir = os.path.join(args.data_path, 'train')
        val_dir = os.path.join(args.data_path, 'val')
        train_set = ImageFolder(train_dir, train_transforms, target_transform=target_transform)
        val_set = ImageFolder(val_dir, val_transforms, target_transform=target_transform)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, drop_last=True,
                                               sampler=RandomSampler(train_set), num_workers=args.workers,
                                               pin_memory=True)
    if args.show_samples:
        x, target = next(iter(train_loader))
        plot_samples(x, target)
        return
    val_loader = torch.utils.data.DataLoader(val_set, batch_size=args.batch_size, drop_last=False,
                                             sampler=SequentialSampler(val_set), num_workers=args.workers,
                                             pin_memory=True)
    print("Creating model")
    model = pyrovision.models.__dict__[args.arch](args.pretrained, num_classes=1)
    criterion = nn.BCEWithLogitsLoss()
    # Create the contiguous parameters.
    model_params = [p for p in model.parameters() if p.requires_grad]
    if args.opt == 'sgd':
        optimizer = torch.optim.SGD(model_params, args.lr, momentum=0.9, weight_decay=args.weight_decay)
    elif args.opt == 'radam':
        optimizer = torch.optim.RAdam(model_params, args.lr,
                                      betas=(0.95, 0.99), eps=1e-6, weight_decay=args.weight_decay)
    elif args.opt == 'adamp':
        optimizer = AdamP(model_params, args.lr, betas=(0.95, 0.99), eps=1e-6, weight_decay=args.weight_decay)
    else:
        # Fail fast with a clear message instead of hitting a NameError on
        # `optimizer` a few lines below.
        raise ValueError(f"unsupported optimizer: {args.opt}")
    log_wb = lambda metrics: wandb.log(metrics) if args.wb else None
    trainer = BinaryClassificationTrainer(model, train_loader, val_loader, criterion, optimizer,
                                          args.device, args.output_file, amp=args.amp, on_epoch_end=log_wb)
    if args.resume:
        print(f"Resuming {args.resume}")
        checkpoint = torch.load(args.resume, map_location='cpu')
        trainer.load(checkpoint)
    if args.test_only:
        print("Running evaluation")
        eval_metrics = trainer.evaluate()
        print(f"Validation loss: {eval_metrics['val_loss']:.4} "
              f"(Acc@1: {eval_metrics['acc1']:.2%}, Acc@5: {eval_metrics['acc5']:.2%})")
        return
    if args.find_lr:
        print("Looking for optimal LR")
        trainer.find_lr(args.freeze_until, num_it=min(len(train_loader), 100), norm_weight_decay=args.norm_wd)
        trainer.plot_recorder()
        return
    # Training monitoring
    current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    exp_name = f"{args.arch}-{current_time}" if args.name is None else args.name
    # W&B
    if args.wb:
        run = wandb.init(
            name=exp_name,
            project="pyrovision-image-classification",
            config={
                "learning_rate": args.lr,
                "scheduler": args.sched,
                "weight_decay": args.weight_decay,
                "epochs": args.epochs,
                "batch_size": args.batch_size,
                "architecture": args.arch,
                "input_size": args.img_size,
                "optimizer": args.opt,
                "dataset": args.dataset,
                "loss": "bce",
            }
        )
    print("Start training")
    start_time = time.time()
    trainer.fit_n_epochs(args.epochs, args.lr, args.freeze_until, args.sched, norm_weight_decay=args.norm_wd)
    total_time_str = str(datetime.timedelta(seconds=int(time.time() - start_time)))
    print(f"Training time {total_time_str}")
    if args.wb:
        run.finish()
def parse_args():
    """Build and parse the command-line interface for this training script."""
    import argparse
    parser = argparse.ArgumentParser(description='Pyronear Classification Training',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Dataset / experiment identification
    parser.add_argument('data_path', type=str, help='path to dataset folder')
    parser.add_argument('--name', type=str, default=None, help='Name of your training experiment')
    parser.add_argument('--arch', default='rexnet1_0x', type=str, help='model')
    parser.add_argument('--dataset', default='openfire', type=str, help='dataset to train on')
    parser.add_argument('--freeze-until', default=None, type=str, help='Last layer to freeze')
    parser.add_argument('--device', default=None, type=int, help='device')
    # Training schedule / data loading
    parser.add_argument('-b', '--batch-size', default=32, type=int, help='batch size')
    parser.add_argument('--epochs', default=20, type=int, help='number of total epochs to run')
    parser.add_argument('-j', '--workers', default=16, type=int, help='number of data loading workers')
    parser.add_argument('--img-size', default=224, type=int, help='image size')
    # Optimization
    parser.add_argument('--opt', default='adamp', type=str, help='optimizer')
    parser.add_argument('--sched', default='onecycle', type=str, help='Scheduler to be used')
    parser.add_argument('--lr', default=1e-3, type=float, help='initial learning rate')
    parser.add_argument('--wd', '--weight-decay', default=0, type=float, help='weight decay', dest='weight_decay')
    parser.add_argument('--norm-wd', default=None, type=float, help='weight decay of norm parameters')
    # Modes / toggles
    parser.add_argument("--find-lr", dest='find_lr', action='store_true', help="Should you run LR Finder")
    parser.add_argument("--show-samples", action='store_true', help="Whether training samples should be displayed")
    parser.add_argument('--output-file', default='./model.pth', help='path where to save')
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument("--test-only", dest="test_only", help="Only test the model", action="store_true")
    parser.add_argument("--pretrained", dest="pretrained", help="Use pre-trained models from the modelzoo",
                        action="store_true")
    parser.add_argument("--amp", help="Use Automatic Mixed Precision", action="store_true")
    parser.add_argument('--wb', action='store_true', help='Log to Weights & Biases')
    args = parser.parse_args()
    return args
if __name__ == "__main__":
    # Script entry point: parse CLI flags, then run the training pipeline.
    args = parse_args()
    main(args)
|
# Copyright (C) 2019-2022, Pyronear.
# This program is licensed under the Apache License version 2.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
import datetime
import os
import time
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import wandb
from holocron.models.presets import IMAGENET
from holocron.optim import AdamP
from holocron.trainer import BinaryClassificationTrainer
from torch.utils.data import RandomSampler, SequentialSampler
from torchvision.datasets import ImageFolder
from torchvision.transforms import transforms
from torchvision.transforms.functional import InterpolationMode, to_pil_image
import pyrovision
from pyrovision.datasets import OpenFire
def target_transform(target):
target = torch.tensor(target, dtype=torch.float32)
return target.unsqueeze(dim=0)
def plot_samples(images, targets, num_samples=4):
# Unnormalize image
nb_samples = min(num_samples, images.shape[0])
_, axes = plt.subplots(1, nb_samples, figsize=(20, 5))
for idx in range(nb_samples):
img = images[idx]
img *= torch.tensor(IMAGENET['std']).view(-1, 1, 1)
img += torch.tensor(IMAGENET['mean']).view(-1, 1, 1)
img = to_pil_image(img)
axes[idx].imshow(img)
axes[idx].axis('off')
if targets.ndim == 1:
axes[idx].set_title(IMAGENET['classes'][targets[idx].item()])
else:
class_idcs = torch.where(targets[idx] > 0)[0]
_info = [f"{IMAGENET['classes'][_idx.item()]} ({targets[idx, _idx]:.2f})" for _idx in class_idcs]
axes[idx].set_title(" ".join(_info))
plt.show()
def main(args):
print(args)
torch.backends.cudnn.benchmark = True
# Data loading code
normalize = transforms.Normalize(mean=IMAGENET['mean'],
std=IMAGENET['std]'])
interpolation = InterpolationMode.BILINEAR
train_transforms = transforms.Compose([
transforms.RandomResizedCrop(size=args.img_size, scale=(0.8, 1.0), interpolation=interpolation),
transforms.RandomRotation(degrees=5, interpolation=interpolation),
transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.1),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
val_transforms = transforms.Compose([
transforms.Resize(size=args.img_size, interpolation=interpolation),
transforms.CenterCrop(size=args.img_size),
transforms.ToTensor(),
normalize,
])
print("Loading data")
if args.dataset == "openfire":
train_set = OpenFire(root=args.data_path, train=True, download=True,
transform=train_transforms)
val_set = OpenFire(root=args.data_path, train=False, download=True,
transform=val_transforms)
else:
train_dir = os.path.join(args.data_path, 'train')
val_dir = os.path.join(args.data_path, 'val')
train_set = ImageFolder(train_dir, train_transforms, target_transform=target_transform)
val_set = ImageFolder(val_dir, val_transforms, target_transform=target_transform)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, drop_last=True,
sampler=RandomSampler(train_set), num_workers=args.workers,
pin_memory=True)
if args.show_samples:
x, target = next(iter(train_loader))
plot_samples(x, target)
return
val_loader = torch.utils.data.DataLoader(val_set, batch_size=args.batch_size, drop_last=False,
sampler=SequentialSampler(val_set), num_workers=args.workers,
pin_memory=True)
print("Creating model")
model = pyrovision.models.__dict__[args.arch](args.pretrained, num_classes=1)
criterion = nn.BCEWithLogitsLoss()
# Create the contiguous parameters.
model_params = [p for p in model.parameters() if p.requires_grad]
if args.opt == 'sgd':
optimizer = torch.optim.SGD(model_params, args.lr, momentum=0.9, weight_decay=args.weight_decay)
elif args.opt == 'radam':
optimizer = torch.optim.RAdam(model_params, args.lr,
betas=(0.95, 0.99), eps=1e-6, weight_decay=args.weight_decay)
elif args.opt == 'adamp':
optimizer = AdamP(model_params, args.lr, betas=(0.95, 0.99), eps=1e-6, weight_decay=args.weight_decay)
log_wb = lambda metrics: wandb.log(metrics) if args.wb else None
trainer = BinaryClassificationTrainer(model, train_loader, val_loader, criterion, optimizer,
args.device, args.output_file, amp=args.amp, on_epoch_end=log_wb)
if args.resume:
print(f"Resuming {args.resume}")
checkpoint = torch.load(args.resume, map_location='cpu')
trainer.load(checkpoint)
if args.test_only:
print("Running evaluation")
eval_metrics = trainer.evaluate()
print(f"Validation loss: {eval_metrics['val_loss']:.4} "
f"(Acc@1: {eval_metrics['acc1']:.2%}, Acc@5: {eval_metrics['acc5']:.2%})")
return
if args.find_lr:
print("Looking for optimal LR")
trainer.find_lr(args.freeze_until, num_it=min(len(train_loader), 100), norm_weight_decay=args.norm_wd)
trainer.plot_recorder()
return
# Training monitoring
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
exp_name = f"{args.arch}-{current_time}" if args.name is None else args.name
# W&B
if args.wb:
run = wandb.init(
name=exp_name,
project="pyrovision-image-classification",
config={
"learning_rate": args.lr,
"scheduler": args.sched,
"weight_decay": args.weight_decay,
"epochs": args.epochs,
"batch_size": args.batch_size,
"architecture": args.arch,
"input_size": args.img_size,
"optimizer": args.opt,
"dataset": args.dataset,
"loss": "bce",
}
)
print("Start training")
start_time = time.time()
trainer.fit_n_epochs(args.epochs, args.lr, args.freeze_until, args.sched, norm_weight_decay=args.norm_wd)
total_time_str = str(datetime.timedelta(seconds=int(time.time() - start_time)))
print(f"Training time {total_time_str}")
if args.wb:
run.finish()
def parse_args():
    """Build and parse the command-line options for classification training.

    Returns:
        argparse.Namespace: parsed CLI options (data location, model/dataset
        choice, optimization, checkpointing and logging settings).
    """
    import argparse
    cli = argparse.ArgumentParser(description='Pyronear Classification Training',
                                  formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Data, model and experiment identification
    cli.add_argument('data_path', type=str, help='path to dataset folder')
    cli.add_argument('--name', type=str, default=None, help='Name of your training experiment')
    cli.add_argument('--arch', default='rexnet1_0x', type=str, help='model')
    cli.add_argument('--dataset', default='openfire', type=str, help='dataset to train on')
    cli.add_argument('--freeze-until', default=None, type=str, help='Last layer to freeze')
    cli.add_argument('--device', default=None, type=int, help='device')
    cli.add_argument('-b', '--batch-size', default=32, type=int, help='batch size')
    cli.add_argument('--epochs', default=20, type=int, help='number of total epochs to run')
    cli.add_argument('-j', '--workers', default=16, type=int, help='number of data loading workers')
    cli.add_argument('--img-size', default=224, type=int, help='image size')
    # Optimization
    cli.add_argument('--opt', default='adamp', type=str, help='optimizer')
    cli.add_argument('--sched', default='onecycle', type=str, help='Scheduler to be used')
    cli.add_argument('--lr', default=1e-3, type=float, help='initial learning rate')
    cli.add_argument('--wd', '--weight-decay', default=0, type=float, help='weight decay', dest='weight_decay')
    cli.add_argument('--norm-wd', default=None, type=float, help='weight decay of norm parameters')
    cli.add_argument("--find-lr", dest='find_lr', action='store_true', help="Should you run LR Finder")
    cli.add_argument("--show-samples", action='store_true', help="Whether training samples should be displayed")
    # Checkpointing / evaluation toggles
    cli.add_argument('--output-file', default='./model.pth', help='path where to save')
    cli.add_argument('--resume', default='', help='resume from checkpoint')
    cli.add_argument("--test-only", dest="test_only", help="Only test the model", action="store_true")
    cli.add_argument("--pretrained", dest="pretrained", help="Use pre-trained models from the modelzoo",
                     action="store_true")
    cli.add_argument("--amp", help="Use Automatic Mixed Precision", action="store_true")
    cli.add_argument('--wb', action='store_true', help='Log to Weights & Biases')
    return cli.parse_args()
# Script entry point: parse CLI options, then run training/evaluation.
if __name__ == "__main__":
    args = parse_args()
    main(args)
|
en
| 0.766253
|
# Copyright (C) 2019-2022, Pyronear. # This program is licensed under the Apache License version 2. # See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details. # Unnormalize image # Data loading code # Create the contiguous parameters. # Training monitoring # W&B
| 2.019078
| 2
|
04 - Drawing canvas, timers/exercises/drawing_works.py
|
PableraShow/python-exercises
| 8
|
6629680
|
<gh_stars>1-10
""" Modify the following program template to print "It works!" on the canvas. """
import simplegui
# Draw handler
def draw(canvas):
canvas.draw_text('It works!',[120, 112], 48, 'Red')
# Create frame and assign callbacks to event handlers
frame = simplegui.create_frame('It works', 400, 200)
frame.set_draw_handler(draw)
# Start the frame animation
frame.start()
|
""" Modify the following program template to print "It works!" on the canvas. """
import simplegui
# Draw handler
def draw(canvas):
canvas.draw_text('It works!',[120, 112], 48, 'Red')
# Create frame and assign callbacks to event handlers
frame = simplegui.create_frame('It works', 400, 200)
frame.set_draw_handler(draw)
# Start the frame animation
frame.start()
|
en
| 0.75531
|
Modify the following program template to print "It works!" on the canvas. # Draw handler # Create frame and assign callbacks to event handlers # Start the frame animation
| 3.711693
| 4
|
metadata_service/__init__.py
|
samshuster/amundsenmetadatalibrary
| 0
|
6629681
|
<reponame>samshuster/amundsenmetadatalibrary<gh_stars>0
import ast
import importlib
import logging
import logging.config
import os
import sys
from typing import Dict, Any # noqa: F401
from flasgger import Swagger
from flask import Flask, Blueprint
from flask_restful import Api
from metadata_service.api.column import ColumnDescriptionAPI
from metadata_service.api.healthcheck import healthcheck
from metadata_service.api.popular_tables import PopularTablesAPI
from metadata_service.api.system import Neo4jDetailAPI
from metadata_service.api.table \
import TableDetailAPI, TableOwnerAPI, TableTagAPI, TableDescriptionAPI
from metadata_service.api.tag import TagAPI
from metadata_service.api.user import (UserDetailAPI, UserFollowAPI,
UserFollowsAPI, UserOwnsAPI,
UserOwnAPI, UserReadsAPI)
# For customized flask use below arguments to override.
FLASK_APP_MODULE_NAME = os.getenv('FLASK_APP_MODULE_NAME')  # dotted module path holding a Flask subclass
FLASK_APP_CLASS_NAME = os.getenv('FLASK_APP_CLASS_NAME')  # class name inside that module
FLASK_APP_KWARGS_DICT_STR = os.getenv('FLASK_APP_KWARGS_DICT')  # Python-literal dict of Flask(...) kwargs
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))  # package root, used to locate the Swagger template
def create_app(*, config_module_class: str) -> Flask:
    """
    Creates app in function so that flask with flask extensions can be
    initialized with specific config. Here it defines the route of APIs
    so that it can be seen in one place where implementation is separated.
    Config is being fetched via module.class name where module.class name
    can be passed through environment variable.
    This is to make config fetched through runtime PYTHON_PATH so that
    Config class can be easily injected.
    More on: http://flask.pocoo.org/docs/1.0/config/
    :param config_module_class: name of the config (TODO: Implement config.py)
    :return: Flask
    """
    # Optionally instantiate a custom Flask subclass named via env vars.
    if FLASK_APP_MODULE_NAME and FLASK_APP_CLASS_NAME:
        print('Using requested Flask module {module_name} and class {class_name}'
              .format(module_name=FLASK_APP_MODULE_NAME, class_name=FLASK_APP_CLASS_NAME), file=sys.stderr)
        class_obj = getattr(importlib.import_module(FLASK_APP_MODULE_NAME), FLASK_APP_CLASS_NAME)
        flask_kwargs_dict = {}  # type: Dict[str, Any]
        if FLASK_APP_KWARGS_DICT_STR:
            print('Using kwargs {kwargs} to instantiate Flask'.format(kwargs=FLASK_APP_KWARGS_DICT_STR),
                  file=sys.stderr)
            # literal_eval accepts only Python literals, so arbitrary code in
            # the env var cannot be executed here.
            flask_kwargs_dict = ast.literal_eval(FLASK_APP_KWARGS_DICT_STR)
        app = class_obj(__name__, **flask_kwargs_dict)
    else:
        app = Flask(__name__)
    # The env var, when set, takes precedence over the keyword argument.
    config_module_class = \
        os.getenv('METADATA_SVC_CONFIG_MODULE_CLASS') or config_module_class
    app.config.from_object(config_module_class)
    # Logging: a file-based config wins over the basicConfig fallback.
    if app.config.get('LOG_CONFIG_FILE'):
        logging.config.fileConfig(app.config.get('LOG_CONFIG_FILE'), disable_existing_loggers=False)
    else:
        logging.basicConfig(format=app.config.get('LOG_FORMAT'), datefmt=app.config.get('LOG_DATE_FORMAT'))
        logging.getLogger().setLevel(app.config.get('LOG_LEVEL'))
    logging.info('Created app with config name {}'.format(config_module_class))
    logging.info('Using backend {}'.format(app.config.get('PROXY_CLIENT')))
    # All REST resources hang off a single 'api' blueprint; URL path
    # parameters map onto the handler method arguments.
    api_bp = Blueprint('api', __name__)
    api_bp.add_url_rule('/healthcheck', 'healthcheck', healthcheck)
    api = Api(api_bp)
    api.add_resource(PopularTablesAPI, '/popular_tables/')
    api.add_resource(TableDetailAPI, '/table/<path:table_uri>')
    api.add_resource(TableDescriptionAPI,
                     '/table/<path:table_uri>/description')
    api.add_resource(TableTagAPI,
                     '/table/<path:table_uri>/tag/<tag>')
    api.add_resource(TableOwnerAPI,
                     '/table/<path:table_uri>/owner/<owner>')
    api.add_resource(ColumnDescriptionAPI,
                     '/table/<path:table_uri>/column/<column_name>/description')
    api.add_resource(Neo4jDetailAPI,
                     '/latest_updated_ts')
    api.add_resource(TagAPI,
                     '/tags/')
    api.add_resource(UserDetailAPI,
                     '/user',
                     '/user/<path:id>')
    api.add_resource(UserFollowsAPI,
                     '/user/<path:user_id>/follow/')
    api.add_resource(UserFollowAPI,
                     '/user/<path:user_id>/follow/<resource_type>/<path:table_uri>')
    api.add_resource(UserOwnsAPI,
                     '/user/<path:user_id>/own/')
    api.add_resource(UserOwnAPI,
                     '/user/<path:user_id>/own/<resource_type>/<path:table_uri>')
    api.add_resource(UserReadsAPI,
                     '/user/<path:user_id>/read/')
    app.register_blueprint(api_bp)
    # Swagger UI is opt-in via config; template path is relative to ROOT_DIR.
    if app.config.get('SWAGGER_ENABLED'):
        Swagger(app, template_file=os.path.join(ROOT_DIR, app.config.get('SWAGGER_TEMPLATE_PATH')), parse=True)
    return app
|
import ast
import importlib
import logging
import logging.config
import os
import sys
from typing import Dict, Any # noqa: F401
from flasgger import Swagger
from flask import Flask, Blueprint
from flask_restful import Api
from metadata_service.api.column import ColumnDescriptionAPI
from metadata_service.api.healthcheck import healthcheck
from metadata_service.api.popular_tables import PopularTablesAPI
from metadata_service.api.system import Neo4jDetailAPI
from metadata_service.api.table \
import TableDetailAPI, TableOwnerAPI, TableTagAPI, TableDescriptionAPI
from metadata_service.api.tag import TagAPI
from metadata_service.api.user import (UserDetailAPI, UserFollowAPI,
UserFollowsAPI, UserOwnsAPI,
UserOwnAPI, UserReadsAPI)
# For customized flask use below arguments to override.
FLASK_APP_MODULE_NAME = os.getenv('FLASK_APP_MODULE_NAME')  # dotted module path holding a Flask subclass
FLASK_APP_CLASS_NAME = os.getenv('FLASK_APP_CLASS_NAME')  # class name inside that module
FLASK_APP_KWARGS_DICT_STR = os.getenv('FLASK_APP_KWARGS_DICT')  # Python-literal dict of Flask(...) kwargs
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))  # package root, used to locate the Swagger template
def create_app(*, config_module_class: str) -> Flask:
    """
    Creates app in function so that flask with flask extensions can be
    initialized with specific config. Here it defines the route of APIs
    so that it can be seen in one place where implementation is separated.
    Config is being fetched via module.class name where module.class name
    can be passed through environment variable.
    This is to make config fetched through runtime PYTHON_PATH so that
    Config class can be easily injected.
    More on: http://flask.pocoo.org/docs/1.0/config/
    :param config_module_class: name of the config (TODO: Implement config.py)
    :return: Flask
    """
    # Optionally instantiate a custom Flask subclass named via env vars.
    if FLASK_APP_MODULE_NAME and FLASK_APP_CLASS_NAME:
        print('Using requested Flask module {module_name} and class {class_name}'
              .format(module_name=FLASK_APP_MODULE_NAME, class_name=FLASK_APP_CLASS_NAME), file=sys.stderr)
        class_obj = getattr(importlib.import_module(FLASK_APP_MODULE_NAME), FLASK_APP_CLASS_NAME)
        flask_kwargs_dict = {}  # type: Dict[str, Any]
        if FLASK_APP_KWARGS_DICT_STR:
            print('Using kwargs {kwargs} to instantiate Flask'.format(kwargs=FLASK_APP_KWARGS_DICT_STR),
                  file=sys.stderr)
            # literal_eval accepts only Python literals, so arbitrary code in
            # the env var cannot be executed here.
            flask_kwargs_dict = ast.literal_eval(FLASK_APP_KWARGS_DICT_STR)
        app = class_obj(__name__, **flask_kwargs_dict)
    else:
        app = Flask(__name__)
    # The env var, when set, takes precedence over the keyword argument.
    config_module_class = \
        os.getenv('METADATA_SVC_CONFIG_MODULE_CLASS') or config_module_class
    app.config.from_object(config_module_class)
    # Logging: a file-based config wins over the basicConfig fallback.
    if app.config.get('LOG_CONFIG_FILE'):
        logging.config.fileConfig(app.config.get('LOG_CONFIG_FILE'), disable_existing_loggers=False)
    else:
        logging.basicConfig(format=app.config.get('LOG_FORMAT'), datefmt=app.config.get('LOG_DATE_FORMAT'))
        logging.getLogger().setLevel(app.config.get('LOG_LEVEL'))
    logging.info('Created app with config name {}'.format(config_module_class))
    logging.info('Using backend {}'.format(app.config.get('PROXY_CLIENT')))
    # All REST resources hang off a single 'api' blueprint; URL path
    # parameters map onto the handler method arguments.
    api_bp = Blueprint('api', __name__)
    api_bp.add_url_rule('/healthcheck', 'healthcheck', healthcheck)
    api = Api(api_bp)
    api.add_resource(PopularTablesAPI, '/popular_tables/')
    api.add_resource(TableDetailAPI, '/table/<path:table_uri>')
    api.add_resource(TableDescriptionAPI,
                     '/table/<path:table_uri>/description')
    api.add_resource(TableTagAPI,
                     '/table/<path:table_uri>/tag/<tag>')
    api.add_resource(TableOwnerAPI,
                     '/table/<path:table_uri>/owner/<owner>')
    api.add_resource(ColumnDescriptionAPI,
                     '/table/<path:table_uri>/column/<column_name>/description')
    api.add_resource(Neo4jDetailAPI,
                     '/latest_updated_ts')
    api.add_resource(TagAPI,
                     '/tags/')
    api.add_resource(UserDetailAPI,
                     '/user',
                     '/user/<path:id>')
    api.add_resource(UserFollowsAPI,
                     '/user/<path:user_id>/follow/')
    api.add_resource(UserFollowAPI,
                     '/user/<path:user_id>/follow/<resource_type>/<path:table_uri>')
    api.add_resource(UserOwnsAPI,
                     '/user/<path:user_id>/own/')
    api.add_resource(UserOwnAPI,
                     '/user/<path:user_id>/own/<resource_type>/<path:table_uri>')
    api.add_resource(UserReadsAPI,
                     '/user/<path:user_id>/read/')
    app.register_blueprint(api_bp)
    # Swagger UI is opt-in via config; template path is relative to ROOT_DIR.
    if app.config.get('SWAGGER_ENABLED'):
        Swagger(app, template_file=os.path.join(ROOT_DIR, app.config.get('SWAGGER_TEMPLATE_PATH')), parse=True)
    return app
|
en
| 0.70572
|
# noqa: F401 # For customized flask use below arguments to override. Creates app in function so that flask with flask extensions can be initialized with specific config. Here it defines the route of APIs so that it can be seen in one place where implementation is separated. Config is being fetched via module.class name where module.class name can be passed through environment variable. This is to make config fetched through runtime PYTHON_PATH so that Config class can be easily injected. More on: http://flask.pocoo.org/docs/1.0/config/ :param config_module_class: name of the config (TODO: Implement config.py) :return: Flask # type: Dict[str, Any]
| 2.203048
| 2
|
src/CIFAR10_Image_Recognizer.py
|
vikilabs/Image_Recognition_CNN_CIFAR10
| 1
|
6629682
|
<filename>src/CIFAR10_Image_Recognizer.py
'''
CIFAR Image Recognization with Convolutional Neural Network

Flat script: loads a previously trained CNN (or trains one from scratch on
CIFAR data) and then serves predictions through a simple UI callback.
'''
import sys
sys.path.append('../packages')
import VikiLabs_SimpleUI as UI
from VikiLabs_Logger import *
from VikiLabs_CIFAR_Wrapper import *
from CNN import *
from VikiLabs_Tensor_Wrapper import *

log = logger()
net = None

# Paths and training hyper-parameters
cifar_path = '../data'
image_path = '../images'
model_file = '../model/torch.pt'
training_batch_size = 64
#training_batch_size = 128
test_batch_size = 1000
learning_rate = 0.0001
momentum = 0.9  # only relevant to the commented-out SGD optimizer below

def ui_callback(file_name):
    '''UI hook: read the image at *file_name* as a tensor and return the
    global net's prediction for it.'''
    global net
    tensor = ReadCIFAR_ImageAsTensor(file_name)
    prediction = ClassifyImage(net, tensor)
    return prediction

# Fetch CIFAR data (downloads on first run) and wrap it in batch loaders.
training_object = Download_CIFAR_TrainingData(cifar_path)
test_object = Download_CIFAR_TestData(cifar_path)
training_data = Load_CIFAR_Data( training_object, training_batch_size )
test_data = Load_CIFAR_Data( test_object, test_batch_size )

''' Initialize CNN '''
# Best-effort load of a previously trained model; fall through to training
# from scratch when it is missing or unreadable.
try:
    net = LoadModel(model_file)
except Exception:
    # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; Exception keeps the best-effort behaviour while
    # letting those propagate.
    pass

if not net:
    # NOTE(review): log._er looks like an error prefix although a missing
    # model is an expected first-run condition -- confirm intent.
    print(log._er+ "LoadModel CNN Model")
    net = CNN()
    ''' Common loss function for classification
    '''
    loss_function = nn.CrossEntropyLoss()
    parameters = net.parameters()
    betas = (0.9, 0.999)
    ''' Create an Optimizer '''
    #optimizer = Optimizer_SGD(parameters, learning_rate, momentum)
    ''' ADAM Optimizer Learning is fast '''
    optimizer = Optimizer_ADAM(parameters, learning_rate, betas)
    train_cnn(net, training_data, training_batch_size, optimizer, loss_function)
    SaveModel(net, model_file)
else:
    print(log._if+ "LoadModel CNN Model")

UI.render(ui_callback)
|
<filename>src/CIFAR10_Image_Recognizer.py
'''
CIFAR Image Recognization with Convolutional Neural Network

Flat script: loads a previously trained CNN (or trains one from scratch on
CIFAR data) and then serves predictions through a simple UI callback.
'''
import sys
sys.path.append('../packages')
import VikiLabs_SimpleUI as UI
from VikiLabs_Logger import *
from VikiLabs_CIFAR_Wrapper import *
from CNN import *
from VikiLabs_Tensor_Wrapper import *

log = logger()
net = None

# Paths and training hyper-parameters
cifar_path = '../data'
image_path = '../images'
model_file = '../model/torch.pt'
training_batch_size = 64
#training_batch_size = 128
test_batch_size = 1000
learning_rate = 0.0001
momentum = 0.9  # only relevant to the commented-out SGD optimizer below

def ui_callback(file_name):
    '''UI hook: read the image at *file_name* as a tensor and return the
    global net's prediction for it.'''
    global net
    tensor = ReadCIFAR_ImageAsTensor(file_name)
    prediction = ClassifyImage(net, tensor)
    return prediction

# Fetch CIFAR data (downloads on first run) and wrap it in batch loaders.
training_object = Download_CIFAR_TrainingData(cifar_path)
test_object = Download_CIFAR_TestData(cifar_path)
training_data = Load_CIFAR_Data( training_object, training_batch_size )
test_data = Load_CIFAR_Data( test_object, test_batch_size )

''' Initialize CNN '''
# Best-effort load of a previously trained model; fall through to training
# from scratch when it is missing or unreadable.
try:
    net = LoadModel(model_file)
except Exception:
    # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; Exception keeps the best-effort behaviour while
    # letting those propagate.
    pass

if not net:
    # NOTE(review): log._er looks like an error prefix although a missing
    # model is an expected first-run condition -- confirm intent.
    print(log._er+ "LoadModel CNN Model")
    net = CNN()
    ''' Common loss function for classification
    '''
    loss_function = nn.CrossEntropyLoss()
    parameters = net.parameters()
    betas = (0.9, 0.999)
    ''' Create an Optimizer '''
    #optimizer = Optimizer_SGD(parameters, learning_rate, momentum)
    ''' ADAM Optimizer Learning is fast '''
    optimizer = Optimizer_ADAM(parameters, learning_rate, betas)
    train_cnn(net, training_data, training_batch_size, optimizer, loss_function)
    SaveModel(net, model_file)
else:
    print(log._if+ "LoadModel CNN Model")

UI.render(ui_callback)
|
en
| 0.674355
|
CIFAR Image Recognization with Convolutional Neural Network #training_batch_size = 128 Initialize CNN Common loss function for classification Create an Optimizer #optimizer = Optimizer_SGD(parameters, learning_rate, momentum) ADAM Optimizer Learning is fast
| 2.902751
| 3
|
mwptoolkit/trainer/supervised_trainer.py
|
monaanvari/MWPToolkit
| 0
|
6629683
|
from mwptoolkit.utils.utils import time_since, write_json_data
from mwptoolkit.utils.enum_type import TaskType, DatasetType, SpecialTokens
from mwptoolkit.trainer.template_trainer import TemplateTrainer
from mwptoolkit.trainer.abstract_trainer import AbstractTrainer
from ray import tune
import torch
from itertools import groupby
import math
import time
# -*- encoding: utf-8 -*-
# @Author: <NAME>
# @Time: 2021/08/29 22:14:01
# @File: supervised_trainer.py
class SupervisedTrainer(AbstractTrainer):
    """supervised trainer, used to implement training, testing, parameter searching in supervised learning.

    example of instantiation:

        >>> trainer = SupervisedTrainer(config, model, dataloader, evaluator)

        for training:

            >>> trainer.fit()

        for testing:

            >>> trainer.test()

        for parameter searching:

            >>> trainer.param_search()
    """

    def __init__(self, config, model, dataloader, evaluator):
        """
        Args:
            config (config): An instance object of Config, used to record parameter information.
            model (Model): An object of deep-learning model.
            dataloader (Dataloader): dataloader object.
            evaluator (Evaluator): evaluator object.

        expected that config includes these parameters below:

        learning_rate (float): learning rate of model

        train_batch_size (int): the training batch size.

        epoch_nums (int): number of epochs.

        trained_model_path (str): a path of file which is used to save parameters of best model.

        checkpoint_path (str): a path of file which is used save checkpoint of training progress.

        output_path (str|None): a path of a json file which is used to save test output infomation fo model.

        resume (bool): start training from last checkpoint.

        validset_divide (bool): whether to split validset. if True, the dataset is split to trainset-validset-testset. if False, the dataset is split to trainset-testset.

        test_step (int): the epoch number of training after which conducts the evaluation on test.

        best_folds_accuracy (list|None): when running k-fold cross validation, this keeps the accuracy of folds that already run.
        """
        super().__init__(config, model, dataloader, evaluator)
        self._build_optimizer()
        if config["resume"]:
            self._load_checkpoint()
        #self._build_loss(config["symbol_size"], self.dataloader.dataset.out_symbol2idx[SpecialTokens.PAD_TOKEN])

    def _build_optimizer(self):
        """Create a single Adam optimizer over all model parameters."""
        self.optimizer = torch.optim.Adam(
            self.model.parameters(), lr=self.config["learning_rate"])

    def _save_checkpoint(self):
        """Serialize model/optimizer state plus training progress counters to
        ``config["checkpoint_path"]``."""
        check_pnt = {
            "model": self.model.state_dict(),
            "optimizer": self.optimizer.state_dict(),
            "start_epoch": self.epoch_i,
            "best_valid_value_accuracy": self.best_valid_value_accuracy,
            "best_valid_equ_accuracy": self.best_valid_equ_accuracy,
            "best_test_value_accuracy": self.best_test_value_accuracy,
            "best_test_equ_accuracy": self.best_test_equ_accuracy,
            "best_folds_accuracy": self.best_folds_accuracy,
            "fold_t": self.config["fold_t"]
        }
        torch.save(check_pnt, self.config["checkpoint_path"])

    def _load_checkpoint(self):
        """Restore model/optimizer state and progress counters saved by
        :meth:`_save_checkpoint`."""
        #check_pnt = torch.load(self.config["checkpoint_path"],map_location="cpu")
        check_pnt = torch.load(
            self.config["checkpoint_path"], map_location=self.config["map_location"])
        # load parameter of model
        self.model.load_state_dict(check_pnt["model"])
        # load parameter of optimizer
        self.optimizer.load_state_dict(check_pnt["optimizer"])
        # other parameter
        self.start_epoch = check_pnt["start_epoch"]
        self.best_valid_value_accuracy = check_pnt["best_valid_value_accuracy"]
        self.best_valid_equ_accuracy = check_pnt["best_valid_equ_accuracy"]
        self.best_test_value_accuracy = check_pnt["best_test_value_accuracy"]
        self.best_test_equ_accuracy = check_pnt["best_test_equ_accuracy"]
        self.best_folds_accuracy = check_pnt["best_folds_accuracy"]

    def _idx2word_2idx(self, batch_equation):
        """Re-encode a batch of equations from *input* vocabulary ids to
        *output* symbol ids (via in_idx2word -> out_symbol2idx)."""
        batch_size, length = batch_equation.size()
        batch_equation_ = []
        for b in range(batch_size):
            equation = []
            for idx in range(length):
                equation.append(self.dataloader.dataset.out_symbol2idx[
                    self.dataloader.dataset.in_idx2word[
                        batch_equation[b, idx]]])
            batch_equation_.append(equation)
        batch_equation_ = torch.LongTensor(
            batch_equation_).to(self.config["device"])
        return batch_equation_

    def _train_batch(self, batch):
        """Run one training step; returns the batch loss value reported by the
        model."""
        batch_loss = self.model.calculate_loss(batch)
        return batch_loss

    def _eval_batch(self, batch):
        """Evaluate one batch: returns per-item value-accuracy and
        equation-accuracy flag lists, appends detailed results to
        ``self.output_result`` and prints debug info for failed items."""
        questions, test_out, target = self.model.model_test(batch)

        def polish_to_infix(equation):
            # Best-effort incremental conversion of a prefix (Polish) token
            # list into an infix string -- used for debug printing only.
            final_eq = []
            expressions = []
            operators = ['+', '-', '*', '/']
            for symbol in equation:
                expressions.append(symbol)
                while(len(expressions) >= 2):
                    n1 = expressions.pop()
                    n2 = expressions.pop()
                    if((n1 in operators) or (n2 in operators)):
                        expressions.append(n2)
                        expressions.append(n1)
                        break
                    op = expressions.pop()
                    feq = '(' + n2 + op + n1 + ')'
                    expressions.append(feq)
            print(expressions)  # leftover debug trace
            return expressions.pop()

        batch_size = len(test_out)
        val_acc = []
        equ_acc = []
        # Added by Shyamoli
        lst_questions = []
        for idx in range(batch_size):
            if self.config["task_type"] == TaskType.SingleEquation:
                val_ac, equ_ac, _, _ = self.evaluator.result(
                    test_out[idx], target[idx])
            elif self.config["task_type"] == TaskType.MultiEquation:
                val_ac, equ_ac, _, _ = self.evaluator.result_multi(
                    test_out[idx], target[idx])
            else:
                raise NotImplementedError
            val_acc.append(val_ac)
            equ_acc.append(equ_ac)
            # Keep lst_questions parallel to the batch so it can be indexed
            # by position below; the decoded text is only built for failures.
            question_in_words = ''
            if not val_ac:
                for g in questions[idx]:
                    question_in_words += (
                        self.dataloader.dataset.in_idx2word[g] + " ")
            lst_questions.append(question_in_words)
            result = {
                'id': batch['id'][idx],
                'prediction': ' '.join(test_out[idx]),
                'target': ' '.join(target[idx]),
                'number list': batch['num list'][idx],
                'value acc': val_ac,
                'equ acc': equ_ac
            }
            self.output_result.append(result)
        # BUG FIX: the original loop tested the loop-carried `val_ac` (i.e.
        # only the *last* item's flag) and indexed `lst_questions`, which held
        # entries only for failures -- producing wrong output or IndexError.
        # Use the per-item flags instead.
        for i in range(batch_size):
            if val_acc[i]:
                continue
            print("Question:", lst_questions[i])
            #print("Test output Polish", test_out[i])
            #print("Target Polish", target[i])
            print("Test output", polish_to_infix(test_out[i]))
            print("Target", polish_to_infix(target[i]))
            print("Val Acc", val_acc[i])
            print("Equation Acc", equ_acc[i])
            print('')
        print('')
        return val_acc, equ_acc

    def _train_epoch(self):
        """Train for one epoch; returns (accumulated loss, formatted time)."""
        epoch_start_time = time.time()
        loss_total = 0.
        self.model.train()
        for batch_idx, batch in enumerate(self.dataloader.load_data(DatasetType.Train)):
            self.batch_idx = batch_idx + 1
            self.model.zero_grad()
            # NOTE(review): no explicit backward() here -- assumes
            # model.calculate_loss performs the backward pass internally
            # (MWPToolkit convention); confirm for new models.
            batch_loss = self._train_batch(batch)
            loss_total += batch_loss
            self.optimizer.step()
        epoch_time_cost = time_since(time.time() - epoch_start_time)
        return loss_total, epoch_time_cost

    def fit(self):
        """train model.
        """
        train_batch_size = self.config["train_batch_size"]
        epoch_nums = self.config["epoch_nums"]
        self.train_batch_nums = int(
            self.dataloader.trainset_nums / train_batch_size) + 1
        self.logger.info("start training...")
        for epo in range(self.start_epoch, epoch_nums):
            self.epoch_i = epo + 1
            self.model.train()
            loss_total, train_time_cost = self._train_epoch()
            self.logger.info("epoch [%3d] avr loss [%2.8f] | train time %s"
                             % (self.epoch_i, loss_total/self.train_batch_nums, train_time_cost))
            # Evaluate periodically and always during the last few epochs.
            if epo % self.test_step == 0 or epo > epoch_nums - 5:
                if self.config["k_fold"] or self.config["validset_divide"] is not True:
                    # No separate validset: model selection uses the test set.
                    test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate(
                        DatasetType.Test)
                    self.logger.info("---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                                     % (test_total, test_equ_ac, test_val_ac, test_time_cost))
                    if test_val_ac >= self.best_test_value_accuracy:
                        self.best_test_value_accuracy = test_val_ac
                        self.best_test_equ_accuracy = test_equ_ac
                        self._save_model()
                        self._save_output()
                else:
                    # Model selection on validset; test metrics tracked alongside.
                    valid_equ_ac, valid_val_ac, valid_total, valid_time_cost = self.evaluate(
                        DatasetType.Valid)
                    self.logger.info("---------- valid total [%d] | valid equ acc [%2.3f] | valid value acc [%2.3f] | valid time %s"
                                     % (valid_total, valid_equ_ac, valid_val_ac, valid_time_cost))
                    test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate(
                        DatasetType.Test)
                    self.logger.info("---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                                     % (test_total, test_equ_ac, test_val_ac, test_time_cost))
                    if valid_val_ac >= self.best_valid_value_accuracy:
                        self.best_valid_value_accuracy = valid_val_ac
                        self.best_valid_equ_accuracy = valid_equ_ac
                        self.best_test_value_accuracy = test_val_ac
                        self.best_test_equ_accuracy = test_equ_ac
                        self._save_model()
                        self._save_output()
                self._save_checkpoint()
            if epo % 5 == 0:
                self._save_checkpoint()
        self.logger.info('''training finished.
                            best valid result: equation accuracy [%2.3f] | value accuracy [%2.3f]
                            best test result : equation accuracy [%2.3f] | value accuracy [%2.3f]'''
                         % (self.best_valid_equ_accuracy, self.best_valid_value_accuracy,
                            self.best_test_equ_accuracy, self.best_test_value_accuracy))

    def evaluate(self, eval_set):
        """evaluate model.

        Args:
            eval_set (str): [valid | test], the dataset for evaluation.

        Returns:
            tuple(float,float,int,str):
            equation accuracy, value accuracy, count of evaluated datas, formatted time string of evaluation time.
        """
        self.model.eval()
        value_ac = 0
        equation_ac = 0
        eval_total = 0
        self.output_result = []
        test_start_time = time.time()
        for batch in self.dataloader.load_data(eval_set):
            batch_val_ac, batch_equ_ac = self._eval_batch(batch)
            value_ac += batch_val_ac.count(True)
            equation_ac += batch_equ_ac.count(True)
            eval_total += len(batch_val_ac)
        test_time_cost = time_since(time.time() - test_start_time)
        return equation_ac / eval_total, value_ac / eval_total, eval_total, test_time_cost

    def test(self):
        """test model.
        """
        self._load_model()
        self.model.eval()
        value_ac = 0
        equation_ac = 0
        eval_total = 0
        self.output_result = []
        test_start_time = time.time()
        for batch in self.dataloader.load_data(DatasetType.Test):
            batch_val_ac, batch_equ_ac = self._eval_batch(batch)
            value_ac += batch_val_ac.count(True)
            equation_ac += batch_equ_ac.count(True)
            eval_total += len(batch_val_ac)
        self.best_test_equ_accuracy = equation_ac/eval_total
        self.best_test_value_accuracy = value_ac/eval_total
        test_time_cost = time_since(time.time() - test_start_time)
        self.logger.info("test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                         % (eval_total, equation_ac/eval_total, value_ac/eval_total, test_time_cost))
        self._save_output()

    def param_search(self):
        """hyper-parameter search.
        """
        train_batch_size = self.config["train_batch_size"]
        epoch_nums = self.config["epoch_nums"]
        self.train_batch_nums = int(
            self.dataloader.trainset_nums / train_batch_size) + 1
        self.logger.info("start training...")
        for epo in range(self.start_epoch, epoch_nums):
            self.epoch_i = epo + 1
            self.model.train()
            loss_total, train_time_cost = self._train_epoch()
            if epo % self.test_step == 0 or epo > epoch_nums - 5:
                test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate(
                    DatasetType.Test)
                # Report to Ray Tune for the search driver.
                tune.report(accuracy=test_val_ac)
class GTSTrainer(AbstractTrainer):
"""gts trainer, used to implement training, testing, parameter searching for deep-learning model GTS.
example of instantiation:
>>> trainer = GTSTrainer(config, model, dataloader, evaluator)
for training:
>>> trainer.fit()
for testing:
>>> trainer.test()
for parameter searching:
>>> trainer.param_search()
"""
    def __init__(self, config, model, dataloader, evaluator):
        """
        Args:
            config (config): An instance object of Config, used to record parameter information.
            model (Model): An object of deep-learning model.
            dataloader (Dataloader): dataloader object.
            evaluator (Evaluator): evaluator object.
        
        expected that config includes these parameters below:
        
        learning_rate (float): learning rate of model.
        
        embedding_learning_rate (float): learning rate of embedding module.
        
        train_batch_size (int): the training batch size.
        
        step_size (int): step_size of scheduler.
        
        epoch_nums (int): number of epochs.
        
        trained_model_path (str): a path of file which is used to save parameters of best model.
        
        checkpoint_path (str): a path of file which is used save checkpoint of training progress.
        
        output_path (str|None): a path of a json file which is used to save test output infomation fo model.
        
        resume (bool): start training from last checkpoint.
        
        validset_divide (bool): whether to split validset. if True, the dataset is split to trainset-validset-testset. if False, the dataset is split to trainset-testset.
        
        test_step (int): the epoch number of training after which conducts the evaluation on test.
        
        best_folds_accuracy (list|None): when running k-fold cross validation, this keeps the accuracy of folds that already run.
        """
        super().__init__(config, model, dataloader, evaluator)
        self._build_optimizer()
        # NOTE(review): unlike SupervisedTrainer.__init__, config["resume"]
        # is not checked here -- confirm checkpoint restoration happens
        # elsewhere for GTS.
    def _build_optimizer(self):
        """Create one Adam optimizer per sub-module (the embedder uses its own
        learning rate) and a StepLR scheduler for each optimizer."""
        # optimizer
        # self.encoder_optimizer = torch.optim.Adam(
        #     [
        #         {'params': self.model.embedder.parameters()}, \
        #         {'params': self.model.encoder.parameters()}
        #     ],
        #     self.config["learning_rate"]
        # )
        self.embedder_optimizer = torch.optim.Adam(self.model.embedder.parameters(
        ), self.config["embedding_learning_rate"], weight_decay=self.config["weight_decay"])
        self.encoder_optimizer = torch.optim.Adam(self.model.encoder.parameters(
        ), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
        self.decoder_optimizer = torch.optim.Adam(self.model.decoder.parameters(
        ), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
        self.node_generater_optimizer = torch.optim.Adam(self.model.node_generater.parameters(
        ), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
        self.merge_optimizer = torch.optim.Adam(self.model.merge.parameters(
        ), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
        # scheduler
        # Each StepLR halves (gamma=0.5) its optimizer's LR every `step_size` steps.
        self.embedder_scheduler = torch.optim.lr_scheduler.StepLR(
            self.embedder_optimizer, step_size=self.config["step_size"], gamma=0.5,)
        self.encoder_scheduler = torch.optim.lr_scheduler.StepLR(
            self.encoder_optimizer, step_size=self.config["step_size"], gamma=0.5)
        self.decoder_scheduler = torch.optim.lr_scheduler.StepLR(
            self.decoder_optimizer, step_size=self.config["step_size"], gamma=0.5)
        self.node_generater_scheduler = torch.optim.lr_scheduler.StepLR(
            self.node_generater_optimizer, step_size=self.config["step_size"], gamma=0.5)
        self.merge_scheduler = torch.optim.lr_scheduler.StepLR(
            self.merge_optimizer, step_size=self.config["step_size"], gamma=0.5)
    def _save_checkpoint(self):
        """Serialize model, all five optimizers/schedulers and training
        progress counters to ``config["checkpoint_path"]``."""
        check_pnt = {
            "model": self.model.state_dict(),
            "embedder_optimizer": self.embedder_optimizer.state_dict(),
            "encoder_optimizer": self.encoder_optimizer.state_dict(),
            "decoder_optimizer": self.decoder_optimizer.state_dict(),
            "generate_optimizer": self.node_generater_optimizer.state_dict(),
            "merge_optimizer": self.merge_optimizer.state_dict(),
            "embedder_scheduler": self.embedder_scheduler.state_dict(),
            "encoder_scheduler": self.encoder_scheduler.state_dict(),
            "decoder_scheduler": self.decoder_scheduler.state_dict(),
            "generate_scheduler": self.node_generater_scheduler.state_dict(),
            "merge_scheduler": self.merge_scheduler.state_dict(),
            "start_epoch": self.epoch_i,
            "best_valid_value_accuracy": self.best_valid_value_accuracy,
            "best_valid_equ_accuracy": self.best_valid_equ_accuracy,
            "best_test_value_accuracy": self.best_test_value_accuracy,
            "best_test_equ_accuracy": self.best_test_equ_accuracy,
            "best_folds_accuracy": self.best_folds_accuracy,
            "fold_t": self.config["fold_t"]
        }
        torch.save(check_pnt, self.config["checkpoint_path"])
    def _load_checkpoint(self):
        """Restore training state saved by :meth:`_save_checkpoint`.

        Loads model weights, all optimizer/scheduler states and the progress
        counters from ``config["checkpoint_path"]``, so ``fit()`` can resume
        from ``self.start_epoch``.
        """
        check_pnt = torch.load(
            self.config["checkpoint_path"], map_location=self.config["map_location"])
        # load parameter of model
        self.model.load_state_dict(check_pnt["model"])
        # load parameter of optimizer
        # (checkpoint key "generate_optimizer" corresponds to node_generater)
        self.embedder_optimizer.load_state_dict(
            check_pnt["embedder_optimizer"])
        self.encoder_optimizer.load_state_dict(check_pnt["encoder_optimizer"])
        self.decoder_optimizer.load_state_dict(check_pnt["decoder_optimizer"])
        self.node_generater_optimizer.load_state_dict(
            check_pnt["generate_optimizer"])
        self.merge_optimizer.load_state_dict(check_pnt["merge_optimizer"])
        # load parameter of scheduler
        self.embedder_scheduler.load_state_dict(
            check_pnt['embedder_scheduler'])
        self.encoder_scheduler.load_state_dict(check_pnt["encoder_scheduler"])
        self.decoder_scheduler.load_state_dict(check_pnt["decoder_scheduler"])
        self.node_generater_scheduler.load_state_dict(
            check_pnt["generate_scheduler"])
        self.merge_scheduler.load_state_dict(check_pnt["merge_scheduler"])
        # other parameter (training-progress bookkeeping)
        self.start_epoch = check_pnt["start_epoch"]
        self.best_valid_value_accuracy = check_pnt["best_valid_value_accuracy"]
        self.best_valid_equ_accuracy = check_pnt["best_valid_equ_accuracy"]
        self.best_test_value_accuracy = check_pnt["best_test_value_accuracy"]
        self.best_test_equ_accuracy = check_pnt["best_test_equ_accuracy"]
        self.best_folds_accuracy = check_pnt["best_folds_accuracy"]
def _scheduler_step(self):
self.embedder_scheduler.step()
self.encoder_scheduler.step()
self.decoder_scheduler.step()
self.node_generater_scheduler.step()
self.merge_scheduler.step()
def _optimizer_step(self):
self.embedder_optimizer.step()
self.encoder_optimizer.step()
self.decoder_optimizer.step()
self.node_generater_optimizer.step()
self.merge_optimizer.step()
def _train_batch(self, batch):
batch_loss = self.model.calculate_loss(batch)
return batch_loss
    def _eval_batch(self, batch):
        """Evaluate one batch and collect per-sample output records.

        Runs the model in test mode, scores each prediction with the
        evaluator (single- vs multi-equation depending on config), appends a
        result record to ``self.output_result``, and returns two parallel
        lists of booleans: value accuracy and equation accuracy per sample.
        """
        test_out, target = self.model.model_test(batch)
        batch_size = len(test_out)
        val_acc = []
        equ_acc = []
        for idx in range(batch_size):
            # Dispatch on task type; evaluator returns (value_ok, equ_ok, _, _).
            if self.config["task_type"] == TaskType.SingleEquation:
                val_ac, equ_ac, _, _ = self.evaluator.result(
                    test_out[idx], target[idx])
            elif self.config["task_type"] == TaskType.MultiEquation:
                val_ac, equ_ac, _, _ = self.evaluator.result_multi(
                    test_out[idx], target[idx])
            else:
                raise NotImplementedError
            val_acc.append(val_ac)
            equ_acc.append(equ_ac)
            # Per-sample record used later by _save_output().
            result = {
                'id': batch['id'][idx],
                'prediction': ' '.join(test_out[idx]),
                'target': ' '.join(target[idx]),
                'number list': batch['num list'][idx],
                'value acc': val_ac,
                'equ acc': equ_ac
            }
            self.output_result.append(result)
        return val_acc, equ_acc
def _train_epoch(self):
epoch_start_time = time.time()
loss_total = 0.
self.model.train()
for batch_idx, batch in enumerate(self.dataloader.load_data(DatasetType.Train)):
self.batch_idx = batch_idx + 1
self.model.zero_grad()
batch_loss = self._train_batch(batch)
loss_total += batch_loss
self._optimizer_step()
epoch_time_cost = time_since(time.time() - epoch_start_time)
return loss_total, epoch_time_cost
def fit(self):
"""train model.
"""
train_batch_size = self.config["train_batch_size"]
epoch_nums = self.config["epoch_nums"]
self.train_batch_nums = int(
self.dataloader.trainset_nums / train_batch_size) + 1
self.logger.info("start training...")
for epo in range(self.start_epoch, epoch_nums):
self.epoch_i = epo + 1
self.model.train()
loss_total, train_time_cost = self._train_epoch()
self._scheduler_step()
self.logger.info("epoch [%3d] avr loss [%2.8f] | train time %s"
% (self.epoch_i, loss_total/self.train_batch_nums, train_time_cost))
if epo % self.test_step == 0 or epo > epoch_nums - 5:
if self.config["k_fold"] or self.config["validset_divide"] is not True:
test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate(
DatasetType.Test)
self.logger.info("---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
% (test_total, test_equ_ac, test_val_ac, test_time_cost))
if test_val_ac >= self.best_test_value_accuracy:
self.best_test_value_accuracy = test_val_ac
self.best_test_equ_accuracy = test_equ_ac
self._save_model()
self._save_output()
else:
valid_equ_ac, valid_val_ac, valid_total, valid_time_cost = self.evaluate(
DatasetType.Valid)
self.logger.info("---------- valid total [%d] | valid equ acc [%2.3f] | valid value acc [%2.3f] | valid time %s"
% (valid_total, valid_equ_ac, valid_val_ac, valid_time_cost))
test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate(
DatasetType.Test)
self.logger.info("---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
% (test_total, test_equ_ac, test_val_ac, test_time_cost))
if valid_val_ac >= self.best_valid_value_accuracy:
self.best_valid_value_accuracy = valid_val_ac
self.best_valid_equ_accuracy = valid_equ_ac
self.best_test_value_accuracy = test_val_ac
self.best_test_equ_accuracy = test_equ_ac
self._save_model()
self._save_output()
if epo % 1 == 0:
self._save_checkpoint()
self.logger.info('''training finished.
best valid result: equation accuracy [%2.3f] | value accuracy [%2.3f]
best test result : equation accuracy [%2.3f] | value accuracy [%2.3f]'''
% (self.best_valid_equ_accuracy, self.best_valid_value_accuracy,
self.best_test_equ_accuracy, self.best_test_value_accuracy))
def evaluate(self, eval_set):
"""evaluate model.
Args:
eval_set (str): [valid | test], the dataset for evaluation.
Returns:
tuple(float,float,int,str):
equation accuracy, value accuracy, count of evaluated datas, formatted time string of evaluation time.
"""
self.model.eval()
value_ac = 0
equation_ac = 0
eval_total = 0
self.output_result = []
test_start_time = time.time()
for batch in self.dataloader.load_data(eval_set):
batch_val_ac, batch_equ_ac = self._eval_batch(batch)
value_ac += batch_val_ac.count(True)
equation_ac += batch_equ_ac.count(True)
eval_total += len(batch_val_ac)
pass
test_time_cost = time_since(time.time() - test_start_time)
return equation_ac / eval_total, value_ac / eval_total, eval_total, test_time_cost
def test(self):
"""test model.
"""
self._load_model()
self.model.eval()
value_ac = 0
equation_ac = 0
eval_total = 0
self.output_result = []
test_start_time = time.time()
for batch in self.dataloader.load_data(DatasetType.Test):
batch_val_ac, batch_equ_ac = self._eval_batch(batch)
value_ac += batch_val_ac.count(True)
equation_ac += batch_equ_ac.count(True)
eval_total += len(batch_val_ac)
self.best_test_equ_accuracy = equation_ac/eval_total
self.best_test_value_accuracy = value_ac/eval_total
test_time_cost = time_since(time.time() - test_start_time)
self.logger.info("test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
% (eval_total, equation_ac/eval_total, value_ac/eval_total, test_time_cost))
self._save_output()
def param_search(self):
"""hyper-parameter search.
"""
train_batch_size = self.config["train_batch_size"]
epoch_nums = self.config["epoch_nums"]
self.train_batch_nums = int(
self.dataloader.trainset_nums / train_batch_size) + 1
self.logger.info("start training...")
for epo in range(self.start_epoch, epoch_nums):
self.epoch_i = epo + 1
self.model.train()
loss_total, train_time_cost = self._train_epoch()
self._scheduler_step()
if epo % self.test_step == 0 or epo > epoch_nums - 5:
test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate(
DatasetType.Test)
tune.report(accuracy=test_val_ac)
class MultiEncDecTrainer(GTSTrainer):
    """multiencdec trainer, used to implement training, testing, parameter searching for deep-learning model MultiE&D.
    example of instantiation:
    >>> trainer = MultiEncDecTrainer(config, model, dataloader, evaluator)
    for training:
    >>> trainer.fit()
    for testing:
    >>> trainer.test()
    for parameter searching:
    >>> trainer.param_search()

    Fix over the original: ``_load_checkpoint`` restored a
    ``node_generater_scheduler`` attribute that this class never creates
    (it builds ``generate_scheduler``), so resuming from a checkpoint
    raised ``AttributeError``; it now restores ``generate_scheduler``.
    """
    def __init__(self, config, model, dataloader, evaluator):
        """
        Args:
            config (config): An instance object of Config, used to record parameter information.
            model (Model): An object of deep-learning model.
            dataloader (Dataloader): dataloader object.
            evaluator (Evaluator): evaluator object.
        expected that config includes these parameters below:
        learning_rate (float): learning rate of model.
        train_batch_size (int): the training batch size.
        step_size (int): step_size of scheduler.
        epoch_nums (int): number of epochs.
        trained_model_path (str): a path of file which is used to save parameters of best model.
        checkpoint_path (str): a path of file which is used save checkpoint of training progress.
        output_path (str|None): a path of a json file which is used to save test output information of model.
        resume (bool): start training from last checkpoint.
        validset_divide (bool): whether to split validset. if True, the dataset is split to trainset-validset-testset. if False, the dataset is split to trainset-testset.
        test_step (int): the epoch number of training after which conducts the evaluation on test.
        best_folds_accuracy (list|None): when running k-fold cross validation, this keeps the accuracy of folds that already run.
        """
        super().__init__(config, model, dataloader, evaluator)
    def _build_optimizer(self):
        """Build one Adam optimizer and one StepLR scheduler per sub-module.

        NOTE(review): unlike GTSTrainer there is no embedder optimizer here
        (the original deliberately commented it out) — presumably the
        encoder embeds internally; confirm against the MultiE&D model.
        """
        # optimizer
        self.encoder_optimizer = torch.optim.Adam(self.model.encoder.parameters(
        ), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
        self.numencoder_optimizer = torch.optim.Adam(self.model.numencoder.parameters(
        ), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
        self.predict_optimizer = torch.optim.Adam(self.model.predict.parameters(
        ), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
        self.decoder_optimizer = torch.optim.Adam(self.model.decoder.parameters(
        ), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
        self.generate_optimizer = torch.optim.Adam(self.model.generate.parameters(
        ), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
        self.merge_optimizer = torch.optim.Adam(self.model.merge.parameters(
        ), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
        # scheduler
        self.encoder_scheduler = torch.optim.lr_scheduler.StepLR(
            self.encoder_optimizer, step_size=self.config["step_size"], gamma=0.5)
        self.numencoder_scheduler = torch.optim.lr_scheduler.StepLR(
            self.numencoder_optimizer, step_size=self.config["step_size"], gamma=0.5)
        self.predict_scheduler = torch.optim.lr_scheduler.StepLR(
            self.predict_optimizer, step_size=self.config["step_size"], gamma=0.5)
        self.decoder_scheduler = torch.optim.lr_scheduler.StepLR(
            self.decoder_optimizer, step_size=self.config["step_size"], gamma=0.5)
        self.generate_scheduler = torch.optim.lr_scheduler.StepLR(
            self.generate_optimizer, step_size=self.config["step_size"], gamma=0.5)
        self.merge_scheduler = torch.optim.lr_scheduler.StepLR(
            self.merge_optimizer, step_size=self.config["step_size"], gamma=0.5)
    def _load_checkpoint(self):
        """Restore training state saved by :meth:`_save_checkpoint`."""
        check_pnt = torch.load(
            self.config["checkpoint_path"], map_location=self.config["map_location"])
        # load parameter of model
        self.model.load_state_dict(check_pnt["model"])
        # load parameter of optimizer
        self.numencoder_optimizer.load_state_dict(
            check_pnt["numencoder_optimizer"])
        self.encoder_optimizer.load_state_dict(check_pnt["encoder_optimizer"])
        self.predict_optimizer.load_state_dict(check_pnt['predict_optimizer'])
        self.decoder_optimizer.load_state_dict(check_pnt["decoder_optimizer"])
        self.generate_optimizer.load_state_dict(
            check_pnt["generate_optimizer"])
        self.merge_optimizer.load_state_dict(check_pnt["merge_optimizer"])
        # load parameter of scheduler
        self.encoder_scheduler.load_state_dict(check_pnt["encoder_scheduler"])
        self.numencoder_scheduler.load_state_dict(
            check_pnt["numencoder_scheduler"])
        self.predict_scheduler.load_state_dict(check_pnt['predict_scheduler'])
        self.decoder_scheduler.load_state_dict(check_pnt["decoder_scheduler"])
        # BUGFIX: was self.node_generater_scheduler, an attribute this class
        # never defines (GTSTrainer naming); this class builds generate_scheduler.
        self.generate_scheduler.load_state_dict(
            check_pnt["generate_scheduler"])
        self.merge_scheduler.load_state_dict(check_pnt["merge_scheduler"])
        # other parameter
        self.start_epoch = check_pnt["start_epoch"]
        self.best_valid_value_accuracy = check_pnt["best_valid_value_accuracy"]
        self.best_valid_equ_accuracy = check_pnt["best_valid_equ_accuracy"]
        self.best_test_value_accuracy = check_pnt["best_test_value_accuracy"]
        self.best_test_equ_accuracy = check_pnt["best_test_equ_accuracy"]
        self.best_folds_accuracy = check_pnt["best_folds_accuracy"]
    def _save_checkpoint(self):
        """Serialize model, optimizers, schedulers and progress counters."""
        check_pnt = {
            "model": self.model.state_dict(),
            "encoder_optimizer": self.encoder_optimizer.state_dict(),
            "numencoder_optimizer": self.numencoder_optimizer.state_dict(),
            "predict_optimizer": self.predict_optimizer.state_dict(),
            "decoder_optimizer": self.decoder_optimizer.state_dict(),
            "generate_optimizer": self.generate_optimizer.state_dict(),
            "merge_optimizer": self.merge_optimizer.state_dict(),
            "encoder_scheduler": self.encoder_scheduler.state_dict(),
            "numencoder_scheduler": self.numencoder_scheduler.state_dict(),
            "predict_scheduler": self.predict_scheduler.state_dict(),
            "decoder_scheduler": self.decoder_scheduler.state_dict(),
            "generate_scheduler": self.generate_scheduler.state_dict(),
            "merge_scheduler": self.merge_scheduler.state_dict(),
            "start_epoch": self.epoch_i,
            "best_valid_value_accuracy": self.best_valid_value_accuracy,
            "best_valid_equ_accuracy": self.best_valid_equ_accuracy,
            "best_test_value_accuracy": self.best_test_value_accuracy,
            "best_test_equ_accuracy": self.best_test_equ_accuracy,
            "best_folds_accuracy": self.best_folds_accuracy,
            "fold_t": self.config["fold_t"]
        }
        torch.save(check_pnt, self.config["checkpoint_path"])
    def _scheduler_step(self):
        """Advance every learning-rate scheduler by one epoch."""
        self.encoder_scheduler.step()
        self.numencoder_scheduler.step()
        self.predict_scheduler.step()
        self.decoder_scheduler.step()
        self.generate_scheduler.step()
        self.merge_scheduler.step()
    def _optimizer_step(self):
        """Apply one optimization step on every sub-module optimizer."""
        self.encoder_optimizer.step()
        self.numencoder_optimizer.step()
        self.predict_optimizer.step()
        self.decoder_optimizer.step()
        self.generate_optimizer.step()
        self.merge_optimizer.step()
    def _train_batch(self, batch):
        """Return the training loss of one batch, delegated to the model."""
        batch_loss = self.model.calculate_loss(batch)
        return batch_loss
    def _eval_batch(self, batch):
        """Evaluate one batch; MultiE&D reports which decoder produced the
        output ('tree' or 'attn') and scoring dispatches on that plus the
        task type. Per-sample records are appended to ``self.output_result``.
        """
        out_type, test_out, target = self.model.model_test(batch)
        batch_size = len(test_out)
        val_acc = []
        equ_acc = []
        for idx in range(batch_size):
            # tree decoder emits prefix notation, attn decoder postfix.
            if self.config["task_type"] == TaskType.SingleEquation and out_type == 'tree':
                val_ac, equ_ac, _, _ = self.evaluator.prefix_result(
                    test_out[idx], target[idx])
            elif self.config["task_type"] == TaskType.SingleEquation and out_type == 'attn':
                val_ac, equ_ac, _, _ = self.evaluator.postfix_result(
                    test_out[idx], target[idx])
            elif self.config["task_type"] == TaskType.MultiEquation and out_type == 'tree':
                val_ac, equ_ac, _, _ = self.evaluator.prefix_result_multi(
                    test_out[idx], target[idx])
            elif self.config["task_type"] == TaskType.MultiEquation and out_type == 'attn':
                val_ac, equ_ac, _, _ = self.evaluator.postfix_result_multi(
                    test_out[idx], target[idx])
            else:
                raise NotImplementedError
            val_acc.append(val_ac)
            equ_acc.append(equ_ac)
            result = {
                'id': batch['id'][idx],
                'prediction': ' '.join(test_out[idx]),
                'target': ' '.join(target[idx]),
                'decoder': out_type,
                'number list': batch['num list'][idx],
                'value acc': val_ac,
                'equ acc': equ_ac
            }
            self.output_result.append(result)
        return val_acc, equ_acc
class Graph2TreeTrainer(GTSTrainer):
    """Trainer for the Graph2Tree model.

    Graph2Tree shares the entire training/evaluation pipeline with GTS, so
    this subclass only provides the model its own entry point.

    Typical usage:
        >>> trainer = Graph2TreeTrainer(config, model, dataloader, evaluator)
        >>> trainer.fit()           # training
        >>> trainer.test()          # testing
        >>> trainer.param_search()  # hyper-parameter search
    """
    def __init__(self, config, model, dataloader, evaluator):
        """Delegate construction entirely to :class:`GTSTrainer`.

        Args:
            config (Config): records run parameters. Expected keys include
                learning_rate, embedding_learning_rate, train_batch_size,
                step_size, epoch_nums, trained_model_path, checkpoint_path,
                output_path (str|None), resume (bool), validset_divide (bool),
                test_step (int) and best_folds_accuracy (list|None).
            model (Model): deep-learning model object.
            dataloader (Dataloader): dataloader object.
            evaluator (Evaluator): evaluator object.
        """
        super().__init__(config, model, dataloader, evaluator)
class TreeLSTMTrainer(AbstractTrainer):
    """treelstm trainer, used to implement training, testing, parameter searching for deep-learning model TreeLSTM.
    example of instantiation:
    >>> trainer = TreeLSTMTrainer(config, model, dataloader, evaluator)
    for training:
    >>> trainer.fit()
    for testing:
    >>> trainer.test()
    for parameter searching:
    >>> trainer.param_search()
    """
    def __init__(self, config, model, dataloader, evaluator):
        """
        Args:
            config (config): An instance object of Config, used to record parameter information.
            model (Model): An object of deep-learning model.
            dataloader (Dataloader): dataloader object.
            evaluator (Evaluator): evaluator object.
        expected that config includes these parameters below:
        learning_rate (float): learning rate of model.
        train_batch_size (int): the training batch size.
        step_size (int): step_size of scheduler.
        epoch_nums (int): number of epochs.
        trained_model_path (str): a path of file which is used to save parameters of best model.
        checkpoint_path (str): a path of file which is used save checkpoint of training progress.
        output_path (str|None): a path of a json file which is used to save test output information of model.
        resume (bool): start training from last checkpoint.
        validset_divide (bool): whether to split validset. if True, the dataset is split to trainset-validset-testset. if False, the dataset is split to trainset-testset.
        test_step (int): the epoch number of training after which conducts the evaluation on test.
        best_folds_accuracy (list|None): when running k-fold cross validation, this keeps the accuracy of folds that already run.
        """
        super().__init__(config, model, dataloader, evaluator)
        self._build_optimizer()
    def _build_optimizer(self):
        """Create one Adam optimizer and one StepLR scheduler (gamma=0.5)
        for each trainable sub-module: embedder, encoder, decoder and
        node_generater (no merge module in TreeLSTM).
        """
        # optimizer
        self.embedder_optimizer = torch.optim.Adam(self.model.embedder.parameters(
        ), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
        self.encoder_optimizer = torch.optim.Adam(self.model.encoder.parameters(
        ), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
        self.decoder_optimizer = torch.optim.Adam(self.model.decoder.parameters(
        ), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
        self.node_generater_optimizer = torch.optim.Adam(self.model.node_generater.parameters(
        ), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
        # scheduler
        self.embedder_scheduler = torch.optim.lr_scheduler.StepLR(
            self.embedder_optimizer, step_size=self.config["step_size"], gamma=0.5)
        self.encoder_scheduler = torch.optim.lr_scheduler.StepLR(
            self.encoder_optimizer, step_size=self.config["step_size"], gamma=0.5)
        self.decoder_scheduler = torch.optim.lr_scheduler.StepLR(
            self.decoder_optimizer, step_size=self.config["step_size"], gamma=0.5)
        self.node_generater_scheduler = torch.optim.lr_scheduler.StepLR(
            self.node_generater_optimizer, step_size=self.config["step_size"], gamma=0.5)
    def _save_checkpoint(self):
        """Serialize the resumable training state (model, optimizers,
        schedulers, progress counters) to ``config["checkpoint_path"]``.
        """
        check_pnt = {
            "model": self.model.state_dict(),
            "embedder_optimizer": self.embedder_optimizer.state_dict(),
            "encoder_optimizer": self.encoder_optimizer.state_dict(),
            "decoder_optimizer": self.decoder_optimizer.state_dict(),
            "generate_optimizer": self.node_generater_optimizer.state_dict(),
            "embedder_scheduler": self.embedder_scheduler.state_dict(),
            "encoder_scheduler": self.encoder_scheduler.state_dict(),
            "decoder_scheduler": self.decoder_scheduler.state_dict(),
            "generate_scheduler": self.node_generater_scheduler.state_dict(),
            "start_epoch": self.epoch_i,
            "best_valid_value_accuracy": self.best_valid_value_accuracy,
            "best_valid_equ_accuracy": self.best_valid_equ_accuracy,
            "best_test_value_accuracy": self.best_test_value_accuracy,
            "best_test_equ_accuracy": self.best_test_equ_accuracy,
            "best_folds_accuracy": self.best_folds_accuracy,
            "fold_t": self.config["fold_t"]
        }
        torch.save(check_pnt, self.config["checkpoint_path"])
    def _load_checkpoint(self):
        """Restore training state saved by :meth:`_save_checkpoint`."""
        check_pnt = torch.load(
            self.config["checkpoint_path"], map_location=self.config["map_location"])
        # load parameter of model
        self.model.load_state_dict(check_pnt["model"])
        # load parameter of optimizer
        self.embedder_optimizer.load_state_dict(
            check_pnt["embedder_optimizer"])
        self.encoder_optimizer.load_state_dict(check_pnt["encoder_optimizer"])
        self.decoder_optimizer.load_state_dict(check_pnt["decoder_optimizer"])
        self.node_generater_optimizer.load_state_dict(
            check_pnt["generate_optimizer"])
        # load parameter of scheduler
        self.embedder_scheduler.load_state_dict(
            check_pnt["embedder_scheduler"])
        self.encoder_scheduler.load_state_dict(check_pnt["encoder_scheduler"])
        self.decoder_scheduler.load_state_dict(check_pnt["decoder_scheduler"])
        self.node_generater_scheduler.load_state_dict(
            check_pnt["generate_scheduler"])
        # other parameter (training-progress bookkeeping)
        self.start_epoch = check_pnt["start_epoch"]
        self.best_valid_value_accuracy = check_pnt["best_valid_value_accuracy"]
        self.best_valid_equ_accuracy = check_pnt["best_valid_equ_accuracy"]
        self.best_test_value_accuracy = check_pnt["best_test_value_accuracy"]
        self.best_test_equ_accuracy = check_pnt["best_test_equ_accuracy"]
        self.best_folds_accuracy = check_pnt["best_folds_accuracy"]
    def _scheduler_step(self):
        """Advance every learning-rate scheduler by one epoch."""
        self.embedder_scheduler.step()
        self.encoder_scheduler.step()
        self.decoder_scheduler.step()
        self.node_generater_scheduler.step()
    def _optimizer_step(self):
        """Apply one optimization step on every sub-module optimizer."""
        self.embedder_optimizer.step()
        self.encoder_optimizer.step()
        self.decoder_optimizer.step()
        self.node_generater_optimizer.step()
    def _train_batch(self, batch):
        """Return the training loss of one batch, delegated to the model."""
        batch_loss = self.model.calculate_loss(batch)
        return batch_loss
    def _eval_batch(self, batch):
        """Evaluate one batch; appends per-sample records to
        ``self.output_result`` and returns parallel lists of per-sample
        value-accuracy and equation-accuracy booleans.
        """
        test_out, target = self.model.model_test(batch)
        batch_size = len(test_out)
        val_acc = []
        equ_acc = []
        for idx in range(batch_size):
            # Dispatch on task type; evaluator returns (value_ok, equ_ok, _, _).
            if self.config["task_type"] == TaskType.SingleEquation:
                val_ac, equ_ac, _, _ = self.evaluator.result(
                    test_out[idx], target[idx])
            elif self.config["task_type"] == TaskType.MultiEquation:
                val_ac, equ_ac, _, _ = self.evaluator.result_multi(
                    test_out[idx], target[idx])
            else:
                raise NotImplementedError
            val_acc.append(val_ac)
            equ_acc.append(equ_ac)
            result = {
                'id': batch['id'][idx],
                'prediction': ' '.join(test_out[idx]),
                'target': ' '.join(target[idx]),
                'number list': batch['num list'][idx],
                'value acc': val_ac,
                'equ acc': equ_ac
            }
            self.output_result.append(result)
        return val_acc, equ_acc
    def _train_epoch(self):
        """Run one pass over the training set; returns (total loss, time string)."""
        epoch_start_time = time.time()
        loss_total = 0.
        self.model.train()
        for batch_idx, batch in enumerate(self.dataloader.load_data(DatasetType.Train)):
            self.batch_idx = batch_idx + 1
            self.model.zero_grad()
            batch_loss = self._train_batch(batch)
            loss_total += batch_loss
            self._optimizer_step()
        epoch_time_cost = time_since(time.time() - epoch_start_time)
        return loss_total, epoch_time_cost
    def fit(self):
        """train model.

        Runs the configured number of epochs; every ``test_step`` epochs (and
        during the final 5) evaluates on test — and valid when available —
        saving the best model. A checkpoint is written every 5 epochs.
        """
        train_batch_size = self.config["train_batch_size"]
        epoch_nums = self.config["epoch_nums"]
        self.train_batch_nums = int(
            self.dataloader.trainset_nums / train_batch_size) + 1
        self.logger.info("start training...")
        for epo in range(self.start_epoch, epoch_nums):
            self.epoch_i = epo + 1
            self.model.train()
            loss_total, train_time_cost = self._train_epoch()
            self._scheduler_step()
            self.logger.info("epoch [%3d] avr loss [%2.8f] | train time %s"
                             % (self.epoch_i, loss_total/self.train_batch_nums, train_time_cost))
            if epo % self.test_step == 0 or epo > epoch_nums - 5:
                # No valid set during k-fold CV or when validset_divide is off.
                if self.config["k_fold"] or self.config["validset_divide"] is not True:
                    test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate(
                        DatasetType.Test)
                    self.logger.info("---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                                     % (test_total, test_equ_ac, test_val_ac, test_time_cost))
                    if test_val_ac >= self.best_test_value_accuracy:
                        self.best_test_value_accuracy = test_val_ac
                        self.best_test_equ_accuracy = test_equ_ac
                        self._save_model()
                        self._save_output()
                else:
                    # Model selection on valid accuracy; test accuracy of the
                    # selected model is recorded alongside.
                    valid_equ_ac, valid_val_ac, valid_total, valid_time_cost = self.evaluate(
                        DatasetType.Valid)
                    self.logger.info("---------- valid total [%d] | valid equ acc [%2.3f] | valid value acc [%2.3f] | valid time %s"
                                     % (valid_total, valid_equ_ac, valid_val_ac, valid_time_cost))
                    test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate(
                        DatasetType.Test)
                    self.logger.info("---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                                     % (test_total, test_equ_ac, test_val_ac, test_time_cost))
                    if valid_val_ac >= self.best_valid_value_accuracy:
                        self.best_valid_value_accuracy = valid_val_ac
                        self.best_valid_equ_accuracy = valid_equ_ac
                        self.best_test_value_accuracy = test_val_ac
                        self.best_test_equ_accuracy = test_equ_ac
                        self._save_model()
                        self._save_output()
            if epo % 5 == 0:
                self._save_checkpoint()
        self.logger.info('''training finished.
best valid result: equation accuracy [%2.3f] | value accuracy [%2.3f]
best test result : equation accuracy [%2.3f] | value accuracy [%2.3f]'''
                         % (self.best_valid_equ_accuracy, self.best_valid_value_accuracy,
                            self.best_test_equ_accuracy, self.best_test_value_accuracy))
    def evaluate(self, eval_set):
        """evaluate model.
        Args:
            eval_set (str): [valid | test], the dataset for evaluation.
        Returns:
            tuple(float,float,int,str):
                equation accuracy, value accuracy, count of evaluated datas, formatted time string of evaluation time.
        """
        self.model.eval()
        value_ac = 0
        equation_ac = 0
        eval_total = 0
        # _eval_batch appends per-sample records here for _save_output().
        self.output_result = []
        test_start_time = time.time()
        for batch in self.dataloader.load_data(eval_set):
            batch_val_ac, batch_equ_ac = self._eval_batch(batch)
            value_ac += batch_val_ac.count(True)
            equation_ac += batch_equ_ac.count(True)
            eval_total += len(batch_val_ac)
        test_time_cost = time_since(time.time() - test_start_time)
        return equation_ac / eval_total, value_ac / eval_total, eval_total, test_time_cost
    def test(self):
        """test model.

        Loads the best saved model, scores the test set, records the results
        as the best test accuracies, logs and saves per-sample output.
        """
        self._load_model()
        self.model.eval()
        value_ac = 0
        equation_ac = 0
        eval_total = 0
        self.output_result = []
        test_start_time = time.time()
        for batch in self.dataloader.load_data(DatasetType.Test):
            batch_val_ac, batch_equ_ac = self._eval_batch(batch)
            value_ac += batch_val_ac.count(True)
            equation_ac += batch_equ_ac.count(True)
            eval_total += len(batch_val_ac)
        self.best_test_equ_accuracy = equation_ac/eval_total
        self.best_test_value_accuracy = value_ac/eval_total
        test_time_cost = time_since(time.time() - test_start_time)
        self.logger.info("test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                         % (eval_total, equation_ac/eval_total, value_ac/eval_total, test_time_cost))
        self._save_output()
    def param_search(self):
        """Hyper-parameter search: run normal training and report the test
        value accuracy to ``tune`` at each evaluation point.
        """
        train_batch_size = self.config["train_batch_size"]
        epoch_nums = self.config["epoch_nums"]
        self.train_batch_nums = int(
            self.dataloader.trainset_nums / train_batch_size) + 1
        self.logger.info("start training...")
        for epo in range(self.start_epoch, epoch_nums):
            self.epoch_i = epo + 1
            self.model.train()
            loss_total, train_time_cost = self._train_epoch()
            self._scheduler_step()
            if epo % self.test_step == 0 or epo > epoch_nums - 5:
                test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate(
                    DatasetType.Test)
                tune.report(accuracy=test_val_ac)
class SAUSolverTrainer(GTSTrainer):
"""sausolver trainer, used to implement training, testing, parameter searching for deep-learning model SAUSolver.
example of instantiation:
>>> trainer = SAUSolverTrainer(config, model, dataloader, evaluator)
for training:
>>> trainer.fit()
for testing:
>>> trainer.test()
for parameter searching:
>>> trainer.param_search()
"""
    def __init__(self, config, model, dataloader, evaluator):
        """
        Args:
            config (config): An instance object of Config, used to record parameter information.
            model (Model): An object of deep-learning model.
            dataloader (Dataloader): dataloader object.
            evaluator (Evaluator): evaluator object.
        expected that config includes these parameters below:
        learning_rate (float): learning rate of model.
        train_batch_size (int): the training batch size.
        step_size (int): step_size of scheduler.
        epoch_nums (int): number of epochs.
        trained_model_path (str): a path of file which is used to save parameters of best model.
        checkpoint_path (str): a path of file which is used save checkpoint of training progress.
        output_path (str|None): a path of a json file which is used to save test output information of model.
        resume (bool): start training from last checkpoint.
        validset_divide (bool): whether to split validset. if True, the dataset is split to trainset-validset-testset. if False, the dataset is split to trainset-testset.
        test_step (int): the epoch number of training after which conducts the evaluation on test.
        best_folds_accuracy (list|None): when running k-fold cross validation, this keeps the accuracy of folds that already run.
        """
        super().__init__(config, model, dataloader, evaluator)
def _train_batch(self, batch):
batch_loss = self.model.calculate_loss(batch)
return batch_loss
    def _eval_batch(self, batch):
        """Evaluate one batch and collect per-sample output records.

        Scores each prediction with the evaluator (single- vs multi-equation
        depending on config), appends a result record to
        ``self.output_result``, and returns parallel lists of per-sample
        value-accuracy and equation-accuracy booleans.
        """
        test_out, target = self.model.model_test(batch)
        batch_size = len(test_out)
        val_acc = []
        equ_acc = []
        for idx in range(batch_size):
            # Ground-truth answers live in batch['ans'][idx] (e.g. a list of
            # numbers); scoring itself is delegated to the evaluator.
            if self.config["task_type"] == TaskType.SingleEquation:
                val_ac, equ_ac, _, _ = self.evaluator.result(
                    test_out[idx], target[idx])
            elif self.config["task_type"] == TaskType.MultiEquation:
                val_ac, equ_ac, _, _ = self.evaluator.result_multi(
                    test_out[idx], target[idx])
            else:
                raise NotImplementedError
            val_acc.append(val_ac)
            equ_acc.append(equ_ac)
            # Per-sample record used later by _save_output().
            result = {
                'id': batch['id'][idx],
                'prediction': ' '.join(test_out[idx]),
                'target': ' '.join(target[idx]),
                'number list': batch['num list'][idx],
                'value acc': val_ac,
                'equ acc': equ_ac
            }
            self.output_result.append(result)
        return val_acc, equ_acc
def _build_optimizer(self):
self.embedder_optimizer = torch.optim.Adam(self.model.embedder.parameters(),
self.config["embedding_learning_rate"],
weight_decay=self.config["weight_decay"])
self.encoder_optimizer = torch.optim.Adam(self.model.encoder.parameters(), self.config["learning_rate"],
weight_decay=self.config["weight_decay"])
self.decoder_optimizer = torch.optim.Adam(self.model.decoder.parameters(), self.config["learning_rate"],
weight_decay=self.config["weight_decay"])
self.node_generater_optimizer = torch.optim.Adam(self.model.node_generater.parameters(),
self.config["learning_rate"],
weight_decay=self.config["weight_decay"])
self.merge_optimizer = torch.optim.Adam(self.model.merge.parameters(), self.config["learning_rate"],
weight_decay=self.config["weight_decay"])
self.sa_optimizer = torch.optim.Adam(self.model.sa.parameters(), self.config['learning_rate'],
weight_decay=self.config["weight_decay"])
# scheduler
self.embedder_scheduler = torch.optim.lr_scheduler.StepLR(self.embedder_optimizer,
step_size=self.config["step_size"], gamma=0.5, )
self.encoder_scheduler = torch.optim.lr_scheduler.StepLR(self.encoder_optimizer,
step_size=self.config["step_size"], gamma=0.5)
self.decoder_scheduler = torch.optim.lr_scheduler.StepLR(self.decoder_optimizer,
step_size=self.config["step_size"], gamma=0.5)
self.node_generater_scheduler = torch.optim.lr_scheduler.StepLR(self.node_generater_optimizer,
step_size=self.config["step_size"], gamma=0.5)
self.merge_scheduler = torch.optim.lr_scheduler.StepLR(self.merge_optimizer, step_size=self.config["step_size"],
gamma=0.5)
self.sa_scheduler = torch.optim.lr_scheduler.StepLR(self.sa_optimizer, step_size=self.config['step_size'],
gamma=0.5)
def _save_checkpoint(self):
check_pnt = {
"model": self.model.state_dict(),
"embedder_optimizer": self.embedder_optimizer.state_dict(),
"encoder_optimizer": self.encoder_optimizer.state_dict(),
"decoder_optimizer": self.decoder_optimizer.state_dict(),
"generate_optimizer": self.node_generater_optimizer.state_dict(),
"merge_optimizer": self.merge_optimizer.state_dict(),
"sa_optimizer": self.sa_optimizer.state_dict(),
"embedder_scheduler": self.embedder_scheduler.state_dict(),
"encoder_scheduler": self.encoder_scheduler.state_dict(),
"decoder_scheduler": self.decoder_scheduler.state_dict(),
"generate_scheduler": self.node_generater_scheduler.state_dict(),
"merge_scheduler": self.merge_scheduler.state_dict(),
"sa_scheduler": self.sa_scheduler.state_dict(),
"start_epoch": self.epoch_i,
"best_valid_value_accuracy": self.best_valid_value_accuracy,
"best_valid_equ_accuracy": self.best_valid_equ_accuracy,
"best_test_value_accuracy": self.best_test_value_accuracy,
"best_test_equ_accuracy": self.best_test_equ_accuracy,
"best_folds_accuracy": self.best_folds_accuracy,
"fold_t": self.config["fold_t"]
}
torch.save(check_pnt, self.config["checkpoint_path"])
def _load_checkpoint(self):
check_pnt = torch.load(
self.config["checkpoint_path"], map_location=self.config["map_location"])
# load parameter of model
self.model.load_state_dict(check_pnt["model"])
# load parameter of optimizer
self.embedder_optimizer.load_state_dict(
check_pnt["embedder_optimizer"])
self.encoder_optimizer.load_state_dict(check_pnt["encoder_optimizer"])
self.decoder_optimizer.load_state_dict(check_pnt["decoder_optimizer"])
self.node_generater_optimizer.load_state_dict(
check_pnt["generate_optimizer"])
self.merge_optimizer.load_state_dict(check_pnt["merge_optimizer"])
self.sa_optimizer.load_state_dict(check_pnt["sa_optimizer"])
# load parameter of scheduler
self.embedder_scheduler.load_state_dict(
check_pnt['embedder_scheduler'])
self.encoder_scheduler.load_state_dict(check_pnt["encoder_scheduler"])
self.decoder_scheduler.load_state_dict(check_pnt["decoder_scheduler"])
self.node_generater_scheduler.load_state_dict(
check_pnt["generate_scheduler"])
self.merge_scheduler.load_state_dict(check_pnt["merge_scheduler"])
self.sa_scheduler.load_state_dict(check_pnt["sa_scheduler"])
# other parameter
self.start_epoch = check_pnt["start_epoch"]
self.best_valid_value_accuracy = check_pnt["best_valid_value_accuracy"]
self.best_valid_equ_accuracy = check_pnt["best_valid_equ_accuracy"]
self.best_test_value_accuracy = check_pnt["best_test_value_accuracy"]
self.best_test_equ_accuracy = check_pnt["best_test_equ_accuracy"]
self.best_folds_accuracy = check_pnt["best_folds_accuracy"]
def _scheduler_step(self):
self.embedder_scheduler.step()
self.encoder_scheduler.step()
self.decoder_scheduler.step()
self.node_generater_scheduler.step()
self.merge_scheduler.step()
self.sa_scheduler.step()
def _optimizer_step(self):
self.embedder_optimizer.step()
self.encoder_optimizer.step()
self.decoder_optimizer.step()
self.node_generater_optimizer.step()
self.merge_optimizer.step()
self.sa_optimizer.step()
class TRNNTrainer(SupervisedTrainer):
    """trnn trainer, used to implement training, testing, parameter searching for deep-learning model TRNN.
    example of instantiation:
    >>> trainer = TRNNTrainer(config, model, dataloader, evaluator)
    for training:
    >>> trainer.fit()
    for testing:
    >>> trainer.test()
    for parameter searching:
    >>> trainer.param_search()
    """
    def __init__(self, config, model, dataloader, evaluator):
        """
        Args:
            config (config): An instance object of Config, used to record parameter information.
            model (Model): An object of deep-learning model.
            dataloader (Dataloader): dataloader object.
            evaluator (Evaluator): evaluator object.
        expected that config includes these parameters below:
        seq2seq_learning_rate (float): learning rate of seq2seq module.
        ans_learning_rate (float): learning rate of answer module.
        train_batch_size (int): the training batch size.
        step_size (int): step_size of scheduler.
        epoch_nums (int): number of epochs.
        trained_model_path (str): a path of file which is used to save parameters of best model.
        checkpoint_path (str): a path of file which is used save checkpoint of training progress.
        output_path (str|None): a path of a json file which is used to save test output infomation fo model.
        resume (bool): start training from last checkpoint.
        validset_divide (bool): whether to split validset. if True, the dataset is split to trainset-validset-testset. if False, the dataset is split to trainset-testset.
        test_step (int): the epoch number of training after which conducts the evaluation on test.
        best_folds_accuracy (list|None): when running k-fold cross validation, this keeps the accuracy of folds that already run.
        """
        super().__init__(config, model, dataloader, evaluator)
        self._build_optimizer()
        if config["resume"]:
            self._load_checkpoint()
    def _build_optimizer(self):
        """Build Adam for the seq2seq sub-modules and SGD (momentum 0.9) for the answer module."""
        #self.optimizer = torch.optim.Adam(self.model.parameters(),self.config["learning_rate"])
        self.optimizer = torch.optim.Adam(
            [
                {'params': self.model.seq2seq_in_embedder.parameters()},
                {'params': self.model.seq2seq_out_embedder.parameters()},
                {'params': self.model.seq2seq_encoder.parameters()},
                {'params': self.model.seq2seq_decoder.parameters()},
                {'params': self.model.seq2seq_gen_linear.parameters()}
            ],
            self.config["seq2seq_learning_rate"]
        )
        self.answer_module_optimizer = torch.optim.SGD(
            [
                {'params': self.model.answer_in_embedder.parameters()},
                {'params': self.model.answer_encoder.parameters()},
                {'params': self.model.answer_rnn.parameters()}
            ],
            self.config["ans_learning_rate"],
            momentum=0.9
        )
    def _seq2seq_train(self):
        """Put the seq2seq sub-modules in train mode; freeze the answer module in eval mode."""
        self.model.seq2seq_in_embedder.train()
        self.model.seq2seq_out_embedder.train()
        self.model.seq2seq_encoder.train()
        self.model.seq2seq_decoder.train()
        self.model.seq2seq_gen_linear.train()
        self.model.answer_in_embedder.eval()
        self.model.answer_encoder.eval()
        self.model.answer_rnn.eval()
    def _ans_train(self):
        """Put the answer-module sub-modules in train mode; freeze the seq2seq parts in eval mode."""
        self.model.seq2seq_in_embedder.eval()
        self.model.seq2seq_out_embedder.eval()
        self.model.seq2seq_encoder.eval()
        self.model.seq2seq_decoder.eval()
        self.model.seq2seq_gen_linear.eval()
        self.model.answer_in_embedder.train()
        self.model.answer_encoder.train()
        self.model.answer_rnn.train()
    def _train_seq2seq_batch(self, batch):
        """Compute the seq2seq-module loss for one batch."""
        batch_loss = self.model.seq2seq_calculate_loss(batch)
        return batch_loss
    def _train_ans_batch(self, batch):
        """Compute the answer-module loss for one batch."""
        batch_loss = self.model.ans_module_calculate_loss(batch)
        return batch_loss
    def _train_epoch(self):
        """Run one two-stage training epoch (seq2seq stage, then answer stage).

        Returns:
            tuple: (total seq2seq loss, total answer-module loss, formatted epoch time).

        NOTE(review): no explicit ``loss.backward()`` is called here before the
        ``optimizer.step()`` calls -- presumably the model's
        ``seq2seq_calculate_loss`` / ``ans_module_calculate_loss`` backpropagate
        internally; confirm before modifying this loop.
        """
        epoch_start_time = time.time()
        loss_total_seq2seq = 0.
        loss_total_ans_module = 0.
        for batch_idx, batch in enumerate(self.dataloader.load_data(DatasetType.Train)):
            self.batch_idx = batch_idx + 1
            # first stage
            self._seq2seq_train()
            self.model.zero_grad()
            batch_seq2seq_loss = self._train_seq2seq_batch(batch)
            self.optimizer.step()
            # second stage
            self._ans_train()
            self.model.zero_grad()
            batch_ans_module_loss = self._train_ans_batch(batch)
            loss_total_seq2seq += batch_seq2seq_loss
            loss_total_ans_module += batch_ans_module_loss
            # self.seq2seq_optimizer.step()
            # self.answer_module_optimizer.step()
            self.answer_module_optimizer.step()
        epoch_time_cost = time_since(time.time() - epoch_start_time)
        return loss_total_seq2seq, loss_total_ans_module, epoch_time_cost
    def _eval_batch(self, batch, x=0):
        """Evaluate one batch.

        Returns per-example value accuracy, equation accuracy, template (seq2seq
        module) accuracy and generated-equation (answer module) accuracy.
        When ``x`` is truthy, also log the source question and predictions.
        """
        test_out, target, temp_out, temp_tar, equ_out, equ_tar = self.model.model_test(
            batch)
        batch_size = len(test_out)
        val_acc = []
        equ_acc = []
        temp_acc = []
        equs_acc = []
        for idx in range(batch_size):
            # Dispatch on task type; evaluator compares prediction vs. target.
            if self.config["task_type"] == TaskType.SingleEquation:
                val_ac, equ_ac, _, _ = self.evaluator.result(
                    test_out[idx], target[idx])
            elif self.config["task_type"] == TaskType.MultiEquation:
                val_ac, equ_ac, _, _ = self.evaluator.result_multi(
                    test_out[idx], target[idx])
            else:
                raise NotImplementedError
            equ_acc.append(equ_ac)
            val_acc.append(val_ac)
            # Template accuracy: exact match of the predicted template.
            if temp_out[idx] == temp_tar[idx]:
                temp_acc.append(True)
            else:
                temp_acc.append(False)
            # Equation accuracy of the answer module: exact match.
            if equ_out[idx] == equ_tar[idx]:
                equs_acc.append(True)
            else:
                equs_acc.append(False)
            if x:
                self.logger.info('{}\n{}\n{} {} {}\n{} {} {}'.format([batch["ques source 1"][idx]], [batch["ques source"][idx]],
                                                                     equ_out[idx], temp_out[idx], test_out[idx],
                                                                     equ_tar[idx], temp_tar[idx], target[idx]))
        return val_acc, equ_acc, temp_acc, equs_acc
    def fit(self):
        """train model.
        """
        train_batch_size = self.config["train_batch_size"]
        epoch_nums = self.config["epoch_nums"]
        self.train_batch_nums = int(
            self.dataloader.trainset_nums / train_batch_size) + 1
        self.logger.info("start training...")
        for epo in range(self.start_epoch, epoch_nums):
            self.epoch_i = epo + 1
            self.model.train()
            loss_total_seq2seq, loss_total_ans_module, train_time_cost = self._train_epoch()
            self.logger.info("epoch [%3d] avr seq2seq module loss [%2.8f] | avr answer module loss [%2.8f] | train time %s"
                             % (self.epoch_i, loss_total_seq2seq/self.train_batch_nums, loss_total_ans_module/self.train_batch_nums, train_time_cost))
            self.logger.info("target wrong: {} target total: {}".format(
                self.model.wrong, self.dataloader.trainset_nums))
            # Reset the model's wrong-target counter for the next epoch.
            self.model.wrong = 0
            # Evaluate every test_step epochs, plus every one of the last 5.
            if epo % self.test_step == 0 or epo > epoch_nums - 5:
                if self.config["k_fold"] or self.config["validset_divide"] is not True:
                    # No validation split: select the best model on the test set.
                    test_equ_ac, test_val_ac, template_ac, equation_ac, test_total, test_time_cost = self.evaluate(
                        DatasetType.Test)
                    self.logger.info("---------- test total [%d] | seq2seq module acc [%2.3f] | answer module acc [%2.3f]"
                                     % (test_total, template_ac, equation_ac))
                    self.logger.info("---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                                     % (test_total, test_equ_ac, test_val_ac, test_time_cost))
                    if test_val_ac >= self.best_test_value_accuracy:
                        self.best_test_value_accuracy = test_val_ac
                        self.best_test_equ_accuracy = test_equ_ac
                        self._save_model()
                        self._save_output()
                else:
                    # Validation split available: select the best model on valid.
                    valid_equ_ac, valid_val_ac, _, _, valid_total, valid_time_cost = self.evaluate(
                        DatasetType.Valid)
                    self.logger.info("---------- valid total [%d] | valid equ acc [%2.3f] | valid value acc [%2.3f] | valid time %s"
                                     % (valid_total, valid_equ_ac, valid_val_ac, valid_time_cost))
                    test_equ_ac, test_val_ac, _, _, test_total, test_time_cost = self.evaluate(
                        DatasetType.Test)
                    self.logger.info("---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                                     % (test_total, test_equ_ac, test_val_ac, test_time_cost))
                    if valid_val_ac >= self.best_valid_value_accuracy:
                        self.best_valid_value_accuracy = valid_val_ac
                        self.best_valid_equ_accuracy = valid_equ_ac
                        self.best_test_value_accuracy = test_val_ac
                        self.best_test_equ_accuracy = test_equ_ac
                        self._save_model()
                        self._save_output()
            # Checkpoint every 5 epochs for resumable training.
            if epo % 5 == 0:
                self._save_checkpoint()
        # self.test(DatasetType.Test)
        # self.test(DatasetType.Train)
        self.logger.info('''training finished.
                            best valid result: equation accuracy [%2.3f] | value accuracy [%2.3f]
                            best test result : equation accuracy [%2.3f] | value accuracy [%2.3f]'''
                         % (self.best_valid_equ_accuracy, self.best_valid_value_accuracy,
                            self.best_test_equ_accuracy, self.best_test_value_accuracy))
    def evaluate(self, eval_set):
        """evaluate model.
        Args:
            eval_set (str): [valid | test], the dataset for evaluation.
        Returns:
            tuple(float,float,float,float,int,str):
            equation accuracy, value accuracy, seq2seq module accuracy, answer module accuracy, count of evaluated datas, formatted time string of evaluation time.
        """
        self.model.eval()
        value_ac = 0
        equation_ac = 0
        template_ac = 0
        equations_ac = 0
        eval_total = 0
        self.output_result = []
        test_start_time = time.time()
        for batch in self.dataloader.load_data(eval_set):
            batch_val_ac, batch_equ_ac, batch_temp_acc, batch_equs_acc = self._eval_batch(
                batch)
            value_ac += batch_val_ac.count(True)
            equation_ac += batch_equ_ac.count(True)
            template_ac += batch_temp_acc.count(True)
            equations_ac += batch_equs_acc.count(True)
            eval_total += len(batch_val_ac)
        test_time_cost = time_since(time.time() - test_start_time)
        return equation_ac / eval_total, value_ac / eval_total,\
            template_ac / eval_total, equations_ac / eval_total,\
            eval_total, test_time_cost
    def test(self, type):
        """Load the best saved model and evaluate it on the given dataset split.

        Args:
            type (str): dataset split to evaluate (parameter name shadows the
                builtin ``type``; kept for interface compatibility).
        """
        self._load_model()
        self.model.eval()
        value_ac = 0
        equation_ac = 0
        eval_total = 0
        ans_acc = 0
        self.output_result = []
        test_start_time = time.time()
        for batch in self.dataloader.load_data(type):
            batch_val_ac, batch_equ_ac, batch_temp_acc, batch_equs_acc = self._eval_batch(
                batch)
            value_ac += batch_val_ac.count(True)
            equation_ac += batch_equ_ac.count(True)
            ans_acc += batch_equs_acc.count(True)
            eval_total += len(batch_val_ac)
        self.best_test_equ_accuracy = equation_ac/eval_total
        self.best_test_value_accuracy = value_ac/eval_total
        test_time_cost = time_since(time.time() - test_start_time)
        # self.logger.info("test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"\
        #     %(eval_total,equation_ac/eval_total,value_ac/eval_total,test_time_cost))
        self.logger.info("test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                         % (eval_total, equation_ac/eval_total, value_ac/eval_total, test_time_cost))
        self._save_output()
    def param_search(self):
        """hyper-parameter search.
        """
        train_batch_size = self.config["train_batch_size"]
        epoch_nums = self.config["epoch_nums"]
        self.train_batch_nums = int(
            self.dataloader.trainset_nums / train_batch_size) + 1
        self.logger.info("start training...")
        for epo in range(self.start_epoch, epoch_nums):
            self.epoch_i = epo + 1
            self.model.train()
            seq2seq_loss_total, _, train_time_cost = self._train_epoch()
            if epo % self.test_step == 0 or epo > epoch_nums - 5:
                test_equ_ac, test_val_ac, _, acc, test_total, test_time_cost = self.evaluate(
                    DatasetType.Test)
                # Report value accuracy to the Ray Tune search driver.
                tune.report(accuracy=test_val_ac)
class SalignedTrainer(SupervisedTrainer):
    """saligned trainer, used to implement training, testing, parameter searching for deep-learning model S-aligned.
    example of instantiation:
    >>> trainer = SalignedTrainer(config, model, dataloader, evaluator)
    for training:
    >>> trainer.fit()
    for testing:
    >>> trainer.test()
    for parameter searching:
    >>> trainer.param_search()
    """
    def __init__(self, config, model, dataloader, evaluator):
        """
        Args:
            config (config): An instance object of Config, used to record parameter information.
            model (Model): An object of deep-learning model.
            dataloader (Dataloader): dataloader object.
            evaluator (Evaluator): evaluator object.
        expected that config includes these parameters below:
        learning_rate (float): learning rate of model
        train_batch_size (int): the training batch size.
        epoch_nums (int): number of epochs.
        step_size (int): step_size of scheduler.
        trained_model_path (str): a path of file which is used to save parameters of best model.
        checkpoint_path (str): a path of file which is used save checkpoint of training progress.
        output_path (str|None): a path of a json file which is used to save test output infomation fo model.
        resume (bool): start training from last checkpoint.
        validset_divide (bool): whether to split validset. if True, the dataset is split to trainset-validset-testset. if False, the dataset is split to trainset-testset.
        test_step (int): the epoch number of training after which conducts the evaluation on test.
        best_folds_accuracy (list|None): when running k-fold cross validation, this keeps the accuracy of folds that already run.
        """
        super().__init__(config, model, dataloader, evaluator)
        self._build_optimizer()
        if config["resume"]:
            self._load_checkpoint()
    def _build_optimizer(self):
        """Build a single Adam optimizer over all parameters and a halving StepLR scheduler."""
        # optimizer
        self.optimizer = torch.optim.Adam(self.model.parameters(
        ), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
        # scheduler
        self.scheduler = torch.optim.lr_scheduler.StepLR(
            self.optimizer, step_size=self.config["step_size"], gamma=0.5)
    def _save_checkpoint(self):
        """Serialize model/optimizer/scheduler state and progress counters for resuming."""
        check_pnt = {
            "model": self.model.state_dict(),
            "optimizer": self.optimizer.state_dict(),
            "scheduler": self.scheduler.state_dict(),
            "start_epoch": self.epoch_i,
            "best_valid_value_accuracy": self.best_valid_value_accuracy,
            "best_valid_equ_accuracy": self.best_valid_equ_accuracy,
            "best_test_value_accuracy": self.best_test_value_accuracy,
            "best_test_equ_accuracy": self.best_test_equ_accuracy,
            "best_folds_accuracy": self.best_folds_accuracy,
            "fold_t": self.config["fold_t"]
        }
        torch.save(check_pnt, self.config["checkpoint_path"])
    def _load_checkpoint(self):
        """Restore model/optimizer/scheduler state and progress counters from checkpoint."""
        check_pnt = torch.load(
            self.config["checkpoint_path"], map_location=self.config["map_location"])
        # load parameter of model
        self.model.load_state_dict(check_pnt["model"])
        # load parameter of optimizer
        self.optimizer.load_state_dict(check_pnt["optimizer"])
        # load parameter of scheduler
        self.scheduler.load_state_dict(check_pnt["scheduler"])
        # other parameter
        self.start_epoch = check_pnt["start_epoch"]
        self.best_valid_value_accuracy = check_pnt["best_valid_value_accuracy"]
        self.best_valid_equ_accuracy = check_pnt["best_valid_equ_accuracy"]
        self.best_test_value_accuracy = check_pnt["best_test_value_accuracy"]
        self.best_test_equ_accuracy = check_pnt["best_test_equ_accuracy"]
        self.best_folds_accuracy = check_pnt["best_folds_accuracy"]
    def _scheduler_step(self):
        """Advance the learning-rate scheduler by one step."""
        self.scheduler.step()
    def _optimizer_step(self):
        """Apply one optimizer update."""
        self.optimizer.step()
    def adjust_equ(self, op_target, eq_len, num_list):
        """Remap equation token ids to the S-aligned decoder's vocabulary layout
        and attach the generated-variable prefix and end-of-equation postfix.

        Args:
            op_target (Tensor): (batch, len) equation token ids.
            eq_len (list[int]): true equation length of each example.
            num_list (list[list]): number list of each example.

        Returns:
            tuple(Tensor, list[int]): remapped ids and per-example lengths grown by 3.

        NOTE(review): the +3/+2 id offsets encode a specific output-vocabulary
        ordering (constants, UNK, operators) -- confirm against the dataset's
        out_symbol2idx before changing any of them.
        """
        batch_size, batch_len = op_target.size()
        # change NUM
        # target_mask = torch.ge(op_target, self.min_NUM) * torch.le(op_target, self.max_NUM).to(torch.long)
        # op_target = (op_target + self.UNK - self.min_NUM + 4) * target_mask + op_target * (1 - target_mask)
        # change constants
        target_mask = torch.ge(op_target, self.min_CON) * \
            torch.le(op_target, self.max_NUM).to(torch.long)
        op_target = (op_target + 3) * target_mask + \
            op_target * (1 - target_mask)
        # change unk
        target_mask = torch.eq(op_target, self.UNK).to(torch.long)
        op_target = (self.min_NUM + 3) * target_mask + \
            op_target * (1 - target_mask)
        # change +/-/*//
        target_mask = torch.ge(op_target, self.ADD) * \
            torch.le(op_target, self.POWER - 1).to(torch.long)
        op_target = (op_target + 2) * target_mask + \
            op_target * (1 - target_mask)
        # change padding
        #print(eq_len, num_list)
        target_mask = torch.tensor([[1] * eq_len[b] + [0] * (batch_len - eq_len[b])
                                    for b in range(batch_size)]).to(torch.long).to(self.model._device)
        op_target = op_target * target_mask
        # attach prefix/postfix
        batch_size, _ = op_target.size()
        # if self.do_addeql:
        eq_postfix = torch.zeros((batch_size, 1), dtype=torch.long).to(
            self.model._device) + 2
        op_target = torch.cat([op_target, eq_postfix], dim=1)
        # Write the EQL token right after each equation's last real token.
        op_target.scatter_(1, torch.tensor([[idx] for idx in eq_len]).to(
            self.model._device), self.model.EQL)
        #op_target[torch.arange(batch_size).unsqueeze(1), eq_len] = self.model.EQL
        #print('op_target', op_target[:3, :10])
        gen_var_prefix = [self.min_NUM + len(num) + 3 for num in num_list]
        #print('gen_var_prefix', self.max_NUM, num_list, gen_var_prefix)
        gen_var_prefix = torch.tensor(
            gen_var_prefix, dtype=torch.long).unsqueeze(1).to(self.model._device)
        # gen_var_prefix = torch.zeros((batch_size, 1), dtype=torch.long).to(self.model._device) + 14 #self.max_NUM + 4
        x_prefix = torch.zeros((batch_size, 1), dtype=torch.long).to(
            self.model._device) + self.model.GEN_VAR
        op_target = torch.cat([x_prefix, gen_var_prefix, op_target], dim=1)
        # if self.do_addeql:
        eq_len = [(idx + 3) for idx in eq_len]
        # else:
        #     eq_len = [(idx + 2) for idx in eq_len]
        return op_target, eq_len
    def _train_batch(self, batch):
        """Sort the batch by descending question length, then compute the loss.

        NOTE(review): the descending-length sort is presumably required by the
        encoder (e.g. packed sequences) -- confirm before removing.
        """
        order = torch.sort(batch['ques len'] * -1)[1]
        for k in batch:
            if type(batch[k]) is list:
                batch[k] = [batch[k][i] for i in order]
            else:
                batch[k] = batch[k][order]
        batch_loss = self.model.calculate_loss(batch)
        return batch_loss
    def _eval_batch(self, batch):
        """Evaluate one batch (sorted by descending question length) and record
        per-example predictions into ``self.output_result``.

        Returns:
            tuple(list[bool], list[bool]): value accuracy and equation accuracy per example.
        """
        order = torch.sort(batch['ques len'] * -1)[1]
        for k in batch:
            if type(batch[k]) is list:
                batch[k] = [batch[k][i] for i in order]
            else:
                batch[k] = batch[k][order]
        test_out, target = self.model.model_test(batch)
        batch_size = len(test_out)
        val_acc = []
        equ_acc = []
        for idx in range(batch_size):
            # Dispatch on task type; evaluator compares prediction vs. target.
            if self.config["task_type"] == TaskType.SingleEquation:
                val_ac, equ_ac, _, _ = self.evaluator.result(
                    test_out[idx], target[idx])
            elif self.config["task_type"] == TaskType.MultiEquation:
                val_ac, equ_ac, _, _ = self.evaluator.result_multi(
                    test_out[idx], target[idx])
            else:
                raise NotImplementedError
            val_acc.append(val_ac)
            equ_acc.append(equ_ac)
            result = {
                'id': batch['id'][idx],
                'prediction': ' '.join(test_out[idx]),
                'target': ' '.join(target[idx]),
                'number list': batch['num list'][idx],
                'value acc': val_ac,
                'equ acc': equ_ac
            }
            self.output_result.append(result)
        return val_acc, equ_acc
    def _train_epoch(self):
        """Run one training epoch; returns (total loss, formatted epoch time)."""
        epoch_start_time = time.time()
        loss_total = 0.
        self.model.train()
        # print(self.dataloader.dataset.out_symbol2idx); #exit()
        for batch_idx, batch in enumerate(self.dataloader.load_data(DatasetType.Train)):
            # if batch_idx >= 100: continue
            #print('batch_idx', batch_idx)
            # Keep an untouched copy; _train_batch reorders the batch in place.
            batch["raw_equation"] = batch["equation"].clone()
            self.batch_idx = batch_idx + 1
            self.model.zero_grad()
            batch_loss = self._train_batch(batch)
            loss_total += batch_loss
            batch_loss.backward()
            self._optimizer_step()
        epoch_time_cost = time_since(time.time() - epoch_start_time)
        return loss_total, epoch_time_cost
    def fit(self):
        """train model.
        """
        train_batch_size = self.config["train_batch_size"]
        epoch_nums = self.config["epoch_nums"]
        self.train_batch_nums = int(
            self.dataloader.trainset_nums / train_batch_size) + 1
        self.logger.info("start training...")
        for epo in range(self.start_epoch, epoch_nums):
            self.epoch_i = epo + 1
            self.model.train()
            loss_total, train_time_cost = self._train_epoch()
            self._scheduler_step()
            self.logger.info("epoch [%3d] avr loss [%2.8f] | train time %s"
                             % (self.epoch_i, loss_total/self.train_batch_nums, train_time_cost))
            # Evaluate every test_step epochs, plus every one of the last 5.
            if epo % self.test_step == 0 or epo > epoch_nums - 5:
                if self.config["k_fold"] or self.config["validset_divide"] is not True:
                    # No validation split: select the best model on the test set.
                    test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate(
                        DatasetType.Test)
                    self.logger.info("---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                                     % (test_total, test_equ_ac, test_val_ac, test_time_cost))
                    if test_val_ac >= self.best_test_value_accuracy:
                        self.best_test_value_accuracy = test_val_ac
                        self.best_test_equ_accuracy = test_equ_ac
                        self._save_model()
                        self._save_output()
                else:
                    # Validation split available: select the best model on valid.
                    valid_equ_ac, valid_val_ac, valid_total, valid_time_cost = self.evaluate(
                        DatasetType.Valid)
                    self.logger.info("---------- valid total [%d] | valid equ acc [%2.3f] | valid value acc [%2.3f] | valid time %s"
                                     % (valid_total, valid_equ_ac, valid_val_ac, valid_time_cost))
                    test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate(
                        DatasetType.Test)
                    self.logger.info("---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                                     % (test_total, test_equ_ac, test_val_ac, test_time_cost))
                    if valid_val_ac >= self.best_valid_value_accuracy:
                        self.best_valid_value_accuracy = valid_val_ac
                        self.best_valid_equ_accuracy = valid_equ_ac
                        self.best_test_value_accuracy = test_val_ac
                        self.best_test_equ_accuracy = test_equ_ac
                        self._save_model()
                        self._save_output()
            # Checkpoint every 5 epochs for resumable training.
            if epo % 5 == 0:
                self._save_checkpoint()
        self.logger.info('''training finished.
                            best valid result: equation accuracy [%2.3f] | value accuracy [%2.3f]
                            best test result : equation accuracy [%2.3f] | value accuracy [%2.3f]'''
                         % (self.best_valid_equ_accuracy, self.best_valid_value_accuracy,
                            self.best_test_equ_accuracy, self.best_test_value_accuracy))
    def evaluate(self, eval_set):
        """evaluate model.
        Args:
            eval_set (str): [valid | test], the dataset for evaluation.
        Returns:
            tuple(float,float,int,str):
            equation accuracy, value accuracy, count of evaluated datas, formatted time string of evaluation time.
        """
        self.model.eval()
        value_ac = 0
        equation_ac = 0
        eval_total = 0
        self.output_result = []
        test_start_time = time.time()
        for batch_idx, batch in enumerate(self.dataloader.load_data(eval_set)):
            # NOTE(review): caps evaluation at the first 3000 batches; the
            # `continue` still iterates the rest of the loader -- likely a
            # debugging limit left in place.
            if batch_idx >= 3000:
                continue
            batch["raw_equation"] = batch["equation"].clone()
            # batch["equation"], batch['equ len'] = self.adjust_equ(batch["raw_equation"], batch['equ len'],
            #                                                      batch['num list'])
            batch_val_ac, batch_equ_ac = self._eval_batch(batch)
            value_ac += batch_val_ac.count(True)
            equation_ac += batch_equ_ac.count(True)
            eval_total += len(batch_val_ac)
            pass
        test_time_cost = time_since(time.time() - test_start_time)
        return equation_ac / eval_total, value_ac / eval_total, eval_total, test_time_cost
    def test(self):
        """test model.
        """
        self._load_model()
        self.model.eval()
        value_ac = 0
        equation_ac = 0
        eval_total = 0
        self.output_result = []
        test_start_time = time.time()
        for batch in self.dataloader.load_data(DatasetType.Test):
            batch_val_ac, batch_equ_ac = self._eval_batch(batch)
            value_ac += batch_val_ac.count(True)
            equation_ac += batch_equ_ac.count(True)
            eval_total += len(batch_val_ac)
        self.best_test_equ_accuracy = equation_ac/eval_total
        self.best_test_value_accuracy = value_ac/eval_total
        test_time_cost = time_since(time.time() - test_start_time)
        self.logger.info("test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                         % (eval_total, equation_ac/eval_total, value_ac/eval_total, test_time_cost))
        self._save_output()
class HMSTrainer(GTSTrainer):
    """HMS trainer.

    Reuses the :class:`GTSTrainer` training loop but optimizes every model
    parameter with a single Adam optimizer and decays the learning rate with
    a configurable ``scheduler_gamma``.
    """
    def __init__(self, config, model, dataloader, evaluator):
        super().__init__(config, model, dataloader, evaluator)
    def _build_optimizer(self):
        """One Adam over all parameters, plus a StepLR scheduler."""
        self.optimizer = torch.optim.Adam(
            self.model.parameters(),
            lr=self.config["learning_rate"],
            weight_decay=self.config["weight_decay"])
        self.scheduler = torch.optim.lr_scheduler.StepLR(
            self.optimizer,
            step_size=self.config["step_size"],
            gamma=self.config["scheduler_gamma"])
    def _optimizer_step(self):
        """Apply one optimizer update."""
        self.optimizer.step()
    def _scheduler_step(self):
        """Advance the learning-rate schedule by one step."""
        self.scheduler.step()
    def _save_checkpoint(self):
        """Write a resumable checkpoint to ``config["checkpoint_path"]``."""
        snapshot = {
            "model": self.model.state_dict(),
            "optimizer": self.optimizer.state_dict(),
            "scheduler": self.scheduler.state_dict(),
            "start_epoch": self.epoch_i,
            "best_valid_value_accuracy": self.best_valid_value_accuracy,
            "best_valid_equ_accuracy": self.best_valid_equ_accuracy,
            "best_test_value_accuracy": self.best_test_value_accuracy,
            "best_test_equ_accuracy": self.best_test_equ_accuracy,
            "best_folds_accuracy": self.best_folds_accuracy,
            "fold_t": self.config["fold_t"],
        }
        torch.save(snapshot, self.config["checkpoint_path"])
    def _load_checkpoint(self):
        """Restore model/optimizer/scheduler state and progress counters."""
        snapshot = torch.load(self.config["checkpoint_path"],
                              map_location=self.config["map_location"])
        self.model.load_state_dict(snapshot["model"])
        self.optimizer.load_state_dict(snapshot["optimizer"])
        self.scheduler.load_state_dict(snapshot["scheduler"])
        # Training-progress bookkeeping.
        self.start_epoch = snapshot["start_epoch"]
        self.best_valid_value_accuracy = snapshot["best_valid_value_accuracy"]
        self.best_valid_equ_accuracy = snapshot["best_valid_equ_accuracy"]
        self.best_test_value_accuracy = snapshot["best_test_value_accuracy"]
        self.best_test_equ_accuracy = snapshot["best_test_equ_accuracy"]
        self.best_folds_accuracy = snapshot["best_folds_accuracy"]
class TSNTrainer(AbstractTrainer):
"""tsn trainer, used to implement training, testing, parameter searching for deep-learning model TSN.
example of instantiation:
>>> trainer = TSNTrainer(config, model, dataloader, evaluator)
for training:
>>> trainer.fit()
for testing:
>>> trainer.test()
for parameter searching:
>>> trainer.param_search()
"""
def __init__(self, config, model, dataloader, evaluator):
"""
Args:
config (config): An instance object of Config, used to record parameter information.
model (Model): An object of deep-learning model.
dataloader (Dataloader): dataloader object.
evaluator (Evaluator): evaluator object.
expected that config includes these parameters below:
learning_rate (float): learning rate of model
train_batch_size (int): the training batch size.
epoch_nums (int): number of epochs.
step_size (int): step_size of scheduler.
trained_model_path (str): a path of file which is used to save parameters of best model.
checkpoint_path (str): a path of file which is used save checkpoint of training progress.
output_path (str|None): a path of a json file which is used to save test output infomation fo model.
resume (bool): start training from last checkpoint.
validset_divide (bool): whether to split validset. if True, the dataset is split to trainset-validset-testset. if False, the dataset is split to trainset-testset.
test_step (int): the epoch number of training after which conducts the evaluation on test.
best_folds_accuracy (list|None): when running k-fold cross validation, this keeps the accuracy of folds that already run.
"""
super().__init__(config, model, dataloader, evaluator)
self.t_start_epoch = 0
self.s_start_epoch = 0
self.t_epoch_i = 0
self.s_epoch_i = 0
self._build_optimizer()
if config["resume"]:
self._load_checkpoint()
def _build_optimizer(self):
# optimizer
self.t_embedder_optimizer = torch.optim.Adam(self.model.t_embedder.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.t_encoder_optimizer = torch.optim.Adam(self.model.t_encoder.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.t_decoder_optimizer = torch.optim.Adam(self.model.t_decoder.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.t_node_generater_optimizer = torch.optim.Adam(self.model.t_node_generater.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.t_merge_optimizer = torch.optim.Adam(self.model.t_merge.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.s_embedder_optimizer = torch.optim.Adam(self.model.s_embedder.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.s_encoder_optimizer = torch.optim.Adam(self.model.s_encoder.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.s_decoder_optimizer1 = torch.optim.Adam(self.model.s_decoder_1.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.s_node_generater_optimizer1 = torch.optim.Adam(self.model.s_node_generater_1.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.s_merge_optimizer1 = torch.optim.Adam(self.model.s_merge_1.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.s_decoder_optimizer2 = torch.optim.Adam(self.model.s_decoder_2.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.s_node_generater_optimizer2 = torch.optim.Adam(self.model.s_node_generater_2.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.s_merge_optimizer2 = torch.optim.Adam(self.model.s_merge_2.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
# scheduler
self.t_embedder_scheduler = torch.optim.lr_scheduler.StepLR(
self.t_embedder_optimizer, step_size=self.config["step_size"], gamma=0.5)
self.t_encoder_scheduler = torch.optim.lr_scheduler.StepLR(
self.t_encoder_optimizer, step_size=self.config["step_size"], gamma=0.5)
self.t_decoder_scheduler = torch.optim.lr_scheduler.StepLR(
self.t_decoder_optimizer, step_size=self.config["step_size"], gamma=0.5)
self.t_node_generater_scheduler = torch.optim.lr_scheduler.StepLR(
self.t_node_generater_optimizer, step_size=self.config["step_size"], gamma=0.5)
self.t_merge_scheduler = torch.optim.lr_scheduler.StepLR(
self.t_merge_optimizer, step_size=self.config["step_size"], gamma=0.5)
self.s_embedder_scheduler = torch.optim.lr_scheduler.StepLR(
self.s_embedder_optimizer, step_size=self.config["step_size"], gamma=0.5)
self.s_encoder_scheduler = torch.optim.lr_scheduler.StepLR(
self.s_encoder_optimizer, step_size=self.config["step_size"], gamma=0.5)
self.s_decoder_scheduler1 = torch.optim.lr_scheduler.StepLR(
self.s_decoder_optimizer1, step_size=self.config["step_size"], gamma=0.5)
self.s_node_generater_scheduler1 = torch.optim.lr_scheduler.StepLR(
self.s_node_generater_optimizer1, step_size=self.config["step_size"], gamma=0.5)
self.s_merge_scheduler1 = torch.optim.lr_scheduler.StepLR(
self.s_merge_optimizer1, step_size=self.config["step_size"], gamma=0.5)
self.s_decoder_scheduler2 = torch.optim.lr_scheduler.StepLR(
self.s_decoder_optimizer2, step_size=self.config["step_size"], gamma=0.5)
self.s_node_generater_scheduler2 = torch.optim.lr_scheduler.StepLR(
self.s_node_generater_optimizer2, step_size=self.config["step_size"], gamma=0.5)
self.s_merge_scheduler2 = torch.optim.lr_scheduler.StepLR(
self.s_merge_optimizer2, step_size=self.config["step_size"], gamma=0.5)
    def _save_checkpoint(self):
        """Serialize the full training state to ``config["checkpoint_path"]``.

        The checkpoint bundles the model weights, every teacher/student
        optimizer and scheduler state, the epoch counters for both training
        stages, the best accuracies seen so far, and the current fold index,
        so that training can be resumed later via ``_load_checkpoint``.
        """
        # NOTE: the node-generater optimizer/scheduler states are stored
        # under the shorter "*_generate_*" key names below.
        check_pnt = {
            "model": self.model.state_dict(),
            "t_embedder_optimizer": self.t_embedder_optimizer.state_dict(),
            "t_encoder_optimizer": self.t_encoder_optimizer.state_dict(),
            "t_decoder_optimizer": self.t_decoder_optimizer.state_dict(),
            "t_generate_optimizer": self.t_node_generater_optimizer.state_dict(),
            "t_merge_optimizer": self.t_merge_optimizer.state_dict(),
            "t_embedder_scheduler": self.t_embedder_scheduler.state_dict(),
            "t_encoder_scheduler": self.t_encoder_scheduler.state_dict(),
            "t_decoder_scheduler": self.t_decoder_scheduler.state_dict(),
            "t_generate_scheduler": self.t_node_generater_scheduler.state_dict(),
            "t_merge_scheduler": self.t_merge_scheduler.state_dict(),
            "s_embedder_optimizer": self.s_embedder_optimizer.state_dict(),
            "s_encoder_optimizer": self.s_encoder_optimizer.state_dict(),
            "s_decoder_optimizer1": self.s_decoder_optimizer1.state_dict(),
            "s_generate_optimizer1": self.s_node_generater_optimizer1.state_dict(),
            "s_merge_optimizer1": self.s_merge_optimizer1.state_dict(),
            "s_decoder_optimizer2": self.s_decoder_optimizer2.state_dict(),
            "s_generate_optimizer2": self.s_node_generater_optimizer2.state_dict(),
            "s_merge_optimizer2": self.s_merge_optimizer2.state_dict(),
            "s_embedder_scheduler": self.s_embedder_scheduler.state_dict(),
            "s_encoder_scheduler": self.s_encoder_scheduler.state_dict(),
            "s_decoder_scheduler1": self.s_decoder_scheduler1.state_dict(),
            "s_generate_scheduler1": self.s_node_generater_scheduler1.state_dict(),
            "s_merge_scheduler1": self.s_merge_scheduler1.state_dict(),
            "s_decoder_scheduler2": self.s_decoder_scheduler2.state_dict(),
            "s_generate_scheduler2": self.s_node_generater_scheduler2.state_dict(),
            "s_merge_scheduler2": self.s_merge_scheduler2.state_dict(),
            "t_start_epoch": self.t_epoch_i,
            "s_start_epoch": self.s_epoch_i,
            "best_valid_value_accuracy": self.best_valid_value_accuracy,
            "best_valid_equ_accuracy": self.best_valid_equ_accuracy,
            "best_test_value_accuracy": self.best_test_value_accuracy,
            "best_test_equ_accuracy": self.best_test_equ_accuracy,
            "best_folds_accuracy": self.best_folds_accuracy,
            "fold_t": self.config["fold_t"]
        }
        torch.save(check_pnt, self.config["checkpoint_path"])
def _load_checkpoint(self):
check_pnt = torch.load(
self.config["checkpoint_path"], map_location=self.config["map_location"])
# load parameter of model
self.model.load_state_dict(check_pnt["model"])
# load parameter of optimizer
self.t_embedder_optimizer.load_state_dict(
check_pnt['t_embedder_optimizer'])
self.t_encoder_optimizer.load_state_dict(
check_pnt['t_encoder_optimizer'])
self.t_decoder_optimizer.load_state_dict(
check_pnt['t_decoder_optimizer'])
self.t_node_generater_optimizer.load_state_dict(
check_pnt['t_node_generater_optimizer'])
self.t_merge_optimizer.load_state_dict(check_pnt['t_merge_optimizer'])
self.s_embedder_optimizer.load_state_dict(
check_pnt['s_embedder_optimizer'])
self.s_encoder_optimizer.load_state_dict(
check_pnt['s_encoder_optimizer'])
self.s_decoder_optimizer1.load_state_dict(
check_pnt['s_decoder_optimizer1'])
self.s_node_generater_optimizer1.load_state_dict(
check_pnt['s_node_generater_optimizer1'])
self.s_merge_optimizer1.load_state_dict(
check_pnt['s_merge_optimizer1'])
self.s_decoder_optimizer2.load_state_dict(
check_pnt['s_decoder_optimizer2'])
self.s_node_generater_optimizer2.load_state_dict(
check_pnt['s_node_generater_optimizer2'])
self.s_merge_optimizer2.load_state_dict(
check_pnt['s_merge_optimizer2'])
# load parameter of scheduler
self.t_embedder_scheduler.load_state_dict(
check_pnt['t_embedder_scheduler'])
self.t_encoder_scheduler.load_state_dict(
check_pnt['t_encoder_scheduler'])
self.t_decoder_scheduler.load_state_dict(
check_pnt['t_decoder_scheduler'])
self.t_node_generater_scheduler.load_state_dict(
check_pnt['t_node_generater_scheduler'])
self.t_merge_scheduler.load_state_dict(check_pnt['t_merge_scheduler'])
self.s_embedder_scheduler.load_state_dict(
check_pnt['s_embedder_scheduler'])
self.s_encoder_scheduler.load_state_dict(
check_pnt['s_encoder_scheduler'])
self.s_decoder_scheduler1.load_state_dict(
check_pnt['s_decoder_scheduler1'])
self.s_node_generater_scheduler1.load_state_dict(
check_pnt['s_node_generater_scheduler1'])
self.s_merge_scheduler1.load_state_dict(
check_pnt['s_merge_scheduler1'])
self.s_decoder_scheduler2.load_state_dict(
check_pnt['s_decoder_scheduler2'])
self.s_node_generater_scheduler2.load_state_dict(
check_pnt['s_node_generater_scheduler2'])
self.s_merge_scheduler2.load_state_dict(
check_pnt['s_merge_scheduler2'])
# other parameter
self.t_start_epoch = check_pnt["t_start_epoch"]
self.s_start_epoch = check_pnt['s_start_epoch']
self.best_valid_value_accuracy = check_pnt["best_valid_value_accuracy"]
self.best_valid_equ_accuracy = check_pnt["best_valid_equ_accuracy"]
self.best_test_value_accuracy = check_pnt["best_test_value_accuracy"]
self.best_test_equ_accuracy = check_pnt["best_test_equ_accuracy"]
self.best_folds_accuracy = check_pnt["best_folds_accuracy"]
def _teacher_net_train(self):
self.model.t_embedder.train()
self.model.t_encoder.train()
self.model.t_decoder.train()
self.model.t_node_generater.train()
self.model.t_merge.train()
self.model.s_embedder.eval()
self.model.s_encoder.eval()
self.model.s_decoder_1.eval()
self.model.s_node_generater_1.eval()
self.model.s_merge_1.eval()
self.model.s_decoder_2.eval()
self.model.s_node_generater_2.eval()
self.model.s_merge_2.eval()
def _student_net_train(self):
self.model.t_embedder.eval()
self.model.t_encoder.eval()
self.model.t_decoder.eval()
self.model.t_node_generater.eval()
self.model.t_merge.eval()
self.model.s_embedder.train()
self.model.s_encoder.train()
self.model.s_decoder_1.train()
self.model.s_node_generater_1.train()
self.model.s_merge_1.train()
self.model.s_decoder_2.train()
self.model.s_node_generater_2.train()
self.model.s_merge_2.train()
def _teacher_optimizer_step(self):
self.t_embedder_optimizer.step()
self.t_encoder_optimizer.step()
self.t_decoder_optimizer.step()
self.t_node_generater_optimizer.step()
self.t_merge_optimizer.step()
def _student_optimizer_step(self):
self.s_embedder_optimizer.step()
self.s_encoder_optimizer.step()
self.s_decoder_optimizer1.step()
self.s_node_generater_optimizer1.step()
self.s_merge_optimizer1.step()
self.s_decoder_optimizer2.step()
self.s_node_generater_optimizer2.step()
self.s_merge_optimizer2.step()
def _teacher_scheduler_step(self):
self.t_embedder_scheduler.step()
self.t_encoder_scheduler.step()
self.t_decoder_scheduler.step()
self.t_node_generater_scheduler.step()
self.t_merge_scheduler.step()
def _student_scheduler_step(self):
self.s_embedder_scheduler.step()
self.s_encoder_scheduler.step()
self.s_decoder_scheduler1.step()
self.s_node_generater_scheduler1.step()
self.s_merge_scheduler1.step()
self.s_decoder_scheduler2.step()
self.s_node_generater_scheduler2.step()
self.s_merge_scheduler2.step()
def _train_teacher_net_batch(self, batch):
batch_loss = self.model.teacher_calculate_loss(batch)
return batch_loss
def _train_student_net_batch(self, batch):
batch_loss = self.model.student_calculate_loss(batch)
return batch_loss
def _eval_teacher_net_batch(self, batch):
test_out, target = self.model.teacher_test(batch)
batch_size = len(test_out)
val_acc = []
equ_acc = []
for idx in range(batch_size):
if self.config["task_type"] == TaskType.SingleEquation:
val_ac, equ_ac, _, _ = self.evaluator.result(
test_out[idx], target[idx])
elif self.config["task_type"] == TaskType.MultiEquation:
val_ac, equ_ac, _, _ = self.evaluator.result_multi(
test_out[idx], target[idx])
else:
raise NotImplementedError
val_acc.append(val_ac)
equ_acc.append(equ_ac)
return val_acc, equ_acc
    def _eval_student_net_batch(self, batch):
        """Decode one batch with both student nets and score each prediction.

        Returns six parallel lists of flags: combined value/equation accuracy
        (using whichever student's output is selected by the score comparison
        below), then student-1 value/equation accuracy, then student-2
        value/equation accuracy.
        """
        test_out1, score1, test_out2, score2, target = self.model.student_test(
            batch)
        batch_size = len(test_out1)
        val_acc = []
        equ_acc = []
        s1_val_acc = []
        s1_equ_acc = []
        s2_val_acc = []
        s2_equ_acc = []
        for idx in range(batch_size):
            if self.config["task_type"] == TaskType.SingleEquation:
                val_ac1, equ_ac1, _, _ = self.evaluator.result(
                    test_out1[idx], target[idx])
                val_ac2, equ_ac2, _, _ = self.evaluator.result(
                    test_out2[idx], target[idx])
            elif self.config["task_type"] == TaskType.MultiEquation:
                val_ac1, equ_ac1, _, _ = self.evaluator.result_multi(
                    test_out1[idx], target[idx])
                val_ac2, equ_ac2, _, _ = self.evaluator.result_multi(
                    test_out2[idx], target[idx])
            else:
                raise NotImplementedError
            # NOTE(review): score1/score2 come from student_test once per
            # batch, yet are compared inside the per-sample loop — so every
            # sample in the batch picks the same student. Confirm whether
            # per-sample scores were intended here.
            if score1 > score2:
                val_acc.append(val_ac1)
                equ_acc.append(equ_ac1)
            else:
                val_acc.append(val_ac2)
                equ_acc.append(equ_ac2)
            s1_val_acc.append(val_ac1)
            s1_equ_acc.append(equ_ac1)
            s2_val_acc.append(val_ac2)
            s2_equ_acc.append(equ_ac2)
        return val_acc, equ_acc, s1_val_acc, s1_equ_acc, s2_val_acc, s2_equ_acc
    def _build_soft_target_batch(self, batch):
        """Delegate to the model to build/cache soft targets for *batch*."""
        self.model.init_soft_target(batch)
def _train_epoch(self, module_name):
epoch_start_time = time.time()
loss_total = 0.
for batch_idx, batch in enumerate(self.dataloader.load_data(DatasetType.Train)):
self.batch_idx = batch_idx + 1
self.model.zero_grad()
if module_name == 'teacher_net':
self._teacher_net_train()
batch_loss = self._train_teacher_net_batch(batch)
self._teacher_optimizer_step()
elif module_name == 'student_net':
self._student_net_train()
batch_loss = self._train_student_net_batch(batch)
self._student_optimizer_step()
else:
NotImplementedError("TSN has no {} module".format(module_name))
loss_total += batch_loss
epoch_time_cost = time_since(time.time() - epoch_start_time)
return loss_total, epoch_time_cost
def fit(self):
"""train model.
"""
train_batch_size = self.config["train_batch_size"]
epoch_nums = self.config["epoch_nums"]
self.train_batch_nums = int(
self.dataloader.trainset_nums / train_batch_size) + 1
self.logger.info("start training...")
self.logger.info("start training teacher net...")
for epo in range(self.t_start_epoch, epoch_nums):
self.t_epoch_i = epo + 1
self.model.train()
loss_total, train_time_cost = self._train_epoch(
module_name='teacher_net')
self._teacher_scheduler_step()
self.logger.info("epoch [%3d] teacher net avr loss [%2.8f] | train time %s"
% (self.t_epoch_i, loss_total/self.train_batch_nums, train_time_cost))
if epo % self.test_step == 0 or epo > epoch_nums - 5:
if self.config["k_fold"] or self.config["validset_divide"] is not True:
test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate_teacher(
DatasetType.Test)
self.logger.info("---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
% (test_total, test_equ_ac, test_val_ac, test_time_cost))
if test_val_ac >= self.best_test_value_accuracy:
self.best_test_value_accuracy = test_val_ac
self.best_test_equ_accuracy = test_equ_ac
self._save_model()
self._save_output()
else:
valid_equ_ac, valid_val_ac, valid_total, valid_time_cost = self.evaluate_teacher(
DatasetType.Valid)
self.logger.info("---------- valid total [%d] | valid equ acc [%2.3f] | valid value acc [%2.3f] | valid time %s"
% (valid_total, valid_equ_ac, valid_val_ac, valid_time_cost))
test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate_teacher(
DatasetType.Test)
self.logger.info("---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
% (test_total, test_equ_ac, test_val_ac, test_time_cost))
if valid_val_ac >= self.best_valid_value_accuracy:
self.best_valid_value_accuracy = valid_val_ac
self.best_valid_equ_accuracy = valid_equ_ac
self.best_test_value_accuracy = test_val_ac
self.best_test_equ_accuracy = test_equ_ac
self._save_model()
self._save_output()
if epo % 5 == 0:
self._save_checkpoint()
self._load_model()
self.logger.info("build soft target...")
self.model.eval()
for batch_idx, batch in enumerate(self.dataloader.load_data(DatasetType.Train)):
self.batch_idx = batch_idx + 1
self._build_soft_target_batch(batch)
self.model.init_encoder_mask(self.config['train_batch_size'])
self.logger.info("start training student net...")
self.best_valid_value_accuracy = 0.
self.best_valid_equ_accuracy = 0.
self.best_test_value_accuracy = 0.
self.best_test_equ_accuracy = 0.
for epo in range(self.s_start_epoch, epoch_nums):
self.s_epoch_i = epo + 1
self.model.train()
loss_total, train_time_cost = self._train_epoch(
module_name='student_net')
self._student_scheduler_step()
self.logger.info("epoch [%3d] student net avr loss [%2.8f] | train time %s"
% (self.s_epoch_i, loss_total/self.train_batch_nums, train_time_cost))
if epo % self.test_step == 0 or epo > epoch_nums - 5:
if self.config["k_fold"] or self.config["validset_divide"] is not True:
test_equ_ac, test_val_ac, s1_equ_ac, s1_val_ac, s2_equ_ac, s2_val_ac, test_total, test_time_cost = self.evaluate_student(
DatasetType.Test)
self.logger.info("---------- test total [%d] | student1 equ acc [%2.3f] | student1 value acc [%2.3f] | student2 equ acc [%2.3f] | student2 value acc [%2.3f]"
% (test_total, s1_equ_ac, s1_val_ac, s2_equ_ac, s2_val_ac))
self.logger.info("---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
% (test_total, test_equ_ac, test_val_ac, test_time_cost))
if test_val_ac >= self.best_test_value_accuracy:
self.best_test_value_accuracy = test_val_ac
self.best_test_equ_accuracy = test_equ_ac
self._save_model()
else:
valid_equ_ac, valid_val_ac, s1_equ_ac, s1_val_ac, s2_equ_ac, s2_val_ac, valid_total, valid_time_cost = self.evaluate_student(
DatasetType.Valid)
self.logger.info("---------- valid total [%d] | student1 equ acc [%2.3f] | student1 value acc [%2.3f] | student2 equ acc [%2.3f] | student2 value acc [%2.3f]"
% (test_total, s1_equ_ac, s1_val_ac, s2_equ_ac, s2_val_ac))
self.logger.info("---------- valid total [%d] | valid equ acc [%2.3f] | valid value acc [%2.3f] | valid time %s"
% (valid_total, valid_equ_ac, valid_val_ac, valid_time_cost))
test_equ_ac, test_val_ac, s1_equ_ac, s1_val_ac, s2_equ_ac, s2_val_ac, test_total, test_time_cost = self.evaluate_student(
DatasetType.Test)
self.logger.info("---------- test total [%d] | student1 equ acc [%2.3f] | student1 value acc [%2.3f] | student2 equ acc [%2.3f] | student2 value acc [%2.3f]"
% (test_total, s1_equ_ac, s1_val_ac, s2_equ_ac, s2_val_ac))
self.logger.info("---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
% (test_total, test_equ_ac, test_val_ac, test_time_cost))
if valid_val_ac >= self.best_valid_value_accuracy:
self.best_valid_value_accuracy = valid_val_ac
self.best_valid_equ_accuracy = valid_equ_ac
self.best_test_value_accuracy = test_val_ac
self.best_test_equ_accuracy = test_equ_ac
self._save_model()
if epo % 5 == 0:
self._save_checkpoint()
self.logger.info('''training finished.
best valid result: equation accuracy [%2.3f] | value accuracy [%2.3f]
best test result : equation accuracy [%2.3f] | value accuracy [%2.3f]'''
% (self.best_valid_equ_accuracy, self.best_valid_value_accuracy,
self.best_test_equ_accuracy, self.best_test_value_accuracy))
def evaluate_teacher(self, eval_set):
"""evaluate teacher net.
Args:
eval_set (str): [valid | test], the dataset for evaluation.
Returns:
tuple(float,float,int,str):
equation accuracy, value accuracy, count of evaluated datas, formatted time string of evaluation time.
"""
self.model.eval()
value_ac = 0
equation_ac = 0
eval_total = 0
test_start_time = time.time()
for batch in self.dataloader.load_data(eval_set):
batch_val_ac, batch_equ_ac = self._eval_teacher_net_batch(batch)
value_ac += batch_val_ac.count(True)
equation_ac += batch_equ_ac.count(True)
eval_total += len(batch_val_ac)
test_time_cost = time_since(time.time() - test_start_time)
return equation_ac / eval_total, value_ac / eval_total, eval_total, test_time_cost
def evaluate_student(self, eval_set):
"""evaluate student net.
Args:
eval_set (str): [valid | test], the dataset for evaluation.
Returns:
tuple(float,float,float,float,float,float,int,str):
equation accuracy, value accuracy,
equation accuracy of student net 1, value accuracy of student net 1,
equation accuracy of student net 2, value accuracy of student net 2,
count of evaluated datas, formatted time string of evaluation time.
"""
self.model.eval()
value_ac = 0
equation_ac = 0
s1_value_ac = 0
s1_equation_ac = 0
s2_value_ac = 0
s2_equation_ac = 0
eval_total = 0
test_start_time = time.time()
for batch in self.dataloader.load_data(eval_set):
batch_val_ac, batch_equ_ac, s1_val_ac, s1_equ_ac, s2_val_ac, s2_equ_ac = self._eval_student_net_batch(
batch)
value_ac += batch_val_ac.count(True)
equation_ac += batch_equ_ac.count(True)
s1_value_ac += s1_val_ac.count(True)
s1_equation_ac += s1_equ_ac.count(True)
s2_value_ac += s2_val_ac.count(True)
s2_equation_ac += s2_equ_ac.count(True)
eval_total += len(batch_val_ac)
test_time_cost = time_since(time.time() - test_start_time)
return equation_ac / eval_total, value_ac / eval_total, s1_equation_ac / eval_total, s1_value_ac / eval_total,\
s2_equation_ac / eval_total, s2_value_ac / \
eval_total, eval_total, test_time_cost
def test(self):
"""test model.
"""
self._load_model()
self.model.eval()
value_ac = 0
equation_ac = 0
eval_total = 0
self.output_result = []
test_start_time = time.time()
for batch in self.dataloader.load_data(DatasetType.Test):
batch_val_ac, batch_equ_ac, s1_val_ac, s1_equ_ac, s2_val_ac, s2_equ_ac = self._eval_student_net_batch(
batch)
value_ac += batch_val_ac.count(True)
equation_ac += batch_equ_ac.count(True)
eval_total += len(batch_val_ac)
self.best_test_equ_accuracy = equation_ac/eval_total
self.best_test_value_accuracy = value_ac/eval_total
test_time_cost = time_since(time.time() - test_start_time)
self.logger.info("test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
% (eval_total, equation_ac/eval_total, value_ac/eval_total, test_time_cost))
self._save_output()
class EPTTrainer(AbstractTrainer):
    """ept trainer, used to implement training, testing, parameter searching for deep-learning model EPT.
    example of instantiation:
    >>> trainer = EPTTrainer(config, model, dataloader, evaluator)
    for training:
    >>> trainer.fit()
    for testing:
    >>> trainer.test()
    for parameter searching:
    >>> trainer.param_search()
    """
    def __init__(self, config, model, dataloader, evaluator):
        """
        Args:
            config (config): An instance object of Config, used to record parameter information.
            model (Model): An object of deep-learning model.
            dataloader (Dataloader): dataloader object.
            evaluator (Evaluator): evaluator object.
        expected that config includes these parameters below:
        learning_rate (float): learning rate of model
        train_batch_size (int): the training batch size.
        epoch_nums (int): number of epochs.
        gradient_accumulation_steps (int): gradient accumulation steps.
        epoch_warmup (int): epoch warmup.
        fix_encoder_embedding (bool): whether require gradient of embedding module of encoder
        trained_model_path (str): a path of file which is used to save parameters of best model.
        checkpoint_path (str): a path of file which is used save checkpoint of training progress.
        output_path (str|None): a path of a json file which is used to save test output infomation fo model.
        resume (bool): start training from last checkpoint.
        validset_divide (bool): whether to split validset. if True, the dataset is split to trainset-validset-testset. if False, the dataset is split to trainset-testset.
        test_step (int): the epoch number of training after which conducts the evaluation on test.
        best_folds_accuracy (list|None): when running k-fold cross validation, this keeps the accuracy of folds that already run.
        """
        super().__init__(config, model, dataloader, evaluator)
        # Number of minibatches per epoch, and the number of optimizer steps
        # per epoch after gradient accumulation; _steps_to_go drives the
        # warmup/decay schedule built in _build_optimizer.
        self._minibatch_per_epoch = int(
            self.dataloader.trainset_nums / self.config["train_batch_size"]) + 1
        self._step_per_epoch = int(math.ceil(
            self._minibatch_per_epoch / self.config['gradient_accumulation_steps']))
        self._steps_to_go = self._step_per_epoch * self.config["epoch_nums"]
        self._build_optimizer()
        if config["resume"]:
            self._load_checkpoint()
    def _save_checkpoint(self):
        """Serialize model/optimizer/scheduler state and training progress
        to ``config["checkpoint_path"]`` for later resumption."""
        check_pnt = {
            "model": self.model.state_dict(),
            "optimizer": self.optimizer.state_dict(),
            "scheduler": self.scheduler.state_dict(),
            "start_epoch": self.epoch_i,
            "best_valid_value_accuracy": self.best_valid_value_accuracy,
            "best_valid_equ_accuracy": self.best_valid_equ_accuracy,
            "best_test_value_accuracy": self.best_test_value_accuracy,
            "best_test_equ_accuracy": self.best_test_equ_accuracy,
            "best_folds_accuracy": self.best_folds_accuracy,
            "fold_t": self.config["fold_t"]
        }
        torch.save(check_pnt, self.config["checkpoint_path"])
    def _load_checkpoint(self):
        """Restore model/optimizer/scheduler state and training progress
        from ``config["checkpoint_path"]``."""
        check_pnt = torch.load(
            self.config["checkpoint_path"], map_location=self.config["map_location"])
        # load parameter of model
        self.model.load_state_dict(check_pnt["model"])
        # load parameter of optimizer
        self.optimizer.load_state_dict(check_pnt["optimizer"])
        # load parameter of scheduler
        self.scheduler.load_state_dict(check_pnt["scheduler"])
        self.start_epoch = check_pnt["start_epoch"]
        self.best_valid_value_accuracy = check_pnt["best_valid_value_accuracy"]
        self.best_valid_equ_accuracy = check_pnt["best_valid_equ_accuracy"]
        self.best_test_value_accuracy = check_pnt["best_test_value_accuracy"]
        self.best_test_equ_accuracy = check_pnt["best_test_equ_accuracy"]
        self.best_folds_accuracy = check_pnt["best_folds_accuracy"]
    def _train_batch(self, batch):
        """Forward one batch through the model and return its training loss."""
        batch_loss = self.model.calculate_loss(batch)
        return batch_loss
    def _eval_batch(self, batch):
        """Decode one batch, score every prediction against its target, and
        append a per-sample result record to ``self.output_result``.

        Returns:
            tuple(list, list): per-sample value-accuracy flags and
            per-sample equation-accuracy flags.
        """
        test_out, target_out = self.model.model_test(batch)
        batch_size = len(test_out)
        val_acc = []
        equ_acc = []
        for idx in range(batch_size):
            if self.config["task_type"] == TaskType.SingleEquation:
                val_ac, equ_ac, _, _ = self.evaluator.result(
                    test_out[idx], target_out[idx])
            elif self.config["task_type"] == TaskType.MultiEquation:
                val_ac, equ_ac, _, _ = self.evaluator.result_multi(
                    test_out[idx], target_out[idx])
            else:
                raise NotImplementedError
            val_acc.append(val_ac)
            equ_acc.append(equ_ac)
            result = {
                'id': batch['id'][idx],
                'prediction': ' '.join(test_out[idx]),
                'target': ' '.join(target_out[idx]),
                'number list': batch['num list'][idx],
                'value acc': val_ac,
                'equ acc': equ_ac
            }
            self.output_result.append(result)
        return val_acc, equ_acc
    def _train_epoch(self):
        """Run one training epoch with gradient accumulation.

        Returns:
            tuple: summed batch loss over the epoch and the formatted
            epoch duration string.
        """
        epoch_start_time = time.time()
        loss_total = 0.
        self.all_grad_applied = True
        self.model.train()
        for batch_idx, batch in enumerate(self.dataloader.load_data(DatasetType.Train)):
            self.batch_idx = batch_idx + 1
            self.model.zero_grad()
            batch_loss = self._train_batch(batch)
            loss_total += batch_loss
            self.all_grad_applied = False
            if self.batch_idx % self.config["gradient_accumulation_steps"] == 0:
                if self.config['gradient_clip'] > 0:
                    # If clipping threshold is set, then clip the gradient
                    torch.nn.utils.clip_grad_norm_(
                        self.model.parameters(), self.config['gradient_clip'])
                # if self._config.gradient_normalize:
                #     # If normalizing gradient is set, then normalize the gradient
                #     self._normalize_gradients(*self.model.parameters())
                # Apply optimizer & scheduler
                self.optimizer.step()
                self.scheduler.step()
                self.all_grad_applied = True
            else:
                # NOTE(review): this else belongs to the accumulation-step
                # check above, so pending gradients are applied on every
                # non-multiple batch as well — which makes accumulation
                # effectively a no-op. If the intent was to flush leftover
                # gradients once, this should run after the loop; confirm.
                if not self.all_grad_applied:
                    if self.config['gradient_clip'] > 0:
                        # If clipping threshold is set, then clip the gradient
                        torch.nn.utils.clip_grad_norm_(
                            self.model.parameters(), self.config['gradient_clip'])
                    # if self._config.gradient_normalize:
                    #     # If normalizing gradient is set, then normalize the gradient
                    #     self._normalize_gradients(*self.model.parameters())
                    # Apply optimizer & scheduler
                    self.optimizer.step()
                    self.scheduler.step()
        epoch_time_cost = time_since(time.time() - epoch_start_time)
        return loss_total, epoch_time_cost
    def fit(self):
        """train model.
        """
        train_batch_size = self.config["train_batch_size"]
        epoch_nums = self.config["epoch_nums"]
        self.train_batch_nums = int(
            self.dataloader.trainset_nums / train_batch_size) + 1
        self.logger.info("start training...")
        for epo in range(self.start_epoch, epoch_nums):
            self.epoch_i = epo + 1
            self.model.train()
            loss_total, train_time_cost = self._train_epoch()
            self.logger.info("epoch [%3d] avr loss [%2.8f] | train time %s"
                             % (self.epoch_i, loss_total / self.train_batch_nums, train_time_cost))
            if epo % self.test_step == 0 or epo > epoch_nums - 5:
                # No separate validation set: track the best on test directly.
                if self.config["k_fold"] or self.config["validset_divide"] is not True:
                    test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate(
                        DatasetType.Test)
                    self.logger.info(
                        "---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                        % (test_total, test_equ_ac, test_val_ac, test_time_cost))
                    if test_val_ac >= self.best_test_value_accuracy:
                        self.best_test_value_accuracy = test_val_ac
                        self.best_test_equ_accuracy = test_equ_ac
                        self._save_model()
                        self._save_output()
                else:
                    valid_equ_ac, valid_val_ac, valid_total, valid_time_cost = self.evaluate(
                        DatasetType.Valid)
                    self.logger.info(
                        "---------- valid total [%d] | valid equ acc [%2.3f] | valid value acc [%2.3f] | valid time %s"
                        % (valid_total, valid_equ_ac, valid_val_ac, valid_time_cost))
                    test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate(
                        DatasetType.Test)
                    self.logger.info(
                        "---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                        % (test_total, test_equ_ac, test_val_ac, test_time_cost))
                    # Model selection is driven by validation value accuracy.
                    if valid_val_ac >= self.best_valid_value_accuracy:
                        self.best_valid_value_accuracy = valid_val_ac
                        self.best_valid_equ_accuracy = valid_equ_ac
                        self.best_test_value_accuracy = test_val_ac
                        self.best_test_equ_accuracy = test_equ_ac
                        self._save_model()
                        self._save_output()
            if epo % 5 == 0:
                self._save_checkpoint()
        self.logger.info('''training finished.
                        best valid result: equation accuracy [%2.3f] | value accuracy [%2.3f]
                        best test result : equation accuracy [%2.3f] | value accuracy [%2.3f]'''
                         % (self.best_valid_equ_accuracy, self.best_valid_value_accuracy,
                            self.best_test_equ_accuracy, self.best_test_value_accuracy))
    def evaluate(self, eval_set):
        """evaluate model.

        Args:
            eval_set (str): [valid | test], the dataset for evaluation.

        Returns:
            tuple(float,float,int,str):
                equation accuracy, value accuracy, count of evaluated datas, formatted time string of evaluation time.
        """
        self.model.eval()
        value_ac = 0
        equation_ac = 0
        eval_total = 0
        self.output_result = []
        test_start_time = time.time()
        for batch in self.dataloader.load_data(eval_set):
            batch_val_ac, batch_equ_ac = self._eval_batch(batch)
            value_ac += batch_val_ac.count(True)
            equation_ac += batch_equ_ac.count(True)
            eval_total += len(batch_val_ac)
        test_time_cost = time_since(time.time() - test_start_time)
        return equation_ac / eval_total, value_ac / eval_total, eval_total, test_time_cost
    def test(self):
        """test model.
        """
        self._load_model()
        self.model.eval()
        value_ac = 0
        equation_ac = 0
        eval_total = 0
        self.output_result = []
        test_start_time = time.time()
        for batch in self.dataloader.load_data(DatasetType.Test):
            batch_val_ac, batch_equ_ac = self._eval_batch(batch)
            value_ac += batch_val_ac.count(True)
            equation_ac += batch_equ_ac.count(True)
            eval_total += len(batch_val_ac)
        self.best_test_equ_accuracy = equation_ac/eval_total
        self.best_test_value_accuracy = value_ac/eval_total
        test_time_cost = time_since(time.time() - test_start_time)
        self.logger.info("test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                         % (eval_total, equation_ac/eval_total, value_ac/eval_total, test_time_cost))
        self._save_output()
    def _build_optimizer(self):
        """Build the LAMB optimizer with per-group LR/weight-decay and a
        linear warmup + linear decay LambdaLR scheduler.

        Parameters are grouped by (encoder-type flag, no-weight-decay flag):
        flag 2 = encoder embeddings (LR forced to 0 when
        ``fix_encoder_embedding``), flag 1 = rest of the encoder,
        flag 0 = everything else.
        """
        # Parameter names matching any of these fragments get weight_decay=0.
        no_w_decay = {'bias', 'norm', 'Norm', '_embedding'}
        parameters = [((2 if 'encoder.embeddings' in n else (1 if 'encoder' in n else 0),
                        any(t in n for t in no_w_decay)), p)
                      for n, p in self.model.named_parameters()]
        # groupby requires the input sorted by the same key.
        parameters = groupby(
            sorted(parameters, key=lambda t: t[0]), key=lambda t: t[0])
        optimizer_grouped_parameters = []
        for (encoder_type_flag, is_without_wd), group in parameters:
            group = {'params': [p for _, p in group]}
            if is_without_wd:
                group['weight_decay'] = 0.0
            if encoder_type_flag == 2 and self.config['fix_encoder_embedding']:
                group['lr'] = 0.0
            elif encoder_type_flag == 1:
                group['lr'] = self.config["learning_rate"]
            optimizer_grouped_parameters.append(group)
        from torch_optimizer import Lamb
        from torch.optim.lr_scheduler import LambdaLR
        self.optimizer = Lamb(optimizer_grouped_parameters,
                              lr=self.config["learning_rate"], eps=1e-08, weight_decay=0.0)
        self.warmup_steps = int(self._step_per_epoch *
                                self.config['epoch_warmup'])
        def lr_lambda(current_step):
            # Linear warmup to 1.0, then linear decay to 0.0 at _steps_to_go.
            if current_step < self.warmup_steps:
                return float(current_step) / float(max(1, self.warmup_steps))
            return max(
                0.0, float(self._steps_to_go - current_step) /
                float(max(1, self._steps_to_go - self.warmup_steps))
            )
        if self.warmup_steps >= 0:
            # Build scheduler before restoration
            self.scheduler = LambdaLR(self.optimizer, lr_lambda, -1)
        #self.optimizer = Lamb(self.model.parameters(), lr=self.config["learning_rate"], eps=1e-08, weight_decay=0.0)
    def _normalize_gradients(self, *parameters):
        """
        Normalize gradients (as in NVLAMB optimizer)

        :param parameters: List of parameters whose gradient will be normalized.
        :return: Frobenious Norm before applying normalization.
        """
        parameters = [p for p in parameters if p.grad is not None]
        # Compute total Frobenius norm
        total_norm = 0
        for p in parameters:
            total_norm += p.grad.data.norm(2.0).item() ** 2.0
        total_norm = total_norm ** 0.5
        # Compute normalization constant. Set 1E-12 for minimum value to avoid inf.
        normalizer = 1.0 / max(total_norm, 1e-12)
        for p in parameters:
            p.grad.data.mul_(normalizer)
        return total_norm
    def param_search(self):
        """hyper-parameter search.
        """
        train_batch_size = self.config["train_batch_size"]
        epoch_nums = self.config["epoch_nums"]
        self.train_batch_nums = int(
            self.dataloader.trainset_nums / train_batch_size) + 1
        self.logger.info("start training...")
        for epo in range(self.start_epoch, epoch_nums):
            self.epoch_i = epo + 1
            self.model.train()
            loss_total, train_time_cost = self._train_epoch()
            if epo % self.test_step == 0 or epo > epoch_nums - 5:
                test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate(
                    DatasetType.Test)
                # Report test value accuracy to the ray-tune search driver.
                tune.report(accuracy=test_val_ac)
class PretrainSeq2SeqTrainer(SupervisedTrainer):
    """Seq2seq trainer variant that gives the (pretrained) input embedding
    module its own learning rate while the rest of the model uses the
    regular ``learning_rate``.
    """

    def __init__(self, config, model, dataloader, evaluator):
        """Delegate full initialization to SupervisedTrainer."""
        super().__init__(config, model, dataloader, evaluator)

    def _build_optimizer(self):
        """Build a single Adam optimizer over the model's submodules.

        The input embedder group uses ``embedding_learning_rate``; every
        other group falls back to ``learning_rate``. With ``share_vocab``
        the output embedder is shared with the input one and is not added
        as a separate group.
        """
        if self.config['share_vocab']:
            # Bug fix: these submodules live on self.model, not on the
            # trainer itself (cf. the else-branch below and the other
            # trainers in this module) — the old code raised AttributeError.
            self.optimizer = torch.optim.Adam(
                [
                    {"params": self.model.in_embedder.parameters(
                    ), "lr": self.config["embedding_learning_rate"]},
                    {"params": self.model.encoder.parameters()},
                    {"params": self.model.decoder.parameters()},
                    {"params": self.model.generate_linear.parameters()}
                ],
                lr=self.config["learning_rate"]
            )
        else:
            self.optimizer = torch.optim.Adam(
                [
                    {"params": self.model.in_embedder.parameters(
                    ), "lr": self.config["embedding_learning_rate"]},
                    {"params": self.model.out_embedder.parameters()},
                    {"params": self.model.encoder.parameters()},
                    {"params": self.model.decoder.parameters()},
                    {"params": self.model.generate_linear.parameters()}
                ],
                lr=self.config["learning_rate"]
            )
class PretrainTRNNTrainer(TRNNTrainer):
    """TRNN trainer variant that gives the (pretrained) seq2seq input
    embedding module its own learning rate; the answer module keeps a
    separate SGD optimizer.
    """
    def __init__(self, config, model, dataloader, evaluator):
        """Delegate full initialization to TRNNTrainer."""
        super().__init__(config, model, dataloader, evaluator)
    def _build_optimizer(self):
        """Build Adam for the seq2seq part (embedder group uses
        ``embedding_learning_rate``) and SGD with momentum for the
        answer module; with ``share_vocab`` the output embedder is
        shared and not added as a separate group."""
        if self.config['share_vocab']:
            self.optimizer = torch.optim.Adam(
                [
                    {'params': self.model.seq2seq_in_embedder.parameters(
                    ), 'lr': self.config["embedding_learning_rate"]},
                    {'params': self.model.seq2seq_encoder.parameters()},
                    {'params': self.model.seq2seq_decoder.parameters()},
                    {'params': self.model.seq2seq_gen_linear.parameters()}
                ],
                lr=self.config["seq2seq_learning_rate"]
            )
        else:
            self.optimizer = torch.optim.Adam(
                [
                    {'params': self.model.seq2seq_in_embedder.parameters(
                    ), 'lr': self.config["embedding_learning_rate"]},
                    {'params': self.model.seq2seq_out_embedder.parameters()},
                    {'params': self.model.seq2seq_encoder.parameters()},
                    {'params': self.model.seq2seq_decoder.parameters()},
                    {'params': self.model.seq2seq_gen_linear.parameters()}
                ],
                lr=self.config["seq2seq_learning_rate"]
            )
        self.answer_module_optimizer = torch.optim.SGD(
            [
                {'params': self.model.answer_in_embedder.parameters(
                ), 'lr': self.config["embedding_learning_rate"]},
                {'params': self.model.answer_encoder.parameters()},
                {'params': self.model.answer_rnn.parameters()}
            ],
            lr=self.config["ans_learning_rate"],
            momentum=0.9
        )
class MWPBertTrainer(GTSTrainer):
    def __init__(self, config, model, dataloader, evaluator):
        """Delegate full initialization to the GTSTrainer base class."""
        super().__init__(config, model, dataloader, evaluator)
def _build_optimizer(self):
self.encoder_optimizer = torch.optim.Adam(
self.model.encoder.parameters(),
self.config['encoding_learning_rate'],
weight_decay=self.config["weight_decay"]
)
self.decoder_optimizer = torch.optim.Adam(
self.model.decoder.parameters(),
self.config["learning_rate"],
weight_decay=self.config["weight_decay"]
)
self.node_generater_optimizer = torch.optim.Adam(
self.model.node_generater.parameters(),
self.config["learning_rate"],
weight_decay=self.config["weight_decay"]
)
self.merge_optimizer = torch.optim.Adam(
self.model.merge.parameters(),
self.config["learning_rate"],
weight_decay=self.config["weight_decay"]
)
# scheduler
self.encoder_scheduler = torch.optim.lr_scheduler.StepLR(self.encoder_optimizer,
step_size=self.config["step_size"], gamma=0.5)
self.decoder_scheduler = torch.optim.lr_scheduler.StepLR(self.decoder_optimizer,
step_size=self.config["step_size"], gamma=0.5)
self.node_generater_scheduler = torch.optim.lr_scheduler.StepLR(self.node_generater_optimizer,
step_size=self.config["step_size"], gamma=0.5)
self.merge_scheduler = torch.optim.lr_scheduler.StepLR(self.merge_optimizer, step_size=self.config["step_size"],
gamma=0.5)
def _save_checkpoint(self):
check_pnt = {
"model": self.model.state_dict(),
"encoder_optimizer": self.encoder_optimizer.state_dict(),
"decoder_optimizer": self.decoder_optimizer.state_dict(),
"generate_optimizer": self.node_generater_optimizer.state_dict(),
"merge_optimizer": self.merge_optimizer.state_dict(),
"encoder_scheduler": self.encoder_scheduler.state_dict(),
"decoder_scheduler": self.decoder_scheduler.state_dict(),
"generate_scheduler": self.node_generater_scheduler.state_dict(),
"merge_scheduler": self.merge_scheduler.state_dict(),
"start_epoch": self.epoch_i,
"best_valid_value_accuracy": self.best_valid_value_accuracy,
"best_valid_equ_accuracy": self.best_valid_equ_accuracy,
"best_test_value_accuracy": self.best_test_value_accuracy,
"best_test_equ_accuracy": self.best_test_equ_accuracy,
"best_folds_accuracy": self.best_folds_accuracy,
"fold_t": self.config["fold_t"]
}
torch.save(check_pnt, self.config["checkpoint_path"])
def _load_checkpoint(self):
check_pnt = torch.load(
self.config["checkpoint_path"], map_location=self.config["map_location"])
# load parameter of model
self.model.load_state_dict(check_pnt["model"])
# load parameter of optimizer
self.encoder_optimizer.load_state_dict(check_pnt["encoder_optimizer"])
self.decoder_optimizer.load_state_dict(check_pnt["decoder_optimizer"])
self.node_generater_optimizer.load_state_dict(
check_pnt["generate_optimizer"])
self.merge_optimizer.load_state_dict(check_pnt["merge_optimizer"])
# load parameter of scheduler
self.encoder_scheduler.load_state_dict(check_pnt["encoder_scheduler"])
self.decoder_scheduler.load_state_dict(check_pnt["decoder_scheduler"])
self.node_generater_scheduler.load_state_dict(
check_pnt["generate_scheduler"])
self.merge_scheduler.load_state_dict(check_pnt["merge_scheduler"])
# other parameter
self.start_epoch = check_pnt["start_epoch"]
self.best_valid_value_accuracy = check_pnt["best_valid_value_accuracy"]
self.best_valid_equ_accuracy = check_pnt["best_valid_equ_accuracy"]
self.best_test_value_accuracy = check_pnt["best_test_value_accuracy"]
self.best_test_equ_accuracy = check_pnt["best_test_equ_accuracy"]
self.best_folds_accuracy = check_pnt["best_folds_accuracy"]
def _scheduler_step(self):
self.encoder_scheduler.step()
self.decoder_scheduler.step()
self.node_generater_scheduler.step()
self.merge_scheduler.step()
def _optimizer_step(self):
self.encoder_optimizer.step()
self.decoder_optimizer.step()
self.node_generater_optimizer.step()
self.merge_optimizer.step()
|
from mwptoolkit.utils.utils import time_since, write_json_data
from mwptoolkit.utils.enum_type import TaskType, DatasetType, SpecialTokens
from mwptoolkit.trainer.template_trainer import TemplateTrainer
from mwptoolkit.trainer.abstract_trainer import AbstractTrainer
from ray import tune
import torch
from itertools import groupby
import math
import time
# -*- encoding: utf-8 -*-
# @Author: <NAME>
# @Time: 2021/08/29 22:14:01
# @File: supervised_trainer.py
class SupervisedTrainer(AbstractTrainer):
    """supervised trainer, used to implement training, testing, parameter searching in supervised learning.

    example of instantiation:

        >>> trainer = SupervisedTrainer(config, model, dataloader, evaluator)

        for training:

            >>> trainer.fit()

        for testing:

            >>> trainer.test()

        for parameter searching:

            >>> trainer.param_search()
    """

    def __init__(self, config, model, dataloader, evaluator):
        """
        Args:
            config (config): An instance object of Config, used to record parameter information.
            model (Model): An object of deep-learning model.
            dataloader (Dataloader): dataloader object.
            evaluator (Evaluator): evaluator object.

        Expected that config includes these parameters below:

            learning_rate (float): learning rate of model
            train_batch_size (int): the training batch size.
            epoch_nums (int): number of epochs.
            trained_model_path (str): a path of file which is used to save parameters of best model.
            checkpoint_path (str): a path of file which is used save checkpoint of training progress.
            output_path (str|None): a path of a json file which is used to save test output information of model.
            resume (bool): start training from last checkpoint.
            validset_divide (bool): whether to split validset. if True, the dataset is split to
                trainset-validset-testset. if False, the dataset is split to trainset-testset.
            test_step (int): the epoch number of training after which conducts the evaluation on test.
            best_folds_accuracy (list|None): when running k-fold cross validation, this keeps the
                accuracy of folds that already run.
        """
        super().__init__(config, model, dataloader, evaluator)
        self._build_optimizer()
        if config["resume"]:
            self._load_checkpoint()

    def _build_optimizer(self):
        """Build a single Adam optimizer over all model parameters."""
        self.optimizer = torch.optim.Adam(
            self.model.parameters(), lr=self.config["learning_rate"])

    def _save_checkpoint(self):
        """Save model/optimizer state and training progress to ``checkpoint_path``."""
        check_pnt = {
            "model": self.model.state_dict(),
            "optimizer": self.optimizer.state_dict(),
            "start_epoch": self.epoch_i,
            "best_valid_value_accuracy": self.best_valid_value_accuracy,
            "best_valid_equ_accuracy": self.best_valid_equ_accuracy,
            "best_test_value_accuracy": self.best_test_value_accuracy,
            "best_test_equ_accuracy": self.best_test_equ_accuracy,
            "best_folds_accuracy": self.best_folds_accuracy,
            "fold_t": self.config["fold_t"]
        }
        torch.save(check_pnt, self.config["checkpoint_path"])

    def _load_checkpoint(self):
        """Restore model/optimizer state and training progress from ``checkpoint_path``."""
        check_pnt = torch.load(
            self.config["checkpoint_path"], map_location=self.config["map_location"])
        # load parameter of model
        self.model.load_state_dict(check_pnt["model"])
        # load parameter of optimizer
        self.optimizer.load_state_dict(check_pnt["optimizer"])
        # training-progress bookkeeping
        self.start_epoch = check_pnt["start_epoch"]
        self.best_valid_value_accuracy = check_pnt["best_valid_value_accuracy"]
        self.best_valid_equ_accuracy = check_pnt["best_valid_equ_accuracy"]
        self.best_test_value_accuracy = check_pnt["best_test_value_accuracy"]
        self.best_test_equ_accuracy = check_pnt["best_test_equ_accuracy"]
        self.best_folds_accuracy = check_pnt["best_folds_accuracy"]

    def _idx2word_2idx(self, batch_equation):
        """Re-index a batch of equations from input-vocab indices to output-vocab indices."""
        batch_size, length = batch_equation.size()
        batch_equation_ = []
        for b in range(batch_size):
            equation = []
            for idx in range(length):
                equation.append(self.dataloader.dataset.out_symbol2idx[
                    self.dataloader.dataset.in_idx2word[
                        batch_equation[b, idx]]])
            batch_equation_.append(equation)
        batch_equation_ = torch.LongTensor(
            batch_equation_).to(self.config["device"])
        return batch_equation_

    def _train_batch(self, batch):
        """Forward one batch and return its training loss.

        NOTE(review): there is no explicit backward() in this trainer, so
        gradients are presumably produced inside calculate_loss -- confirm
        in the model implementations.
        """
        batch_loss = self.model.calculate_loss(batch)
        return batch_loss

    def _eval_batch(self, batch):
        """Evaluate one batch and return per-sample (value, equation) accuracy lists.

        Also prints, for every mispredicted sample, its question text and the
        predicted/target equations converted from prefix (Polish) to infix form.
        """
        questions, test_out, target = self.model.model_test(batch)

        def polish_to_infix(equation):
            # Fold a prefix (Polish) token list into an infix string: keep a
            # stack and reduce "op n2 n1" triples whenever the top two items
            # are both operands.
            operators = ['+', '-', '*', '/']
            expressions = []
            try:
                for symbol in equation:
                    expressions.append(symbol)
                    while len(expressions) >= 2:
                        n1 = expressions.pop()
                        n2 = expressions.pop()
                        if (n1 in operators) or (n2 in operators):
                            # fewer than two operands on top; push back, read on
                            expressions.append(n2)
                            expressions.append(n1)
                            break
                        op = expressions.pop()
                        expressions.append('(' + n2 + op + n1 + ')')
                return expressions.pop()
            except IndexError:
                # Model output can be a malformed prefix sequence; fall back
                # to the raw tokens instead of crashing the evaluation loop.
                return ' '.join(equation)

        batch_size = len(test_out)
        val_acc = []
        equ_acc = []
        failed = []  # (sample index, question text) for mispredicted samples
        for idx in range(batch_size):
            if self.config["task_type"] == TaskType.SingleEquation:
                val_ac, equ_ac, _, _ = self.evaluator.result(
                    test_out[idx], target[idx])
            elif self.config["task_type"] == TaskType.MultiEquation:
                val_ac, equ_ac, _, _ = self.evaluator.result_multi(
                    test_out[idx], target[idx])
            else:
                raise NotImplementedError
            val_acc.append(val_ac)
            equ_acc.append(equ_ac)
            if not val_ac:
                question_in_words = ' '.join(
                    self.dataloader.dataset.in_idx2word[g] for g in questions[idx])
                failed.append((idx, question_in_words))
            result = {
                'id': batch['id'][idx],
                'prediction': ' '.join(test_out[idx]),
                'target': ' '.join(target[idx]),
                'number list': batch['num list'][idx],
                'value acc': val_ac,
                'equ acc': equ_ac
            }
            self.output_result.append(result)
        # BUG FIX: the old report loop gated on the loop-stale ``val_ac`` and
        # indexed ``lst_questions`` (which only held failed samples) with the
        # full batch index; iterate the collected failures instead.
        for i, question in failed:
            print("Question:", question)
            print("Test output", polish_to_infix(test_out[i]))
            print("Target", polish_to_infix(target[i]))
            print("Val Acc", val_acc[i])
            print("Equation Acc", equ_acc[i])
            print('')
        print('')
        return val_acc, equ_acc

    def _train_epoch(self):
        """Run one epoch over the train split; return (total loss, formatted time)."""
        epoch_start_time = time.time()
        loss_total = 0.
        self.model.train()
        for batch_idx, batch in enumerate(self.dataloader.load_data(DatasetType.Train)):
            self.batch_idx = batch_idx + 1
            self.model.zero_grad()
            batch_loss = self._train_batch(batch)
            loss_total += batch_loss
            self.optimizer.step()
        epoch_time_cost = time_since(time.time() - epoch_start_time)
        return loss_total, epoch_time_cost

    def fit(self):
        """train model.
        """
        train_batch_size = self.config["train_batch_size"]
        epoch_nums = self.config["epoch_nums"]
        self.train_batch_nums = int(
            self.dataloader.trainset_nums / train_batch_size) + 1
        self.logger.info("start training...")
        for epo in range(self.start_epoch, epoch_nums):
            self.epoch_i = epo + 1
            self.model.train()
            loss_total, train_time_cost = self._train_epoch()
            self.logger.info("epoch [%3d] avr loss [%2.8f] | train time %s"
                             % (self.epoch_i, loss_total/self.train_batch_nums, train_time_cost))
            # evaluate periodically and always during the last few epochs
            if epo % self.test_step == 0 or epo > epoch_nums - 5:
                if self.config["k_fold"] or self.config["validset_divide"] is not True:
                    # no validation split: track the best model on the test set
                    test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate(
                        DatasetType.Test)
                    self.logger.info("---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                                     % (test_total, test_equ_ac, test_val_ac, test_time_cost))
                    if test_val_ac >= self.best_test_value_accuracy:
                        self.best_test_value_accuracy = test_val_ac
                        self.best_test_equ_accuracy = test_equ_ac
                        self._save_model()
                        self._save_output()
                else:
                    # select the best model on the validation split
                    valid_equ_ac, valid_val_ac, valid_total, valid_time_cost = self.evaluate(
                        DatasetType.Valid)
                    self.logger.info("---------- valid total [%d] | valid equ acc [%2.3f] | valid value acc [%2.3f] | valid time %s"
                                     % (valid_total, valid_equ_ac, valid_val_ac, valid_time_cost))
                    test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate(
                        DatasetType.Test)
                    self.logger.info("---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                                     % (test_total, test_equ_ac, test_val_ac, test_time_cost))
                    if valid_val_ac >= self.best_valid_value_accuracy:
                        self.best_valid_value_accuracy = valid_val_ac
                        self.best_valid_equ_accuracy = valid_equ_ac
                        self.best_test_value_accuracy = test_val_ac
                        self.best_test_equ_accuracy = test_equ_ac
                        self._save_model()
                        self._save_output()
                    self._save_checkpoint()
            if epo % 5 == 0:
                self._save_checkpoint()
        self.logger.info('''training finished.
            best valid result: equation accuracy [%2.3f] | value accuracy [%2.3f]
            best test result : equation accuracy [%2.3f] | value accuracy [%2.3f]'''
                         % (self.best_valid_equ_accuracy, self.best_valid_value_accuracy,
                            self.best_test_equ_accuracy, self.best_test_value_accuracy))

    def evaluate(self, eval_set):
        """evaluate model.

        Args:
            eval_set (str): [valid | test], the dataset for evaluation.

        Returns:
            tuple(float,float,int,str):
            equation accuracy, value accuracy, count of evaluated datas, formatted time string of evaluation time.
        """
        self.model.eval()
        value_ac = 0
        equation_ac = 0
        eval_total = 0
        self.output_result = []
        test_start_time = time.time()
        for batch in self.dataloader.load_data(eval_set):
            batch_val_ac, batch_equ_ac = self._eval_batch(batch)
            value_ac += batch_val_ac.count(True)
            equation_ac += batch_equ_ac.count(True)
            eval_total += len(batch_val_ac)
        test_time_cost = time_since(time.time() - test_start_time)
        return equation_ac / eval_total, value_ac / eval_total, eval_total, test_time_cost

    def test(self):
        """test model.
        """
        self._load_model()
        self.model.eval()
        value_ac = 0
        equation_ac = 0
        eval_total = 0
        self.output_result = []
        test_start_time = time.time()
        for batch in self.dataloader.load_data(DatasetType.Test):
            batch_val_ac, batch_equ_ac = self._eval_batch(batch)
            value_ac += batch_val_ac.count(True)
            equation_ac += batch_equ_ac.count(True)
            eval_total += len(batch_val_ac)
        self.best_test_equ_accuracy = equation_ac/eval_total
        self.best_test_value_accuracy = value_ac/eval_total
        test_time_cost = time_since(time.time() - test_start_time)
        self.logger.info("test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                         % (eval_total, equation_ac/eval_total, value_ac/eval_total, test_time_cost))
        self._save_output()

    def param_search(self):
        """hyper-parameter search.
        """
        train_batch_size = self.config["train_batch_size"]
        epoch_nums = self.config["epoch_nums"]
        self.train_batch_nums = int(
            self.dataloader.trainset_nums / train_batch_size) + 1
        self.logger.info("start training...")
        for epo in range(self.start_epoch, epoch_nums):
            self.epoch_i = epo + 1
            self.model.train()
            loss_total, train_time_cost = self._train_epoch()
            if epo % self.test_step == 0 or epo > epoch_nums - 5:
                test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate(
                    DatasetType.Test)
                # report test value accuracy to ray tune for the search
                tune.report(accuracy=test_val_ac)
class GTSTrainer(AbstractTrainer):
    """gts trainer, used to implement training, testing, parameter searching for deep-learning model GTS.

    example of instantiation:

        >>> trainer = GTSTrainer(config, model, dataloader, evaluator)

        for training:

            >>> trainer.fit()

        for testing:

            >>> trainer.test()

        for parameter searching:

            >>> trainer.param_search()
    """

    def __init__(self, config, model, dataloader, evaluator):
        """
        Args:
            config (config): An instance object of Config, used to record parameter information.
            model (Model): An object of deep-learning model.
            dataloader (Dataloader): dataloader object.
            evaluator (Evaluator): evaluator object.

        Expected that config includes these parameters below:

            learning_rate (float): learning rate of model.
            embedding_learning_rate (float): learning rate of embedding module.
            train_batch_size (int): the training batch size.
            step_size (int): step_size of scheduler.
            epoch_nums (int): number of epochs.
            trained_model_path (str): a path of file which is used to save parameters of best model.
            checkpoint_path (str): a path of file which is used save checkpoint of training progress.
            output_path (str|None): a path of a json file which is used to save test output information of model.
            resume (bool): start training from last checkpoint.
            validset_divide (bool): whether to split validset. if True, the dataset is split to
                trainset-validset-testset. if False, the dataset is split to trainset-testset.
            test_step (int): the epoch number of training after which conducts the evaluation on test.
            best_folds_accuracy (list|None): when running k-fold cross validation, this keeps the
                accuracy of folds that already run.
        """
        super().__init__(config, model, dataloader, evaluator)
        self._build_optimizer()
        # BUG FIX: the documented ``resume`` switch was silently ignored here
        # (parity with SupervisedTrainer, which honors it).
        if config["resume"]:
            self._load_checkpoint()

    def _build_optimizer(self):
        """Create an Adam optimizer plus a halving StepLR scheduler per sub-module.

        The embedder gets its own learning rate (``embedding_learning_rate``).
        """
        # optimizers
        self.embedder_optimizer = torch.optim.Adam(self.model.embedder.parameters(
        ), self.config["embedding_learning_rate"], weight_decay=self.config["weight_decay"])
        self.encoder_optimizer = torch.optim.Adam(self.model.encoder.parameters(
        ), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
        self.decoder_optimizer = torch.optim.Adam(self.model.decoder.parameters(
        ), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
        self.node_generater_optimizer = torch.optim.Adam(self.model.node_generater.parameters(
        ), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
        self.merge_optimizer = torch.optim.Adam(self.model.merge.parameters(
        ), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
        # schedulers
        self.embedder_scheduler = torch.optim.lr_scheduler.StepLR(
            self.embedder_optimizer, step_size=self.config["step_size"], gamma=0.5)
        self.encoder_scheduler = torch.optim.lr_scheduler.StepLR(
            self.encoder_optimizer, step_size=self.config["step_size"], gamma=0.5)
        self.decoder_scheduler = torch.optim.lr_scheduler.StepLR(
            self.decoder_optimizer, step_size=self.config["step_size"], gamma=0.5)
        self.node_generater_scheduler = torch.optim.lr_scheduler.StepLR(
            self.node_generater_optimizer, step_size=self.config["step_size"], gamma=0.5)
        self.merge_scheduler = torch.optim.lr_scheduler.StepLR(
            self.merge_optimizer, step_size=self.config["step_size"], gamma=0.5)

    def _save_checkpoint(self):
        """Snapshot model weights, all optimizers/schedulers and training progress."""
        check_pnt = {
            "model": self.model.state_dict(),
            "embedder_optimizer": self.embedder_optimizer.state_dict(),
            "encoder_optimizer": self.encoder_optimizer.state_dict(),
            "decoder_optimizer": self.decoder_optimizer.state_dict(),
            "generate_optimizer": self.node_generater_optimizer.state_dict(),
            "merge_optimizer": self.merge_optimizer.state_dict(),
            "embedder_scheduler": self.embedder_scheduler.state_dict(),
            "encoder_scheduler": self.encoder_scheduler.state_dict(),
            "decoder_scheduler": self.decoder_scheduler.state_dict(),
            "generate_scheduler": self.node_generater_scheduler.state_dict(),
            "merge_scheduler": self.merge_scheduler.state_dict(),
            "start_epoch": self.epoch_i,
            "best_valid_value_accuracy": self.best_valid_value_accuracy,
            "best_valid_equ_accuracy": self.best_valid_equ_accuracy,
            "best_test_value_accuracy": self.best_test_value_accuracy,
            "best_test_equ_accuracy": self.best_test_equ_accuracy,
            "best_folds_accuracy": self.best_folds_accuracy,
            "fold_t": self.config["fold_t"]
        }
        torch.save(check_pnt, self.config["checkpoint_path"])

    def _load_checkpoint(self):
        """Restore model, optimizer/scheduler states and training progress."""
        check_pnt = torch.load(
            self.config["checkpoint_path"], map_location=self.config["map_location"])
        # load parameter of model
        self.model.load_state_dict(check_pnt["model"])
        # load parameter of optimizer
        self.embedder_optimizer.load_state_dict(
            check_pnt["embedder_optimizer"])
        self.encoder_optimizer.load_state_dict(check_pnt["encoder_optimizer"])
        self.decoder_optimizer.load_state_dict(check_pnt["decoder_optimizer"])
        self.node_generater_optimizer.load_state_dict(
            check_pnt["generate_optimizer"])
        self.merge_optimizer.load_state_dict(check_pnt["merge_optimizer"])
        # load parameter of scheduler
        self.embedder_scheduler.load_state_dict(
            check_pnt['embedder_scheduler'])
        self.encoder_scheduler.load_state_dict(check_pnt["encoder_scheduler"])
        self.decoder_scheduler.load_state_dict(check_pnt["decoder_scheduler"])
        self.node_generater_scheduler.load_state_dict(
            check_pnt["generate_scheduler"])
        self.merge_scheduler.load_state_dict(check_pnt["merge_scheduler"])
        # training-progress bookkeeping
        self.start_epoch = check_pnt["start_epoch"]
        self.best_valid_value_accuracy = check_pnt["best_valid_value_accuracy"]
        self.best_valid_equ_accuracy = check_pnt["best_valid_equ_accuracy"]
        self.best_test_value_accuracy = check_pnt["best_test_value_accuracy"]
        self.best_test_equ_accuracy = check_pnt["best_test_equ_accuracy"]
        self.best_folds_accuracy = check_pnt["best_folds_accuracy"]

    def _scheduler_step(self):
        """Advance every per-module learning-rate scheduler by one epoch."""
        self.embedder_scheduler.step()
        self.encoder_scheduler.step()
        self.decoder_scheduler.step()
        self.node_generater_scheduler.step()
        self.merge_scheduler.step()

    def _optimizer_step(self):
        """Apply accumulated gradients through every per-module optimizer."""
        self.embedder_optimizer.step()
        self.encoder_optimizer.step()
        self.decoder_optimizer.step()
        self.node_generater_optimizer.step()
        self.merge_optimizer.step()

    def _train_batch(self, batch):
        """Forward one batch and return its training loss.

        NOTE(review): there is no explicit backward() in this trainer, so
        gradients are presumably produced inside calculate_loss -- confirm
        in the model implementations.
        """
        batch_loss = self.model.calculate_loss(batch)
        return batch_loss

    def _eval_batch(self, batch):
        """Evaluate one batch and return per-sample (value, equation) accuracy lists."""
        test_out, target = self.model.model_test(batch)
        batch_size = len(test_out)
        val_acc = []
        equ_acc = []
        for idx in range(batch_size):
            if self.config["task_type"] == TaskType.SingleEquation:
                val_ac, equ_ac, _, _ = self.evaluator.result(
                    test_out[idx], target[idx])
            elif self.config["task_type"] == TaskType.MultiEquation:
                val_ac, equ_ac, _, _ = self.evaluator.result_multi(
                    test_out[idx], target[idx])
            else:
                raise NotImplementedError
            val_acc.append(val_ac)
            equ_acc.append(equ_ac)
            result = {
                'id': batch['id'][idx],
                'prediction': ' '.join(test_out[idx]),
                'target': ' '.join(target[idx]),
                'number list': batch['num list'][idx],
                'value acc': val_ac,
                'equ acc': equ_ac
            }
            self.output_result.append(result)
        return val_acc, equ_acc

    def _train_epoch(self):
        """Run one epoch over the train split; return (total loss, formatted time)."""
        epoch_start_time = time.time()
        loss_total = 0.
        self.model.train()
        for batch_idx, batch in enumerate(self.dataloader.load_data(DatasetType.Train)):
            self.batch_idx = batch_idx + 1
            self.model.zero_grad()
            batch_loss = self._train_batch(batch)
            loss_total += batch_loss
            self._optimizer_step()
        epoch_time_cost = time_since(time.time() - epoch_start_time)
        return loss_total, epoch_time_cost

    def fit(self):
        """train model.
        """
        train_batch_size = self.config["train_batch_size"]
        epoch_nums = self.config["epoch_nums"]
        self.train_batch_nums = int(
            self.dataloader.trainset_nums / train_batch_size) + 1
        self.logger.info("start training...")
        for epo in range(self.start_epoch, epoch_nums):
            self.epoch_i = epo + 1
            self.model.train()
            loss_total, train_time_cost = self._train_epoch()
            self._scheduler_step()
            self.logger.info("epoch [%3d] avr loss [%2.8f] | train time %s"
                             % (self.epoch_i, loss_total/self.train_batch_nums, train_time_cost))
            # evaluate periodically and always during the last few epochs
            if epo % self.test_step == 0 or epo > epoch_nums - 5:
                if self.config["k_fold"] or self.config["validset_divide"] is not True:
                    # no validation split: track the best model on the test set
                    test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate(
                        DatasetType.Test)
                    self.logger.info("---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                                     % (test_total, test_equ_ac, test_val_ac, test_time_cost))
                    if test_val_ac >= self.best_test_value_accuracy:
                        self.best_test_value_accuracy = test_val_ac
                        self.best_test_equ_accuracy = test_equ_ac
                        self._save_model()
                        self._save_output()
                else:
                    # select the best model on the validation split
                    valid_equ_ac, valid_val_ac, valid_total, valid_time_cost = self.evaluate(
                        DatasetType.Valid)
                    self.logger.info("---------- valid total [%d] | valid equ acc [%2.3f] | valid value acc [%2.3f] | valid time %s"
                                     % (valid_total, valid_equ_ac, valid_val_ac, valid_time_cost))
                    test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate(
                        DatasetType.Test)
                    self.logger.info("---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                                     % (test_total, test_equ_ac, test_val_ac, test_time_cost))
                    if valid_val_ac >= self.best_valid_value_accuracy:
                        self.best_valid_value_accuracy = valid_val_ac
                        self.best_valid_equ_accuracy = valid_equ_ac
                        self.best_test_value_accuracy = test_val_ac
                        self.best_test_equ_accuracy = test_equ_ac
                        self._save_model()
                        self._save_output()
            # checkpoint after every epoch (was ``if epo % 1 == 0``, always true)
            self._save_checkpoint()
        self.logger.info('''training finished.
            best valid result: equation accuracy [%2.3f] | value accuracy [%2.3f]
            best test result : equation accuracy [%2.3f] | value accuracy [%2.3f]'''
                         % (self.best_valid_equ_accuracy, self.best_valid_value_accuracy,
                            self.best_test_equ_accuracy, self.best_test_value_accuracy))

    def evaluate(self, eval_set):
        """evaluate model.

        Args:
            eval_set (str): [valid | test], the dataset for evaluation.

        Returns:
            tuple(float,float,int,str):
            equation accuracy, value accuracy, count of evaluated datas, formatted time string of evaluation time.
        """
        self.model.eval()
        value_ac = 0
        equation_ac = 0
        eval_total = 0
        self.output_result = []
        test_start_time = time.time()
        for batch in self.dataloader.load_data(eval_set):
            batch_val_ac, batch_equ_ac = self._eval_batch(batch)
            value_ac += batch_val_ac.count(True)
            equation_ac += batch_equ_ac.count(True)
            eval_total += len(batch_val_ac)
        test_time_cost = time_since(time.time() - test_start_time)
        return equation_ac / eval_total, value_ac / eval_total, eval_total, test_time_cost

    def test(self):
        """test model.
        """
        self._load_model()
        self.model.eval()
        value_ac = 0
        equation_ac = 0
        eval_total = 0
        self.output_result = []
        test_start_time = time.time()
        for batch in self.dataloader.load_data(DatasetType.Test):
            batch_val_ac, batch_equ_ac = self._eval_batch(batch)
            value_ac += batch_val_ac.count(True)
            equation_ac += batch_equ_ac.count(True)
            eval_total += len(batch_val_ac)
        self.best_test_equ_accuracy = equation_ac/eval_total
        self.best_test_value_accuracy = value_ac/eval_total
        test_time_cost = time_since(time.time() - test_start_time)
        self.logger.info("test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                         % (eval_total, equation_ac/eval_total, value_ac/eval_total, test_time_cost))
        self._save_output()

    def param_search(self):
        """hyper-parameter search.
        """
        train_batch_size = self.config["train_batch_size"]
        epoch_nums = self.config["epoch_nums"]
        self.train_batch_nums = int(
            self.dataloader.trainset_nums / train_batch_size) + 1
        self.logger.info("start training...")
        for epo in range(self.start_epoch, epoch_nums):
            self.epoch_i = epo + 1
            self.model.train()
            loss_total, train_time_cost = self._train_epoch()
            self._scheduler_step()
            if epo % self.test_step == 0 or epo > epoch_nums - 5:
                test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate(
                    DatasetType.Test)
                # report test value accuracy to ray tune for the search
                tune.report(accuracy=test_val_ac)
class MultiEncDecTrainer(GTSTrainer):
"""multiencdec trainer, used to implement training, testing, parameter searching for deep-learning model MultiE&D.
example of instantiation:
>>> trainer = MultiEncDecTrainer(config, model, dataloader, evaluator)
for training:
>>> trainer.fit()
for testing:
>>> trainer.test()
for parameter searching:
>>> trainer.param_search()
"""
    def __init__(self, config, model, dataloader, evaluator):
        """
        Args:
            config (config): An instance object of Config, used to record parameter information.
            model (Model): An object of deep-learning model.
            dataloader (Dataloader): dataloader object.
            evaluator (Evaluator): evaluator object.

        Expected that config includes these parameters below:

            learning_rate (float): learning rate of model.
            train_batch_size (int): the training batch size.
            step_size (int): step_size of scheduler.
            epoch_nums (int): number of epochs.
            trained_model_path (str): a path of file which is used to save parameters of best model.
            checkpoint_path (str): a path of file which is used save checkpoint of training progress.
            output_path (str|None): a path of a json file which is used to save test output information of model.
            resume (bool): start training from last checkpoint.
            validset_divide (bool): whether to split validset. if True, the dataset is split to
                trainset-validset-testset. if False, the dataset is split to trainset-testset.
            test_step (int): the epoch number of training after which conducts the evaluation on test.
            best_folds_accuracy (list|None): when running k-fold cross validation, this keeps the
                accuracy of folds that already run.
        """
        # the parent (GTSTrainer) __init__ builds the per-module optimizers
        # via the overridden _build_optimizer below
        super().__init__(config, model, dataloader, evaluator)
def _build_optimizer(self):
# optimizer
# self.embedder_optimizer = torch.optim.Adam(self.model.embedder.parameters(), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.encoder_optimizer = torch.optim.Adam(self.model.encoder.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.numencoder_optimizer = torch.optim.Adam(self.model.numencoder.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.predict_optimizer = torch.optim.Adam(self.model.predict.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.decoder_optimizer = torch.optim.Adam(self.model.decoder.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.generate_optimizer = torch.optim.Adam(self.model.generate.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.merge_optimizer = torch.optim.Adam(self.model.merge.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
#self.optimizer = torch.optim.Adam(self.model.parameters(), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
# scheduler
#self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=self.config["step_size"], gamma=0.5)
self.encoder_scheduler = torch.optim.lr_scheduler.StepLR(
self.encoder_optimizer, step_size=self.config["step_size"], gamma=0.5)
self.numencoder_scheduler = torch.optim.lr_scheduler.StepLR(
self.numencoder_optimizer, step_size=self.config["step_size"], gamma=0.5)
self.predict_scheduler = torch.optim.lr_scheduler.StepLR(
self.predict_optimizer, step_size=self.config["step_size"], gamma=0.5)
self.decoder_scheduler = torch.optim.lr_scheduler.StepLR(
self.decoder_optimizer, step_size=self.config["step_size"], gamma=0.5)
self.generate_scheduler = torch.optim.lr_scheduler.StepLR(
self.generate_optimizer, step_size=self.config["step_size"], gamma=0.5)
self.merge_scheduler = torch.optim.lr_scheduler.StepLR(
self.merge_optimizer, step_size=self.config["step_size"], gamma=0.5)
def _load_checkpoint(self):
check_pnt = torch.load(
self.config["checkpoint_path"], map_location=self.config["map_location"])
# load parameter of model
self.model.load_state_dict(check_pnt["model"])
# load parameter of optimizer
# self.optimizer.load_state_dict(check_pnt['optimizer'])
self.numencoder_optimizer.load_state_dict(
check_pnt["numencoder_optimizer"])
self.encoder_optimizer.load_state_dict(check_pnt["encoder_optimizer"])
self.predict_optimizer.load_state_dict(check_pnt['predict_optimizer'])
self.decoder_optimizer.load_state_dict(check_pnt["decoder_optimizer"])
self.generate_optimizer.load_state_dict(
check_pnt["generate_optimizer"])
self.merge_optimizer.load_state_dict(check_pnt["merge_optimizer"])
# load parameter of scheduler
# self.scheduler.load_state_dict(check_pnt['scheduler'])
self.encoder_scheduler.load_state_dict(check_pnt["encoder_scheduler"])
self.numencoder_scheduler.load_state_dict(
check_pnt["numencoder_scheduler"])
self.predict_scheduler.load_state_dict(check_pnt['predict_scheduler'])
self.decoder_scheduler.load_state_dict(check_pnt["decoder_scheduler"])
self.node_generater_scheduler.load_state_dict(
check_pnt["generate_scheduler"])
self.merge_scheduler.load_state_dict(check_pnt["merge_scheduler"])
# other parameter
self.start_epoch = check_pnt["start_epoch"]
self.best_valid_value_accuracy = check_pnt["best_valid_value_accuracy"]
self.best_valid_equ_accuracy = check_pnt["best_valid_equ_accuracy"]
self.best_test_value_accuracy = check_pnt["best_test_value_accuracy"]
self.best_test_equ_accuracy = check_pnt["best_test_equ_accuracy"]
self.best_folds_accuracy = check_pnt["best_folds_accuracy"]
def _save_checkpoint(self):
check_pnt = {
"model": self.model.state_dict(),
"encoder_optimizer": self.encoder_optimizer.state_dict(),
"numencoder_optimizer": self.numencoder_optimizer.state_dict(),
"predict_optimizer": self.predict_optimizer.state_dict(),
"decoder_optimizer": self.decoder_optimizer.state_dict(),
"generate_optimizer": self.generate_optimizer.state_dict(),
"merge_optimizer": self.merge_optimizer.state_dict(),
"encoder_scheduler": self.encoder_scheduler.state_dict(),
"numencoder_scheduler": self.numencoder_scheduler.state_dict(),
"predict_scheduler": self.predict_scheduler.state_dict(),
"decoder_scheduler": self.decoder_scheduler.state_dict(),
"generate_scheduler": self.generate_scheduler.state_dict(),
"merge_scheduler": self.merge_scheduler.state_dict(),
"start_epoch": self.epoch_i,
"best_valid_value_accuracy": self.best_valid_value_accuracy,
"best_valid_equ_accuracy": self.best_valid_equ_accuracy,
"best_test_value_accuracy": self.best_test_value_accuracy,
"best_test_equ_accuracy": self.best_test_equ_accuracy,
"best_folds_accuracy": self.best_folds_accuracy,
"fold_t": self.config["fold_t"]
}
torch.save(check_pnt, self.config["checkpoint_path"])
def _scheduler_step(self):
# self.scheduler.step()
self.encoder_scheduler.step()
self.numencoder_scheduler.step()
self.predict_scheduler.step()
self.decoder_scheduler.step()
self.generate_scheduler.step()
self.merge_scheduler.step()
def _optimizer_step(self):
# self.optimizer.step()
self.encoder_optimizer.step()
self.numencoder_optimizer.step()
self.predict_optimizer.step()
self.decoder_optimizer.step()
self.generate_optimizer.step()
self.merge_optimizer.step()
def _train_batch(self, batch):
batch_loss = self.model.calculate_loss(batch)
return batch_loss
def _eval_batch(self, batch):
out_type, test_out, target = self.model.model_test(batch)
batch_size = len(test_out)
val_acc = []
equ_acc = []
for idx in range(batch_size):
if self.config["task_type"] == TaskType.SingleEquation and out_type == 'tree':
val_ac, equ_ac, _, _ = self.evaluator.prefix_result(
test_out[idx], target[idx])
elif self.config["task_type"] == TaskType.SingleEquation and out_type == 'attn':
val_ac, equ_ac, _, _ = self.evaluator.postfix_result(
test_out[idx], target[idx])
elif self.config["task_type"] == TaskType.MultiEquation and out_type == 'tree':
val_ac, equ_ac, _, _ = self.evaluator.prefix_result_multi(
test_out[idx], target[idx])
elif self.config["task_type"] == TaskType.MultiEquation and out_type == 'attn':
val_ac, equ_ac, _, _ = self.evaluator.postfix_result_multi(
test_out[idx], target[idx])
else:
raise NotImplementedError
val_acc.append(val_ac)
equ_acc.append(equ_ac)
result = {
'id': batch['id'][idx],
'prediction': ' '.join(test_out[idx]),
'target': ' '.join(target[idx]),
'decoder': out_type,
'number list': batch['num list'][idx],
'value acc': val_ac,
'equ acc': equ_ac
}
self.output_result.append(result)
return val_acc, equ_acc
class Graph2TreeTrainer(GTSTrainer):
    """Trainer for the deep-learning model Graph2Tree.

    Training, testing and hyper-parameter search are inherited unchanged
    from :class:`GTSTrainer`; this subclass exists so the Graph2Tree model
    has a trainer of its own name.

    Typical usage:
        >>> trainer = Graph2TreeTrainer(config, model, dataloader, evaluator)
        >>> trainer.fit()           # training
        >>> trainer.test()          # testing
        >>> trainer.param_search()  # hyper-parameter search
    """
    def __init__(self, config, model, dataloader, evaluator):
        """
        Args:
            config (config): An instance object of Config, used to record parameter information.
            model (Model): An object of deep-learning model.
            dataloader (Dataloader): dataloader object.
            evaluator (Evaluator): evaluator object.

        The config is expected to provide: learning_rate,
        embedding_learning_rate, train_batch_size, step_size, epoch_nums,
        trained_model_path, checkpoint_path, output_path (str|None),
        resume (bool), validset_divide (bool), test_step (int) and
        best_folds_accuracy (list|None) — the same contract as GTSTrainer.
        """
        super().__init__(config, model, dataloader, evaluator)
class TreeLSTMTrainer(AbstractTrainer):
    """treelstm trainer, used to implement training, testing, parameter searching for deep-learning model TreeLSTM.
    example of instantiation:
    >>> trainer = TreeLSTMTrainer(config, model, dataloader, evaluator)
    for training:
    >>> trainer.fit()
    for testing:
    >>> trainer.test()
    for parameter searching:
    >>> trainer.param_search()
    """
    def __init__(self, config, model, dataloader, evaluator):
        """
        Args:
            config (config): An instance object of Config, used to record parameter information.
            model (Model): An object of deep-learning model.
            dataloader (Dataloader): dataloader object.
            evaluator (Evaluator): evaluator object.
        expected that config includes these parameters below:
        learning_rate (float): learning rate of model.
        train_batch_size (int): the training batch size.
        step_size (int): step_size of scheduler.
        epoch_nums (int): number of epochs.
        trained_model_path (str): a path of file which is used to save parameters of best model.
        checkpoint_path (str): a path of file which is used save checkpoint of training progress.
        output_path (str|None): a path of a json file which is used to save test output infomation fo model.
        resume (bool): start training from last checkpoint.
        validset_divide (bool): whether to split validset. if True, the dataset is split to trainset-validset-testset. if False, the dataset is split to trainset-testset.
        test_step (int): the epoch number of training after which conducts the evaluation on test.
        best_folds_accuracy (list|None): when running k-fold cross validation, this keeps the accuracy of folds that already run.
        """
        super().__init__(config, model, dataloader, evaluator)
        self._build_optimizer()
    def _build_optimizer(self):
        """Create one Adam optimizer and one StepLR scheduler per sub-module."""
        # optimizer
        self.embedder_optimizer = torch.optim.Adam(self.model.embedder.parameters(
        ), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
        self.encoder_optimizer = torch.optim.Adam(self.model.encoder.parameters(
        ), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
        self.decoder_optimizer = torch.optim.Adam(self.model.decoder.parameters(
        ), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
        self.node_generater_optimizer = torch.optim.Adam(self.model.node_generater.parameters(
        ), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
        # scheduler: halve each learning rate every ``step_size`` epochs
        self.embedder_scheduler = torch.optim.lr_scheduler.StepLR(
            self.embedder_optimizer, step_size=self.config["step_size"], gamma=0.5)
        self.encoder_scheduler = torch.optim.lr_scheduler.StepLR(
            self.encoder_optimizer, step_size=self.config["step_size"], gamma=0.5)
        self.decoder_scheduler = torch.optim.lr_scheduler.StepLR(
            self.decoder_optimizer, step_size=self.config["step_size"], gamma=0.5)
        self.node_generater_scheduler = torch.optim.lr_scheduler.StepLR(
            self.node_generater_optimizer, step_size=self.config["step_size"], gamma=0.5)
    def _save_checkpoint(self):
        """Write model/optimizer/scheduler states plus progress bookkeeping
        to ``config["checkpoint_path"]`` so training can later resume."""
        check_pnt = {
            "model": self.model.state_dict(),
            "embedder_optimizer": self.embedder_optimizer.state_dict(),
            "encoder_optimizer": self.encoder_optimizer.state_dict(),
            "decoder_optimizer": self.decoder_optimizer.state_dict(),
            "generate_optimizer": self.node_generater_optimizer.state_dict(),
            "embedder_scheduler": self.embedder_scheduler.state_dict(),
            "encoder_scheduler": self.encoder_scheduler.state_dict(),
            "decoder_scheduler": self.decoder_scheduler.state_dict(),
            "generate_scheduler": self.node_generater_scheduler.state_dict(),
            "start_epoch": self.epoch_i,
            "best_valid_value_accuracy": self.best_valid_value_accuracy,
            "best_valid_equ_accuracy": self.best_valid_equ_accuracy,
            "best_test_value_accuracy": self.best_test_value_accuracy,
            "best_test_equ_accuracy": self.best_test_equ_accuracy,
            "best_folds_accuracy": self.best_folds_accuracy,
            "fold_t": self.config["fold_t"]
        }
        torch.save(check_pnt, self.config["checkpoint_path"])
    def _load_checkpoint(self):
        """Restore the state written by :meth:`_save_checkpoint`."""
        check_pnt = torch.load(
            self.config["checkpoint_path"], map_location=self.config["map_location"])
        # load parameter of model
        self.model.load_state_dict(check_pnt["model"])
        # load parameter of optimizer
        self.embedder_optimizer.load_state_dict(
            check_pnt["embedder_optimizer"])
        self.encoder_optimizer.load_state_dict(check_pnt["encoder_optimizer"])
        self.decoder_optimizer.load_state_dict(check_pnt["decoder_optimizer"])
        self.node_generater_optimizer.load_state_dict(
            check_pnt["generate_optimizer"])
        # load parameter of scheduler
        self.embedder_scheduler.load_state_dict(
            check_pnt["embedder_scheduler"])
        self.encoder_scheduler.load_state_dict(check_pnt["encoder_scheduler"])
        self.decoder_scheduler.load_state_dict(check_pnt["decoder_scheduler"])
        self.node_generater_scheduler.load_state_dict(
            check_pnt["generate_scheduler"])
        # other parameter
        self.start_epoch = check_pnt["start_epoch"]
        self.best_valid_value_accuracy = check_pnt["best_valid_value_accuracy"]
        self.best_valid_equ_accuracy = check_pnt["best_valid_equ_accuracy"]
        self.best_test_value_accuracy = check_pnt["best_test_value_accuracy"]
        self.best_test_equ_accuracy = check_pnt["best_test_equ_accuracy"]
        self.best_folds_accuracy = check_pnt["best_folds_accuracy"]
    def _scheduler_step(self):
        """Advance every learning-rate scheduler by one step."""
        self.embedder_scheduler.step()
        self.encoder_scheduler.step()
        self.decoder_scheduler.step()
        self.node_generater_scheduler.step()
    def _optimizer_step(self):
        """Apply one gradient-update step on every optimizer."""
        self.embedder_optimizer.step()
        self.encoder_optimizer.step()
        self.decoder_optimizer.step()
        self.node_generater_optimizer.step()
    def _train_batch(self, batch):
        """Forward one training batch through the model and return its loss."""
        batch_loss = self.model.calculate_loss(batch)
        return batch_loss
    def _eval_batch(self, batch):
        """Decode one batch and score each sample; results are appended to
        ``self.output_result``; returns per-sample value/equation accuracy lists."""
        test_out, target = self.model.model_test(batch)
        batch_size = len(test_out)
        val_acc = []
        equ_acc = []
        for idx in range(batch_size):
            if self.config["task_type"] == TaskType.SingleEquation:
                val_ac, equ_ac, _, _ = self.evaluator.result(
                    test_out[idx], target[idx])
            elif self.config["task_type"] == TaskType.MultiEquation:
                val_ac, equ_ac, _, _ = self.evaluator.result_multi(
                    test_out[idx], target[idx])
            else:
                raise NotImplementedError
            val_acc.append(val_ac)
            equ_acc.append(equ_ac)
            result = {
                'id': batch['id'][idx],
                'prediction': ' '.join(test_out[idx]),
                'target': ' '.join(target[idx]),
                'number list': batch['num list'][idx],
                'value acc': val_ac,
                'equ acc': equ_ac
            }
            self.output_result.append(result)
        return val_acc, equ_acc
    def _train_epoch(self):
        """Run one pass over the training set; returns (total loss, time string)."""
        epoch_start_time = time.time()
        loss_total = 0.
        self.model.train()
        for batch_idx, batch in enumerate(self.dataloader.load_data(DatasetType.Train)):
            self.batch_idx = batch_idx + 1
            self.model.zero_grad()
            # NOTE(review): no explicit backward() here — presumably
            # model.calculate_loss performs backprop internally; confirm.
            batch_loss = self._train_batch(batch)
            loss_total += batch_loss
            self._optimizer_step()
        epoch_time_cost = time_since(time.time() - epoch_start_time)
        return loss_total, epoch_time_cost
    def fit(self):
        """train model.
        """
        train_batch_size = self.config["train_batch_size"]
        epoch_nums = self.config["epoch_nums"]
        self.train_batch_nums = int(
            self.dataloader.trainset_nums / train_batch_size) + 1
        self.logger.info("start training...")
        for epo in range(self.start_epoch, epoch_nums):
            self.epoch_i = epo + 1
            self.model.train()
            loss_total, train_time_cost = self._train_epoch()
            self._scheduler_step()
            self.logger.info("epoch [%3d] avr loss [%2.8f] | train time %s"
                             % (self.epoch_i, loss_total/self.train_batch_nums, train_time_cost))
            # evaluate periodically, and on each of the final few epochs
            if epo % self.test_step == 0 or epo > epoch_nums - 5:
                # no separate validset: model-select directly on the test set
                if self.config["k_fold"] or self.config["validset_divide"] is not True:
                    test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate(
                        DatasetType.Test)
                    self.logger.info("---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                                     % (test_total, test_equ_ac, test_val_ac, test_time_cost))
                    if test_val_ac >= self.best_test_value_accuracy:
                        self.best_test_value_accuracy = test_val_ac
                        self.best_test_equ_accuracy = test_equ_ac
                        self._save_model()
                        self._save_output()
                else:
                    valid_equ_ac, valid_val_ac, valid_total, valid_time_cost = self.evaluate(
                        DatasetType.Valid)
                    self.logger.info("---------- valid total [%d] | valid equ acc [%2.3f] | valid value acc [%2.3f] | valid time %s"
                                     % (valid_total, valid_equ_ac, valid_val_ac, valid_time_cost))
                    test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate(
                        DatasetType.Test)
                    self.logger.info("---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                                     % (test_total, test_equ_ac, test_val_ac, test_time_cost))
                    # model selection on validation accuracy
                    if valid_val_ac >= self.best_valid_value_accuracy:
                        self.best_valid_value_accuracy = valid_val_ac
                        self.best_valid_equ_accuracy = valid_equ_ac
                        self.best_test_value_accuracy = test_val_ac
                        self.best_test_equ_accuracy = test_equ_ac
                        self._save_model()
                        self._save_output()
            if epo % 5 == 0:
                self._save_checkpoint()
        self.logger.info('''training finished.
                best valid result: equation accuracy [%2.3f] | value accuracy [%2.3f]
                best test result : equation accuracy [%2.3f] | value accuracy [%2.3f]'''
                         % (self.best_valid_equ_accuracy, self.best_valid_value_accuracy,
                            self.best_test_equ_accuracy, self.best_test_value_accuracy))
    def evaluate(self, eval_set):
        """evaluate model.
        Args:
            eval_set (str): [valid | test], the dataset for evaluation.
        Returns:
            tuple(float,float,int,str):
            equation accuracy, value accuracy, count of evaluated datas, formatted time string of evaluation time.
        """
        self.model.eval()
        value_ac = 0
        equation_ac = 0
        eval_total = 0
        self.output_result = []
        test_start_time = time.time()
        for batch in self.dataloader.load_data(eval_set):
            batch_val_ac, batch_equ_ac = self._eval_batch(batch)
            # _eval_batch returns lists of booleans; count the correct ones
            value_ac += batch_val_ac.count(True)
            equation_ac += batch_equ_ac.count(True)
            eval_total += len(batch_val_ac)
        test_time_cost = time_since(time.time() - test_start_time)
        return equation_ac / eval_total, value_ac / eval_total, eval_total, test_time_cost
    def test(self):
        """test model.
        """
        self._load_model()
        self.model.eval()
        value_ac = 0
        equation_ac = 0
        eval_total = 0
        self.output_result = []
        test_start_time = time.time()
        for batch in self.dataloader.load_data(DatasetType.Test):
            batch_val_ac, batch_equ_ac = self._eval_batch(batch)
            value_ac += batch_val_ac.count(True)
            equation_ac += batch_equ_ac.count(True)
            eval_total += len(batch_val_ac)
        self.best_test_equ_accuracy = equation_ac/eval_total
        self.best_test_value_accuracy = value_ac/eval_total
        test_time_cost = time_since(time.time() - test_start_time)
        self.logger.info("test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                         % (eval_total, equation_ac/eval_total, value_ac/eval_total, test_time_cost))
        self._save_output()
    def param_search(self):
        """Hyper-parameter search: train as in :meth:`fit` but report test
        accuracy to Ray Tune instead of saving models."""
        train_batch_size = self.config["train_batch_size"]
        epoch_nums = self.config["epoch_nums"]
        self.train_batch_nums = int(
            self.dataloader.trainset_nums / train_batch_size) + 1
        self.logger.info("start training...")
        for epo in range(self.start_epoch, epoch_nums):
            self.epoch_i = epo + 1
            self.model.train()
            loss_total, train_time_cost = self._train_epoch()
            self._scheduler_step()
            if epo % self.test_step == 0 or epo > epoch_nums - 5:
                test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate(
                    DatasetType.Test)
                tune.report(accuracy=test_val_ac)
class SAUSolverTrainer(GTSTrainer):
    """sausolver trainer, used to implement training, testing, parameter searching for deep-learning model SAUSolver.
    example of instantiation:
    >>> trainer = SAUSolverTrainer(config, model, dataloader, evaluator)
    for training:
    >>> trainer.fit()
    for testing:
    >>> trainer.test()
    for parameter searching:
    >>> trainer.param_search()
    """
    def __init__(self, config, model, dataloader, evaluator):
        """
        Args:
            config (config): An instance object of Config, used to record parameter information.
            model (Model): An object of deep-learning model.
            dataloader (Dataloader): dataloader object.
            evaluator (Evaluator): evaluator object.
        expected that config includes these parameters below:
        learning_rate (float): learning rate of model.
        train_batch_size (int): the training batch size.
        step_size (int): step_size of scheduler.
        epoch_nums (int): number of epochs.
        trained_model_path (str): a path of file which is used to save parameters of best model.
        checkpoint_path (str): a path of file which is used save checkpoint of training progress.
        output_path (str|None): a path of a json file which is used to save test output infomation fo model.
        resume (bool): start training from last checkpoint.
        validset_divide (bool): whether to split validset. if True, the dataset is split to trainset-validset-testset. if False, the dataset is split to trainset-testset.
        test_step (int): the epoch number of training after which conducts the evaluation on test.
        best_folds_accuracy (list|None): when running k-fold cross validation, this keeps the accuracy of folds that already run.
        """
        super().__init__(config, model, dataloader, evaluator)
    def _train_batch(self, batch):
        """Forward one training batch through the model and return its loss."""
        batch_loss = self.model.calculate_loss(batch)
        return batch_loss
    def _eval_batch(self, batch):
        """Decode one batch and score each sample; results are appended to
        ``self.output_result``; returns per-sample value/equation accuracy lists."""
        test_out, target = self.model.model_test(batch)
        batch_size = len(test_out)
        val_acc = []
        equ_acc = []
        for idx in range(batch_size):
            # batch['ans'][idx] = [12,8]
            if self.config["task_type"] == TaskType.SingleEquation:
                val_ac, equ_ac, _, _ = self.evaluator.result(
                    test_out[idx], target[idx])
            elif self.config["task_type"] == TaskType.MultiEquation:
                val_ac, equ_ac, _, _ = self.evaluator.result_multi(
                    test_out[idx], target[idx])
            else:
                raise NotImplementedError
            val_acc.append(val_ac)
            equ_acc.append(equ_ac)
            result = {
                'id': batch['id'][idx],
                'prediction': ' '.join(test_out[idx]),
                'target': ' '.join(target[idx]),
                'number list': batch['num list'][idx],
                'value acc': val_ac,
                'equ acc': equ_ac
            }
            self.output_result.append(result)
        return val_acc, equ_acc
    def _build_optimizer(self):
        """Create Adam optimizers and StepLR schedulers for every sub-module,
        including the extra self-attention (``sa``) module of SAUSolver.
        The embedder uses its own ``embedding_learning_rate``."""
        self.embedder_optimizer = torch.optim.Adam(self.model.embedder.parameters(),
                                                   self.config["embedding_learning_rate"],
                                                   weight_decay=self.config["weight_decay"])
        self.encoder_optimizer = torch.optim.Adam(self.model.encoder.parameters(), self.config["learning_rate"],
                                                  weight_decay=self.config["weight_decay"])
        self.decoder_optimizer = torch.optim.Adam(self.model.decoder.parameters(), self.config["learning_rate"],
                                                  weight_decay=self.config["weight_decay"])
        self.node_generater_optimizer = torch.optim.Adam(self.model.node_generater.parameters(),
                                                         self.config["learning_rate"],
                                                         weight_decay=self.config["weight_decay"])
        self.merge_optimizer = torch.optim.Adam(self.model.merge.parameters(), self.config["learning_rate"],
                                                weight_decay=self.config["weight_decay"])
        self.sa_optimizer = torch.optim.Adam(self.model.sa.parameters(), self.config['learning_rate'],
                                             weight_decay=self.config["weight_decay"])
        # scheduler: halve each learning rate every ``step_size`` epochs
        self.embedder_scheduler = torch.optim.lr_scheduler.StepLR(self.embedder_optimizer,
                                                                  step_size=self.config["step_size"], gamma=0.5, )
        self.encoder_scheduler = torch.optim.lr_scheduler.StepLR(self.encoder_optimizer,
                                                                 step_size=self.config["step_size"], gamma=0.5)
        self.decoder_scheduler = torch.optim.lr_scheduler.StepLR(self.decoder_optimizer,
                                                                 step_size=self.config["step_size"], gamma=0.5)
        self.node_generater_scheduler = torch.optim.lr_scheduler.StepLR(self.node_generater_optimizer,
                                                                        step_size=self.config["step_size"], gamma=0.5)
        self.merge_scheduler = torch.optim.lr_scheduler.StepLR(self.merge_optimizer, step_size=self.config["step_size"],
                                                               gamma=0.5)
        self.sa_scheduler = torch.optim.lr_scheduler.StepLR(self.sa_optimizer, step_size=self.config['step_size'],
                                                            gamma=0.5)
    def _save_checkpoint(self):
        """Write model/optimizer/scheduler states plus progress bookkeeping
        to ``config["checkpoint_path"]`` so training can later resume."""
        check_pnt = {
            "model": self.model.state_dict(),
            "embedder_optimizer": self.embedder_optimizer.state_dict(),
            "encoder_optimizer": self.encoder_optimizer.state_dict(),
            "decoder_optimizer": self.decoder_optimizer.state_dict(),
            "generate_optimizer": self.node_generater_optimizer.state_dict(),
            "merge_optimizer": self.merge_optimizer.state_dict(),
            "sa_optimizer": self.sa_optimizer.state_dict(),
            "embedder_scheduler": self.embedder_scheduler.state_dict(),
            "encoder_scheduler": self.encoder_scheduler.state_dict(),
            "decoder_scheduler": self.decoder_scheduler.state_dict(),
            "generate_scheduler": self.node_generater_scheduler.state_dict(),
            "merge_scheduler": self.merge_scheduler.state_dict(),
            "sa_scheduler": self.sa_scheduler.state_dict(),
            "start_epoch": self.epoch_i,
            "best_valid_value_accuracy": self.best_valid_value_accuracy,
            "best_valid_equ_accuracy": self.best_valid_equ_accuracy,
            "best_test_value_accuracy": self.best_test_value_accuracy,
            "best_test_equ_accuracy": self.best_test_equ_accuracy,
            "best_folds_accuracy": self.best_folds_accuracy,
            "fold_t": self.config["fold_t"]
        }
        torch.save(check_pnt, self.config["checkpoint_path"])
    def _load_checkpoint(self):
        """Restore the state written by :meth:`_save_checkpoint`."""
        check_pnt = torch.load(
            self.config["checkpoint_path"], map_location=self.config["map_location"])
        # load parameter of model
        self.model.load_state_dict(check_pnt["model"])
        # load parameter of optimizer
        self.embedder_optimizer.load_state_dict(
            check_pnt["embedder_optimizer"])
        self.encoder_optimizer.load_state_dict(check_pnt["encoder_optimizer"])
        self.decoder_optimizer.load_state_dict(check_pnt["decoder_optimizer"])
        self.node_generater_optimizer.load_state_dict(
            check_pnt["generate_optimizer"])
        self.merge_optimizer.load_state_dict(check_pnt["merge_optimizer"])
        self.sa_optimizer.load_state_dict(check_pnt["sa_optimizer"])
        # load parameter of scheduler
        self.embedder_scheduler.load_state_dict(
            check_pnt['embedder_scheduler'])
        self.encoder_scheduler.load_state_dict(check_pnt["encoder_scheduler"])
        self.decoder_scheduler.load_state_dict(check_pnt["decoder_scheduler"])
        self.node_generater_scheduler.load_state_dict(
            check_pnt["generate_scheduler"])
        self.merge_scheduler.load_state_dict(check_pnt["merge_scheduler"])
        self.sa_scheduler.load_state_dict(check_pnt["sa_scheduler"])
        # other parameter
        self.start_epoch = check_pnt["start_epoch"]
        self.best_valid_value_accuracy = check_pnt["best_valid_value_accuracy"]
        self.best_valid_equ_accuracy = check_pnt["best_valid_equ_accuracy"]
        self.best_test_value_accuracy = check_pnt["best_test_value_accuracy"]
        self.best_test_equ_accuracy = check_pnt["best_test_equ_accuracy"]
        self.best_folds_accuracy = check_pnt["best_folds_accuracy"]
    def _scheduler_step(self):
        """Advance every learning-rate scheduler by one step."""
        self.embedder_scheduler.step()
        self.encoder_scheduler.step()
        self.decoder_scheduler.step()
        self.node_generater_scheduler.step()
        self.merge_scheduler.step()
        self.sa_scheduler.step()
    def _optimizer_step(self):
        """Apply one gradient-update step on every optimizer."""
        self.embedder_optimizer.step()
        self.encoder_optimizer.step()
        self.decoder_optimizer.step()
        self.node_generater_optimizer.step()
        self.merge_optimizer.step()
        self.sa_optimizer.step()
class TRNNTrainer(SupervisedTrainer):
    """trnn trainer, used to implement training, testing, parameter searching for deep-learning model TRNN.
    example of instantiation:
    >>> trainer = TRNNTrainer(config, model, dataloader, evaluator)
    for training:
    >>> trainer.fit()
    for testing:
    >>> trainer.test()
    for parameter searching:
    >>> trainer.param_search()
    """
    def __init__(self, config, model, dataloader, evaluator):
        """
        Args:
            config (config): An instance object of Config, used to record parameter information.
            model (Model): An object of deep-learning model.
            dataloader (Dataloader): dataloader object.
            evaluator (Evaluator): evaluator object.
        expected that config includes these parameters below:
        seq2seq_learning_rate (float): learning rate of seq2seq module.
        ans_learning_rate (float): learning rate of answer module.
        train_batch_size (int): the training batch size.
        step_size (int): step_size of scheduler.
        epoch_nums (int): number of epochs.
        trained_model_path (str): a path of file which is used to save parameters of best model.
        checkpoint_path (str): a path of file which is used save checkpoint of training progress.
        output_path (str|None): a path of a json file which is used to save test output infomation fo model.
        resume (bool): start training from last checkpoint.
        validset_divide (bool): whether to split validset. if True, the dataset is split to trainset-validset-testset. if False, the dataset is split to trainset-testset.
        test_step (int): the epoch number of training after which conducts the evaluation on test.
        best_folds_accuracy (list|None): when running k-fold cross validation, this keeps the accuracy of folds that already run.
        """
        super().__init__(config, model, dataloader, evaluator)
        self._build_optimizer()
        if config["resume"]:
            self._load_checkpoint()
    def _build_optimizer(self):
        """Create two optimizers: Adam over the seq2seq sub-modules and SGD
        (momentum 0.9) over the answer-module sub-modules."""
        #self.optimizer = torch.optim.Adam(self.model.parameters(),self.config["learning_rate"])
        self.optimizer = torch.optim.Adam(
            [
                {'params': self.model.seq2seq_in_embedder.parameters()},
                {'params': self.model.seq2seq_out_embedder.parameters()},
                {'params': self.model.seq2seq_encoder.parameters()},
                {'params': self.model.seq2seq_decoder.parameters()},
                {'params': self.model.seq2seq_gen_linear.parameters()}
            ],
            self.config["seq2seq_learning_rate"]
        )
        self.answer_module_optimizer = torch.optim.SGD(
            [
                {'params': self.model.answer_in_embedder.parameters()},
                {'params': self.model.answer_encoder.parameters()},
                {'params': self.model.answer_rnn.parameters()}
            ],
            self.config["ans_learning_rate"],
            momentum=0.9
        )
    def _seq2seq_train(self):
        """Put the seq2seq sub-modules in train mode and freeze (eval) the
        answer-module sub-modules for the first training stage."""
        self.model.seq2seq_in_embedder.train()
        self.model.seq2seq_out_embedder.train()
        self.model.seq2seq_encoder.train()
        self.model.seq2seq_decoder.train()
        self.model.seq2seq_gen_linear.train()
        self.model.answer_in_embedder.eval()
        self.model.answer_encoder.eval()
        self.model.answer_rnn.eval()
    def _ans_train(self):
        """Put the answer-module sub-modules in train mode and freeze (eval)
        the seq2seq sub-modules for the second training stage."""
        self.model.seq2seq_in_embedder.eval()
        self.model.seq2seq_out_embedder.eval()
        self.model.seq2seq_encoder.eval()
        self.model.seq2seq_decoder.eval()
        self.model.seq2seq_gen_linear.eval()
        self.model.answer_in_embedder.train()
        self.model.answer_encoder.train()
        self.model.answer_rnn.train()
    def _train_seq2seq_batch(self, batch):
        """Return the seq2seq (template-generation) loss for one batch."""
        batch_loss = self.model.seq2seq_calculate_loss(batch)
        return batch_loss
    def _train_ans_batch(self, batch):
        """Return the answer-module loss for one batch."""
        batch_loss = self.model.ans_module_calculate_loss(batch)
        return batch_loss
    def _train_epoch(self):
        """Run one two-stage pass over the training set.

        Per batch: stage 1 trains the seq2seq module, stage 2 trains the
        answer module.  Returns (seq2seq loss total, answer-module loss
        total, formatted epoch time).
        NOTE(review): no explicit backward() is visible — presumably the
        *_calculate_loss methods backprop internally; confirm.
        """
        epoch_start_time = time.time()
        loss_total_seq2seq = 0.
        loss_total_ans_module = 0.
        for batch_idx, batch in enumerate(self.dataloader.load_data(DatasetType.Train)):
            self.batch_idx = batch_idx + 1
            # first stage
            self._seq2seq_train()
            self.model.zero_grad()
            batch_seq2seq_loss = self._train_seq2seq_batch(batch)
            self.optimizer.step()
            # second stage
            self._ans_train()
            self.model.zero_grad()
            batch_ans_module_loss = self._train_ans_batch(batch)
            loss_total_seq2seq += batch_seq2seq_loss
            loss_total_ans_module += batch_ans_module_loss
            # self.seq2seq_optimizer.step()
            # self.answer_module_optimizer.step()
            self.answer_module_optimizer.step()
        epoch_time_cost = time_since(time.time() - epoch_start_time)
        return loss_total_seq2seq, loss_total_ans_module, epoch_time_cost
    def _eval_batch(self, batch, x=0):
        """Decode one batch and score each sample.

        Returns four per-sample lists: value accuracy, equation accuracy,
        template (seq2seq-module) accuracy, and equation (answer-module)
        accuracy.  ``x`` is a debug flag: when truthy, each sample's
        source/outputs/targets are logged.
        """
        test_out, target, temp_out, temp_tar, equ_out, equ_tar = self.model.model_test(
            batch)
        batch_size = len(test_out)
        val_acc = []
        equ_acc = []
        temp_acc = []
        equs_acc = []
        for idx in range(batch_size):
            if self.config["task_type"] == TaskType.SingleEquation:
                val_ac, equ_ac, _, _ = self.evaluator.result(
                    test_out[idx], target[idx])
            elif self.config["task_type"] == TaskType.MultiEquation:
                val_ac, equ_ac, _, _ = self.evaluator.result_multi(
                    test_out[idx], target[idx])
            else:
                raise NotImplementedError
            equ_acc.append(equ_ac)
            val_acc.append(val_ac)
            # exact-match accuracy of the predicted template
            if temp_out[idx] == temp_tar[idx]:
                temp_acc.append(True)
            else:
                temp_acc.append(False)
            # exact-match accuracy of the fully resolved equation
            if equ_out[idx] == equ_tar[idx]:
                equs_acc.append(True)
            else:
                equs_acc.append(False)
            if x:
                self.logger.info('{}\n{}\n{} {} {}\n{} {} {}'.format([batch["ques source 1"][idx]], [batch["ques source"][idx]],
                                                                     equ_out[idx], temp_out[idx], test_out[idx],
                                                                     equ_tar[idx], temp_tar[idx], target[idx]))
        return val_acc, equ_acc, temp_acc, equs_acc
    def fit(self):
        """train model.
        """
        train_batch_size = self.config["train_batch_size"]
        epoch_nums = self.config["epoch_nums"]
        self.train_batch_nums = int(
            self.dataloader.trainset_nums / train_batch_size) + 1
        self.logger.info("start training...")
        for epo in range(self.start_epoch, epoch_nums):
            self.epoch_i = epo + 1
            self.model.train()
            loss_total_seq2seq, loss_total_ans_module, train_time_cost = self._train_epoch()
            self.logger.info("epoch [%3d] avr seq2seq module loss [%2.8f] | avr answer module loss [%2.8f] | train time %s"
                             % (self.epoch_i, loss_total_seq2seq/self.train_batch_nums, loss_total_ans_module/self.train_batch_nums, train_time_cost))
            self.logger.info("target wrong: {} target total: {}".format(
                self.model.wrong, self.dataloader.trainset_nums))
            # reset the model's per-epoch wrong-target counter
            self.model.wrong = 0
            # evaluate periodically, and on each of the final few epochs
            if epo % self.test_step == 0 or epo > epoch_nums - 5:
                # no separate validset: model-select directly on the test set
                if self.config["k_fold"] or self.config["validset_divide"] is not True:
                    test_equ_ac, test_val_ac, template_ac, equation_ac, test_total, test_time_cost = self.evaluate(
                        DatasetType.Test)
                    self.logger.info("---------- test total [%d] | seq2seq module acc [%2.3f] | answer module acc [%2.3f]"
                                     % (test_total, template_ac, equation_ac))
                    self.logger.info("---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                                     % (test_total, test_equ_ac, test_val_ac, test_time_cost))
                    if test_val_ac >= self.best_test_value_accuracy:
                        self.best_test_value_accuracy = test_val_ac
                        self.best_test_equ_accuracy = test_equ_ac
                        self._save_model()
                        self._save_output()
                else:
                    valid_equ_ac, valid_val_ac, _, _, valid_total, valid_time_cost = self.evaluate(
                        DatasetType.Valid)
                    self.logger.info("---------- valid total [%d] | valid equ acc [%2.3f] | valid value acc [%2.3f] | valid time %s"
                                     % (valid_total, valid_equ_ac, valid_val_ac, valid_time_cost))
                    test_equ_ac, test_val_ac, _, _, test_total, test_time_cost = self.evaluate(
                        DatasetType.Test)
                    self.logger.info("---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                                     % (test_total, test_equ_ac, test_val_ac, test_time_cost))
                    # model selection on validation accuracy
                    if valid_val_ac >= self.best_valid_value_accuracy:
                        self.best_valid_value_accuracy = valid_val_ac
                        self.best_valid_equ_accuracy = valid_equ_ac
                        self.best_test_value_accuracy = test_val_ac
                        self.best_test_equ_accuracy = test_equ_ac
                        self._save_model()
                        self._save_output()
            if epo % 5 == 0:
                self._save_checkpoint()
            # self.test(DatasetType.Test)
            # self.test(DatasetType.Train)
        self.logger.info('''training finished.
                best valid result: equation accuracy [%2.3f] | value accuracy [%2.3f]
                best test result : equation accuracy [%2.3f] | value accuracy [%2.3f]'''
                         % (self.best_valid_equ_accuracy, self.best_valid_value_accuracy,
                            self.best_test_equ_accuracy, self.best_test_value_accuracy))
    def evaluate(self, eval_set):
        """evaluate model.
        Args:
            eval_set (str): [valid | test], the dataset for evaluation.
        Returns:
            tuple(float,float,float,float,int,str):
            equation accuracy, value accuracy, seq2seq module accuracy, answer module accuracy, count of evaluated datas, formatted time string of evaluation time.
        """
        self.model.eval()
        value_ac = 0
        equation_ac = 0
        template_ac = 0
        equations_ac = 0
        eval_total = 0
        self.output_result = []
        test_start_time = time.time()
        for batch in self.dataloader.load_data(eval_set):
            batch_val_ac, batch_equ_ac, batch_temp_acc, batch_equs_acc = self._eval_batch(
                batch)
            # _eval_batch returns lists of booleans; count the correct ones
            value_ac += batch_val_ac.count(True)
            equation_ac += batch_equ_ac.count(True)
            template_ac += batch_temp_acc.count(True)
            equations_ac += batch_equs_acc.count(True)
            eval_total += len(batch_val_ac)
        test_time_cost = time_since(time.time() - test_start_time)
        return equation_ac / eval_total, value_ac / eval_total,\
            template_ac / eval_total, equations_ac / eval_total,\
            eval_total, test_time_cost
    def test(self, type):
        """Load the best saved model and evaluate it on the given dataset.

        Args:
            type (str): dataset to evaluate (e.g. DatasetType.Test).
                NOTE(review): the parameter name shadows the ``type``
                builtin; kept for interface compatibility.
        """
        self._load_model()
        self.model.eval()
        value_ac = 0
        equation_ac = 0
        eval_total = 0
        ans_acc = 0
        self.output_result = []
        test_start_time = time.time()
        for batch in self.dataloader.load_data(type):
            batch_val_ac, batch_equ_ac, batch_temp_acc, batch_equs_acc = self._eval_batch(
                batch)
            value_ac += batch_val_ac.count(True)
            equation_ac += batch_equ_ac.count(True)
            ans_acc += batch_equs_acc.count(True)
            eval_total += len(batch_val_ac)
        self.best_test_equ_accuracy = equation_ac/eval_total
        self.best_test_value_accuracy = value_ac/eval_total
        test_time_cost = time_since(time.time() - test_start_time)
        # self.logger.info("test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"\
        #     %(eval_total,equation_ac/eval_total,value_ac/eval_total,test_time_cost))
        self.logger.info("test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                         % (eval_total, equation_ac/eval_total, value_ac/eval_total, test_time_cost))
        self._save_output()
    def param_search(self):
        """hyper-parameter search.
        """
        train_batch_size = self.config["train_batch_size"]
        epoch_nums = self.config["epoch_nums"]
        self.train_batch_nums = int(
            self.dataloader.trainset_nums / train_batch_size) + 1
        self.logger.info("start training...")
        for epo in range(self.start_epoch, epoch_nums):
            self.epoch_i = epo + 1
            self.model.train()
            seq2seq_loss_total, _, train_time_cost = self._train_epoch()
            if epo % self.test_step == 0 or epo > epoch_nums - 5:
                test_equ_ac, test_val_ac, _, acc, test_total, test_time_cost = self.evaluate(
                    DatasetType.Test)
                # report test value accuracy to Ray Tune for this trial
                tune.report(accuracy=test_val_ac)
class SalignedTrainer(SupervisedTrainer):
    """saligned trainer, used to implement training, testing, parameter searching for deep-learning model S-aligned.

    example of instantiation:

        >>> trainer = SalignedTrainer(config, model, dataloader, evaluator)

        for training:

            >>> trainer.fit()

        for testing:

            >>> trainer.test()

        for parameter searching:

            >>> trainer.param_search()
    """
    def __init__(self, config, model, dataloader, evaluator):
        """
        Args:
            config (config): An instance object of Config, used to record parameter information.
            model (Model): An object of deep-learning model.
            dataloader (Dataloader): dataloader object.
            evaluator (Evaluator): evaluator object.

        expected that config includes these parameters below:

        learning_rate (float): learning rate of model

        train_batch_size (int): the training batch size.

        epoch_nums (int): number of epochs.

        step_size (int): step_size of scheduler.

        trained_model_path (str): a path of file which is used to save parameters of best model.

        checkpoint_path (str): a path of file which is used save checkpoint of training progress.

        output_path (str|None): a path of a json file which is used to save test output infomation fo model.

        resume (bool): start training from last checkpoint.

        validset_divide (bool): whether to split validset. if True, the dataset is split to trainset-validset-testset. if False, the dataset is split to trainset-testset.

        test_step (int): the epoch number of training after which conducts the evaluation on test.

        best_folds_accuracy (list|None): when running k-fold cross validation, this keeps the accuracy of folds that already run.
        """
        super().__init__(config, model, dataloader, evaluator)
        self._build_optimizer()
        if config["resume"]:
            self._load_checkpoint()
    def _build_optimizer(self):
        """Create a single Adam optimizer over all model parameters and a StepLR scheduler (gamma 0.5)."""
        # optimizer
        self.optimizer = torch.optim.Adam(self.model.parameters(
        ), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
        # scheduler
        self.scheduler = torch.optim.lr_scheduler.StepLR(
            self.optimizer, step_size=self.config["step_size"], gamma=0.5)
    def _save_checkpoint(self):
        """Serialize model/optimizer/scheduler state and training progress to config["checkpoint_path"]."""
        check_pnt = {
            "model": self.model.state_dict(),
            "optimizer": self.optimizer.state_dict(),
            "scheduler": self.scheduler.state_dict(),
            "start_epoch": self.epoch_i,
            "best_valid_value_accuracy": self.best_valid_value_accuracy,
            "best_valid_equ_accuracy": self.best_valid_equ_accuracy,
            "best_test_value_accuracy": self.best_test_value_accuracy,
            "best_test_equ_accuracy": self.best_test_equ_accuracy,
            "best_folds_accuracy": self.best_folds_accuracy,
            "fold_t": self.config["fold_t"]
        }
        torch.save(check_pnt, self.config["checkpoint_path"])
    def _load_checkpoint(self):
        """Restore model/optimizer/scheduler state and training progress from config["checkpoint_path"]."""
        check_pnt = torch.load(
            self.config["checkpoint_path"], map_location=self.config["map_location"])
        # load parameter of model
        self.model.load_state_dict(check_pnt["model"])
        # load parameter of optimizer
        self.optimizer.load_state_dict(check_pnt["optimizer"])
        # load parameter of scheduler
        self.scheduler.load_state_dict(check_pnt["scheduler"])
        # other parameter
        self.start_epoch = check_pnt["start_epoch"]
        self.best_valid_value_accuracy = check_pnt["best_valid_value_accuracy"]
        self.best_valid_equ_accuracy = check_pnt["best_valid_equ_accuracy"]
        self.best_test_value_accuracy = check_pnt["best_test_value_accuracy"]
        self.best_test_equ_accuracy = check_pnt["best_test_equ_accuracy"]
        self.best_folds_accuracy = check_pnt["best_folds_accuracy"]
    def _scheduler_step(self):
        # Advance the learning-rate scheduler by one epoch.
        self.scheduler.step()
    def _optimizer_step(self):
        # Apply one optimizer update using the accumulated gradients.
        self.optimizer.step()
    def adjust_equ(self, op_target, eq_len, num_list):
        """Remap equation token ids and attach prefix/postfix tokens.

        Shifts constant/unknown/operator id ranges (presumably to align the
        dataset vocabulary with the model's internal symbol table — confirm
        against the S-aligned model implementation), zeroes out padding
        positions, appends an end token, and prepends GEN_VAR plus a
        per-sample generated-variable id derived from the number list.

        Args:
            op_target (Tensor): (batch, seq) equation token ids.
            eq_len (list[int]): per-sample equation lengths.
            num_list (list): per-sample number lists from the problem text.

        Returns:
            tuple(Tensor, list[int]): adjusted token ids and updated lengths
            (each original length + 3 for the added tokens).
        """
        batch_size, batch_len = op_target.size()
        # change NUM
        # target_mask = torch.ge(op_target, self.min_NUM) * torch.le(op_target, self.max_NUM).to(torch.long)
        # op_target = (op_target + self.UNK - self.min_NUM + 4) * target_mask + op_target * (1 - target_mask)
        # change constants
        target_mask = torch.ge(op_target, self.min_CON) * \
            torch.le(op_target, self.max_NUM).to(torch.long)
        op_target = (op_target + 3) * target_mask + \
            op_target * (1 - target_mask)
        # change unk
        target_mask = torch.eq(op_target, self.UNK).to(torch.long)
        op_target = (self.min_NUM + 3) * target_mask + \
            op_target * (1 - target_mask)
        # change +/-/*//
        target_mask = torch.ge(op_target, self.ADD) * \
            torch.le(op_target, self.POWER - 1).to(torch.long)
        op_target = (op_target + 2) * target_mask + \
            op_target * (1 - target_mask)
        # change padding
        #print(eq_len, num_list)
        target_mask = torch.tensor([[1] * eq_len[b] + [0] * (batch_len - eq_len[b])
                                    for b in range(batch_size)]).to(torch.long).to(self.model._device)
        op_target = op_target * target_mask
        # attach prefix/postfix
        batch_size, _ = op_target.size()
        # if self.do_addeql:
        eq_postfix = torch.zeros((batch_size, 1), dtype=torch.long).to(
            self.model._device) + 2
        op_target = torch.cat([op_target, eq_postfix], dim=1)
        # Write the EQL token at each sample's original end-of-equation position.
        op_target.scatter_(1, torch.tensor([[idx] for idx in eq_len]).to(
            self.model._device), self.model.EQL)
        #op_target[torch.arange(batch_size).unsqueeze(1), eq_len] = self.model.EQL
        #print('op_target', op_target[:3, :10])
        gen_var_prefix = [self.min_NUM + len(num) + 3 for num in num_list]
        #print('gen_var_prefix', self.max_NUM, num_list, gen_var_prefix)
        gen_var_prefix = torch.tensor(
            gen_var_prefix, dtype=torch.long).unsqueeze(1).to(self.model._device)
        # gen_var_prefix = torch.zeros((batch_size, 1), dtype=torch.long).to(self.model._device) + 14 #self.max_NUM + 4
        x_prefix = torch.zeros((batch_size, 1), dtype=torch.long).to(
            self.model._device) + self.model.GEN_VAR
        op_target = torch.cat([x_prefix, gen_var_prefix, op_target], dim=1)
        # if self.do_addeql:
        eq_len = [(idx + 3) for idx in eq_len]
        # else:
        # eq_len = [(idx + 2) for idx in eq_len]
        return op_target, eq_len
    def _train_batch(self, batch):
        """Sort the batch by descending question length (in place) and return the model's training loss."""
        order = torch.sort(batch['ques len'] * -1)[1]
        # Reorder every field of the batch consistently with the sort.
        for k in batch:
            if type(batch[k]) is list:
                batch[k] = [batch[k][i] for i in order]
            else:
                batch[k] = batch[k][order]
        batch_loss = self.model.calculate_loss(batch)
        return batch_loss
    def _eval_batch(self, batch):
        """Sort the batch by descending question length, run inference, and score each sample.

        Appends a per-sample result record to self.output_result and returns
        the per-sample value/equation correctness flag lists.
        """
        order = torch.sort(batch['ques len'] * -1)[1]
        # Reorder every field of the batch consistently with the sort.
        for k in batch:
            if type(batch[k]) is list:
                batch[k] = [batch[k][i] for i in order]
            else:
                batch[k] = batch[k][order]
        test_out, target = self.model.model_test(batch)
        batch_size = len(test_out)
        val_acc = []
        equ_acc = []
        for idx in range(batch_size):
            if self.config["task_type"] == TaskType.SingleEquation:
                val_ac, equ_ac, _, _ = self.evaluator.result(
                    test_out[idx], target[idx])
            elif self.config["task_type"] == TaskType.MultiEquation:
                val_ac, equ_ac, _, _ = self.evaluator.result_multi(
                    test_out[idx], target[idx])
            else:
                raise NotImplementedError
            val_acc.append(val_ac)
            equ_acc.append(equ_ac)
            result = {
                'id': batch['id'][idx],
                'prediction': ' '.join(test_out[idx]),
                'target': ' '.join(target[idx]),
                'number list': batch['num list'][idx],
                'value acc': val_ac,
                'equ acc': equ_ac
            }
            self.output_result.append(result)
        return val_acc, equ_acc
    def _train_epoch(self):
        """Run one training epoch; returns (summed loss, formatted epoch time)."""
        epoch_start_time = time.time()
        loss_total = 0.
        self.model.train()
        # print(self.dataloader.dataset.out_symbol2idx); #exit()
        for batch_idx, batch in enumerate(self.dataloader.load_data(DatasetType.Train)):
            # if batch_idx >= 100: continue
            #print('batch_idx', batch_idx)
            # Keep an untouched copy; the model may rewrite batch["equation"].
            batch["raw_equation"] = batch["equation"].clone()
            self.batch_idx = batch_idx + 1
            self.model.zero_grad()
            batch_loss = self._train_batch(batch)
            loss_total += batch_loss
            batch_loss.backward()
            self._optimizer_step()
        epoch_time_cost = time_since(time.time() - epoch_start_time)
        return loss_total, epoch_time_cost
    def fit(self):
        """train model.

        Trains for config["epoch_nums"] epochs, evaluating every
        config["test_step"] epochs (and in each of the last 5 epochs),
        saving the best model and a checkpoint every 5 epochs.
        """
        train_batch_size = self.config["train_batch_size"]
        epoch_nums = self.config["epoch_nums"]
        self.train_batch_nums = int(
            self.dataloader.trainset_nums / train_batch_size) + 1
        self.logger.info("start training...")
        for epo in range(self.start_epoch, epoch_nums):
            self.epoch_i = epo + 1
            self.model.train()
            loss_total, train_time_cost = self._train_epoch()
            self._scheduler_step()
            self.logger.info("epoch [%3d] avr loss [%2.8f] | train time %s"
                             % (self.epoch_i, loss_total/self.train_batch_nums, train_time_cost))
            if epo % self.test_step == 0 or epo > epoch_nums - 5:
                # Without a separate validset, model selection uses test accuracy.
                if self.config["k_fold"] or self.config["validset_divide"] is not True:
                    test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate(
                        DatasetType.Test)
                    self.logger.info("---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                                     % (test_total, test_equ_ac, test_val_ac, test_time_cost))
                    if test_val_ac >= self.best_test_value_accuracy:
                        self.best_test_value_accuracy = test_val_ac
                        self.best_test_equ_accuracy = test_equ_ac
                        self._save_model()
                        self._save_output()
                else:
                    valid_equ_ac, valid_val_ac, valid_total, valid_time_cost = self.evaluate(
                        DatasetType.Valid)
                    self.logger.info("---------- valid total [%d] | valid equ acc [%2.3f] | valid value acc [%2.3f] | valid time %s"
                                     % (valid_total, valid_equ_ac, valid_val_ac, valid_time_cost))
                    test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate(
                        DatasetType.Test)
                    self.logger.info("---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                                     % (test_total, test_equ_ac, test_val_ac, test_time_cost))
                    if valid_val_ac >= self.best_valid_value_accuracy:
                        self.best_valid_value_accuracy = valid_val_ac
                        self.best_valid_equ_accuracy = valid_equ_ac
                        self.best_test_value_accuracy = test_val_ac
                        self.best_test_equ_accuracy = test_equ_ac
                        self._save_model()
                        self._save_output()
            if epo % 5 == 0:
                self._save_checkpoint()
        self.logger.info('''training finished.
best valid result: equation accuracy [%2.3f] | value accuracy [%2.3f]
best test result : equation accuracy [%2.3f] | value accuracy [%2.3f]'''
                         % (self.best_valid_equ_accuracy, self.best_valid_value_accuracy,
                            self.best_test_equ_accuracy, self.best_test_value_accuracy))
    def evaluate(self, eval_set):
        """evaluate model.

        Args:
            eval_set (str): [valid | test], the dataset for evaluation.

        Returns:
            tuple(float,float,int,str):
            equation accuracy, value accuracy, count of evaluated datas, formatted time string of evaluation time.

        Note: evaluation is capped at the first 3000 batches of the split;
        raises ZeroDivisionError if the split yields no samples.
        """
        self.model.eval()
        value_ac = 0
        equation_ac = 0
        eval_total = 0
        self.output_result = []
        test_start_time = time.time()
        for batch_idx, batch in enumerate(self.dataloader.load_data(eval_set)):
            if batch_idx >= 3000:
                continue
            batch["raw_equation"] = batch["equation"].clone()
            # batch["equation"], batch['equ len'] = self.adjust_equ(batch["raw_equation"], batch['equ len'],
            #                                                       batch['num list'])
            batch_val_ac, batch_equ_ac = self._eval_batch(batch)
            value_ac += batch_val_ac.count(True)
            equation_ac += batch_equ_ac.count(True)
            eval_total += len(batch_val_ac)
            pass
        test_time_cost = time_since(time.time() - test_start_time)
        return equation_ac / eval_total, value_ac / eval_total, eval_total, test_time_cost
    def test(self):
        """test model.

        Loads the best saved model, evaluates it on the test split, stores the
        resulting accuracies on self.best_test_* and saves the output records.
        """
        self._load_model()
        self.model.eval()
        value_ac = 0
        equation_ac = 0
        eval_total = 0
        self.output_result = []
        test_start_time = time.time()
        for batch in self.dataloader.load_data(DatasetType.Test):
            batch_val_ac, batch_equ_ac = self._eval_batch(batch)
            value_ac += batch_val_ac.count(True)
            equation_ac += batch_equ_ac.count(True)
            eval_total += len(batch_val_ac)
        self.best_test_equ_accuracy = equation_ac/eval_total
        self.best_test_value_accuracy = value_ac/eval_total
        test_time_cost = time_since(time.time() - test_start_time)
        self.logger.info("test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                         % (eval_total, equation_ac/eval_total, value_ac/eval_total, test_time_cost))
        self._save_output()
class HMSTrainer(GTSTrainer):
    """hms trainer: reuses the GTS training loop but with a single Adam
    optimizer over all model parameters and a configurable scheduler gamma.
    """
    def __init__(self, config, model, dataloader, evaluator):
        """
        Args:
            config (config): An instance object of Config, used to record parameter information.
            model (Model): An object of deep-learning model.
            dataloader (Dataloader): dataloader object.
            evaluator (Evaluator): evaluator object.
        """
        super().__init__(config, model, dataloader, evaluator)
    def _build_optimizer(self):
        """Create one Adam optimizer for all parameters and a StepLR scheduler with config["scheduler_gamma"]."""
        self.optimizer = torch.optim.Adam(self.model.parameters(
        ), lr=self.config["learning_rate"], weight_decay=self.config["weight_decay"])
        self.scheduler = torch.optim.lr_scheduler.StepLR(
            self.optimizer, step_size=self.config["step_size"], gamma=self.config["scheduler_gamma"])
    def _optimizer_step(self):
        # Apply one optimizer update using the accumulated gradients.
        self.optimizer.step()
    def _scheduler_step(self):
        # Advance the learning-rate scheduler by one epoch.
        self.scheduler.step()
    def _load_checkpoint(self):
        """Restore model/optimizer/scheduler state and training progress from config["checkpoint_path"]."""
        #check_pnt = torch.load(self.config["checkpoint_path"],map_location="cpu")
        check_pnt = torch.load(
            self.config["checkpoint_path"], map_location=self.config["map_location"])
        # load parameter of model
        self.model.load_state_dict(check_pnt["model"])
        # load parameter of optimizer
        self.optimizer.load_state_dict(check_pnt["optimizer"])
        self.scheduler.load_state_dict(check_pnt["scheduler"])
        # other parameter
        self.start_epoch = check_pnt["start_epoch"]
        self.best_valid_value_accuracy = check_pnt["best_valid_value_accuracy"]
        self.best_valid_equ_accuracy = check_pnt["best_valid_equ_accuracy"]
        self.best_test_value_accuracy = check_pnt["best_test_value_accuracy"]
        self.best_test_equ_accuracy = check_pnt["best_test_equ_accuracy"]
        self.best_folds_accuracy = check_pnt["best_folds_accuracy"]
    def _save_checkpoint(self):
        """Serialize model/optimizer/scheduler state and training progress to config["checkpoint_path"]."""
        check_pnt = {
            "model": self.model.state_dict(),
            "optimizer": self.optimizer.state_dict(),
            "scheduler": self.scheduler.state_dict(),
            "start_epoch": self.epoch_i,
            "best_valid_value_accuracy": self.best_valid_value_accuracy,
            "best_valid_equ_accuracy": self.best_valid_equ_accuracy,
            "best_test_value_accuracy": self.best_test_value_accuracy,
            "best_test_equ_accuracy": self.best_test_equ_accuracy,
            "best_folds_accuracy": self.best_folds_accuracy,
            "fold_t": self.config["fold_t"]
        }
        torch.save(check_pnt, self.config["checkpoint_path"])
class TSNTrainer(AbstractTrainer):
"""tsn trainer, used to implement training, testing, parameter searching for deep-learning model TSN.
example of instantiation:
>>> trainer = TSNTrainer(config, model, dataloader, evaluator)
for training:
>>> trainer.fit()
for testing:
>>> trainer.test()
for parameter searching:
>>> trainer.param_search()
"""
    def __init__(self, config, model, dataloader, evaluator):
        """
        Args:
            config (config): An instance object of Config, used to record parameter information.
            model (Model): An object of deep-learning model.
            dataloader (Dataloader): dataloader object.
            evaluator (Evaluator): evaluator object.

        expected that config includes these parameters below:

        learning_rate (float): learning rate of model

        train_batch_size (int): the training batch size.

        epoch_nums (int): number of epochs.

        step_size (int): step_size of scheduler.

        trained_model_path (str): a path of file which is used to save parameters of best model.

        checkpoint_path (str): a path of file which is used save checkpoint of training progress.

        output_path (str|None): a path of a json file which is used to save test output infomation fo model.

        resume (bool): start training from last checkpoint.

        validset_divide (bool): whether to split validset. if True, the dataset is split to trainset-validset-testset. if False, the dataset is split to trainset-testset.

        test_step (int): the epoch number of training after which conducts the evaluation on test.

        best_folds_accuracy (list|None): when running k-fold cross validation, this keeps the accuracy of folds that already run.
        """
        super().__init__(config, model, dataloader, evaluator)
        # TSN trains the teacher and student networks in separate phases, so
        # epoch progress is tracked per network (restored on checkpoint resume).
        self.t_start_epoch = 0
        self.s_start_epoch = 0
        self.t_epoch_i = 0
        self.s_epoch_i = 0
        self._build_optimizer()
        if config["resume"]:
            self._load_checkpoint()
def _build_optimizer(self):
# optimizer
self.t_embedder_optimizer = torch.optim.Adam(self.model.t_embedder.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.t_encoder_optimizer = torch.optim.Adam(self.model.t_encoder.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.t_decoder_optimizer = torch.optim.Adam(self.model.t_decoder.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.t_node_generater_optimizer = torch.optim.Adam(self.model.t_node_generater.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.t_merge_optimizer = torch.optim.Adam(self.model.t_merge.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.s_embedder_optimizer = torch.optim.Adam(self.model.s_embedder.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.s_encoder_optimizer = torch.optim.Adam(self.model.s_encoder.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.s_decoder_optimizer1 = torch.optim.Adam(self.model.s_decoder_1.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.s_node_generater_optimizer1 = torch.optim.Adam(self.model.s_node_generater_1.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.s_merge_optimizer1 = torch.optim.Adam(self.model.s_merge_1.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.s_decoder_optimizer2 = torch.optim.Adam(self.model.s_decoder_2.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.s_node_generater_optimizer2 = torch.optim.Adam(self.model.s_node_generater_2.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
self.s_merge_optimizer2 = torch.optim.Adam(self.model.s_merge_2.parameters(
), self.config["learning_rate"], weight_decay=self.config["weight_decay"])
# scheduler
self.t_embedder_scheduler = torch.optim.lr_scheduler.StepLR(
self.t_embedder_optimizer, step_size=self.config["step_size"], gamma=0.5)
self.t_encoder_scheduler = torch.optim.lr_scheduler.StepLR(
self.t_encoder_optimizer, step_size=self.config["step_size"], gamma=0.5)
self.t_decoder_scheduler = torch.optim.lr_scheduler.StepLR(
self.t_decoder_optimizer, step_size=self.config["step_size"], gamma=0.5)
self.t_node_generater_scheduler = torch.optim.lr_scheduler.StepLR(
self.t_node_generater_optimizer, step_size=self.config["step_size"], gamma=0.5)
self.t_merge_scheduler = torch.optim.lr_scheduler.StepLR(
self.t_merge_optimizer, step_size=self.config["step_size"], gamma=0.5)
self.s_embedder_scheduler = torch.optim.lr_scheduler.StepLR(
self.s_embedder_optimizer, step_size=self.config["step_size"], gamma=0.5)
self.s_encoder_scheduler = torch.optim.lr_scheduler.StepLR(
self.s_encoder_optimizer, step_size=self.config["step_size"], gamma=0.5)
self.s_decoder_scheduler1 = torch.optim.lr_scheduler.StepLR(
self.s_decoder_optimizer1, step_size=self.config["step_size"], gamma=0.5)
self.s_node_generater_scheduler1 = torch.optim.lr_scheduler.StepLR(
self.s_node_generater_optimizer1, step_size=self.config["step_size"], gamma=0.5)
self.s_merge_scheduler1 = torch.optim.lr_scheduler.StepLR(
self.s_merge_optimizer1, step_size=self.config["step_size"], gamma=0.5)
self.s_decoder_scheduler2 = torch.optim.lr_scheduler.StepLR(
self.s_decoder_optimizer2, step_size=self.config["step_size"], gamma=0.5)
self.s_node_generater_scheduler2 = torch.optim.lr_scheduler.StepLR(
self.s_node_generater_optimizer2, step_size=self.config["step_size"], gamma=0.5)
self.s_merge_scheduler2 = torch.optim.lr_scheduler.StepLR(
self.s_merge_optimizer2, step_size=self.config["step_size"], gamma=0.5)
    def _save_checkpoint(self):
        """Serialize the TSN model plus every per-module optimizer/scheduler
        state and the per-network training progress to config["checkpoint_path"].
        """
        check_pnt = {
            "model": self.model.state_dict(),
            "t_embedder_optimizer": self.t_embedder_optimizer.state_dict(),
            "t_encoder_optimizer": self.t_encoder_optimizer.state_dict(),
            "t_decoder_optimizer": self.t_decoder_optimizer.state_dict(),
            "t_generate_optimizer": self.t_node_generater_optimizer.state_dict(),
            "t_merge_optimizer": self.t_merge_optimizer.state_dict(),
            "t_embedder_scheduler": self.t_embedder_scheduler.state_dict(),
            "t_encoder_scheduler": self.t_encoder_scheduler.state_dict(),
            "t_decoder_scheduler": self.t_decoder_scheduler.state_dict(),
            "t_generate_scheduler": self.t_node_generater_scheduler.state_dict(),
            "t_merge_scheduler": self.t_merge_scheduler.state_dict(),
            "s_embedder_optimizer": self.s_embedder_optimizer.state_dict(),
            "s_encoder_optimizer": self.s_encoder_optimizer.state_dict(),
            "s_decoder_optimizer1": self.s_decoder_optimizer1.state_dict(),
            "s_generate_optimizer1": self.s_node_generater_optimizer1.state_dict(),
            "s_merge_optimizer1": self.s_merge_optimizer1.state_dict(),
            "s_decoder_optimizer2": self.s_decoder_optimizer2.state_dict(),
            "s_generate_optimizer2": self.s_node_generater_optimizer2.state_dict(),
            "s_merge_optimizer2": self.s_merge_optimizer2.state_dict(),
            "s_embedder_scheduler": self.s_embedder_scheduler.state_dict(),
            "s_encoder_scheduler": self.s_encoder_scheduler.state_dict(),
            "s_decoder_scheduler1": self.s_decoder_scheduler1.state_dict(),
            "s_generate_scheduler1": self.s_node_generater_scheduler1.state_dict(),
            "s_merge_scheduler1": self.s_merge_scheduler1.state_dict(),
            "s_decoder_scheduler2": self.s_decoder_scheduler2.state_dict(),
            "s_generate_scheduler2": self.s_node_generater_scheduler2.state_dict(),
            "s_merge_scheduler2": self.s_merge_scheduler2.state_dict(),
            "t_start_epoch": self.t_epoch_i,
            "s_start_epoch": self.s_epoch_i,
            "best_valid_value_accuracy": self.best_valid_value_accuracy,
            "best_valid_equ_accuracy": self.best_valid_equ_accuracy,
            "best_test_value_accuracy": self.best_test_value_accuracy,
            "best_test_equ_accuracy": self.best_test_equ_accuracy,
            "best_folds_accuracy": self.best_folds_accuracy,
            "fold_t": self.config["fold_t"]
        }
        torch.save(check_pnt, self.config["checkpoint_path"])
def _load_checkpoint(self):
check_pnt = torch.load(
self.config["checkpoint_path"], map_location=self.config["map_location"])
# load parameter of model
self.model.load_state_dict(check_pnt["model"])
# load parameter of optimizer
self.t_embedder_optimizer.load_state_dict(
check_pnt['t_embedder_optimizer'])
self.t_encoder_optimizer.load_state_dict(
check_pnt['t_encoder_optimizer'])
self.t_decoder_optimizer.load_state_dict(
check_pnt['t_decoder_optimizer'])
self.t_node_generater_optimizer.load_state_dict(
check_pnt['t_node_generater_optimizer'])
self.t_merge_optimizer.load_state_dict(check_pnt['t_merge_optimizer'])
self.s_embedder_optimizer.load_state_dict(
check_pnt['s_embedder_optimizer'])
self.s_encoder_optimizer.load_state_dict(
check_pnt['s_encoder_optimizer'])
self.s_decoder_optimizer1.load_state_dict(
check_pnt['s_decoder_optimizer1'])
self.s_node_generater_optimizer1.load_state_dict(
check_pnt['s_node_generater_optimizer1'])
self.s_merge_optimizer1.load_state_dict(
check_pnt['s_merge_optimizer1'])
self.s_decoder_optimizer2.load_state_dict(
check_pnt['s_decoder_optimizer2'])
self.s_node_generater_optimizer2.load_state_dict(
check_pnt['s_node_generater_optimizer2'])
self.s_merge_optimizer2.load_state_dict(
check_pnt['s_merge_optimizer2'])
# load parameter of scheduler
self.t_embedder_scheduler.load_state_dict(
check_pnt['t_embedder_scheduler'])
self.t_encoder_scheduler.load_state_dict(
check_pnt['t_encoder_scheduler'])
self.t_decoder_scheduler.load_state_dict(
check_pnt['t_decoder_scheduler'])
self.t_node_generater_scheduler.load_state_dict(
check_pnt['t_node_generater_scheduler'])
self.t_merge_scheduler.load_state_dict(check_pnt['t_merge_scheduler'])
self.s_embedder_scheduler.load_state_dict(
check_pnt['s_embedder_scheduler'])
self.s_encoder_scheduler.load_state_dict(
check_pnt['s_encoder_scheduler'])
self.s_decoder_scheduler1.load_state_dict(
check_pnt['s_decoder_scheduler1'])
self.s_node_generater_scheduler1.load_state_dict(
check_pnt['s_node_generater_scheduler1'])
self.s_merge_scheduler1.load_state_dict(
check_pnt['s_merge_scheduler1'])
self.s_decoder_scheduler2.load_state_dict(
check_pnt['s_decoder_scheduler2'])
self.s_node_generater_scheduler2.load_state_dict(
check_pnt['s_node_generater_scheduler2'])
self.s_merge_scheduler2.load_state_dict(
check_pnt['s_merge_scheduler2'])
# other parameter
self.t_start_epoch = check_pnt["t_start_epoch"]
self.s_start_epoch = check_pnt['s_start_epoch']
self.best_valid_value_accuracy = check_pnt["best_valid_value_accuracy"]
self.best_valid_equ_accuracy = check_pnt["best_valid_equ_accuracy"]
self.best_test_value_accuracy = check_pnt["best_test_value_accuracy"]
self.best_test_equ_accuracy = check_pnt["best_test_equ_accuracy"]
self.best_folds_accuracy = check_pnt["best_folds_accuracy"]
def _teacher_net_train(self):
self.model.t_embedder.train()
self.model.t_encoder.train()
self.model.t_decoder.train()
self.model.t_node_generater.train()
self.model.t_merge.train()
self.model.s_embedder.eval()
self.model.s_encoder.eval()
self.model.s_decoder_1.eval()
self.model.s_node_generater_1.eval()
self.model.s_merge_1.eval()
self.model.s_decoder_2.eval()
self.model.s_node_generater_2.eval()
self.model.s_merge_2.eval()
def _student_net_train(self):
self.model.t_embedder.eval()
self.model.t_encoder.eval()
self.model.t_decoder.eval()
self.model.t_node_generater.eval()
self.model.t_merge.eval()
self.model.s_embedder.train()
self.model.s_encoder.train()
self.model.s_decoder_1.train()
self.model.s_node_generater_1.train()
self.model.s_merge_1.train()
self.model.s_decoder_2.train()
self.model.s_node_generater_2.train()
self.model.s_merge_2.train()
def _teacher_optimizer_step(self):
self.t_embedder_optimizer.step()
self.t_encoder_optimizer.step()
self.t_decoder_optimizer.step()
self.t_node_generater_optimizer.step()
self.t_merge_optimizer.step()
def _student_optimizer_step(self):
self.s_embedder_optimizer.step()
self.s_encoder_optimizer.step()
self.s_decoder_optimizer1.step()
self.s_node_generater_optimizer1.step()
self.s_merge_optimizer1.step()
self.s_decoder_optimizer2.step()
self.s_node_generater_optimizer2.step()
self.s_merge_optimizer2.step()
def _teacher_scheduler_step(self):
self.t_embedder_scheduler.step()
self.t_encoder_scheduler.step()
self.t_decoder_scheduler.step()
self.t_node_generater_scheduler.step()
self.t_merge_scheduler.step()
def _student_scheduler_step(self):
self.s_embedder_scheduler.step()
self.s_encoder_scheduler.step()
self.s_decoder_scheduler1.step()
self.s_node_generater_scheduler1.step()
self.s_merge_scheduler1.step()
self.s_decoder_scheduler2.step()
self.s_node_generater_scheduler2.step()
self.s_merge_scheduler2.step()
def _train_teacher_net_batch(self, batch):
batch_loss = self.model.teacher_calculate_loss(batch)
return batch_loss
def _train_student_net_batch(self, batch):
batch_loss = self.model.student_calculate_loss(batch)
return batch_loss
def _eval_teacher_net_batch(self, batch):
test_out, target = self.model.teacher_test(batch)
batch_size = len(test_out)
val_acc = []
equ_acc = []
for idx in range(batch_size):
if self.config["task_type"] == TaskType.SingleEquation:
val_ac, equ_ac, _, _ = self.evaluator.result(
test_out[idx], target[idx])
elif self.config["task_type"] == TaskType.MultiEquation:
val_ac, equ_ac, _, _ = self.evaluator.result_multi(
test_out[idx], target[idx])
else:
raise NotImplementedError
val_acc.append(val_ac)
equ_acc.append(equ_ac)
return val_acc, equ_acc
def _eval_student_net_batch(self, batch):
test_out1, score1, test_out2, score2, target = self.model.student_test(
batch)
batch_size = len(test_out1)
val_acc = []
equ_acc = []
s1_val_acc = []
s1_equ_acc = []
s2_val_acc = []
s2_equ_acc = []
for idx in range(batch_size):
if self.config["task_type"] == TaskType.SingleEquation:
val_ac1, equ_ac1, _, _ = self.evaluator.result(
test_out1[idx], target[idx])
val_ac2, equ_ac2, _, _ = self.evaluator.result(
test_out2[idx], target[idx])
elif self.config["task_type"] == TaskType.MultiEquation:
val_ac1, equ_ac1, _, _ = self.evaluator.result_multi(
test_out1[idx], target[idx])
val_ac2, equ_ac2, _, _ = self.evaluator.result_multi(
test_out2[idx], target[idx])
else:
raise NotImplementedError
if score1 > score2:
val_acc.append(val_ac1)
equ_acc.append(equ_ac1)
else:
val_acc.append(val_ac2)
equ_acc.append(equ_ac2)
s1_val_acc.append(val_ac1)
s1_equ_acc.append(equ_ac1)
s2_val_acc.append(val_ac2)
s2_equ_acc.append(equ_ac2)
return val_acc, equ_acc, s1_val_acc, s1_equ_acc, s2_val_acc, s2_equ_acc
    def _build_soft_target_batch(self, batch):
        # Pre-compute soft targets for this batch on the model — presumably
        # teacher outputs used to supervise the student decoders; confirm
        # against the TSN model's init_soft_target implementation.
        self.model.init_soft_target(batch)
def _train_epoch(self, module_name):
epoch_start_time = time.time()
loss_total = 0.
for batch_idx, batch in enumerate(self.dataloader.load_data(DatasetType.Train)):
self.batch_idx = batch_idx + 1
self.model.zero_grad()
if module_name == 'teacher_net':
self._teacher_net_train()
batch_loss = self._train_teacher_net_batch(batch)
self._teacher_optimizer_step()
elif module_name == 'student_net':
self._student_net_train()
batch_loss = self._train_student_net_batch(batch)
self._student_optimizer_step()
else:
NotImplementedError("TSN has no {} module".format(module_name))
loss_total += batch_loss
epoch_time_cost = time_since(time.time() - epoch_start_time)
return loss_total, epoch_time_cost
def fit(self):
"""train model.
"""
train_batch_size = self.config["train_batch_size"]
epoch_nums = self.config["epoch_nums"]
self.train_batch_nums = int(
self.dataloader.trainset_nums / train_batch_size) + 1
self.logger.info("start training...")
self.logger.info("start training teacher net...")
for epo in range(self.t_start_epoch, epoch_nums):
self.t_epoch_i = epo + 1
self.model.train()
loss_total, train_time_cost = self._train_epoch(
module_name='teacher_net')
self._teacher_scheduler_step()
self.logger.info("epoch [%3d] teacher net avr loss [%2.8f] | train time %s"
% (self.t_epoch_i, loss_total/self.train_batch_nums, train_time_cost))
if epo % self.test_step == 0 or epo > epoch_nums - 5:
if self.config["k_fold"] or self.config["validset_divide"] is not True:
test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate_teacher(
DatasetType.Test)
self.logger.info("---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
% (test_total, test_equ_ac, test_val_ac, test_time_cost))
if test_val_ac >= self.best_test_value_accuracy:
self.best_test_value_accuracy = test_val_ac
self.best_test_equ_accuracy = test_equ_ac
self._save_model()
self._save_output()
else:
valid_equ_ac, valid_val_ac, valid_total, valid_time_cost = self.evaluate_teacher(
DatasetType.Valid)
self.logger.info("---------- valid total [%d] | valid equ acc [%2.3f] | valid value acc [%2.3f] | valid time %s"
% (valid_total, valid_equ_ac, valid_val_ac, valid_time_cost))
test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate_teacher(
DatasetType.Test)
self.logger.info("---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
% (test_total, test_equ_ac, test_val_ac, test_time_cost))
if valid_val_ac >= self.best_valid_value_accuracy:
self.best_valid_value_accuracy = valid_val_ac
self.best_valid_equ_accuracy = valid_equ_ac
self.best_test_value_accuracy = test_val_ac
self.best_test_equ_accuracy = test_equ_ac
self._save_model()
self._save_output()
if epo % 5 == 0:
self._save_checkpoint()
self._load_model()
self.logger.info("build soft target...")
self.model.eval()
for batch_idx, batch in enumerate(self.dataloader.load_data(DatasetType.Train)):
self.batch_idx = batch_idx + 1
self._build_soft_target_batch(batch)
self.model.init_encoder_mask(self.config['train_batch_size'])
self.logger.info("start training student net...")
self.best_valid_value_accuracy = 0.
self.best_valid_equ_accuracy = 0.
self.best_test_value_accuracy = 0.
self.best_test_equ_accuracy = 0.
for epo in range(self.s_start_epoch, epoch_nums):
self.s_epoch_i = epo + 1
self.model.train()
loss_total, train_time_cost = self._train_epoch(
module_name='student_net')
self._student_scheduler_step()
self.logger.info("epoch [%3d] student net avr loss [%2.8f] | train time %s"
% (self.s_epoch_i, loss_total/self.train_batch_nums, train_time_cost))
if epo % self.test_step == 0 or epo > epoch_nums - 5:
if self.config["k_fold"] or self.config["validset_divide"] is not True:
test_equ_ac, test_val_ac, s1_equ_ac, s1_val_ac, s2_equ_ac, s2_val_ac, test_total, test_time_cost = self.evaluate_student(
DatasetType.Test)
self.logger.info("---------- test total [%d] | student1 equ acc [%2.3f] | student1 value acc [%2.3f] | student2 equ acc [%2.3f] | student2 value acc [%2.3f]"
% (test_total, s1_equ_ac, s1_val_ac, s2_equ_ac, s2_val_ac))
self.logger.info("---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
% (test_total, test_equ_ac, test_val_ac, test_time_cost))
if test_val_ac >= self.best_test_value_accuracy:
self.best_test_value_accuracy = test_val_ac
self.best_test_equ_accuracy = test_equ_ac
self._save_model()
else:
valid_equ_ac, valid_val_ac, s1_equ_ac, s1_val_ac, s2_equ_ac, s2_val_ac, valid_total, valid_time_cost = self.evaluate_student(
DatasetType.Valid)
self.logger.info("---------- valid total [%d] | student1 equ acc [%2.3f] | student1 value acc [%2.3f] | student2 equ acc [%2.3f] | student2 value acc [%2.3f]"
% (test_total, s1_equ_ac, s1_val_ac, s2_equ_ac, s2_val_ac))
self.logger.info("---------- valid total [%d] | valid equ acc [%2.3f] | valid value acc [%2.3f] | valid time %s"
% (valid_total, valid_equ_ac, valid_val_ac, valid_time_cost))
test_equ_ac, test_val_ac, s1_equ_ac, s1_val_ac, s2_equ_ac, s2_val_ac, test_total, test_time_cost = self.evaluate_student(
DatasetType.Test)
self.logger.info("---------- test total [%d] | student1 equ acc [%2.3f] | student1 value acc [%2.3f] | student2 equ acc [%2.3f] | student2 value acc [%2.3f]"
% (test_total, s1_equ_ac, s1_val_ac, s2_equ_ac, s2_val_ac))
self.logger.info("---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
% (test_total, test_equ_ac, test_val_ac, test_time_cost))
if valid_val_ac >= self.best_valid_value_accuracy:
self.best_valid_value_accuracy = valid_val_ac
self.best_valid_equ_accuracy = valid_equ_ac
self.best_test_value_accuracy = test_val_ac
self.best_test_equ_accuracy = test_equ_ac
self._save_model()
if epo % 5 == 0:
self._save_checkpoint()
self.logger.info('''training finished.
best valid result: equation accuracy [%2.3f] | value accuracy [%2.3f]
best test result : equation accuracy [%2.3f] | value accuracy [%2.3f]'''
% (self.best_valid_equ_accuracy, self.best_valid_value_accuracy,
self.best_test_equ_accuracy, self.best_test_value_accuracy))
def evaluate_teacher(self, eval_set):
"""evaluate teacher net.
Args:
eval_set (str): [valid | test], the dataset for evaluation.
Returns:
tuple(float,float,int,str):
equation accuracy, value accuracy, count of evaluated datas, formatted time string of evaluation time.
"""
self.model.eval()
value_ac = 0
equation_ac = 0
eval_total = 0
test_start_time = time.time()
for batch in self.dataloader.load_data(eval_set):
batch_val_ac, batch_equ_ac = self._eval_teacher_net_batch(batch)
value_ac += batch_val_ac.count(True)
equation_ac += batch_equ_ac.count(True)
eval_total += len(batch_val_ac)
test_time_cost = time_since(time.time() - test_start_time)
return equation_ac / eval_total, value_ac / eval_total, eval_total, test_time_cost
def evaluate_student(self, eval_set):
"""evaluate student net.
Args:
eval_set (str): [valid | test], the dataset for evaluation.
Returns:
tuple(float,float,float,float,float,float,int,str):
equation accuracy, value accuracy,
equation accuracy of student net 1, value accuracy of student net 1,
equation accuracy of student net 2, value accuracy of student net 2,
count of evaluated datas, formatted time string of evaluation time.
"""
self.model.eval()
value_ac = 0
equation_ac = 0
s1_value_ac = 0
s1_equation_ac = 0
s2_value_ac = 0
s2_equation_ac = 0
eval_total = 0
test_start_time = time.time()
for batch in self.dataloader.load_data(eval_set):
batch_val_ac, batch_equ_ac, s1_val_ac, s1_equ_ac, s2_val_ac, s2_equ_ac = self._eval_student_net_batch(
batch)
value_ac += batch_val_ac.count(True)
equation_ac += batch_equ_ac.count(True)
s1_value_ac += s1_val_ac.count(True)
s1_equation_ac += s1_equ_ac.count(True)
s2_value_ac += s2_val_ac.count(True)
s2_equation_ac += s2_equ_ac.count(True)
eval_total += len(batch_val_ac)
test_time_cost = time_since(time.time() - test_start_time)
return equation_ac / eval_total, value_ac / eval_total, s1_equation_ac / eval_total, s1_value_ac / eval_total,\
s2_equation_ac / eval_total, s2_value_ac / \
eval_total, eval_total, test_time_cost
def test(self):
"""test model.
"""
self._load_model()
self.model.eval()
value_ac = 0
equation_ac = 0
eval_total = 0
self.output_result = []
test_start_time = time.time()
for batch in self.dataloader.load_data(DatasetType.Test):
batch_val_ac, batch_equ_ac, s1_val_ac, s1_equ_ac, s2_val_ac, s2_equ_ac = self._eval_student_net_batch(
batch)
value_ac += batch_val_ac.count(True)
equation_ac += batch_equ_ac.count(True)
eval_total += len(batch_val_ac)
self.best_test_equ_accuracy = equation_ac/eval_total
self.best_test_value_accuracy = value_ac/eval_total
test_time_cost = time_since(time.time() - test_start_time)
self.logger.info("test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
% (eval_total, equation_ac/eval_total, value_ac/eval_total, test_time_cost))
self._save_output()
class EPTTrainer(AbstractTrainer):
    """ept trainer, used to implement training, testing, parameter searching for deep-learning model EPT.
    example of instantiation:
    >>> trainer = EPTTrainer(config, model, dataloader, evaluator)
    for training:
    >>> trainer.fit()
    for testing:
    >>> trainer.test()
    for parameter searching:
    >>> trainer.param_search()
    """
    def __init__(self, config, model, dataloader, evaluator):
        """
        Args:
            config (config): An instance object of Config, used to record parameter information.
            model (Model): An object of deep-learning model.
            dataloader (Dataloader): dataloader object.
            evaluator (Evaluator): evaluator object.
        expected that config includes these parameters below:
            learning_rate (float): learning rate of model
            train_batch_size (int): the training batch size.
            epoch_nums (int): number of epochs.
            gradient_accumulation_steps (int): gradient accumulation steps.
            epoch_warmup (int): epoch warmup.
            fix_encoder_embedding (bool): whether require gradient of embedding module of encoder
            trained_model_path (str): a path of file which is used to save parameters of best model.
            checkpoint_path (str): a path of file which is used save checkpoint of training progress.
            output_path (str|None): a path of a json file which is used to save test output infomation fo model.
            resume (bool): start training from last checkpoint.
            validset_divide (bool): whether to split validset. if True, the dataset is split to trainset-validset-testset. if False, the dataset is split to trainset-testset.
            test_step (int): the epoch number of training after which conducts the evaluation on test.
            best_folds_accuracy (list|None): when running k-fold cross validation, this keeps the accuracy of folds that already run.
        """
        super().__init__(config, model, dataloader, evaluator)
        # Mini-batches per epoch (the "+1" covers a last partial batch).
        self._minibatch_per_epoch = int(
            self.dataloader.trainset_nums / self.config["train_batch_size"]) + 1
        # Optimizer steps per epoch: gradients are accumulated over
        # `gradient_accumulation_steps` mini-batches before each step.
        self._step_per_epoch = int(math.ceil(
            self._minibatch_per_epoch / self.config['gradient_accumulation_steps']))
        # Total scheduler steps for the whole run, used by the warmup/decay
        # lambda built in _build_optimizer.
        self._steps_to_go = self._step_per_epoch * self.config["epoch_nums"]
        self._build_optimizer()
        if config["resume"]:
            self._load_checkpoint()
    def _save_checkpoint(self):
        # Serialize model, optimizer, scheduler and training progress so a
        # run can be resumed from exactly this point.
        check_pnt = {
            "model": self.model.state_dict(),
            "optimizer": self.optimizer.state_dict(),
            "scheduler": self.scheduler.state_dict(),
            "start_epoch": self.epoch_i,
            "best_valid_value_accuracy": self.best_valid_value_accuracy,
            "best_valid_equ_accuracy": self.best_valid_equ_accuracy,
            "best_test_value_accuracy": self.best_test_value_accuracy,
            "best_test_equ_accuracy": self.best_test_equ_accuracy,
            "best_folds_accuracy": self.best_folds_accuracy,
            "fold_t": self.config["fold_t"]
        }
        torch.save(check_pnt, self.config["checkpoint_path"])
    def _load_checkpoint(self):
        # Restore the state saved by _save_checkpoint (keys must match).
        check_pnt = torch.load(
            self.config["checkpoint_path"], map_location=self.config["map_location"])
        # load parameter of model
        self.model.load_state_dict(check_pnt["model"])
        # load parameter of optimizer
        self.optimizer.load_state_dict(check_pnt["optimizer"])
        # load parameter of scheduler
        self.scheduler.load_state_dict(check_pnt["scheduler"])
        self.start_epoch = check_pnt["start_epoch"]
        self.best_valid_value_accuracy = check_pnt["best_valid_value_accuracy"]
        self.best_valid_equ_accuracy = check_pnt["best_valid_equ_accuracy"]
        self.best_test_value_accuracy = check_pnt["best_test_value_accuracy"]
        self.best_test_equ_accuracy = check_pnt["best_test_equ_accuracy"]
        self.best_folds_accuracy = check_pnt["best_folds_accuracy"]
    def _train_batch(self, batch):
        # NOTE(review): only the loss value is returned here; the backward
        # pass presumably happens inside calculate_loss — confirm in Model.
        batch_loss = self.model.calculate_loss(batch)
        return batch_loss
    def _eval_batch(self, batch):
        '''seq, seq_length, group_nums, target'''
        # Decode the batch and score each prediction against its target,
        # recording a per-example result entry for the output file.
        test_out, target_out = self.model.model_test(batch)
        batch_size = len(test_out)
        val_acc = []
        equ_acc = []
        for idx in range(batch_size):
            if self.config["task_type"] == TaskType.SingleEquation:
                val_ac, equ_ac, _, _ = self.evaluator.result(
                    test_out[idx], target_out[idx])
            elif self.config["task_type"] == TaskType.MultiEquation:
                val_ac, equ_ac, _, _ = self.evaluator.result_multi(
                    test_out[idx], target_out[idx])
            else:
                raise NotImplementedError
            val_acc.append(val_ac)
            equ_acc.append(equ_ac)
            result = {
                'id': batch['id'][idx],
                'prediction': ' '.join(test_out[idx]),
                'target': ' '.join(target_out[idx]),
                'number list': batch['num list'][idx],
                'value acc': val_ac,
                'equ acc': equ_ac
            }
            self.output_result.append(result)
        return val_acc, equ_acc
    def _train_epoch(self):
        # One epoch with gradient accumulation: the optimizer/scheduler are
        # stepped every `gradient_accumulation_steps` mini-batches.
        epoch_start_time = time.time()
        loss_total = 0.
        # Tracks whether the most recent gradients have been applied yet.
        self.all_grad_applied = True
        self.model.train()
        for batch_idx, batch in enumerate(self.dataloader.load_data(DatasetType.Train)):
            self.batch_idx = batch_idx + 1
            self.model.zero_grad()
            batch_loss = self._train_batch(batch)
            loss_total += batch_loss
            self.all_grad_applied = False
            if self.batch_idx % self.config["gradient_accumulation_steps"] == 0:
                if self.config['gradient_clip'] > 0:
                    # If clipping threshold is set, then clip the gradient
                    torch.nn.utils.clip_grad_norm_(
                        self.model.parameters(), self.config['gradient_clip'])
                # if self._config.gradient_normalize:
                #    # If normalizing gradient is set, then normalize the gradient
                #    self._normalize_gradients(*self.model.parameters())
                # Apply optimizer & scheduler
                self.optimizer.step()
                self.scheduler.step()
                self.all_grad_applied = True
            else:
                # NOTE(review): this branch runs for every non-boundary batch
                # and all_grad_applied is always False here, so the optimizer
                # steps on every mini-batch — which appears to defeat the
                # accumulation above. Verify whether this flush was meant to
                # run only once after the loop ends.
                if not self.all_grad_applied:
                    if self.config['gradient_clip'] > 0:
                        # If clipping threshold is set, then clip the gradient
                        torch.nn.utils.clip_grad_norm_(
                            self.model.parameters(), self.config['gradient_clip'])
                    # if self._config.gradient_normalize:
                    #    # If normalizing gradient is set, then normalize the gradient
                    #    self._normalize_gradients(*self.model.parameters())
                    # Apply optimizer & scheduler
                    self.optimizer.step()
                    self.scheduler.step()
        epoch_time_cost = time_since(time.time() - epoch_start_time)
        return loss_total, epoch_time_cost
    def fit(self):
        """train model.

        Trains for `epoch_nums` epochs; every `test_step` epochs (and in the
        last 5 epochs) evaluates on valid/test and saves the model whenever
        the selection accuracy improves. A checkpoint is written every 5
        epochs.
        """
        train_batch_size = self.config["train_batch_size"]
        epoch_nums = self.config["epoch_nums"]
        self.train_batch_nums = int(
            self.dataloader.trainset_nums / train_batch_size) + 1
        self.logger.info("start training...")
        for epo in range(self.start_epoch, epoch_nums):
            self.epoch_i = epo + 1
            self.model.train()
            loss_total, train_time_cost = self._train_epoch()
            self.logger.info("epoch [%3d] avr loss [%2.8f] | train time %s"
                             % (self.epoch_i, loss_total / self.train_batch_nums, train_time_cost))
            if epo % self.test_step == 0 or epo > epoch_nums - 5:
                if self.config["k_fold"] or self.config["validset_divide"] is not True:
                    # No held-out validset: model selection is done on test.
                    test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate(
                        DatasetType.Test)
                    self.logger.info(
                        "---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                        % (test_total, test_equ_ac, test_val_ac, test_time_cost))
                    if test_val_ac >= self.best_test_value_accuracy:
                        self.best_test_value_accuracy = test_val_ac
                        self.best_test_equ_accuracy = test_equ_ac
                        self._save_model()
                        self._save_output()
                else:
                    valid_equ_ac, valid_val_ac, valid_total, valid_time_cost = self.evaluate(
                        DatasetType.Valid)
                    self.logger.info(
                        "---------- valid total [%d] | valid equ acc [%2.3f] | valid value acc [%2.3f] | valid time %s"
                        % (valid_total, valid_equ_ac, valid_val_ac, valid_time_cost))
                    test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate(
                        DatasetType.Test)
                    self.logger.info(
                        "---------- test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                        % (test_total, test_equ_ac, test_val_ac, test_time_cost))
                    if valid_val_ac >= self.best_valid_value_accuracy:
                        self.best_valid_value_accuracy = valid_val_ac
                        self.best_valid_equ_accuracy = valid_equ_ac
                        self.best_test_value_accuracy = test_val_ac
                        self.best_test_equ_accuracy = test_equ_ac
                        self._save_model()
                        self._save_output()
            if epo % 5 == 0:
                self._save_checkpoint()
        self.logger.info('''training finished.
                            best valid result: equation accuracy [%2.3f] | value accuracy [%2.3f]
                            best test result : equation accuracy [%2.3f] | value accuracy [%2.3f]'''
                         % (self.best_valid_equ_accuracy, self.best_valid_value_accuracy,
                            self.best_test_equ_accuracy, self.best_test_value_accuracy))
    def evaluate(self, eval_set):
        """evaluate model.

        Args:
            eval_set (str): [valid | test], the dataset for evaluation.

        Returns:
            tuple(float,float,int,str):
                equation accuracy, value accuracy, count of evaluated datas, formatted time string of evaluation time.
        """
        self.model.eval()
        value_ac = 0
        equation_ac = 0
        eval_total = 0
        self.output_result = []
        test_start_time = time.time()
        for batch in self.dataloader.load_data(eval_set):
            batch_val_ac, batch_equ_ac = self._eval_batch(batch)
            value_ac += batch_val_ac.count(True)
            equation_ac += batch_equ_ac.count(True)
            eval_total += len(batch_val_ac)
        test_time_cost = time_since(time.time() - test_start_time)
        return equation_ac / eval_total, value_ac / eval_total, eval_total, test_time_cost
    def test(self):
        """test model.

        Loads the best saved model, evaluates on the test set, logs the
        accuracies and writes the per-example output file.
        """
        self._load_model()
        self.model.eval()
        value_ac = 0
        equation_ac = 0
        eval_total = 0
        self.output_result = []
        test_start_time = time.time()
        for batch in self.dataloader.load_data(DatasetType.Test):
            batch_val_ac, batch_equ_ac = self._eval_batch(batch)
            value_ac += batch_val_ac.count(True)
            equation_ac += batch_equ_ac.count(True)
            eval_total += len(batch_val_ac)
        self.best_test_equ_accuracy = equation_ac/eval_total
        self.best_test_value_accuracy = value_ac/eval_total
        test_time_cost = time_since(time.time() - test_start_time)
        self.logger.info("test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"
                         % (eval_total, equation_ac/eval_total, value_ac/eval_total, test_time_cost))
        self._save_output()
    def _build_optimizer(self):
        # Build a Lamb optimizer with parameter groups keyed on
        # (encoder type, no-weight-decay) pairs:
        #   flag 2 -> encoder embeddings (lr frozen if fix_encoder_embedding),
        #   flag 1 -> rest of the encoder (uses learning_rate),
        #   flag 0 -> everything else (optimizer default lr).
        # Parameters whose names contain bias/norm/_embedding get no decay.
        no_w_decay = {'bias', 'norm', 'Norm', '_embedding'}
        parameters = [((2 if 'encoder.embeddings' in n else (1 if 'encoder' in n else 0),
                        any(t in n for t in no_w_decay)), p)
                      for n, p in self.model.named_parameters()]
        # groupby requires the list sorted by the same key.
        parameters = groupby(
            sorted(parameters, key=lambda t: t[0]), key=lambda t: t[0])
        optimizer_grouped_parameters = []
        for (encoder_type_flag, is_without_wd), group in parameters:
            group = {'params': [p for _, p in group]}
            if is_without_wd:
                group['weight_decay'] = 0.0
            if encoder_type_flag == 2 and self.config['fix_encoder_embedding']:
                group['lr'] = 0.0
            elif encoder_type_flag == 1:
                group['lr'] = self.config["learning_rate"]
            optimizer_grouped_parameters.append(group)
        from torch_optimizer import Lamb
        from torch.optim.lr_scheduler import LambdaLR
        self.optimizer = Lamb(optimizer_grouped_parameters,
                              lr=self.config["learning_rate"], eps=1e-08, weight_decay=0.0)
        self.warmup_steps = int(self._step_per_epoch *
                                self.config['epoch_warmup'])
        def lr_lambda(current_step):
            # Linear warmup to warmup_steps, then linear decay to zero at
            # _steps_to_go.
            if current_step < self.warmup_steps:
                return float(current_step) / float(max(1, self.warmup_steps))
            return max(
                0.0, float(self._steps_to_go - current_step) /
                float(max(1, self._steps_to_go - self.warmup_steps))
            )
        # NOTE(review): warmup_steps is always >= 0, so this guard is a no-op.
        if self.warmup_steps >= 0:
            # Build scheduler before restoration
            self.scheduler = LambdaLR(self.optimizer, lr_lambda, -1)
        #self.optimizer = Lamb(self.model.parameters(), lr=self.config["learning_rate"], eps=1e-08, weight_decay=0.0)
    def _normalize_gradients(self, *parameters):
        """
        Normalize gradients (as in NVLAMB optimizer)

        :param parameters: List of parameters whose gradient will be normalized.
        :return: Frobenious Norm before applying normalization.
        """
        parameters = [p for p in parameters if p.grad is not None]
        # Compute total Frobenius norm
        total_norm = 0
        for p in parameters:
            total_norm += p.grad.data.norm(2.0).item() ** 2.0
        total_norm = total_norm ** 0.5
        # Compute normalization constant. Set 1E-12 for minimum value to avoid inf.
        normalizer = 1.0 / max(total_norm, 1e-12)
        for p in parameters:
            p.grad.data.mul_(normalizer)
        return total_norm
    def param_search(self):
        """hyper-parameter search.

        Trains as in fit() but reports test value accuracy to Ray Tune
        instead of saving models or checkpoints.
        """
        train_batch_size = self.config["train_batch_size"]
        epoch_nums = self.config["epoch_nums"]
        self.train_batch_nums = int(
            self.dataloader.trainset_nums / train_batch_size) + 1
        self.logger.info("start training...")
        for epo in range(self.start_epoch, epoch_nums):
            self.epoch_i = epo + 1
            self.model.train()
            loss_total, train_time_cost = self._train_epoch()
            if epo % self.test_step == 0 or epo > epoch_nums - 5:
                test_equ_ac, test_val_ac, test_total, test_time_cost = self.evaluate(
                    DatasetType.Test)
                tune.report(accuracy=test_val_ac)
class PretrainSeq2SeqTrainer(SupervisedTrainer):
    """Seq2Seq trainer variant that gives the (pretrained) input embedding
    module its own learning rate, separate from the rest of the model.
    """
    def __init__(self, config, model, dataloader, evaluator):
        """
        Args:
            config (config): An instance object of Config, used to record parameter information.
            model (Model): An object of deep-learning model.
            dataloader (Dataloader): dataloader object.
            evaluator (Evaluator): evaluator object.
        """
        super().__init__(config, model, dataloader, evaluator)
    def _build_optimizer(self):
        """Build an Adam optimizer with a dedicated learning rate for the
        input embedder; when `share_vocab` is set the input and output
        embedders are one module, so no separate out_embedder group exists.
        """
        if self.config['share_vocab']:
            # BUG FIX: these submodules live on self.model (as in the branch
            # below); referencing them on the trainer raised AttributeError.
            self.optimizer = torch.optim.Adam(
                [
                    {"params": self.model.in_embedder.parameters(
                    ), "lr": self.config["embedding_learning_rate"]},
                    {"params": self.model.encoder.parameters()},
                    {"params": self.model.decoder.parameters()},
                    {"params": self.model.generate_linear.parameters()}
                ],
                lr=self.config["learning_rate"]
            )
        else:
            self.optimizer = torch.optim.Adam(
                [
                    {"params": self.model.in_embedder.parameters(
                    ), "lr": self.config["embedding_learning_rate"]},
                    {"params": self.model.out_embedder.parameters()},
                    {"params": self.model.encoder.parameters()},
                    {"params": self.model.decoder.parameters()},
                    {"params": self.model.generate_linear.parameters()}
                ],
                lr=self.config["learning_rate"]
            )
class PretrainTRNNTrainer(TRNNTrainer):
    """TRNN trainer variant that trains the (pretrained) embedding modules
    with their own learning rate, separate from the seq2seq and answer
    modules.
    """
    def __init__(self, config, model, dataloader, evaluator):
        """
        Args:
            config (config): An instance object of Config, used to record parameter information.
            model (Model): An object of deep-learning model.
            dataloader (Dataloader): dataloader object.
            evaluator (Evaluator): evaluator object.
        """
        super().__init__(config, model, dataloader, evaluator)
    def _build_optimizer(self):
        """Build the seq2seq-module Adam optimizer and the answer-module SGD
        optimizer, each with a dedicated embedding learning rate.
        """
        # The input embedder always gets its own learning rate.
        seq2seq_groups = [
            {'params': self.model.seq2seq_in_embedder.parameters(),
             'lr': self.config["embedding_learning_rate"]},
        ]
        if not self.config['share_vocab']:
            # Separate vocabularies: the output embedder is trained as well.
            seq2seq_groups.append(
                {'params': self.model.seq2seq_out_embedder.parameters()})
        seq2seq_groups += [
            {'params': self.model.seq2seq_encoder.parameters()},
            {'params': self.model.seq2seq_decoder.parameters()},
            {'params': self.model.seq2seq_gen_linear.parameters()},
        ]
        self.optimizer = torch.optim.Adam(
            seq2seq_groups, lr=self.config["seq2seq_learning_rate"])
        self.answer_module_optimizer = torch.optim.SGD(
            [
                {'params': self.model.answer_in_embedder.parameters(),
                 'lr': self.config["embedding_learning_rate"]},
                {'params': self.model.answer_encoder.parameters()},
                {'params': self.model.answer_rnn.parameters()}
            ],
            lr=self.config["ans_learning_rate"],
            momentum=0.9
        )
class MWPBertTrainer(GTSTrainer):
    """GTS-style trainer for MWPBert: the encoder is fine-tuned with its own
    learning rate while the tree-decoder modules (decoder, node generater,
    merge) share the generic learning rate. Each module has its own Adam
    optimizer and StepLR scheduler.
    """
    def __init__(self, config, model, dataloader, evaluator):
        """
        Args:
            config (config): An instance object of Config, used to record parameter information.
            model (Model): An object of deep-learning model.
            dataloader (Dataloader): dataloader object.
            evaluator (Evaluator): evaluator object.
        """
        super().__init__(config, model, dataloader, evaluator)
    def _build_optimizer(self):
        """Create one Adam optimizer and one StepLR scheduler per module."""
        decay = self.config["weight_decay"]
        def make_adam(module, lr):
            # One Adam per module so schedulers can halve each lr independently.
            return torch.optim.Adam(module.parameters(), lr, weight_decay=decay)
        self.encoder_optimizer = make_adam(
            self.model.encoder, self.config['encoding_learning_rate'])
        self.decoder_optimizer = make_adam(
            self.model.decoder, self.config["learning_rate"])
        self.node_generater_optimizer = make_adam(
            self.model.node_generater, self.config["learning_rate"])
        self.merge_optimizer = make_adam(
            self.model.merge, self.config["learning_rate"])
        # scheduler: halve each learning rate every `step_size` epochs.
        step_size = self.config["step_size"]
        self.encoder_scheduler = torch.optim.lr_scheduler.StepLR(
            self.encoder_optimizer, step_size=step_size, gamma=0.5)
        self.decoder_scheduler = torch.optim.lr_scheduler.StepLR(
            self.decoder_optimizer, step_size=step_size, gamma=0.5)
        self.node_generater_scheduler = torch.optim.lr_scheduler.StepLR(
            self.node_generater_optimizer, step_size=step_size, gamma=0.5)
        self.merge_scheduler = torch.optim.lr_scheduler.StepLR(
            self.merge_optimizer, step_size=step_size, gamma=0.5)
    def _save_checkpoint(self):
        """Serialize model, per-module optimizers/schedulers and progress."""
        check_pnt = {
            "model": self.model.state_dict(),
            "encoder_optimizer": self.encoder_optimizer.state_dict(),
            "decoder_optimizer": self.decoder_optimizer.state_dict(),
            "generate_optimizer": self.node_generater_optimizer.state_dict(),
            "merge_optimizer": self.merge_optimizer.state_dict(),
            "encoder_scheduler": self.encoder_scheduler.state_dict(),
            "decoder_scheduler": self.decoder_scheduler.state_dict(),
            "generate_scheduler": self.node_generater_scheduler.state_dict(),
            "merge_scheduler": self.merge_scheduler.state_dict(),
            "start_epoch": self.epoch_i,
            "best_valid_value_accuracy": self.best_valid_value_accuracy,
            "best_valid_equ_accuracy": self.best_valid_equ_accuracy,
            "best_test_value_accuracy": self.best_test_value_accuracy,
            "best_test_equ_accuracy": self.best_test_equ_accuracy,
            "best_folds_accuracy": self.best_folds_accuracy,
            "fold_t": self.config["fold_t"],
        }
        torch.save(check_pnt, self.config["checkpoint_path"])
    def _load_checkpoint(self):
        """Restore everything _save_checkpoint wrote (keys must match)."""
        check_pnt = torch.load(
            self.config["checkpoint_path"], map_location=self.config["map_location"])
        # load parameter of model
        self.model.load_state_dict(check_pnt["model"])
        # load parameter of optimizer
        optimizer_keys = (
            (self.encoder_optimizer, "encoder_optimizer"),
            (self.decoder_optimizer, "decoder_optimizer"),
            (self.node_generater_optimizer, "generate_optimizer"),
            (self.merge_optimizer, "merge_optimizer"),
        )
        for optimizer, key in optimizer_keys:
            optimizer.load_state_dict(check_pnt[key])
        # load parameter of scheduler
        scheduler_keys = (
            (self.encoder_scheduler, "encoder_scheduler"),
            (self.decoder_scheduler, "decoder_scheduler"),
            (self.node_generater_scheduler, "generate_scheduler"),
            (self.merge_scheduler, "merge_scheduler"),
        )
        for scheduler, key in scheduler_keys:
            scheduler.load_state_dict(check_pnt[key])
        # other parameter
        self.start_epoch = check_pnt["start_epoch"]
        self.best_valid_value_accuracy = check_pnt["best_valid_value_accuracy"]
        self.best_valid_equ_accuracy = check_pnt["best_valid_equ_accuracy"]
        self.best_test_value_accuracy = check_pnt["best_test_value_accuracy"]
        self.best_test_equ_accuracy = check_pnt["best_test_equ_accuracy"]
        self.best_folds_accuracy = check_pnt["best_folds_accuracy"]
    def _scheduler_step(self):
        # Step every per-module scheduler once per epoch.
        for scheduler in (self.encoder_scheduler, self.decoder_scheduler,
                          self.node_generater_scheduler, self.merge_scheduler):
            scheduler.step()
    def _optimizer_step(self):
        # Step every per-module optimizer once per accumulated batch.
        for optimizer in (self.encoder_optimizer, self.decoder_optimizer,
                          self.node_generater_optimizer, self.merge_optimizer):
            optimizer.step()
|
en
| 0.609029
|
# -*- encoding: utf-8 -*- # @Author: <NAME> # @Time: 2021/08/29 22:14:01 # @File: supervised_trainer.py supervised trainer, used to implement training, testing, parameter searching in supervised learning. example of instantiation: >>> trainer = SupervisedTrainer(config, model, dataloader, evaluator) for training: >>> trainer.fit() for testing: >>> trainer.test() for parameter searching: >>> trainer.param_search() Args: config (config): An instance object of Config, used to record parameter information. model (Model): An object of deep-learning model. dataloader (Dataloader): dataloader object. evaluator (Evaluator): evaluator object. expected that config includes these parameters below: learning_rate (float): learning rate of model train_batch_size (int): the training batch size. epoch_nums (int): number of epochs. trained_model_path (str): a path of file which is used to save parameters of best model. checkpoint_path (str): a path of file which is used save checkpoint of training progress. output_path (str|None): a path of a json file which is used to save test output infomation fo model. resume (bool): start training from last checkpoint. validset_divide (bool): whether to split validset. if True, the dataset is split to trainset-validset-testset. if False, the dataset is split to trainset-testset. test_step (int): the epoch number of training after which conducts the evaluation on test. best_folds_accuracy (list|None): when running k-fold cross validation, this keeps the accuracy of folds that already run. #self._build_loss(config["symbol_size"], self.dataloader.dataset.out_symbol2idx[SpecialTokens.PAD_TOKEN]) #check_pnt = torch.load(self.config["checkpoint_path"],map_location="cpu") # load parameter of model # load parameter of optimizer # other parameter # Added by Shyamoli # Added to print questions #print("Test output Polish", test_out[i]) #print("Target Polish", target[i]) train model. training finished. 
best valid result: equation accuracy [%2.3f] | value accuracy [%2.3f] best test result : equation accuracy [%2.3f] | value accuracy [%2.3f] evaluate model. Args: eval_set (str): [valid | test], the dataset for evaluation. Returns: tuple(float,float,int,str): equation accuracy, value accuracy, count of evaluated datas, formatted time string of evaluation time. test model. hyper-parameter search. gts trainer, used to implement training, testing, parameter searching for deep-learning model GTS. example of instantiation: >>> trainer = GTSTrainer(config, model, dataloader, evaluator) for training: >>> trainer.fit() for testing: >>> trainer.test() for parameter searching: >>> trainer.param_search() Args: config (config): An instance object of Config, used to record parameter information. model (Model): An object of deep-learning model. dataloader (Dataloader): dataloader object. evaluator (Evaluator): evaluator object. expected that config includes these parameters below: learning_rate (float): learning rate of model. embedding_learning_rate (float): learning rate of embedding module. train_batch_size (int): the training batch size. step_size (int): step_size of scheduler. epoch_nums (int): number of epochs. trained_model_path (str): a path of file which is used to save parameters of best model. checkpoint_path (str): a path of file which is used save checkpoint of training progress. output_path (str|None): a path of a json file which is used to save test output infomation fo model. resume (bool): start training from last checkpoint. validset_divide (bool): whether to split validset. if True, the dataset is split to trainset-validset-testset. if False, the dataset is split to trainset-testset. test_step (int): the epoch number of training after which conducts the evaluation on test. best_folds_accuracy (list|None): when running k-fold cross validation, this keeps the accuracy of folds that already run. 
# optimizer # self.encoder_optimizer = torch.optim.Adam( # [ # {'params': self.model.embedder.parameters()}, \ # {'params': self.model.encoder.parameters()} # ], # self.config["learning_rate"] # ) # scheduler # load parameter of model # load parameter of optimizer # load parameter of scheduler # other parameter train model. training finished. best valid result: equation accuracy [%2.3f] | value accuracy [%2.3f] best test result : equation accuracy [%2.3f] | value accuracy [%2.3f] evaluate model. Args: eval_set (str): [valid | test], the dataset for evaluation. Returns: tuple(float,float,int,str): equation accuracy, value accuracy, count of evaluated datas, formatted time string of evaluation time. test model. hyper-parameter search. multiencdec trainer, used to implement training, testing, parameter searching for deep-learning model MultiE&D. example of instantiation: >>> trainer = MultiEncDecTrainer(config, model, dataloader, evaluator) for training: >>> trainer.fit() for testing: >>> trainer.test() for parameter searching: >>> trainer.param_search() Args: config (config): An instance object of Config, used to record parameter information. model (Model): An object of deep-learning model. dataloader (Dataloader): dataloader object. evaluator (Evaluator): evaluator object. expected that config includes these parameters below: learning_rate (float): learning rate of model. train_batch_size (int): the training batch size. step_size (int): step_size of scheduler. epoch_nums (int): number of epochs. trained_model_path (str): a path of file which is used to save parameters of best model. checkpoint_path (str): a path of file which is used save checkpoint of training progress. output_path (str|None): a path of a json file which is used to save test output infomation fo model. resume (bool): start training from last checkpoint. validset_divide (bool): whether to split validset. if True, the dataset is split to trainset-validset-testset. 
if False, the dataset is split to trainset-testset. test_step (int): the epoch number of training after which conducts the evaluation on test. best_folds_accuracy (list|None): when running k-fold cross validation, this keeps the accuracy of folds that already run. # optimizer # self.embedder_optimizer = torch.optim.Adam(self.model.embedder.parameters(), self.config["learning_rate"], weight_decay=self.config["weight_decay"]) #self.optimizer = torch.optim.Adam(self.model.parameters(), self.config["learning_rate"], weight_decay=self.config["weight_decay"]) # scheduler #self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=self.config["step_size"], gamma=0.5) # load parameter of model # load parameter of optimizer # self.optimizer.load_state_dict(check_pnt['optimizer']) # load parameter of scheduler # self.scheduler.load_state_dict(check_pnt['scheduler']) # other parameter # self.scheduler.step() # self.optimizer.step() graph2tree trainer, used to implement training, testing, parameter searching for deep-learning model Graph2Tree. example of instantiation: >>> trainer = Graph2TreeTrainer(config, model, dataloader, evaluator) for training: >>> trainer.fit() for testing: >>> trainer.test() for parameter searching: >>> trainer.param_search() Args: config (config): An instance object of Config, used to record parameter information. model (Model): An object of deep-learning model. dataloader (Dataloader): dataloader object. evaluator (Evaluator): evaluator object. expected that config includes these parameters below: learning_rate (float): learning rate of model. embedding_learning_rate (float): learning rate of embedding module. train_batch_size (int): the training batch size. step_size (int): step_size of scheduler. epoch_nums (int): number of epochs. trained_model_path (str): a path of file which is used to save parameters of best model. checkpoint_path (str): a path of file which is used save checkpoint of training progress. 
output_path (str|None): a path of a json file which is used to save test output infomation fo model. resume (bool): start training from last checkpoint. validset_divide (bool): whether to split validset. if True, the dataset is split to trainset-validset-testset. if False, the dataset is split to trainset-testset. test_step (int): the epoch number of training after which conducts the evaluation on test. best_folds_accuracy (list|None): when running k-fold cross validation, this keeps the accuracy of folds that already run. treelstm trainer, used to implement training, testing, parameter searching for deep-learning model TreeLSTM. example of instantiation: >>> trainer = TreeLSTMTrainer(config, model, dataloader, evaluator) for training: >>> trainer.fit() for testing: >>> trainer.test() for parameter searching: >>> trainer.param_search() Args: config (config): An instance object of Config, used to record parameter information. model (Model): An object of deep-learning model. dataloader (Dataloader): dataloader object. evaluator (Evaluator): evaluator object. expected that config includes these parameters below: learning_rate (float): learning rate of model. train_batch_size (int): the training batch size. step_size (int): step_size of scheduler. epoch_nums (int): number of epochs. trained_model_path (str): a path of file which is used to save parameters of best model. checkpoint_path (str): a path of file which is used save checkpoint of training progress. output_path (str|None): a path of a json file which is used to save test output infomation fo model. resume (bool): start training from last checkpoint. validset_divide (bool): whether to split validset. if True, the dataset is split to trainset-validset-testset. if False, the dataset is split to trainset-testset. test_step (int): the epoch number of training after which conducts the evaluation on test. 
best_folds_accuracy (list|None): when running k-fold cross validation, this keeps the accuracy of folds that already run. # optimizer # scheduler # load parameter of model # load parameter of optimizer # load parameter of scheduler # other parameter train model. training finished. best valid result: equation accuracy [%2.3f] | value accuracy [%2.3f] best test result : equation accuracy [%2.3f] | value accuracy [%2.3f] evaluate model. Args: eval_set (str): [valid | test], the dataset for evaluation. Returns: tuple(float,float,int,str): equation accuracy, value accuracy, count of evaluated datas, formatted time string of evaluation time. test model. sausolver trainer, used to implement training, testing, parameter searching for deep-learning model SAUSolver. example of instantiation: >>> trainer = SAUSolverTrainer(config, model, dataloader, evaluator) for training: >>> trainer.fit() for testing: >>> trainer.test() for parameter searching: >>> trainer.param_search() Args: config (config): An instance object of Config, used to record parameter information. model (Model): An object of deep-learning model. dataloader (Dataloader): dataloader object. evaluator (Evaluator): evaluator object. expected that config includes these parameters below: learning_rate (float): learning rate of model. train_batch_size (int): the training batch size. step_size (int): step_size of scheduler. epoch_nums (int): number of epochs. trained_model_path (str): a path of file which is used to save parameters of best model. checkpoint_path (str): a path of file which is used save checkpoint of training progress. output_path (str|None): a path of a json file which is used to save test output infomation fo model. resume (bool): start training from last checkpoint. validset_divide (bool): whether to split validset. if True, the dataset is split to trainset-validset-testset. if False, the dataset is split to trainset-testset. 
test_step (int): the epoch number of training after which conducts the evaluation on test. best_folds_accuracy (list|None): when running k-fold cross validation, this keeps the accuracy of folds that already run. # batch['ans'][idx] = [12,8] # scheduler # load parameter of model # load parameter of optimizer # load parameter of scheduler # other parameter trnn trainer, used to implement training, testing, parameter searching for deep-learning model TRNN. example of instantiation: >>> trainer = TRNNTrainer(config, model, dataloader, evaluator) for training: >>> trainer.fit() for testing: >>> trainer.test() for parameter searching: >>> trainer.param_search() Args: config (config): An instance object of Config, used to record parameter information. model (Model): An object of deep-learning model. dataloader (Dataloader): dataloader object. evaluator (Evaluator): evaluator object. expected that config includes these parameters below: seq2seq_learning_rate (float): learning rate of seq2seq module. ans_learning_rate (float): learning rate of answer module. train_batch_size (int): the training batch size. step_size (int): step_size of scheduler. epoch_nums (int): number of epochs. trained_model_path (str): a path of file which is used to save parameters of best model. checkpoint_path (str): a path of file which is used save checkpoint of training progress. output_path (str|None): a path of a json file which is used to save test output infomation fo model. resume (bool): start training from last checkpoint. validset_divide (bool): whether to split validset. if True, the dataset is split to trainset-validset-testset. if False, the dataset is split to trainset-testset. test_step (int): the epoch number of training after which conducts the evaluation on test. best_folds_accuracy (list|None): when running k-fold cross validation, this keeps the accuracy of folds that already run. 
#self.optimizer = torch.optim.Adam(self.model.parameters(),self.config["learning_rate"]) # first stage # second stage # self.seq2seq_optimizer.step() # self.answer_module_optimizer.step() train model. # self.test(DatasetType.Test) # self.test(DatasetType.Train) training finished. best valid result: equation accuracy [%2.3f] | value accuracy [%2.3f] best test result : equation accuracy [%2.3f] | value accuracy [%2.3f] evaluate model. Args: eval_set (str): [valid | test], the dataset for evaluation. Returns: tuple(float,float,float,float,int,str): equation accuracy, value accuracy, seq2seq module accuracy, answer module accuracy, count of evaluated datas, formatted time string of evaluation time. # self.logger.info("test total [%d] | test equ acc [%2.3f] | test value acc [%2.3f] | test time %s"\ # %(eval_total,equation_ac/eval_total,value_ac/eval_total,test_time_cost)) hyper-parameter search. saligned trainer, used to implement training, testing, parameter searching for deep-learning model S-aligned. example of instantiation: >>> trainer = SalignedTrainer(config, model, dataloader, evaluator) for training: >>> trainer.fit() for testing: >>> trainer.test() for parameter searching: >>> trainer.param_search() Args: config (config): An instance object of Config, used to record parameter information. model (Model): An object of deep-learning model. dataloader (Dataloader): dataloader object. evaluator (Evaluator): evaluator object. expected that config includes these parameters below: learning_rate (float): learning rate of model train_batch_size (int): the training batch size. epoch_nums (int): number of epochs. step_size (int): step_size of scheduler. trained_model_path (str): a path of file which is used to save parameters of best model. checkpoint_path (str): a path of file which is used save checkpoint of training progress. output_path (str|None): a path of a json file which is used to save test output infomation fo model. 
resume (bool): start training from last checkpoint. validset_divide (bool): whether to split validset. if True, the dataset is split to trainset-validset-testset. if False, the dataset is split to trainset-testset. test_step (int): the epoch number of training after which conducts the evaluation on test. best_folds_accuracy (list|None): when running k-fold cross validation, this keeps the accuracy of folds that already run. # optimizer # scheduler # load parameter of model # load parameter of optimizer # load parameter of scheduler # other parameter # change NUM # target_mask = torch.ge(op_target, self.min_NUM) * torch.le(op_target, self.max_NUM).to(torch.long) # op_target = (op_target + self.UNK - self.min_NUM + 4) * target_mask + op_target * (1 - target_mask) # change constants # change unk # change +/-/*// # change padding #print(eq_len, num_list) # attach prefix/postfix # if self.do_addeql: #op_target[torch.arange(batch_size).unsqueeze(1), eq_len] = self.model.EQL #print('op_target', op_target[:3, :10]) #print('gen_var_prefix', self.max_NUM, num_list, gen_var_prefix) # gen_var_prefix = torch.zeros((batch_size, 1), dtype=torch.long).to(self.model._device) + 14 #self.max_NUM + 4 # if self.do_addeql: # else: # eq_len = [(idx + 2) for idx in eq_len] # print(self.dataloader.dataset.out_symbol2idx); #exit() # if batch_idx >= 100: continue #print('batch_idx', batch_idx) train model. training finished. best valid result: equation accuracy [%2.3f] | value accuracy [%2.3f] best test result : equation accuracy [%2.3f] | value accuracy [%2.3f] evaluate model. Args: eval_set (str): [valid | test], the dataset for evaluation. Returns: tuple(float,float,int,str): equation accuracy, value accuracy, count of evaluated datas, formatted time string of evaluation time. # batch["equation"], batch['equ len'] = self.adjust_equ(batch["raw_equation"], batch['equ len'], # batch['num list']) test model. 
#check_pnt = torch.load(self.config["checkpoint_path"],map_location="cpu") # load parameter of model # load parameter of optimizer # other parameter tsn trainer, used to implement training, testing, parameter searching for deep-learning model TSN. example of instantiation: >>> trainer = TSNTrainer(config, model, dataloader, evaluator) for training: >>> trainer.fit() for testing: >>> trainer.test() for parameter searching: >>> trainer.param_search() Args: config (config): An instance object of Config, used to record parameter information. model (Model): An object of deep-learning model. dataloader (Dataloader): dataloader object. evaluator (Evaluator): evaluator object. expected that config includes these parameters below: learning_rate (float): learning rate of model train_batch_size (int): the training batch size. epoch_nums (int): number of epochs. step_size (int): step_size of scheduler. trained_model_path (str): a path of file which is used to save parameters of best model. checkpoint_path (str): a path of file which is used save checkpoint of training progress. output_path (str|None): a path of a json file which is used to save test output infomation fo model. resume (bool): start training from last checkpoint. validset_divide (bool): whether to split validset. if True, the dataset is split to trainset-validset-testset. if False, the dataset is split to trainset-testset. test_step (int): the epoch number of training after which conducts the evaluation on test. best_folds_accuracy (list|None): when running k-fold cross validation, this keeps the accuracy of folds that already run. # optimizer # scheduler # load parameter of model # load parameter of optimizer # load parameter of scheduler # other parameter train model. training finished. best valid result: equation accuracy [%2.3f] | value accuracy [%2.3f] best test result : equation accuracy [%2.3f] | value accuracy [%2.3f] evaluate teacher net. Args: eval_set (str): [valid | test], the dataset for evaluation. 
Returns: tuple(float,float,int,str): equation accuracy, value accuracy, count of evaluated datas, formatted time string of evaluation time. evaluate student net. Args: eval_set (str): [valid | test], the dataset for evaluation. Returns: tuple(float,float,float,float,float,float,int,str): equation accuracy, value accuracy, equation accuracy of student net 1, value accuracy of student net 1, equation accuracy of student net 2, value accuracy of student net 2, count of evaluated datas, formatted time string of evaluation time. test model. ept trainer, used to implement training, testing, parameter searching for deep-learning model EPT. example of instantiation: >>> trainer = EPTTrainer(config, model, dataloader, evaluator) for training: >>> trainer.fit() for testing: >>> trainer.test() for parameter searching: >>> trainer.param_search() Args: config (config): An instance object of Config, used to record parameter information. model (Model): An object of deep-learning model. dataloader (Dataloader): dataloader object. evaluator (Evaluator): evaluator object. expected that config includes these parameters below: learning_rate (float): learning rate of model train_batch_size (int): the training batch size. epoch_nums (int): number of epochs. gradient_accumulation_steps (int): gradient accumulation steps. epoch_warmup (int): epoch warmup. fix_encoder_embedding (bool): whether require gradient of embedding module of encoder trained_model_path (str): a path of file which is used to save parameters of best model. checkpoint_path (str): a path of file which is used save checkpoint of training progress. output_path (str|None): a path of a json file which is used to save test output infomation fo model. resume (bool): start training from last checkpoint. validset_divide (bool): whether to split validset. if True, the dataset is split to trainset-validset-testset. if False, the dataset is split to trainset-testset. 
test_step (int): the epoch number of training after which conducts the evaluation on test. best_folds_accuracy (list|None): when running k-fold cross validation, this keeps the accuracy of folds that already run. # load parameter of model # load parameter of optimizer # load parameter of scheduler seq, seq_length, group_nums, target # If clipping threshold is set, then clip the gradient # if self._config.gradient_normalize: # # If normalizing gradient is set, then normalize the gradient # self._normalize_gradients(*self.model.parameters()) # Apply optimizer & scheduler # If clipping threshold is set, then clip the gradient # if self._config.gradient_normalize: # # If normalizing gradient is set, then normalize the gradient # self._normalize_gradients(*self.model.parameters()) # Apply optimizer & scheduler train model. training finished. best valid result: equation accuracy [%2.3f] | value accuracy [%2.3f] best test result : equation accuracy [%2.3f] | value accuracy [%2.3f] evaluate model. Args: eval_set (str): [valid | test], the dataset for evaluation. Returns: tuple(float,float,int,str): equation accuracy, value accuracy, count of evaluated datas, formatted time string of evaluation time. test model. # Build scheduler before restoration #self.optimizer = Lamb(self.model.parameters(), lr=self.config["learning_rate"], eps=1e-08, weight_decay=0.0) Normalize gradients (as in NVLAMB optimizer) :param parameters: List of parameters whose gradient will be normalized. :return: Frobenious Norm before applying normalization. # Compute total Frobenius norm # Compute normalization constant. Set 1E-12 for minimum value to avoid inf. hyper-parameter search. # scheduler # load parameter of model # load parameter of optimizer # load parameter of scheduler # other parameter
| 2.472692
| 2
|
data_structures/heap/min_heap.py
|
mlopezarango/Python
| 7
|
6629684
|
# Min-heap data structure
# with decrease key functionality - in O(log(n)) time
class Node:
    """A named value used as a heap entry; ordered by ``val``."""

    def __init__(self, name, val):
        # Identity label plus the sortable payload.
        self.name = name
        self.val = val

    def __str__(self):
        cls_name = self.__class__.__name__
        return f"{cls_name}({self.name}, {self.val})"

    def __lt__(self, other):
        # Heap ordering compares payloads only, never names.
        return self.val < other.val
class MinHeap:
    """
    Array-backed min-heap over ``Node`` objects with O(log n) decrease-key.

    ``idx_of_element`` maps each node object to its current index in
    ``heap`` so a node can be located in O(1) before sifting;
    ``heap_dict`` maps node *names* to values for ``heap["name"]`` lookups.

    >>> r = Node("R", -1)
    >>> b = Node("B", 6)
    >>> a = Node("A", 3)
    >>> x = Node("X", 1)
    >>> e = Node("E", 4)
    >>> print(b)
    Node(B, 6)
    >>> myMinHeap = MinHeap([r, b, a, x, e])
    >>> myMinHeap.decrease_key(b, -17)
    >>> print(b)
    Node(B, -17)
    >>> print(myMinHeap["B"])
    -17
    """

    def __init__(self, array):
        self.idx_of_element = {}  # node object -> index in self.heap
        self.heap_dict = {}       # node name -> node value
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        """Return the value recorded for the node named *key*."""
        return self.heap_dict[key]

    def build_heap(self, array):
        """Heapify *array* in place (bottom-up, O(n)) and return it."""
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, node in enumerate(array):
            self.idx_of_element[node] = idx
            self.heap_dict[node.name] = node.val
        # Sift down every internal node, deepest first.
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        """Restore the min-heap property below *idx* (min-heapify)."""
        while True:
            left = self.get_left_child_idx(idx)
            right = self.get_right_child_idx(idx)
            smallest = idx
            if left < len(array) and array[left] < array[idx]:
                smallest = left
            if right < len(array) and array[right] < array[smallest]:
                smallest = right
            if smallest == idx:
                break
            # Swap nodes and keep the position index in sync.
            array[idx], array[smallest] = array[smallest], array[idx]
            self.idx_of_element[array[idx]], self.idx_of_element[array[smallest]] = (
                self.idx_of_element[array[smallest]],
                self.idx_of_element[array[idx]],
            )
            idx = smallest

    def sift_up(self, idx):
        """Bubble the node at *idx* up while it is smaller than its parent."""
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[idx] < self.heap[p]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        """Return (without removing) the minimum node."""
        return self.heap[0]

    def remove(self):
        """Pop and return the minimum node in O(log n)."""
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        smallest = self.heap.pop()
        del self.idx_of_element[smallest]
        # NOTE(review): heap_dict keeps the removed node's entry, so
        # heap["name"] still answers after removal — confirm this is wanted.
        self.sift_down(0, self.heap)
        return smallest

    def insert(self, node):
        """Add *node* to the heap in O(log n)."""
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return not self.heap

    def decrease_key(self, node, newValue):
        """Lower *node*'s value to *newValue* and restore heap order.

        Raises:
            ValueError: if *newValue* is not strictly smaller than the
                node's current value.  (The original used ``assert``,
                which disappears under ``python -O``.)
        """
        if newValue >= self.heap[self.idx_of_element[node]].val:
            raise ValueError("newValue must be less than current value")
        node.val = newValue
        self.heap_dict[node.name] = newValue
        self.sift_up(self.idx_of_element[node])
## USAGE
def _demo():
    """Build a sample heap and demonstrate decrease_key on node B."""
    r = Node("R", -1)
    b = Node("B", 6)
    a = Node("A", 3)
    x = Node("X", 1)
    e = Node("E", 4)
    # Use one of these two ways to generate a Min-Heap:
    # Generating Min-Heap from array
    my_min_heap = MinHeap([r, b, a, x, e])
    # Generating Min-Heap by insert method
    # my_min_heap.insert(a)
    # my_min_heap.insert(b)
    # my_min_heap.insert(x)
    # my_min_heap.insert(r)
    # my_min_heap.insert(e)
    # Before
    print("Min Heap - before decrease key")
    for node in my_min_heap.heap:
        print(node)
    print("Min Heap - After decrease key of node [B -> -17]")
    my_min_heap.decrease_key(b, -17)
    # After
    for node in my_min_heap.heap:
        print(node)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Previously the demo ran as a module-level side effect on every
    # import; it now only runs when the file is executed as a script.
    _demo()
|
# Min head data structure
# with decrease key functionality - in O(log(n)) time
class Node:
def __init__(self, name, val):
self.name = name
self.val = val
def __str__(self):
return f"{self.__class__.__name__}({self.name}, {self.val})"
def __lt__(self, other):
return self.val < other.val
class MinHeap:
"""
>>> r = Node("R", -1)
>>> b = Node("B", 6)
>>> a = Node("A", 3)
>>> x = Node("X", 1)
>>> e = Node("E", 4)
>>> print(b)
Node(B, 6)
>>> myMinHeap = MinHeap([r, b, a, x, e])
>>> myMinHeap.decrease_key(b, -17)
>>> print(b)
Node(B, -17)
>>> print(myMinHeap["B"])
-17
"""
def __init__(self, array):
self.idx_of_element = {}
self.heap_dict = {}
self.heap = self.build_heap(array)
def __getitem__(self, key):
return self.get_value(key)
def get_parent_idx(self, idx):
return (idx - 1) // 2
def get_left_child_idx(self, idx):
return idx * 2 + 1
def get_right_child_idx(self, idx):
return idx * 2 + 2
def get_value(self, key):
return self.heap_dict[key]
def build_heap(self, array):
lastIdx = len(array) - 1
startFrom = self.get_parent_idx(lastIdx)
for idx, i in enumerate(array):
self.idx_of_element[i] = idx
self.heap_dict[i.name] = i.val
for i in range(startFrom, -1, -1):
self.sift_down(i, array)
return array
# this is min-heapify method
def sift_down(self, idx, array):
while True:
l = self.get_left_child_idx(idx)
r = self.get_right_child_idx(idx)
smallest = idx
if l < len(array) and array[l] < array[idx]:
smallest = l
if r < len(array) and array[r] < array[smallest]:
smallest = r
if smallest != idx:
array[idx], array[smallest] = array[smallest], array[idx]
(
self.idx_of_element[array[idx]],
self.idx_of_element[array[smallest]],
) = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
idx = smallest
else:
break
def sift_up(self, idx):
p = self.get_parent_idx(idx)
while p >= 0 and self.heap[p] > self.heap[idx]:
self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
idx = p
p = self.get_parent_idx(idx)
def peek(self):
return self.heap[0]
def remove(self):
self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
x = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0, self.heap)
return x
def insert(self, node):
self.heap.append(node)
self.idx_of_element[node] = len(self.heap) - 1
self.heap_dict[node.name] = node.val
self.sift_up(len(self.heap) - 1)
def is_empty(self):
return True if len(self.heap) == 0 else False
def decrease_key(self, node, newValue):
assert (
self.heap[self.idx_of_element[node]].val > newValue
), "newValue must be less that current value"
node.val = newValue
self.heap_dict[node.name] = newValue
self.sift_up(self.idx_of_element[node])
## USAGE
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
myMinHeap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in myMinHeap.heap:
print(i)
print("Min Heap - After decrease key of node [B -> -17]")
myMinHeap.decrease_key(b, -17)
# After
for i in myMinHeap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
|
en
| 0.516044
|
# Min head data structure # with decrease key functionality - in O(log(n)) time >>> r = Node("R", -1) >>> b = Node("B", 6) >>> a = Node("A", 3) >>> x = Node("X", 1) >>> e = Node("E", 4) >>> print(b) Node(B, 6) >>> myMinHeap = MinHeap([r, b, a, x, e]) >>> myMinHeap.decrease_key(b, -17) >>> print(b) Node(B, -17) >>> print(myMinHeap["B"]) -17 # this is min-heapify method ## USAGE # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before # After
| 3.511952
| 4
|
aboot.py
|
Eliminater74/abootool
| 1
|
6629685
|
<filename>aboot.py
"""
Author: <NAME> / Aleph Research / HCL Technologies
"""
import string
import json
import hashlib
import os
from serializable import Serializable
from log import *
from config import Config
# Module-level caches, filled lazily by get_bootloaders().
bootloaders_by_device = {}
bootloaders_by_oem = {}
bootloaders = None


def get_bootloaders(path=Config.data_path):
    """Load every bootloader JSON record under *path* into the caches.

    Populates ``bootloaders_by_oem`` and ``bootloaders_by_device`` as a
    side effect and memoizes the full list in ``bootloaders``; subsequent
    calls return the cached list without touching the filesystem.
    """
    global bootloaders
    if bootloaders:
        return bootloaders
    bootloaders = []
    n = 0
    for f in os.listdir(path):
        if f.endswith(".json"):
            bl = ABOOT.create_from_json(os.path.join(path, f))
            bootloaders.append(bl)
            # dict.has_key() was removed in Python 3; setdefault covers
            # both the "new key" and "existing key" cases in one call.
            bootloaders_by_oem.setdefault(bl.oem, []).append(bl)
            bootloaders_by_device.setdefault(bl.device, []).append(bl)
            n += 1
    D("loaded %d bootloaders (%d devices, %d OEMs)", n, len(bootloaders_by_device), len(bootloaders_by_oem))
    return bootloaders
def by_oem(oem=None):
    """Return the bootloader list for *oem*; with no OEM, the whole mapping."""
    all()  # make sure the caches are populated
    if not oem:
        return bootloaders_by_oem
    found = bootloaders_by_oem.get(oem)
    if found is None:
        # Unknown OEM degrades to an empty list, as before.
        return []
    D("bootloader.by_oem[%s]: %s", oem, found)
    return found
def by_device(device=None):
    """Return the bootloader list for *device*; with no device, the mapping."""
    all()  # make sure the caches are populated
    if not device:
        return bootloaders_by_device
    found = bootloaders_by_device.get(device)
    if found is None:
        # Unknown device degrades to an empty list, as before.
        return []
    D("bootloader.by_device[%s]: %s", device, found)
    return found
def all():
    """Return every known bootloader, loading them on first use.

    NOTE(review): this shadows the builtin ``all`` within this module;
    renaming would break external callers, so it is kept as-is.
    """
    # get_bootloaders() rebinds and returns the module-level list.
    return bootloaders if bootloaders else get_bootloaders()
class ABOOT(Serializable):
    """One bootloader image record: hash, printable strings and identity."""

    @classmethod
    def create_from_json(cls, path):
        """Deserialize an ABOOT record from a JSON file at *path*."""
        with open(path, "rb") as fh:
            data = json.load(fh)
        return ABOOT().set_data(data)

    @classmethod
    def create_from_bootloader_image(cls, fp, oem, device, build, src, name, strprefix=""):
        """Build a record from a raw bootloader image read from *fp*.

        Hashes the whole image and extracts runs of printable characters
        (strings(1)-style), keeping only those starting with *strprefix*.
        """
        data = fp.read()
        sha256 = hashlib.sha256(data).hexdigest()
        D("SHA256 = %s", sha256)
        s = ""
        printable = set(string.printable)
        strings = set()
        i = 0
        for c in data:
            if 0 == i % 2**20:
                T("%d", i >> 20)  # progress trace, one tick per MiB
            # On Python 3, iterating bytes yields ints; normalize to a
            # 1-char str so the membership test against string.printable
            # works (on Python 2 `c` is already a str and is left alone).
            if isinstance(c, int):
                c = chr(c)
            if c in printable:
                s += c
            else:
                if "" != s:
                    if s.startswith(strprefix):
                        strings.add(s)
                    s = ""
            i += 1
        # Flush a trailing printable run at EOF (previously dropped).
        if s and s.startswith(strprefix):
            strings.add(s)
        strings = sorted(strings)
        return ABOOT().set_data({'src': src,
                                 'name': name,
                                 'sha256': sha256,
                                 'strings': strings,
                                 'oem': oem,
                                 'device': device,
                                 'build': build})

    def __repr__(self):
        return "%s/%s/%s" % (self.oem, self.device, self.build)
|
<filename>aboot.py
"""
Author: <NAME> / Aleph Research / HCL Technologies
"""
import string
import json
import hashlib
import os
from serializable import Serializable
from log import *
from config import Config
bootloaders_by_device = {}
bootloaders_by_oem = {}
bootloaders = None
def get_bootloaders(path=Config.data_path):
global bootloaders
if bootloaders:
return bootloaders
bootloaders = []
n = 0
for f in os.listdir(path):
if f.endswith(".json"):
bl = ABOOT.create_from_json(os.path.join(path, f))
bootloaders.append(bl)
if not bootloaders_by_oem.has_key(bl.oem):
bootloaders_by_oem[bl.oem] = []
bootloaders_by_oem[bl.oem].append(bl)
if not bootloaders_by_device.has_key(bl.device):
bootloaders_by_device[bl.device] = []
bootloaders_by_device[bl.device].append(bl)
n+=1
D("loaded %d bootloaders (%d devices, %d OEMs)", n, len(bootloaders_by_device), len(bootloaders_by_oem))
return bootloaders
def by_oem(oem = None):
all()
if not oem:
return bootloaders_by_oem
try:
D("bootloader.by_oem[%s]: %s", oem, bootloaders_by_oem[oem])
return bootloaders_by_oem[oem]
except KeyError:
return []
def by_device(device = None):
all()
if not device:
return bootloaders_by_device
try:
D("bootloader.by_device[%s]: %s", device, bootloaders_by_device[device])
return bootloaders_by_device[device]
except KeyError:
return []
def all():
if not bootloaders:
get_bootloaders()
return bootloaders
class ABOOT(Serializable):
@classmethod
def create_from_json(cls, path):
with open(path, "rb") as fh:
data = json.load(fh)
return ABOOT().set_data(data)
@classmethod
def create_from_bootloader_image(cls, fp, oem, device, build, src, name, strprefix=""):
data = fp.read()
sha256 = hashlib.sha256(data).hexdigest()
D("SHA256 = %s", sha256)
s = ""
printable = set(string.printable)
strings = set()
i = 0
for c in data:
if 0 == i % 2**20:
T("%d", i >> 20)
if c in printable:
s += c
else:
if "" != s:
if s.startswith(strprefix):
strings.add(s)
s = ""
i += 1
strings = list(strings)
strings.sort()
return ABOOT().set_data({'src': src,
'name': name,
'sha256': sha256,
'strings': strings,
'oem': oem,
'device': device,
'build': build})
def __repr__(self):
return "%s/%s/%s" % (self.oem, self.device, self.build)
|
en
| 0.616986
|
Author: <NAME> / Aleph Research / HCL Technologies
| 2.137425
| 2
|
modules/xia2/command_line/wilson_stuff.py
|
jorgediazjr/dials-dev20191018
| 0
|
6629686
|
from __future__ import absolute_import, division, print_function
import sys
from iotbx import mtz
from mmtbx.scaling import data_statistics
# Read the MTZ file named on the command line and pick out the first
# X-ray intensity array it contains (data stays None when there is none).
m = mtz.object(sys.argv[1])
data = next(
    (ma for ma in m.as_miller_arrays() if ma.is_xray_intensity_array()), None
)
def nres_from_mtz(m):
    """Estimate the residue count in the asymmetric unit of an MTZ object.

    Divides the unit-cell volume by the number of symmetry operators to
    get the ASU volume, then divides by 2.7 * 128 A^3 per residue
    (presumably ~128 Da/residue at 2.7 A^3/Da — confirm the rule of thumb).
    """
    space_group = m.space_group()
    cell = m.crystals()[0].unit_cell()
    v_asu = cell.volume() / len(space_group.all_ops())
    return v_asu / (2.7 * 128)
n_res = nres_from_mtz(m)
# Fail with a clear message instead of an opaque AttributeError on None
# when the MTZ file contains no X-ray intensity array.
if data is None:
    sys.exit("no X-ray intensity array found in MTZ file")
wilson_scaling = data_statistics.wilson_scaling(miller_array=data, n_residues=n_res)
wilson_scaling.show()
|
from __future__ import absolute_import, division, print_function
import sys
from iotbx import mtz
from mmtbx.scaling import data_statistics
m = mtz.object(sys.argv[1])
mas = m.as_miller_arrays()
data = None
for ma in mas:
if ma.is_xray_intensity_array():
data = ma
break
def nres_from_mtz(m):
sg = m.space_group()
uc = m.crystals()[0].unit_cell()
n_ops = len(sg.all_ops())
v_asu = uc.volume() / n_ops
return v_asu / (2.7 * 128)
n_res = nres_from_mtz(m)
wilson_scaling = data_statistics.wilson_scaling(miller_array=data, n_residues=n_res)
wilson_scaling.show()
|
none
| 1
| 1.964666
| 2
|
|
fouruc/base/templatetags/filter_extras.py
|
Alfareiza/4uc-manager-silver
| 0
|
6629687
|
import itertools
import math
import datetime
from django import template
register = template.Library()
@register.filter
def date_from_minute(m):
    """
    Calc the UTC date *m* minutes ago and return it formatted.

    :param m: minutes to subtract from now, e.g. 351702 :int
    :return: '25 Jan 2021 15:44' :str, or 'Sem Informação' when m is None
    """
    if m is None:
        return 'Sem Informação'
    # utcnow() is deprecated since 3.12; an aware UTC datetime formats
    # identically under "%d %b %Y %H:%M" (no timezone field).
    now = datetime.datetime.now(datetime.timezone.utc)
    deltadate = now - datetime.timedelta(minutes=m)
    return deltadate.strftime("%d %b %Y %H:%M")
@register.filter
def minutes_in_time(minutes):
    '''
    Turn an age in minutes into a human-readable "há ..." string.

    Returns 'Sem Informação' for None or non-positive input and an empty
    string on any unexpected error (best-effort template filter).
    Ex.: minutes: 351702

    :param minutes: 351702 :int
    :return: 'há 8 Meses' :str
    '''
    try:
        if minutes is None:
            # Previously fell through and returned None implicitly.
            return 'Sem Informação'
        hours = minutes // 60
        # Integer division here: the old math.ceil(hours / 24) promoted
        # anything over an hour to at least one day, so the "Horas"
        # branch was unreachable.
        days = hours // 24
        months = days // 30
        year = months // 12
        if year > 0:
            return f"há {year} Anos"
        elif months > 0:
            return f"há {months} Meses"
        elif days == 1:
            return f"há {days} Día"
        elif days > 1:
            return f"há {days} Días"
        elif hours > 0:
            return f"há {hours} Horas"
        elif minutes > 0:
            return f"há {minutes} Minutos"
        else:
            return 'Sem Informação'
    except Exception as e:
        # Deliberate best-effort: a broken value must not break the page.
        print('Error > ', e)
        return ''
@register.filter
def format_str_date(date_time_str):
    """
    Parse a 'YYYY-MM-DD HH:MM:SS' string into a datetime object.

    :param date_time_str: '2021-06-22 12:16:27' or None
    :return: datetime.datetime, or '-' when input is None
    """
    if date_time_str is None:
        return '-'
    # BUGFIX: removed a dead strftime() call whose result was discarded and
    # a leftover debug print; the parsed object is returned unchanged.
    return datetime.datetime.strptime(str(date_time_str), '%Y-%m-%d %H:%M:%S')
@register.filter
def number_day_to_name_day(number):
    """
    Map a weekday index (0 and 7 both mean Sunday) to its Portuguese name.
    :param number: weekday index, 0..7
    :return: day name string (KeyError for any other index)
    """
    names = ['Domingo', 'Segunda', 'Terça', 'Quarta', 'Quinta', 'Sexta', 'Sábado', 'Domingo']
    lookup = {index: name for index, name in enumerate(names)}
    return lookup[number]
@register.filter
def dict_key(dictionary, key):
    """
    Look up `key` in `dictionary`, converting the key to int first (template
    filters receive keys as strings).
    :param dictionary: e.g. {0: {'id': 3, 'name': 'Novo'}, ...}
    :param key: '0' :str (or an int)
    :return: the mapped value, e.g. {'id': 3, 'name': 'Novo'} :dict
    """
    index = int(key)
    return dictionary[index]
@register.filter
def all_records(conta):
    """Total number of related objects across all of the account's managers."""
    related = (conta.categories, conta.players, conta.playlists,
               conta.medias, conta.records)
    return sum(len(manager.all()) for manager in related)
@register.filter
def group_by_day(list_all_objects):
    """
    Group consecutive objects by the day of their `.date` attribute, with the
    most recently seen group first.
    :param: list_all_objects: Register.objects.all(): django.db.models.query.QuerySet
    :return: objects_day_ordered: [[objetos_do_dia_1][objetos_do_dia_2][objetos_do_dia_3]]: list of lists
    """
    # groupby only merges *adjacent* items with the same day, matching the
    # original behaviour on unsorted input.
    groups = [list(day_group) for _, day_group
              in itertools.groupby(list_all_objects, lambda obj: obj.date.day)]
    groups.reverse()  # original inserted each group at index 0
    return groups
@register.filter
def player_full_name(id, conta):
    """Resolve a player id to the Player object on `conta`.

    Falls back to the id rendered as a string when the lookup fails
    (missing player, detached account, etc.).
    """
    try:
        # Dropped the pointless walrus assignment — the binding was never used.
        return conta.players.get(player_id=id)
    except Exception as e:
        print('Error: ', e)
        return f"{id}"
@register.filter
def media_full_name(id, conta):
    """Resolve a media id to the Media object on `conta`.

    Falls back to the id rendered as a string when the lookup fails.
    """
    try:
        # Dropped the pointless walrus assignment — the binding was never used.
        return conta.medias.get(media_id=id)
    except Exception as e:
        print('Error: ', e)
        return f"{id}"
|
import itertools
import math
import datetime
from django import template
register = template.Library()
@register.filter
def date_from_minute(m):
    """Return the UTC date `m` minutes ago as "dd Mon YYYY HH:MM".

    :param m: age in minutes (e.g. 351702), or None
    :return: formatted date string, or 'Sem Informação' when m is None
    """
    if m is None:
        return 'Sem Informação'
    # Timezone-aware "now" instead of the deprecated datetime.utcnow();
    # the rendered string is identical (no %z in the format).
    now = datetime.datetime.now(datetime.timezone.utc)
    deltadate = now - datetime.timedelta(minutes=m)
    return deltadate.strftime("%d %b %Y %H:%M")
@register.filter
def minutes_in_time(minutes):
    '''
    Return a Portuguese "time ago" string for an age given in minutes.
    Returns 'Sem Informação' for None/0 and '' on any unexpected error.
    Ex.: minutes: 256587
    :param minutes: 351702 :int
    :return: 'há 8 meses' :str
    '''
    try:
        if minutes is None:
            # BUGFIX: previously fell through and implicitly returned None.
            return 'Sem Informação'
        hours = minutes // 60
        # BUGFIX: floor division — the old math.ceil(hours / 24) reported
        # "há 1 Día" for anything from 1 hour upwards.
        days = hours // 24
        months = days // 30
        year = months // 12
        if year > 0:
            return f"há {year} Anos"
        elif months > 0:
            return f"há {months} Meses"
        elif days == 1:
            return f"há {days} Día"
        elif days > 1:
            return f"há {days} Días"
        elif hours > 0:
            return f"há {hours} Horas"
        elif minutes > 0:
            return f"há {minutes} Minutos"
        else:
            return 'Sem Informação'
    except Exception as e:
        print('Error > ', e)
        return ''
@register.filter
def format_str_date(date_time_str):
    """
    Parse a 'YYYY-MM-DD HH:MM:SS' string into a datetime object.

    :param date_time_str: '2021-06-22 12:16:27' or None
    :return: datetime.datetime, or '-' when input is None
    """
    if date_time_str is None:
        return '-'
    # BUGFIX: removed a dead strftime() call whose result was discarded and
    # a leftover debug print; the parsed object is returned unchanged.
    return datetime.datetime.strptime(str(date_time_str), '%Y-%m-%d %H:%M:%S')
@register.filter
def number_day_to_name_day(number):
    """
    Map a weekday index (0 and 7 both mean Sunday) to its Portuguese name.
    :param number: weekday index, 0..7
    :return: day name string (KeyError for any other index)
    """
    names = ['Domingo', 'Segunda', 'Terça', 'Quarta', 'Quinta', 'Sexta', 'Sábado', 'Domingo']
    lookup = {index: name for index, name in enumerate(names)}
    return lookup[number]
@register.filter
def dict_key(dictionary, key):
    """
    Look up `key` in `dictionary`, converting the key to int first (template
    filters receive keys as strings).
    :param dictionary: e.g. {0: {'id': 3, 'name': 'Novo'}, ...}
    :param key: '0' :str (or an int)
    :return: the mapped value, e.g. {'id': 3, 'name': 'Novo'} :dict
    """
    index = int(key)
    return dictionary[index]
@register.filter
def all_records(conta):
    """Total number of related objects across all of the account's managers."""
    related = (conta.categories, conta.players, conta.playlists,
               conta.medias, conta.records)
    return sum(len(manager.all()) for manager in related)
@register.filter
def group_by_day(list_all_objects):
    """
    Group consecutive objects by the day of their `.date` attribute, with the
    most recently seen group first.
    :param: list_all_objects: Register.objects.all(): django.db.models.query.QuerySet
    :return: objects_day_ordered: [[objetos_do_dia_1][objetos_do_dia_2][objetos_do_dia_3]]: list of lists
    """
    # groupby only merges *adjacent* items with the same day, matching the
    # original behaviour on unsorted input.
    groups = [list(day_group) for _, day_group
              in itertools.groupby(list_all_objects, lambda obj: obj.date.day)]
    groups.reverse()  # original inserted each group at index 0
    return groups
@register.filter
def player_full_name(id, conta):
    """Resolve a player id to the Player object on `conta`.

    Falls back to the id rendered as a string when the lookup fails
    (missing player, detached account, etc.).
    """
    try:
        # Dropped the pointless walrus assignment — the binding was never used.
        return conta.players.get(player_id=id)
    except Exception as e:
        print('Error: ', e)
        return f"{id}"
@register.filter
def media_full_name(id, conta):
    """Resolve a media id to the Media object on `conta`.

    Falls back to the id rendered as a string when the lookup fails.
    """
    try:
        # Dropped the pointless walrus assignment — the binding was never used.
        return conta.medias.get(media_id=id)
    except Exception as e:
        print('Error: ', e)
        return f"{id}"
|
en
| 0.289601
|
Calc date from minutes and return a string with the date. :param minutes: 351702: int :return: 'Jan 25 de 2021 15:44' Calc the time from minutes and return a string with the calculation. Returns empty string on any error. Ex.: minutes: 256587 :param minutes: 351702 :int :return: 'há 8 meses' :str Transform str to date :param date_str: '2021-06-22 12:16:27' :return: Receives a number where 0 is sunday and returns the name of the day :param number: :return: Receive and dictinoary and a key :param dictionary: 'playlists': {'0': {'id': 3, 'name': 'Novo'}, '1': {'id': 3, 'name': 'Novo'}} :param key: '0' :str :return: The value of the dict. Ex: {'id': 3, 'name': 'Novo'} :dict Recebe uma lista de objetos onde cada um deles possui um atributo tipo date e logo ordena eles por dia. :param: list_all_objects: Register.objects.all(): django.db.models.query.QuerySet :return: objects_day_ordered: [[objetos_do_dia_1][objetos_do_dia_2][objetos_do_dia_3]]: list of lists
| 3.405508
| 3
|
preorderTraversalNoRecursive.py
|
saai/LeetcodePythonSolutions
| 0
|
6629688
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    # @param {TreeNode} root
    # @return {integer[]}
    def preorderTraversal(self, root):
        """Iterative preorder (root, left, right) traversal of a binary tree.

        Pushes the right child before the left so the left subtree is visited
        first. BUGFIX: the original built a dummy `TreeNode(0)` sentinel, but
        TreeNode is only present as a comment in this file, so the method
        raised NameError at runtime; the standard stack traversal needs no
        sentinel. Also `== None` replaced with `is None`.
        """
        if root is None:
            return []
        values = []
        stack = [root]
        while stack:
            node = stack.pop()
            values.append(node.val)
            if node.right is not None:
                stack.append(node.right)
            if node.left is not None:
                stack.append(node.left)
        return values
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    # @param {TreeNode} root
    # @return {integer[]}
    def preorderTraversal(self, root):
        """Iterative preorder (root, left, right) traversal of a binary tree.

        Pushes the right child before the left so the left subtree is visited
        first. BUGFIX: the original built a dummy `TreeNode(0)` sentinel, but
        TreeNode is only present as a comment in this file, so the method
        raised NameError at runtime; the standard stack traversal needs no
        sentinel. Also `== None` replaced with `is None`.
        """
        if root is None:
            return []
        values = []
        stack = [root]
        while stack:
            node = stack.pop()
            values.append(node.val)
            if node.right is not None:
                stack.append(node.right)
            if node.left is not None:
                stack.append(node.left)
        return values
|
en
| 0.413056
|
# Definition for a binary tree node. # class TreeNode: # def __init__(self, x): # self.val = x # self.left = None # self.right = None # @param {TreeNode} root # @return {integer[]}
| 3.771079
| 4
|
FlappyBird.py
|
billztee7/FlappyBird-NEAT
| 0
|
6629689
|
<reponame>billztee7/FlappyBird-NEAT
import os
import random
import time
import neat
import pygame
# Window geometry and 2x-scaled sprite assets, loaded once at import time
# (requires an "imgs/" directory next to the script).
WIN_WIDTH = 500
WIN_HEIGHT = 800
# Bird flap animation frames: wings up / level / down.
BIRD_IMGS = [pygame.transform.scale2x(pygame.image.load(os.path.join("imgs", "bird1.png"))), pygame.transform.scale2x(
    pygame.image.load(os.path.join("imgs", "bird2.png"))), pygame.transform.scale2x(pygame.image.load(os.path.join("imgs", "bird3.png")))]
PIPE_IMG = pygame.transform.scale2x(
    pygame.image.load(os.path.join("imgs", "pipe.png")))
BASE_IMG = pygame.transform.scale2x(
    pygame.image.load(os.path.join("imgs", "base.png")))
BACK_IMG = pygame.transform.scale2x(
    pygame.image.load(os.path.join("imgs", "bg.png")))
pygame.font.init()  # must run before SysFont below
STAT_FONT = pygame.font.SysFont("comicsans", 50)
class Bird:
    """Flappy-bird sprite: jump/fall physics, tilt animation and drawing."""
    IMGS = BIRD_IMGS            # flap animation frames
    MAX_ROTATION = 25           # max upward tilt, degrees
    ROTATION_VELOCITY = 20      # degrees rotated downward per frame while diving
    ANIMATION_TIME = 5          # frames each animation image is shown

    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.tilt = 0
        self.tick_count = 0     # frames since the last jump
        self.velocity = 0
        self.height = self.y    # y at the moment of the last jump
        self.img_count = 0
        self.img = self.IMGS[0]

    def jump(self):
        """Start an upward impulse (negative y is up in pygame)."""
        self.velocity = -10.5
        self.tick_count = 0
        self.height = self.y

    def move(self):
        """Advance one physics frame: displacement = v*t + 1.5*t^2 (gravity)."""
        self.tick_count += 1
        distance = self.velocity*self.tick_count + 1.5*self.tick_count**2
        if distance >= 16:      # terminal fall speed per frame
            distance = 16
        if distance < 0:        # small extra boost while still rising
            distance -= 2
        self.y = self.y + distance
        # Setting tilt to fixed value upon reaching max height
        if distance < 0 or self.y < self.height + 50:
            if self.tilt < self.MAX_ROTATION:
                self.tilt = self.MAX_ROTATION
        elif self.tilt > -90:
            self.tilt -= self.ROTATION_VELOCITY

    def draw(self, win):
        """Draw the bird with its current animation frame, rotated by tilt."""
        self.img_count += 1
        # Bird tilting up
        if self.img_count < self.ANIMATION_TIME:
            self.img = self.IMGS[0]
        elif self.img_count < self.ANIMATION_TIME*2:
            self.img = self.IMGS[1]
        elif self.img_count < self.ANIMATION_TIME*3:
            self.img = self.IMGS[2]
        # Bird tilting down
        elif self.img_count < self.ANIMATION_TIME*4:
            self.img = self.IMGS[1]
        elif self.img_count < self.ANIMATION_TIME*5:
            self.img = self.IMGS[0]
            self.img_count = 0
        # Straight dunk while coming down
        if self.tilt <= -75:
            self.img = self.IMGS[1]
            self.img_count = self.ANIMATION_TIME*2
        # Now we actually rotate the image
        rotated_image = pygame.transform.rotate(self.img, self.tilt)
        bird_rectangle = rotated_image.get_rect(
            center=self.img.get_rect(topleft=(self.x, self.y)).center)
        win.blit(rotated_image, bird_rectangle)

    def get_mask(self):
        """Pixel mask of the current frame, used for collision tests."""
        return pygame.mask.from_surface(self.img)
class Pipe:
    """A top/bottom pipe pair scrolling right-to-left."""
    GAP = 200       # vertical gap between the two pipes
    VELOCITY = 5    # horizontal scroll speed, px per frame

    def __init__(self, x):
        self.x = x
        self.height = 0
        self.top = 0      # y of the (flipped) top pipe's upper-left corner
        self.bottom = 0   # y where the bottom pipe starts
        self.PIPE_BOTTOM = PIPE_IMG
        self.PIPE_TOP = pygame.transform.flip(PIPE_IMG, False, True)
        self.passed = False  # set once a bird has flown past this pipe
        self.set_height()

    def set_height(self):
        """Randomize the vertical position of the gap."""
        self.height = random.randrange(50, 450)
        self.top = self.height - self.PIPE_TOP.get_height()
        self.bottom = self.height + self.GAP

    def move(self):
        self.x -= self.VELOCITY

    def draw(self, win):
        win.blit(self.PIPE_TOP, (self.x, self.top))
        win.blit(self.PIPE_BOTTOM, (self.x, self.bottom))

    def collide(self, bird):
        """Pixel-perfect collision test between `bird` and either pipe half."""
        bird_mask = bird.get_mask()
        top_pipe_mask = pygame.mask.from_surface(self.PIPE_TOP)
        bottom_pipe_mask = pygame.mask.from_surface(self.PIPE_BOTTOM)
        # Offsets are pipe position relative to the bird, as mask.overlap expects.
        top_pipe_offset = (self.x - bird.x, self.top - round(bird.y))
        bottom_pipe_offset = (self.x - bird.x, self.bottom - round(bird.y))
        top_collision_point = bird_mask.overlap(top_pipe_mask, top_pipe_offset)
        bottom_collision_point = bird_mask.overlap(
            bottom_pipe_mask, bottom_pipe_offset)
        if top_collision_point or bottom_collision_point:
            return True
        return False
class Base:
    """Scrolling ground: two copies of the image leapfrog each other."""
    VELOCITY = 5  # matches Pipe.VELOCITY so ground and pipes move in sync
    IMG = BASE_IMG
    WIDTH = IMG.get_width()

    def __init__(self, y):
        self.y = y
        self.x1 = 0           # left copy
        self.x2 = self.WIDTH  # right copy

    def move(self):
        self.x1 -= self.VELOCITY
        self.x2 -= self.VELOCITY
        # Recycle whichever copy has scrolled fully off-screen to the back.
        if self.x1 + self.WIDTH < 0:
            self.x1 = self.x2 + self.WIDTH
        if self.x2 + self.WIDTH < 0:
            self.x2 = self.x1 + self.WIDTH

    def draw(self, win):
        win.blit(self.IMG, (self.x1, self.y))
        win.blit(self.IMG, (self.x2, self.y))
def draw_window(win, birds, pipes, base, score):
    """Redraw one frame: background, pipes, score text, base, then all birds."""
    win.blit(BACK_IMG, (0, 0))
    for pipe in pipes:
        pipe.draw(win)
    text = STAT_FONT.render("Score: " + str(score), 1, (255, 255, 255))
    win.blit(text, (WIN_WIDTH - 10 - text.get_width(), 10))  # top-right corner
    base.draw(win)
    for bird in birds:
        bird.draw(win)
    pygame.display.update()
def eval_genome(genomes, config):
    """NEAT fitness function: plays one game with all genomes simultaneously.

    Each genome controls one bird; nets[i], birds[i] and ge[i] stay index-
    aligned. Inputs to each network: bird y and vertical distances to the
    next pipe's gap edges; output > 0.5 means jump.
    """
    birds = []
    nets = []
    ge = []
    for _, genome in genomes:
        net = neat.nn.FeedForwardNetwork.create(genome, config)
        nets.append(net)
        birds.append(Bird(230, 350))
        genome.fitness = 0
        ge.append(genome)
    base = Base(730)
    pipes = [Pipe(700)]
    win = pygame.display.set_mode((WIN_WIDTH, WIN_HEIGHT))
    clock = pygame.time.Clock()
    score = 0
    run = True
    while run:
        clock.tick(30)  # cap at 30 FPS
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
                pygame.quit()
                quit()
        # Checking if the bird is behind the first pipe
        pipe_index = 0
        if len(birds) > 0:
            # Incrementing the pipe index when crossed
            if len(pipes) > 1 and birds[0].x > pipes[0].x + pipes[0].PIPE_TOP.get_width():
                pipe_index = 1
        else:
            # No birds left alive — end this generation.
            run = False
            break
        for x, bird in enumerate(birds):
            bird.move()
            # NOTE(review): `= 0.1` overwrites any fitness accumulated so far
            # (including the +5 pipe bonus below); the usual NEAT tutorial
            # uses `+= 0.1` — confirm intent.
            ge[x].fitness = 0.1
            # We'll check value of output. If the value increases the threshold then the bird jumps
            output = nets[x].activate((bird.y, abs(
                bird.y - pipes[pipe_index].height), abs(bird.y - pipes[pipe_index].bottom)))
            if output[0] > 0.5:
                bird.jump()
        add_pipe = False
        rem = []
        for pipe in pipes:
            for one, bird in enumerate(birds):
                if pipe.collide(bird):
                    ge[one].fitness -= 1
                    # Actually removing the bird and its properties
                    # NOTE(review): popping while enumerating the same list
                    # skips the element after each removal — verify.
                    birds.pop(one)
                    nets.pop(one)
                    ge.pop(one)
                if not pipe.passed and pipe.x < bird.x:
                    pipe.passed = True
                    add_pipe = True
            if pipe.x + pipe.PIPE_TOP.get_width() < 0:
                # Pipe fully off-screen; schedule it for removal.
                rem.append(pipe)
            pipe.move()
        if add_pipe:
            score += 1
            # Bird crossing the pipe will get extra credit
            # These are genomes for actually alive birds
            for g in ge:
                g.fitness += 5
            pipes.append(Pipe(600))
        for r in rem:
            pipes.remove(r)
        for dead, bird in enumerate(birds):
            if bird.y + bird.img.get_height() >= 730 or bird.y < 0:
                # Removing the birds when they actually hit the ground
                # NOTE(review): same pop-while-enumerating pattern as above.
                birds.pop(dead)
                nets.pop(dead)
                ge.pop(dead)
        base.move()
        draw_window(win, birds, pipes, base, score)
def run(config_path):
    """Build a NEAT population from `config_path` and evolve it for up to 30
    generations, using `eval_genome` as the fitness function."""
    config = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction,
                                neat.DefaultSpeciesSet, neat.DefaultStagnation, config_path)
    population = neat.Population(config)
    population.add_reporter(neat.StdOutReporter(True))  # per-generation stats to stdout
    population.add_reporter(neat.StatisticsReporter())
    # NOTE(review): `winner` (the best genome) is never used afterwards.
    winner = population.run(eval_genome, 30)
if __name__ == "__main__":
    # Resolve the NEAT config next to this script so it works from any CWD.
    local_dir = os.path.dirname(__file__)
    config_path = os.path.join(local_dir, "FlappyBird-NEAT.config")
    run(config_path)
|
import os
import random
import time
import neat
import pygame
# Window geometry and 2x-scaled sprite assets, loaded once at import time
# (requires an "imgs/" directory next to the script).
WIN_WIDTH = 500
WIN_HEIGHT = 800
# Bird flap animation frames: wings up / level / down.
BIRD_IMGS = [pygame.transform.scale2x(pygame.image.load(os.path.join("imgs", "bird1.png"))), pygame.transform.scale2x(
    pygame.image.load(os.path.join("imgs", "bird2.png"))), pygame.transform.scale2x(pygame.image.load(os.path.join("imgs", "bird3.png")))]
PIPE_IMG = pygame.transform.scale2x(
    pygame.image.load(os.path.join("imgs", "pipe.png")))
BASE_IMG = pygame.transform.scale2x(
    pygame.image.load(os.path.join("imgs", "base.png")))
BACK_IMG = pygame.transform.scale2x(
    pygame.image.load(os.path.join("imgs", "bg.png")))
pygame.font.init()  # must run before SysFont below
STAT_FONT = pygame.font.SysFont("comicsans", 50)
class Bird:
    """Flappy-bird sprite: jump/fall physics, tilt animation and drawing."""
    IMGS = BIRD_IMGS            # flap animation frames
    MAX_ROTATION = 25           # max upward tilt, degrees
    ROTATION_VELOCITY = 20      # degrees rotated downward per frame while diving
    ANIMATION_TIME = 5          # frames each animation image is shown

    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.tilt = 0
        self.tick_count = 0     # frames since the last jump
        self.velocity = 0
        self.height = self.y    # y at the moment of the last jump
        self.img_count = 0
        self.img = self.IMGS[0]

    def jump(self):
        """Start an upward impulse (negative y is up in pygame)."""
        self.velocity = -10.5
        self.tick_count = 0
        self.height = self.y

    def move(self):
        """Advance one physics frame: displacement = v*t + 1.5*t^2 (gravity)."""
        self.tick_count += 1
        distance = self.velocity*self.tick_count + 1.5*self.tick_count**2
        if distance >= 16:      # terminal fall speed per frame
            distance = 16
        if distance < 0:        # small extra boost while still rising
            distance -= 2
        self.y = self.y + distance
        # Setting tilt to fixed value upon reaching max height
        if distance < 0 or self.y < self.height + 50:
            if self.tilt < self.MAX_ROTATION:
                self.tilt = self.MAX_ROTATION
        elif self.tilt > -90:
            self.tilt -= self.ROTATION_VELOCITY

    def draw(self, win):
        """Draw the bird with its current animation frame, rotated by tilt."""
        self.img_count += 1
        # Bird tilting up
        if self.img_count < self.ANIMATION_TIME:
            self.img = self.IMGS[0]
        elif self.img_count < self.ANIMATION_TIME*2:
            self.img = self.IMGS[1]
        elif self.img_count < self.ANIMATION_TIME*3:
            self.img = self.IMGS[2]
        # Bird tilting down
        elif self.img_count < self.ANIMATION_TIME*4:
            self.img = self.IMGS[1]
        elif self.img_count < self.ANIMATION_TIME*5:
            self.img = self.IMGS[0]
            self.img_count = 0
        # Straight dunk while coming down
        if self.tilt <= -75:
            self.img = self.IMGS[1]
            self.img_count = self.ANIMATION_TIME*2
        # Now we actually rotate the image
        rotated_image = pygame.transform.rotate(self.img, self.tilt)
        bird_rectangle = rotated_image.get_rect(
            center=self.img.get_rect(topleft=(self.x, self.y)).center)
        win.blit(rotated_image, bird_rectangle)

    def get_mask(self):
        """Pixel mask of the current frame, used for collision tests."""
        return pygame.mask.from_surface(self.img)
class Pipe:
    """A top/bottom pipe pair scrolling right-to-left."""
    GAP = 200       # vertical gap between the two pipes
    VELOCITY = 5    # horizontal scroll speed, px per frame

    def __init__(self, x):
        self.x = x
        self.height = 0
        self.top = 0      # y of the (flipped) top pipe's upper-left corner
        self.bottom = 0   # y where the bottom pipe starts
        self.PIPE_BOTTOM = PIPE_IMG
        self.PIPE_TOP = pygame.transform.flip(PIPE_IMG, False, True)
        self.passed = False  # set once a bird has flown past this pipe
        self.set_height()

    def set_height(self):
        """Randomize the vertical position of the gap."""
        self.height = random.randrange(50, 450)
        self.top = self.height - self.PIPE_TOP.get_height()
        self.bottom = self.height + self.GAP

    def move(self):
        self.x -= self.VELOCITY

    def draw(self, win):
        win.blit(self.PIPE_TOP, (self.x, self.top))
        win.blit(self.PIPE_BOTTOM, (self.x, self.bottom))

    def collide(self, bird):
        """Pixel-perfect collision test between `bird` and either pipe half."""
        bird_mask = bird.get_mask()
        top_pipe_mask = pygame.mask.from_surface(self.PIPE_TOP)
        bottom_pipe_mask = pygame.mask.from_surface(self.PIPE_BOTTOM)
        # Offsets are pipe position relative to the bird, as mask.overlap expects.
        top_pipe_offset = (self.x - bird.x, self.top - round(bird.y))
        bottom_pipe_offset = (self.x - bird.x, self.bottom - round(bird.y))
        top_collision_point = bird_mask.overlap(top_pipe_mask, top_pipe_offset)
        bottom_collision_point = bird_mask.overlap(
            bottom_pipe_mask, bottom_pipe_offset)
        if top_collision_point or bottom_collision_point:
            return True
        return False
class Base:
    """Scrolling ground: two copies of the image leapfrog each other."""
    VELOCITY = 5  # matches Pipe.VELOCITY so ground and pipes move in sync
    IMG = BASE_IMG
    WIDTH = IMG.get_width()

    def __init__(self, y):
        self.y = y
        self.x1 = 0           # left copy
        self.x2 = self.WIDTH  # right copy

    def move(self):
        self.x1 -= self.VELOCITY
        self.x2 -= self.VELOCITY
        # Recycle whichever copy has scrolled fully off-screen to the back.
        if self.x1 + self.WIDTH < 0:
            self.x1 = self.x2 + self.WIDTH
        if self.x2 + self.WIDTH < 0:
            self.x2 = self.x1 + self.WIDTH

    def draw(self, win):
        win.blit(self.IMG, (self.x1, self.y))
        win.blit(self.IMG, (self.x2, self.y))
def draw_window(win, birds, pipes, base, score):
    """Redraw one frame: background, pipes, score text, base, then all birds."""
    win.blit(BACK_IMG, (0, 0))
    for pipe in pipes:
        pipe.draw(win)
    text = STAT_FONT.render("Score: " + str(score), 1, (255, 255, 255))
    win.blit(text, (WIN_WIDTH - 10 - text.get_width(), 10))  # top-right corner
    base.draw(win)
    for bird in birds:
        bird.draw(win)
    pygame.display.update()
def eval_genome(genomes, config):
    """NEAT fitness function: plays one game with all genomes simultaneously.

    Each genome controls one bird; nets[i], birds[i] and ge[i] stay index-
    aligned. Inputs to each network: bird y and vertical distances to the
    next pipe's gap edges; output > 0.5 means jump.
    """
    birds = []
    nets = []
    ge = []
    for _, genome in genomes:
        net = neat.nn.FeedForwardNetwork.create(genome, config)
        nets.append(net)
        birds.append(Bird(230, 350))
        genome.fitness = 0
        ge.append(genome)
    base = Base(730)
    pipes = [Pipe(700)]
    win = pygame.display.set_mode((WIN_WIDTH, WIN_HEIGHT))
    clock = pygame.time.Clock()
    score = 0
    run = True
    while run:
        clock.tick(30)  # cap at 30 FPS
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
                pygame.quit()
                quit()
        # Checking if the bird is behind the first pipe
        pipe_index = 0
        if len(birds) > 0:
            # Incrementing the pipe index when crossed
            if len(pipes) > 1 and birds[0].x > pipes[0].x + pipes[0].PIPE_TOP.get_width():
                pipe_index = 1
        else:
            # No birds left alive — end this generation.
            run = False
            break
        for x, bird in enumerate(birds):
            bird.move()
            # NOTE(review): `= 0.1` overwrites any fitness accumulated so far
            # (including the +5 pipe bonus below); the usual NEAT tutorial
            # uses `+= 0.1` — confirm intent.
            ge[x].fitness = 0.1
            # We'll check value of output. If the value increases the threshold then the bird jumps
            output = nets[x].activate((bird.y, abs(
                bird.y - pipes[pipe_index].height), abs(bird.y - pipes[pipe_index].bottom)))
            if output[0] > 0.5:
                bird.jump()
        add_pipe = False
        rem = []
        for pipe in pipes:
            for one, bird in enumerate(birds):
                if pipe.collide(bird):
                    ge[one].fitness -= 1
                    # Actually removing the bird and its properties
                    # NOTE(review): popping while enumerating the same list
                    # skips the element after each removal — verify.
                    birds.pop(one)
                    nets.pop(one)
                    ge.pop(one)
                if not pipe.passed and pipe.x < bird.x:
                    pipe.passed = True
                    add_pipe = True
            if pipe.x + pipe.PIPE_TOP.get_width() < 0:
                # Pipe fully off-screen; schedule it for removal.
                rem.append(pipe)
            pipe.move()
        if add_pipe:
            score += 1
            # Bird crossing the pipe will get extra credit
            # These are genomes for actually alive birds
            for g in ge:
                g.fitness += 5
            pipes.append(Pipe(600))
        for r in rem:
            pipes.remove(r)
        for dead, bird in enumerate(birds):
            if bird.y + bird.img.get_height() >= 730 or bird.y < 0:
                # Removing the birds when they actually hit the ground
                # NOTE(review): same pop-while-enumerating pattern as above.
                birds.pop(dead)
                nets.pop(dead)
                ge.pop(dead)
        base.move()
        draw_window(win, birds, pipes, base, score)
def run(config_path):
    """Build a NEAT population from `config_path` and evolve it for up to 30
    generations, using `eval_genome` as the fitness function."""
    config = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction,
                                neat.DefaultSpeciesSet, neat.DefaultStagnation, config_path)
    population = neat.Population(config)
    population.add_reporter(neat.StdOutReporter(True))  # per-generation stats to stdout
    population.add_reporter(neat.StatisticsReporter())
    # NOTE(review): `winner` (the best genome) is never used afterwards.
    winner = population.run(eval_genome, 30)
if __name__ == "__main__":
    # Resolve the NEAT config next to this script so it works from any CWD.
    local_dir = os.path.dirname(__file__)
    config_path = os.path.join(local_dir, "FlappyBird-NEAT.config")
    run(config_path)
|
en
| 0.801643
|
# Setting tilt to fixed value upon reaching max height # Bird tilting up # Bird tilting down # Straight dunk while coming down # Now we actually rotate the image # Checking if the bird is behind the first pipe # Incrementing the pipe index when crossed # We'll check value of output. If the value increases the threshold then the bird jumps # Actually removing the bird and its properties # Bird crossing the pipe will get extra credit # These are genomes for actually alive birds # Removing the birds when they actually hit the ground
| 2.952604
| 3
|
examples/echo/client.py
|
zTgx/hoopServer
| 3
|
6629690
|
# Minimal smoke-test client: GET /api/v2/echo from the local hoop server
# and print the raw response body.
import http.client

conn = http.client.HTTPConnection("127.0.0.1:9527")
conn.request("GET", '/api/v2/echo')
res = conn.getresponse()
print(res.read())
conn.close()
|
# Minimal smoke-test client: GET /api/v2/echo from the local hoop server
# and print the raw response body.
import http.client

conn = http.client.HTTPConnection("127.0.0.1:9527")
conn.request("GET", '/api/v2/echo')
res = conn.getresponse()
print(res.read())
conn.close()
|
none
| 1
| 2.422332
| 2
|
|
rasa/core/processor.py
|
teresaflaherty/rasa
| 0
|
6629691
|
import logging
import os
import time
from types import LambdaType
from typing import Any, Dict, List, Optional, Text, Tuple, Union
import rasa.shared.utils.io
import rasa.core.actions.action
from rasa.core import jobs
from rasa.core.actions.action import Action
from rasa.core.channels.channel import (
CollectingOutputChannel,
OutputChannel,
UserMessage,
)
import rasa.core.utils
from rasa.core.policies.policy import PolicyPrediction
from rasa.shared.core.constants import (
USER_INTENT_RESTART,
ACTION_LISTEN_NAME,
ACTION_SESSION_START_NAME,
REQUESTED_SLOT,
SLOTS,
FOLLOWUP_ACTION,
)
from rasa.shared.core.domain import Domain
from rasa.shared.core.events import (
ActionExecuted,
ActionExecutionRejected,
BotUttered,
Event,
ReminderCancelled,
ReminderScheduled,
SlotSet,
UserUttered,
)
from rasa.shared.nlu.interpreter import NaturalLanguageInterpreter, RegexInterpreter
from rasa.shared.constants import (
INTENT_MESSAGE_PREFIX,
DOCS_URL_DOMAINS,
DEFAULT_SENDER_ID,
DOCS_URL_POLICIES,
UTTER_PREFIX,
)
from rasa.core.nlg import NaturalLanguageGenerator
from rasa.core.policies.ensemble import PolicyEnsemble
import rasa.core.tracker_store
import rasa.shared.core.trackers
from rasa.shared.core.trackers import DialogueStateTracker, EventVerbosity
from rasa.shared.nlu.constants import INTENT_NAME_KEY
from rasa.utils.endpoints import EndpointConfig
from rasa.core.emotion_determination import determine_bot_emotion
logger = logging.getLogger(__name__)
MAX_NUMBER_OF_PREDICTIONS = int(os.environ.get("MAX_NUMBER_OF_PREDICTIONS", "10"))
class MessageProcessor:
    def __init__(
        self,
        interpreter: NaturalLanguageInterpreter,
        policy_ensemble: PolicyEnsemble,
        domain: Domain,
        tracker_store: rasa.core.tracker_store.TrackerStore,
        generator: NaturalLanguageGenerator,
        action_endpoint: Optional[EndpointConfig] = None,
        max_number_of_predictions: int = MAX_NUMBER_OF_PREDICTIONS,
        message_preprocessor: Optional[LambdaType] = None,
        on_circuit_break: Optional[LambdaType] = None,
    ):
        """Wires together the components needed to process user messages.

        `action_endpoint` is the endpoint used to run custom actions,
        `max_number_of_predictions` bounds the predict/execute loop, and
        `message_preprocessor` / `on_circuit_break` are optional callables.
        """
        self.interpreter = interpreter
        self.nlg = generator  # note: stored as `nlg`, not `generator`
        self.policy_ensemble = policy_ensemble
        self.domain = domain
        self.tracker_store = tracker_store
        self.max_number_of_predictions = max_number_of_predictions
        self.message_preprocessor = message_preprocessor
        self.on_circuit_break = on_circuit_break
        self.action_endpoint = action_endpoint
    async def handle_message(
        self, message: UserMessage,
    ) -> Optional[List[Dict[Text, Any]]]:
        """Handle a single message with this processor.

        Logs the message, predicts and executes the next actions, persists
        the tracker, and returns the collected bot responses only when the
        output channel is a `CollectingOutputChannel` (otherwise None).
        """
        # preprocess message if necessary
        tracker = await self.log_message(message, should_save_tracker=False)
        if not self.policy_ensemble or not self.domain:
            # save tracker state to continue conversation from this state
            self._save_tracker(tracker)
            rasa.shared.utils.io.raise_warning(
                "No policy ensemble or domain set. Skipping action prediction "
                "and execution.",
                docs=DOCS_URL_POLICIES,
            )
            return None
        await self._predict_and_execute_next_action(message.output_channel, tracker)
        # save tracker state to continue conversation from this state
        self._save_tracker(tracker)
        if isinstance(message.output_channel, CollectingOutputChannel):
            return message.output_channel.messages
        return None
    async def predict_next(self, sender_id: Text) -> Optional[Dict[Text, Any]]:
        """Predict the next action for the current conversation state.
        Args:
            sender_id: Conversation ID.
        Returns:
            The prediction for the next action. `None` if no domain or policies loaded.
        """
        # we have a Tracker instance for each user
        # which maintains conversation state
        tracker = await self.fetch_tracker_and_update_session(sender_id)
        result = self.predict_next_with_tracker(tracker)
        # save tracker state to continue conversation from this state
        # (fetching may have started a new session, which must be persisted)
        self._save_tracker(tracker)
        return result
    def predict_next_with_tracker(
        self,
        tracker: DialogueStateTracker,
        verbosity: EventVerbosity = EventVerbosity.AFTER_RESTART,
    ) -> Optional[Dict[Text, Any]]:
        """Predict the next action for a given conversation state.
        Args:
            tracker: A tracker representing a conversation state.
            verbosity: Verbosity for the returned conversation state.
        Returns:
            The prediction for the next action. `None` if no domain or policies loaded.
        """
        if not self.policy_ensemble or not self.domain:
            # save tracker state to continue conversation from this state
            rasa.shared.utils.io.raise_warning(
                "No policy ensemble or domain set. Skipping action prediction."
                "You should set a policy before training a model.",
                docs=DOCS_URL_POLICIES,
            )
            return None
        prediction = self._get_next_action_probabilities(tracker)
        # One score entry per action in the domain, aligned by index.
        scores = [
            {"action": a, "score": p}
            for a, p in zip(self.domain.action_names_or_texts, prediction.probabilities)
        ]
        return {
            "scores": scores,
            "policy": prediction.policy_name,
            "confidence": prediction.max_confidence,
            "tracker": tracker.current_state(verbosity),
        }
    async def _update_tracker_session(
        self,
        tracker: DialogueStateTracker,
        output_channel: OutputChannel,
        metadata: Optional[Dict] = None,
    ) -> None:
        """Check the current session in `tracker` and update it if expired.
        An 'action_session_start' is run if the latest tracker session has expired,
        or if the tracker does not yet contain any events (only those after the last
        restart are considered).
        Args:
            metadata: Data sent from client associated with the incoming user message.
            tracker: Tracker to inspect.
            output_channel: Output channel for potential utterances in a custom
                `ActionSessionStart`.
        """
        if not tracker.applied_events() or self._has_session_expired(tracker):
            logger.debug(
                f"Starting a new session for conversation ID '{tracker.sender_id}'."
            )
            # Run the session-start action directly (not via policy prediction).
            await self._run_action(
                action=self._get_action(ACTION_SESSION_START_NAME),
                tracker=tracker,
                output_channel=output_channel,
                nlg=self.nlg,
                metadata=metadata,
                prediction=PolicyPrediction.for_action_name(
                    self.domain, ACTION_SESSION_START_NAME
                ),
            )
    async def fetch_tracker_and_update_session(
        self,
        sender_id: Text,
        output_channel: Optional[OutputChannel] = None,
        metadata: Optional[Dict] = None,
    ) -> DialogueStateTracker:
        """Fetches tracker for `sender_id` and updates its conversation session.
        If a new tracker is created, `action_session_start` is run.
        Args:
            metadata: Data sent from client associated with the incoming user message.
            output_channel: Output channel associated with the incoming user message.
            sender_id: Conversation ID for which to fetch the tracker.
        Returns:
            Tracker for `sender_id`.
        """
        tracker = self.get_tracker(sender_id)
        # Starts a fresh session if the previous one expired or none exists.
        await self._update_tracker_session(tracker, output_channel, metadata)
        return tracker
    async def fetch_tracker_with_initial_session(
        self,
        sender_id: Text,
        output_channel: Optional[OutputChannel] = None,
        metadata: Optional[Dict] = None,
    ) -> DialogueStateTracker:
        """Fetches tracker for `sender_id` and runs a session start if it's a new
        tracker.
        Args:
            metadata: Data sent from client associated with the incoming user message.
            output_channel: Output channel associated with the incoming user message.
            sender_id: Conversation ID for which to fetch the tracker.
        Returns:
            Tracker for `sender_id`.
        """
        tracker = self.get_tracker(sender_id)
        # run session start only if the tracker is empty
        # (unlike fetch_tracker_and_update_session, which also restarts
        # expired sessions)
        if not tracker.events:
            await self._update_tracker_session(tracker, output_channel, metadata)
        return tracker
    def get_tracker(self, conversation_id: Text) -> DialogueStateTracker:
        """Get the tracker for a conversation.
        In contrast to `fetch_tracker_and_update_session` this does not add any
        `action_session_start` or `session_start` events at the beginning of a
        conversation.
        Args:
            conversation_id: The ID of the conversation for which the history should be
                retrieved.
        Returns:
            Tracker for the conversation. Creates an empty tracker in case it's a new
            conversation.
        """
        # Empty/None conversation IDs fall back to the default sender.
        conversation_id = conversation_id or DEFAULT_SENDER_ID
        return self.tracker_store.get_or_create_tracker(
            conversation_id, append_action_listen=False
        )
    def get_trackers_for_all_conversation_sessions(
        self, conversation_id: Text
    ) -> List[DialogueStateTracker]:
        """Fetches all trackers for a conversation.
        Individual trackers are returned for each conversation session found
        for `conversation_id`.
        Args:
            conversation_id: The ID of the conversation for which the trackers should
                be retrieved.
        Returns:
            Trackers for the conversation.
        """
        conversation_id = conversation_id or DEFAULT_SENDER_ID
        # Full tracker (all sessions) is split into one tracker per session.
        tracker = self.tracker_store.retrieve_full_tracker(conversation_id)
        return rasa.shared.core.trackers.get_trackers_for_conversation_sessions(tracker)
    async def log_message(
        self, message: UserMessage, should_save_tracker: bool = True
    ) -> DialogueStateTracker:
        """Log `message` on tracker belonging to the message's conversation_id.
        Optionally save the tracker if `should_save_tracker` is `True`. Tracker saving
        can be skipped if the tracker returned by this method is used for further
        processing and saved at a later stage.
        """
        # we have a Tracker instance for each user
        # which maintains conversation state
        tracker = await self.fetch_tracker_and_update_session(
            message.sender_id, message.output_channel, message.metadata
        )
        # Parses the message and appends the resulting user event to the tracker.
        await self._handle_message_with_tracker(message, tracker)
        if should_save_tracker:
            # save tracker state to continue conversation from this state
            self._save_tracker(tracker)
        return tracker
    async def execute_action(
        self,
        sender_id: Text,
        action_name: Text,
        output_channel: OutputChannel,
        nlg: NaturalLanguageGenerator,
        prediction: PolicyPrediction,
    ) -> Optional[DialogueStateTracker]:
        """Execute an action for a conversation.
        Note that this might lead to unexpected bot behavior. Rather use an intent
        to execute certain behavior within a conversation (e.g. by using
        `trigger_external_user_uttered`).
        Args:
            sender_id: The ID of the conversation.
            action_name: The name of the action which should be executed.
            output_channel: The output channel which should be used for bot responses.
            nlg: The response generator.
            prediction: The prediction for the action.
        Returns:
            The new conversation state. Note that the new state is also persisted.
        """
        # we have a Tracker instance for each user
        # which maintains conversation state
        tracker = await self.fetch_tracker_and_update_session(sender_id, output_channel)
        action = self._get_action(action_name)
        await self._run_action(action, tracker, output_channel, nlg, prediction)
        # save tracker state to continue conversation from this state
        self._save_tracker(tracker)
        return tracker
def predict_next_action(
    self, tracker: DialogueStateTracker
) -> Tuple[rasa.core.actions.action.Action, PolicyPrediction]:
    """Predicts the next action the bot should take after seeing x.

    This should be overwritten by more advanced policies to use
    ML to predict the action.

    Returns:
        The action resolved from the highest-confidence prediction index,
        together with the full policy prediction.
    """
    prediction = self._get_next_action_probabilities(tracker)

    # resolve the predicted index into an actual Action object
    action = rasa.core.actions.action.action_for_index(
        prediction.max_confidence_index, self.domain, self.action_endpoint
    )

    logger.debug(
        f"Predicted next action '{action.name()}' with confidence "
        f"{prediction.max_confidence:.2f}."
    )

    return action, prediction
@staticmethod
def _is_reminder(e: Event, name: Text) -> bool:
    """Return `True` iff `e` is a `ReminderScheduled` event with the given name."""
    if not isinstance(e, ReminderScheduled):
        return False
    return e.name == name
@staticmethod
def _is_reminder_still_valid(
    tracker: DialogueStateTracker, reminder_event: ReminderScheduled
) -> bool:
    """Check if the conversation has been restarted after reminder.

    The reminder is still valid iff its scheduling event survives in the
    applied events; a restart clears applied events, invalidating it.
    """
    return any(
        MessageProcessor._is_reminder(event, reminder_event.name)
        for event in reversed(tracker.applied_events())
    )
@staticmethod
def _has_message_after_reminder(
    tracker: DialogueStateTracker, reminder_event: ReminderScheduled
) -> bool:
    """Check if the user sent a message after the reminder."""
    # Scan newest-to-oldest: hitting the reminder before any user message
    # means nothing was said after it.
    for e in reversed(tracker.events):
        if MessageProcessor._is_reminder(e, reminder_event.name):
            return False
        if isinstance(e, UserUttered) and e.text:
            return True

    return True  # tracker has probably been restarted
async def handle_reminder(
    self,
    reminder_event: ReminderScheduled,
    sender_id: Text,
    output_channel: OutputChannel,
) -> None:
    """Handle a reminder that is triggered asynchronously.

    Args:
        reminder_event: The scheduled reminder to act on.
        sender_id: Conversation the reminder belongs to.
        output_channel: Channel for any resulting bot responses.
    """
    tracker = await self.fetch_tracker_and_update_session(sender_id, output_channel)

    # NOTE: `and` binds tighter than `or` here — the reminder is cancelled if
    # (kill_on_user_message AND a message arrived after it) OR it is no
    # longer valid (e.g. the conversation was restarted).
    if (
        reminder_event.kill_on_user_message
        and self._has_message_after_reminder(tracker, reminder_event)
        or not self._is_reminder_still_valid(tracker, reminder_event)
    ):
        logger.debug(
            f"Canceled reminder because it is outdated ({reminder_event})."
        )
    else:
        intent = reminder_event.intent
        entities = reminder_event.entities or {}
        await self.trigger_external_user_uttered(
            intent, entities, tracker, output_channel
        )
async def trigger_external_user_uttered(
    self,
    intent_name: Text,
    entities: Optional[Union[List[Dict[Text, Any]], Dict[Text, Text]]],
    tracker: DialogueStateTracker,
    output_channel: OutputChannel,
) -> None:
    """Triggers an external message.

    Triggers an external message (like a user message, but invisible;
    used, e.g., by a reminder or the trigger_intent endpoint).

    Args:
        intent_name: Name of the intent to be triggered.
        entities: Entities to be passed on.
        tracker: The tracker to which the event should be added.
        output_channel: The output channel.
    """
    # Normalize the entity specification into a list of entity dicts.
    if isinstance(entities, list):
        entity_list = entities
    elif isinstance(entities, dict):
        # Allow for a short-hand notation {"ent1": "val1", "ent2": "val2", ...}.
        # Useful if properties like 'start', 'end', or 'extractor' are not given,
        # e.g. for external events.
        entity_list = [
            {"entity": ent, "value": val} for ent, val in entities.items()
        ]
    elif not entities:
        entity_list = []
    else:
        rasa.shared.utils.io.raise_warning(
            f"Invalid entity specification: {entities}. Assuming no entities."
        )
        entity_list = []

    # Set the new event's input channel to the latest input channel, so
    # that we don't lose this property.
    input_channel = tracker.get_latest_input_channel()

    tracker.update(
        UserUttered.create_external(intent_name, entity_list, input_channel)
    )
    await self._predict_and_execute_next_action(output_channel, tracker)
    # save tracker state to continue conversation from this state
    self._save_tracker(tracker)
@staticmethod
def _log_slots(tracker: DialogueStateTracker) -> None:
    """Debug-log all currently set slot values of `tracker`."""
    # Log currently set slots
    slot_values = "\n".join(
        [f"\t{s.name}: {s.value}" for s in tracker.slots.values()]
    )
    if slot_values.strip():
        logger.debug(f"Current slot values: \n{slot_values}")
def _check_for_unseen_features(self, parse_data: Dict[Text, Any]) -> None:
    """Warns the user if the NLU parse data contains unrecognized features.

    Checks intents and entities picked up by the NLU interpreter
    against the domain and warns the user of those that don't match.
    Also considers a list of default intents that are valid but don't
    need to be listed in the domain.

    Args:
        parse_data: NLUInterpreter parse data to check against the domain.
    """
    # nothing to validate against if there is no (non-empty) domain
    if not self.domain or self.domain.is_empty():
        return

    intent = parse_data["intent"][INTENT_NAME_KEY]
    if intent and intent not in self.domain.intents:
        rasa.shared.utils.io.raise_warning(
            f"Interpreter parsed an intent '{intent}' "
            f"which is not defined in the domain. "
            f"Please make sure all intents are listed in the domain.",
            docs=DOCS_URL_DOMAINS,
        )

    entities = parse_data["entities"] or []
    for element in entities:
        entity = element["entity"]
        if entity and entity not in self.domain.entities:
            rasa.shared.utils.io.raise_warning(
                f"Interpreter parsed an entity '{entity}' "
                f"which is not defined in the domain. "
                f"Please make sure all entities are listed in the domain.",
                docs=DOCS_URL_DOMAINS,
            )
def _get_action(
    self, action_name: Text
) -> Optional[rasa.core.actions.action.Action]:
    """Resolve an action name (or response text) into an `Action` object."""
    return rasa.core.actions.action.action_for_name_or_text(
        action_name, self.domain, self.action_endpoint
    )
async def parse_message(
    self, message: UserMessage, tracker: Optional[DialogueStateTracker] = None
) -> Dict[Text, Any]:
    """Interpret the passed message using the NLU interpreter.

    Args:
        message: Message to handle. Its `emotional_matrix` attribute toggles
            the emotional emulator used by `determine_bot_emotion`
            (assumed present on `UserMessage` — TODO confirm).
        tracker: Dialogue context of the message.

    Returns:
        Parsed data extracted from the message, augmented with
        `user_emotion` and `bot_emotion` keys.
    """
    # preprocess message if necessary
    if self.message_preprocessor is not None:
        text = self.message_preprocessor(message.text)
    else:
        text = message.text

    # Fix: use the module logger with lazy %-args instead of the root
    # logger, so the message respects this module's logging configuration.
    logger.info("User message: %s", text)

    # process bot and user emotions
    user_emotion, bot_emotion = determine_bot_emotion(text, message.emotional_matrix)

    # for testing - you can short-cut the NLU part with a message
    # in the format /intent{"entity1": val1, "entity2": val2}
    # parse_data is a dict of intent & entities
    if text.startswith(INTENT_MESSAGE_PREFIX):
        parse_data = await RegexInterpreter().parse(
            text, message.message_id, tracker
        )
    else:
        parse_data = await self.interpreter.parse(
            text, message.message_id, tracker, metadata=message.metadata
        )

    logger.debug(
        "Received user message '{}' with intent '{}' "
        "and entities '{}'".format(
            message.text, parse_data["intent"], parse_data["entities"]
        )
    )

    # attach the computed emotions so downstream consumers can use them
    parse_data["user_emotion"] = user_emotion
    parse_data["bot_emotion"] = bot_emotion

    self._check_for_unseen_features(parse_data)

    return parse_data
async def _handle_message_with_tracker(
    self, message: UserMessage, tracker: DialogueStateTracker
) -> None:
    """Parse `message` (if needed) and log it as a `UserUttered` on `tracker`."""
    # prefer pre-parsed data supplied with the message over re-parsing
    if message.parse_data:
        parse_data = message.parse_data
    else:
        parse_data = await self.parse_message(message, tracker)

    # don't ever directly mutate the tracker
    # - instead pass its events to log
    tracker.update(
        UserUttered(
            message.text,
            parse_data["intent"],
            parse_data["entities"],
            parse_data,
            input_channel=message.input_channel,
            message_id=message.message_id,
            metadata=message.metadata,
        ),
        self.domain,
    )

    if parse_data["entities"]:
        self._log_slots(tracker)

    logger.debug(
        f"Logged UserUtterance - tracker now has {len(tracker.events)} events."
    )
@staticmethod
def _should_handle_message(tracker: DialogueStateTracker) -> bool:
    """Return `True` unless the conversation is paused (restart always passes)."""
    return (
        not tracker.is_paused()
        or tracker.latest_message.intent.get(INTENT_NAME_KEY) == USER_INTENT_RESTART
    )
def is_action_limit_reached(
    self, num_predicted_actions: int, should_predict_another_action: bool
) -> bool:
    """Check whether the maximum number of predictions has been met.

    Args:
        num_predicted_actions: Number of predicted actions.
        should_predict_another_action: Whether the last executed action allows
            for more actions to be predicted or not.

    Returns:
        `True` if the limit of actions to predict has been reached.
    """
    limit_hit = num_predicted_actions >= self.max_number_of_predictions
    return limit_hit and should_predict_another_action
async def _predict_and_execute_next_action(
    self, output_channel: OutputChannel, tracker: DialogueStateTracker
) -> None:
    """Run the prediction/execution loop until the bot decides to listen."""
    # keep taking actions decided by the policy until it chooses to 'listen'
    should_predict_another_action = True
    num_predicted_actions = 0

    # action loop. predicts actions until we hit action listen
    while (
        should_predict_another_action
        and self._should_handle_message(tracker)
        and num_predicted_actions < self.max_number_of_predictions
    ):
        # this actually just calls the policy's method by the same name
        action, prediction = self.predict_next_action(tracker)

        should_predict_another_action = await self._run_action(
            action, tracker, output_channel, self.nlg, prediction
        )
        num_predicted_actions += 1

    if self.is_action_limit_reached(
        num_predicted_actions, should_predict_another_action
    ):
        # circuit breaker was tripped
        logger.warning(
            "Circuit breaker tripped. Stopped predicting "
            f"more actions for sender '{tracker.sender_id}'."
        )
        if self.on_circuit_break:
            # call a registered callback
            self.on_circuit_break(tracker, output_channel, self.nlg)
@staticmethod
def should_predict_another_action(action_name: Text) -> bool:
    """Determine whether the processor should predict another action.

    Args:
        action_name: Name of the latest executed action.

    Returns:
        `False` if `action_name` is `ACTION_LISTEN_NAME` or
        `ACTION_SESSION_START_NAME`, otherwise `True`.
    """
    is_terminal_action = (
        action_name == ACTION_LISTEN_NAME
        or action_name == ACTION_SESSION_START_NAME
    )
    return not is_terminal_action
async def execute_side_effects(
    self,
    events: List[Event],
    tracker: DialogueStateTracker,
    output_channel: OutputChannel,
) -> None:
    """Send bot messages, schedule and cancel reminders that are logged
    in the events array."""
    # ordering matters: responses first, then schedule, then cancel
    await self._send_bot_messages(events, tracker, output_channel)
    await self._schedule_reminders(events, tracker, output_channel)
    await self._cancel_reminders(events, tracker)
@staticmethod
async def _send_bot_messages(
    events: List[Event],
    tracker: DialogueStateTracker,
    output_channel: OutputChannel,
) -> None:
    """Send all the bot messages that are logged in the events array."""
    bot_events = (event for event in events if isinstance(event, BotUttered))
    for bot_event in bot_events:
        await output_channel.send_response(tracker.sender_id, bot_event.message())
async def _schedule_reminders(
    self,
    events: List[Event],
    tracker: DialogueStateTracker,
    output_channel: OutputChannel,
) -> None:
    """Uses the scheduler to time a job to trigger the passed reminder.

    Reminders with the same `id` property will overwrite one another
    (i.e. only one of them will eventually run).
    """
    for e in events:
        if not isinstance(e, ReminderScheduled):
            continue
        # schedule a one-shot ('date') job; `replace_existing` implements
        # the overwrite-by-id semantics described above
        (await jobs.scheduler()).add_job(
            self.handle_reminder,
            "date",
            run_date=e.trigger_date_time,
            args=[e, tracker.sender_id, output_channel],
            id=e.name,
            replace_existing=True,
            name=e.scheduled_job_name(tracker.sender_id),
        )
@staticmethod
async def _cancel_reminders(
    events: List[Event], tracker: DialogueStateTracker
) -> None:
    """Cancel reminders that match the `ReminderCancelled` event."""
    # All Reminders specified by ReminderCancelled events will be cancelled
    for event in events:
        if isinstance(event, ReminderCancelled):
            scheduler = await jobs.scheduler()
            # match against the job *name*, which encodes the sender ID
            for scheduled_job in scheduler.get_jobs():
                if event.cancels_job_with_name(
                    scheduled_job.name, tracker.sender_id
                ):
                    scheduler.remove_job(scheduled_job.id)
async def _run_action(
    self,
    action: rasa.core.actions.action.Action,
    tracker: DialogueStateTracker,
    output_channel: OutputChannel,
    nlg: NaturalLanguageGenerator,
    prediction: PolicyPrediction,
    metadata: Optional[Dict[Text, Any]] = None,
) -> bool:
    """Run `action`, apply its events to `tracker` and execute side effects.

    Returns:
        Whether another action should be predicted after this one.
    """
    # events and return values are used to update
    # the tracker state after an action has been taken
    try:
        # Here we set optional metadata to the ActionSessionStart, which will then
        # be passed to the SessionStart event. Otherwise the metadata will be lost.
        if action.name() == ACTION_SESSION_START_NAME:
            action.metadata = metadata

        # Use temporary tracker as we might need to discard the policy events in
        # case of a rejection.
        temporary_tracker = tracker.copy()
        temporary_tracker.update_with_events(prediction.events, self.domain)
        events = await action.run(
            output_channel, nlg, temporary_tracker, self.domain
        )
    except rasa.core.actions.action.ActionExecutionRejection:
        # the action refused to run; record the rejection and keep predicting
        events = [
            ActionExecutionRejected(
                action.name(), prediction.policy_name, prediction.max_confidence
            )
        ]
        tracker.update(events[0])
        return self.should_predict_another_action(action.name())
    except Exception:
        # deliberate broad catch: a failing action must not kill the bot
        logger.exception(
            f"Encountered an exception while running action '{action.name()}'."
            "Bot will continue, but the actions events are lost. "
            "Please check the logs of your action server for "
            "more information."
        )
        events = []

    self._log_action_on_tracker(tracker, action, events, prediction)
    if action.name() != ACTION_LISTEN_NAME and not action.name().startswith(
        UTTER_PREFIX
    ):
        self._log_slots(tracker)

    await self.execute_side_effects(events, tracker, output_channel)

    return self.should_predict_another_action(action.name())
def _warn_about_new_slots(
    self,
    tracker: DialogueStateTracker,
    action_name: Text,
    events: List[Event],
) -> None:
    """Warn when `action_name` sets a featurized slot it never set in training."""
    # these are the events from that action we have seen during training
    if (
        not self.policy_ensemble
        or action_name not in self.policy_ensemble.action_fingerprints
    ):
        return

    fp = self.policy_ensemble.action_fingerprints[action_name]
    slots_seen_during_train = fp.get(SLOTS, set())
    for e in events:
        if isinstance(e, SlotSet) and e.key not in slots_seen_during_train:
            s = tracker.slots.get(e.key)
            if s and s.has_features():
                # a requested slot inside an active loop is expected; skip it
                if e.key == REQUESTED_SLOT and tracker.active_loop:
                    pass
                else:
                    rasa.shared.utils.io.raise_warning(
                        f"Action '{action_name}' set a slot type '{e.key}' which "
                        f"it never set during the training. This "
                        f"can throw off the prediction. Make sure to "
                        f"include training examples in your stories "
                        f"for the different types of slots this "
                        f"action can return. Remember: you need to "
                        f"set the slots manually in the stories by "
                        f"adding '- slot{{\"{e.key}\": {e.value}}}' "
                        f"after the action."
                    )
def _log_action_on_tracker(
    self,
    tracker: DialogueStateTracker,
    action: Action,
    events: Optional[List[Event]],
    prediction: PolicyPrediction,
) -> None:
    """Record an executed action and its events on `tracker`."""
    # Ensures that the code still works even if a lazy programmer missed
    # to type `return []` at the end of an action or the run method
    # returns `None` for some other reason.
    if events is None:
        events = []

    self._warn_about_new_slots(tracker, action.name(), events)

    action_was_rejected_manually = any(
        isinstance(event, ActionExecutionRejected) for event in events
    )
    if not action_was_rejected_manually:
        # only apply the policy's own events if the action actually ran
        logger.debug(f"Policy prediction ended with events '{prediction.events}'.")
        tracker.update_with_events(prediction.events, self.domain)

        # log the action and its produced events
        tracker.update(action.event_for_successful_execution(prediction))

    logger.debug(f"Action '{action.name()}' ended with events '{events}'.")
    tracker.update_with_events(events, self.domain)
def _has_session_expired(self, tracker: DialogueStateTracker) -> bool:
    """Determine whether the latest session in `tracker` has expired.

    Args:
        tracker: Tracker to inspect.

    Returns:
        `True` if the session in `tracker` has expired, `False` otherwise.
    """
    if not self.domain.session_config.are_sessions_enabled():
        # tracker has never expired if sessions are disabled
        return False

    user_uttered_event: Optional[UserUttered] = tracker.get_last_event_for(
        UserUttered
    )
    if not user_uttered_event:
        # there is no user event so far so the session should not be considered
        # expired
        return False

    # session_expiration_time is in minutes, hence the division by 60
    time_delta_in_seconds = time.time() - user_uttered_event.timestamp
    has_expired = (
        time_delta_in_seconds / 60
        > self.domain.session_config.session_expiration_time
    )
    if has_expired:
        logger.debug(
            f"The latest session for conversation ID '{tracker.sender_id}' has "
            f"expired."
        )

    return has_expired
def _save_tracker(self, tracker: DialogueStateTracker) -> None:
    """Persist `tracker` to the configured tracker store."""
    self.tracker_store.save(tracker)
def _get_next_action_probabilities(
    self, tracker: DialogueStateTracker
) -> PolicyPrediction:
    """Collect predictions from ensemble and return action and predictions."""
    # a pending follow-up action takes precedence over the policy ensemble
    followup_action = tracker.followup_action
    if followup_action:
        tracker.clear_followup_action()
        if followup_action in self.domain.action_names_or_texts:
            return PolicyPrediction.for_action_name(
                self.domain, followup_action, FOLLOWUP_ACTION
            )

        logger.error(
            f"Trying to run unknown follow-up action '{followup_action}'. "
            "Instead of running that, Rasa Open Source will ignore the action "
            "and predict the next action."
        )

    prediction = self.policy_ensemble.probabilities_using_best_policy(
        tracker, self.domain, self.interpreter
    )

    if isinstance(prediction, PolicyPrediction):
        return prediction

    # legacy ensembles may return a (probabilities, policy_name) tuple;
    # wrap it and warn about the deprecated return type
    rasa.shared.utils.io.raise_deprecation_warning(
        f"Returning a tuple of probabilities and policy name for "
        f"`{PolicyEnsemble.probabilities_using_best_policy.__name__}` is "
        f"deprecated and will be removed in Rasa Open Source 3.0.0. Please return "
        f"a `{PolicyPrediction.__name__}` object instead."
    )
    probabilities, policy_name = prediction
    return PolicyPrediction(probabilities, policy_name)
|
import logging
import os
import time
from types import LambdaType
from typing import Any, Dict, List, Optional, Text, Tuple, Union
import rasa.shared.utils.io
import rasa.core.actions.action
from rasa.core import jobs
from rasa.core.actions.action import Action
from rasa.core.channels.channel import (
CollectingOutputChannel,
OutputChannel,
UserMessage,
)
import rasa.core.utils
from rasa.core.policies.policy import PolicyPrediction
from rasa.shared.core.constants import (
USER_INTENT_RESTART,
ACTION_LISTEN_NAME,
ACTION_SESSION_START_NAME,
REQUESTED_SLOT,
SLOTS,
FOLLOWUP_ACTION,
)
from rasa.shared.core.domain import Domain
from rasa.shared.core.events import (
ActionExecuted,
ActionExecutionRejected,
BotUttered,
Event,
ReminderCancelled,
ReminderScheduled,
SlotSet,
UserUttered,
)
from rasa.shared.nlu.interpreter import NaturalLanguageInterpreter, RegexInterpreter
from rasa.shared.constants import (
INTENT_MESSAGE_PREFIX,
DOCS_URL_DOMAINS,
DEFAULT_SENDER_ID,
DOCS_URL_POLICIES,
UTTER_PREFIX,
)
from rasa.core.nlg import NaturalLanguageGenerator
from rasa.core.policies.ensemble import PolicyEnsemble
import rasa.core.tracker_store
import rasa.shared.core.trackers
from rasa.shared.core.trackers import DialogueStateTracker, EventVerbosity
from rasa.shared.nlu.constants import INTENT_NAME_KEY
from rasa.utils.endpoints import EndpointConfig
from rasa.core.emotion_determination import determine_bot_emotion
# Module-level logger for this file.
logger = logging.getLogger(__name__)

# Upper bound on actions predicted per user message before the circuit
# breaker trips; overridable via the MAX_NUMBER_OF_PREDICTIONS env var.
MAX_NUMBER_OF_PREDICTIONS = int(os.environ.get("MAX_NUMBER_OF_PREDICTIONS", "10"))
class MessageProcessor:
def __init__(
    self,
    interpreter: NaturalLanguageInterpreter,
    policy_ensemble: PolicyEnsemble,
    domain: Domain,
    tracker_store: rasa.core.tracker_store.TrackerStore,
    generator: NaturalLanguageGenerator,
    action_endpoint: Optional[EndpointConfig] = None,
    max_number_of_predictions: int = MAX_NUMBER_OF_PREDICTIONS,
    message_preprocessor: Optional[LambdaType] = None,
    on_circuit_break: Optional[LambdaType] = None,
):
    """Create a message processor.

    Args:
        interpreter: NLU interpreter used to parse user messages.
        policy_ensemble: Policies used to predict the next action.
        domain: The bot's domain.
        tracker_store: Store used to load and persist conversation trackers.
        generator: Response (NLG) generator.
        action_endpoint: Endpoint for running custom actions, if any.
        max_number_of_predictions: Circuit-breaker limit per message.
        message_preprocessor: Optional callable applied to raw message text.
        on_circuit_break: Optional callback invoked when the limit is hit.
    """
    self.interpreter = interpreter
    self.nlg = generator
    self.policy_ensemble = policy_ensemble
    self.domain = domain
    self.tracker_store = tracker_store
    self.max_number_of_predictions = max_number_of_predictions
    self.message_preprocessor = message_preprocessor
    self.on_circuit_break = on_circuit_break
    self.action_endpoint = action_endpoint
async def handle_message(
    self, message: UserMessage,
) -> Optional[List[Dict[Text, Any]]]:
    """Handle a single message with this processor.

    Returns:
        The collected bot responses if the output channel collects them,
        otherwise `None`.
    """
    # preprocess message if necessary
    tracker = await self.log_message(message, should_save_tracker=False)
    if not self.policy_ensemble or not self.domain:
        # save tracker state to continue conversation from this state
        self._save_tracker(tracker)
        rasa.shared.utils.io.raise_warning(
            "No policy ensemble or domain set. Skipping action prediction "
            "and execution.",
            docs=DOCS_URL_POLICIES,
        )
        return None

    await self._predict_and_execute_next_action(message.output_channel, tracker)
    # save tracker state to continue conversation from this state
    self._save_tracker(tracker)

    if isinstance(message.output_channel, CollectingOutputChannel):
        return message.output_channel.messages

    return None
async def predict_next(self, sender_id: Text) -> Optional[Dict[Text, Any]]:
    """Predict the next action for the current conversation state.

    Args:
        sender_id: Conversation ID.

    Returns:
        The prediction for the next action. `None` if no domain or policies loaded.
    """
    # we have a Tracker instance for each user
    # which maintains conversation state
    tracker = await self.fetch_tracker_and_update_session(sender_id)
    result = self.predict_next_with_tracker(tracker)

    # save tracker state to continue conversation from this state
    self._save_tracker(tracker)

    return result
def predict_next_with_tracker(
    self,
    tracker: DialogueStateTracker,
    verbosity: EventVerbosity = EventVerbosity.AFTER_RESTART,
) -> Optional[Dict[Text, Any]]:
    """Predict the next action for a given conversation state.

    Args:
        tracker: A tracker representing a conversation state.
        verbosity: Verbosity for the returned conversation state.

    Returns:
        The prediction for the next action. `None` if no domain or policies loaded.
    """
    if not self.policy_ensemble or not self.domain:
        # Fix: the two concatenated string literals were missing a space
        # between "prediction." and "You", garbling the user-facing warning.
        rasa.shared.utils.io.raise_warning(
            "No policy ensemble or domain set. Skipping action prediction. "
            "You should set a policy before training a model.",
            docs=DOCS_URL_POLICIES,
        )
        return None

    prediction = self._get_next_action_probabilities(tracker)

    # pair every action name with its predicted probability
    scores = [
        {"action": a, "score": p}
        for a, p in zip(self.domain.action_names_or_texts, prediction.probabilities)
    ]
    return {
        "scores": scores,
        "policy": prediction.policy_name,
        "confidence": prediction.max_confidence,
        "tracker": tracker.current_state(verbosity),
    }
async def _update_tracker_session(
    self,
    tracker: DialogueStateTracker,
    output_channel: OutputChannel,
    metadata: Optional[Dict] = None,
) -> None:
    """Check the current session in `tracker` and update it if expired.

    An 'action_session_start' is run if the latest tracker session has expired,
    or if the tracker does not yet contain any events (only those after the last
    restart are considered).

    Args:
        metadata: Data sent from client associated with the incoming user message.
        tracker: Tracker to inspect.
        output_channel: Output channel for potential utterances in a custom
            `ActionSessionStart`.
    """
    if not tracker.applied_events() or self._has_session_expired(tracker):
        logger.debug(
            f"Starting a new session for conversation ID '{tracker.sender_id}'."
        )

        # run the session-start action directly, bypassing policy prediction
        await self._run_action(
            action=self._get_action(ACTION_SESSION_START_NAME),
            tracker=tracker,
            output_channel=output_channel,
            nlg=self.nlg,
            metadata=metadata,
            prediction=PolicyPrediction.for_action_name(
                self.domain, ACTION_SESSION_START_NAME
            ),
        )
async def fetch_tracker_and_update_session(
    self,
    sender_id: Text,
    output_channel: Optional[OutputChannel] = None,
    metadata: Optional[Dict] = None,
) -> DialogueStateTracker:
    """Fetches tracker for `sender_id` and updates its conversation session.

    If a new tracker is created, `action_session_start` is run.

    Args:
        metadata: Data sent from client associated with the incoming user message.
        output_channel: Output channel associated with the incoming user message.
        sender_id: Conversation ID for which to fetch the tracker.

    Returns:
        Tracker for `sender_id`.
    """
    tracker = self.get_tracker(sender_id)

    await self._update_tracker_session(tracker, output_channel, metadata)

    return tracker
async def fetch_tracker_with_initial_session(
    self,
    sender_id: Text,
    output_channel: Optional[OutputChannel] = None,
    metadata: Optional[Dict] = None,
) -> DialogueStateTracker:
    """Fetches tracker for `sender_id` and runs a session start if it's a new
    tracker.

    Args:
        metadata: Data sent from client associated with the incoming user message.
        output_channel: Output channel associated with the incoming user message.
        sender_id: Conversation ID for which to fetch the tracker.

    Returns:
        Tracker for `sender_id`.
    """
    tracker = self.get_tracker(sender_id)

    # run session start only if the tracker is empty
    if not tracker.events:
        await self._update_tracker_session(tracker, output_channel, metadata)

    return tracker
def get_tracker(self, conversation_id: Text) -> DialogueStateTracker:
    """Get the tracker for a conversation.

    In contrast to `fetch_tracker_and_update_session` this does not add any
    `action_session_start` or `session_start` events at the beginning of a
    conversation.

    Args:
        conversation_id: The ID of the conversation for which the history should be
            retrieved.

    Returns:
        Tracker for the conversation. Creates an empty tracker in case it's a new
        conversation.
    """
    # fall back to the default sender ID for empty/missing conversation IDs
    if not conversation_id:
        conversation_id = DEFAULT_SENDER_ID

    return self.tracker_store.get_or_create_tracker(
        conversation_id, append_action_listen=False
    )
def get_trackers_for_all_conversation_sessions(
    self, conversation_id: Text
) -> List[DialogueStateTracker]:
    """Fetches all trackers for a conversation.

    Individual trackers are returned for each conversation session found
    for `conversation_id`.

    Args:
        conversation_id: The ID of the conversation for which the trackers should
            be retrieved.

    Returns:
        Trackers for the conversation.
    """
    # fall back to the default sender ID for empty/missing conversation IDs
    conversation_id = conversation_id or DEFAULT_SENDER_ID

    tracker = self.tracker_store.retrieve_full_tracker(conversation_id)

    return rasa.shared.core.trackers.get_trackers_for_conversation_sessions(tracker)
async def log_message(
    self, message: UserMessage, should_save_tracker: bool = True
) -> DialogueStateTracker:
    """Log `message` on tracker belonging to the message's conversation_id.

    Optionally save the tracker if `should_save_tracker` is `True`. Tracker saving
    can be skipped if the tracker returned by this method is used for further
    processing and saved at a later stage.

    Args:
        message: The incoming user message.
        should_save_tracker: Whether to persist the tracker after logging.

    Returns:
        The tracker the message was logged on.
    """
    # we have a Tracker instance for each user
    # which maintains conversation state
    tracker = await self.fetch_tracker_and_update_session(
        message.sender_id, message.output_channel, message.metadata
    )

    await self._handle_message_with_tracker(message, tracker)

    if should_save_tracker:
        # save tracker state to continue conversation from this state
        self._save_tracker(tracker)

    return tracker
async def execute_action(
    self,
    sender_id: Text,
    action_name: Text,
    output_channel: OutputChannel,
    nlg: NaturalLanguageGenerator,
    prediction: PolicyPrediction,
) -> Optional[DialogueStateTracker]:
    """Execute an action for a conversation.

    Note that this might lead to unexpected bot behavior. Rather use an intent
    to execute certain behavior within a conversation (e.g. by using
    `trigger_external_user_uttered`).

    Args:
        sender_id: The ID of the conversation.
        action_name: The name of the action which should be executed.
        output_channel: The output channel which should be used for bot responses.
        nlg: The response generator.
        prediction: The prediction for the action.

    Returns:
        The new conversation state. Note that the new state is also persisted.
    """
    # we have a Tracker instance for each user
    # which maintains conversation state
    tracker = await self.fetch_tracker_and_update_session(sender_id, output_channel)

    action = self._get_action(action_name)
    await self._run_action(action, tracker, output_channel, nlg, prediction)

    # save tracker state to continue conversation from this state
    self._save_tracker(tracker)

    return tracker
def predict_next_action(
    self, tracker: DialogueStateTracker
) -> Tuple[rasa.core.actions.action.Action, PolicyPrediction]:
    """Predicts the next action the bot should take after seeing x.

    This should be overwritten by more advanced policies to use
    ML to predict the action.

    Returns:
        The action resolved from the highest-confidence prediction index,
        together with the full policy prediction.
    """
    prediction = self._get_next_action_probabilities(tracker)

    # resolve the predicted index into an actual Action object
    action = rasa.core.actions.action.action_for_index(
        prediction.max_confidence_index, self.domain, self.action_endpoint
    )

    logger.debug(
        f"Predicted next action '{action.name()}' with confidence "
        f"{prediction.max_confidence:.2f}."
    )

    return action, prediction
@staticmethod
def _is_reminder(e: Event, name: Text) -> bool:
    """Return `True` iff `e` is a `ReminderScheduled` event with the given name."""
    if not isinstance(e, ReminderScheduled):
        return False
    return e.name == name
@staticmethod
def _is_reminder_still_valid(
    tracker: DialogueStateTracker, reminder_event: ReminderScheduled
) -> bool:
    """Check if the conversation has been restarted after reminder.

    The reminder is still valid iff its scheduling event survives in the
    applied events; a restart clears applied events, invalidating it.
    """
    return any(
        MessageProcessor._is_reminder(event, reminder_event.name)
        for event in reversed(tracker.applied_events())
    )
@staticmethod
def _has_message_after_reminder(
    tracker: DialogueStateTracker, reminder_event: ReminderScheduled
) -> bool:
    """Check if the user sent a message after the reminder."""
    # Scan newest-to-oldest: hitting the reminder before any user message
    # means nothing was said after it.
    for e in reversed(tracker.events):
        if MessageProcessor._is_reminder(e, reminder_event.name):
            return False
        if isinstance(e, UserUttered) and e.text:
            return True

    return True  # tracker has probably been restarted
async def handle_reminder(
    self,
    reminder_event: ReminderScheduled,
    sender_id: Text,
    output_channel: OutputChannel,
) -> None:
    """Handle a reminder that is triggered asynchronously.

    Args:
        reminder_event: The scheduled reminder to act on.
        sender_id: Conversation the reminder belongs to.
        output_channel: Channel for any resulting bot responses.
    """
    tracker = await self.fetch_tracker_and_update_session(sender_id, output_channel)

    # NOTE: `and` binds tighter than `or` here — the reminder is cancelled if
    # (kill_on_user_message AND a message arrived after it) OR it is no
    # longer valid (e.g. the conversation was restarted).
    if (
        reminder_event.kill_on_user_message
        and self._has_message_after_reminder(tracker, reminder_event)
        or not self._is_reminder_still_valid(tracker, reminder_event)
    ):
        logger.debug(
            f"Canceled reminder because it is outdated ({reminder_event})."
        )
    else:
        intent = reminder_event.intent
        entities = reminder_event.entities or {}
        await self.trigger_external_user_uttered(
            intent, entities, tracker, output_channel
        )
async def trigger_external_user_uttered(
    self,
    intent_name: Text,
    entities: Optional[Union[List[Dict[Text, Any]], Dict[Text, Text]]],
    tracker: DialogueStateTracker,
    output_channel: OutputChannel,
) -> None:
    """Triggers an external message.

    Triggers an external message (like a user message, but invisible;
    used, e.g., by a reminder or the trigger_intent endpoint).

    Args:
        intent_name: Name of the intent to be triggered.
        entities: Entities to be passed on.
        tracker: The tracker to which the event should be added.
        output_channel: The output channel.
    """
    # Normalize the entity specification into a list of entity dicts.
    if isinstance(entities, list):
        entity_list = entities
    elif isinstance(entities, dict):
        # Allow for a short-hand notation {"ent1": "val1", "ent2": "val2", ...}.
        # Useful if properties like 'start', 'end', or 'extractor' are not given,
        # e.g. for external events.
        entity_list = [
            {"entity": ent, "value": val} for ent, val in entities.items()
        ]
    elif not entities:
        entity_list = []
    else:
        rasa.shared.utils.io.raise_warning(
            f"Invalid entity specification: {entities}. Assuming no entities."
        )
        entity_list = []

    # Set the new event's input channel to the latest input channel, so
    # that we don't lose this property.
    input_channel = tracker.get_latest_input_channel()

    tracker.update(
        UserUttered.create_external(intent_name, entity_list, input_channel)
    )
    await self._predict_and_execute_next_action(output_channel, tracker)
    # save tracker state to continue conversation from this state
    self._save_tracker(tracker)
@staticmethod
def _log_slots(tracker: DialogueStateTracker) -> None:
    """Debug-log all currently set slot values of `tracker`."""
    # Log currently set slots
    slot_values = "\n".join(
        [f"\t{s.name}: {s.value}" for s in tracker.slots.values()]
    )
    if slot_values.strip():
        logger.debug(f"Current slot values: \n{slot_values}")
def _check_for_unseen_features(self, parse_data: Dict[Text, Any]) -> None:
    """Warns the user if the NLU parse data contains unrecognized features.

    Checks intents and entities picked up by the NLU interpreter
    against the domain and warns the user of those that don't match.
    Also considers a list of default intents that are valid but don't
    need to be listed in the domain.

    Args:
        parse_data: NLUInterpreter parse data to check against the domain.
    """
    # nothing to validate against if there is no (non-empty) domain
    if not self.domain or self.domain.is_empty():
        return

    intent = parse_data["intent"][INTENT_NAME_KEY]
    if intent and intent not in self.domain.intents:
        rasa.shared.utils.io.raise_warning(
            f"Interpreter parsed an intent '{intent}' "
            f"which is not defined in the domain. "
            f"Please make sure all intents are listed in the domain.",
            docs=DOCS_URL_DOMAINS,
        )

    entities = parse_data["entities"] or []
    for element in entities:
        entity = element["entity"]
        if entity and entity not in self.domain.entities:
            rasa.shared.utils.io.raise_warning(
                f"Interpreter parsed an entity '{entity}' "
                f"which is not defined in the domain. "
                f"Please make sure all entities are listed in the domain.",
                docs=DOCS_URL_DOMAINS,
            )
    def _get_action(self, action_name) -> Optional[rasa.core.actions.action.Action]:
        """Resolve an action name (or end-to-end action text) to an `Action`.

        Delegates to `action_for_name_or_text`, supplying this processor's
        domain and configured action server endpoint.
        """
        return rasa.core.actions.action.action_for_name_or_text(
            action_name, self.domain, self.action_endpoint
        )
async def parse_message(
self, message: UserMessage, tracker: Optional[DialogueStateTracker] = None
) -> Dict[Text, Any]:
"""Interprete the passed message using the NLU interpreter.
Arguments:
message: Message to handle
emotional_matrix: Emotional matrix switch, will use Gabriel's Emotional Emulator if True
tracker: Dialogue context of the message
Returns:
Parsed data extracted from the message.
"""
# preprocess message if necessary
if self.message_preprocessor is not None:
text = self.message_preprocessor(message.text)
else:
text = message.text
logging.info(f"User message: {text}")
# process bot and user emotions
user_emotion, bot_emotion = determine_bot_emotion(text, message.emotional_matrix)
# for testing - you can short-cut the NLU part with a message
# in the format /intent{"entity1": val1, "entity2": val2}
# parse_data is a dict of intent & entities
if text.startswith(INTENT_MESSAGE_PREFIX):
parse_data = await RegexInterpreter().parse(
text, message.message_id, tracker
)
else:
parse_data = await self.interpreter.parse(
text, message.message_id, tracker, metadata=message.metadata
)
logger.debug(
"Received user message '{}' with intent '{}' "
"and entities '{}'".format(
message.text, parse_data["intent"], parse_data["entities"]
)
)
parse_data["user_emotion"] = user_emotion
parse_data["bot_emotion"] = bot_emotion
self._check_for_unseen_features(parse_data)
return parse_data
    async def _handle_message_with_tracker(
        self, message: UserMessage, tracker: DialogueStateTracker
    ) -> None:
        """Parse `message` and log the resulting `UserUttered` on the tracker.

        Args:
            message: Incoming user message. Pre-parsed data attached to the
                message (if any) is used instead of running the interpreter.
            tracker: Conversation state to update; it is not saved here.
        """
        if message.parse_data:
            parse_data = message.parse_data
        else:
            parse_data = await self.parse_message(message, tracker)
        # don't ever directly mutate the tracker
        # - instead pass its events to log
        tracker.update(
            UserUttered(
                message.text,
                parse_data["intent"],
                parse_data["entities"],
                parse_data,
                input_channel=message.input_channel,
                message_id=message.message_id,
                metadata=message.metadata,
            ),
            self.domain,
        )
        # Slots may have been filled by entity extraction, so show them.
        if parse_data["entities"]:
            self._log_slots(tracker)
        logger.debug(
            f"Logged UserUtterance - tracker now has {len(tracker.events)} events."
        )
@staticmethod
def _should_handle_message(tracker: DialogueStateTracker):
return (
not tracker.is_paused()
or tracker.latest_message.intent.get(INTENT_NAME_KEY) == USER_INTENT_RESTART
)
def is_action_limit_reached(
self, num_predicted_actions: int, should_predict_another_action: bool
) -> bool:
"""Check whether the maximum number of predictions has been met.
Args:
num_predicted_actions: Number of predicted actions.
should_predict_another_action: Whether the last executed action allows
for more actions to be predicted or not.
Returns:
`True` if the limit of actions to predict has been reached.
"""
return (
num_predicted_actions >= self.max_number_of_predictions
and should_predict_another_action
)
    async def _predict_and_execute_next_action(
        self, output_channel: OutputChannel, tracker: DialogueStateTracker
    ):
        """Run the policy/action loop until the bot decides to listen again.

        Args:
            output_channel: Channel used for bot responses produced by the
                executed actions.
            tracker: Conversation state; updated in place by each action run.
        """
        # keep taking actions decided by the policy until it chooses to 'listen'
        should_predict_another_action = True
        num_predicted_actions = 0
        # action loop. predicts actions until we hit action listen
        while (
            should_predict_another_action
            and self._should_handle_message(tracker)
            and num_predicted_actions < self.max_number_of_predictions
        ):
            # this actually just calls the policy's method by the same name
            action, prediction = self.predict_next_action(tracker)
            should_predict_another_action = await self._run_action(
                action, tracker, output_channel, self.nlg, prediction
            )
            num_predicted_actions += 1
        if self.is_action_limit_reached(
            num_predicted_actions, should_predict_another_action
        ):
            # circuit breaker was tripped
            logger.warning(
                "Circuit breaker tripped. Stopped predicting "
                f"more actions for sender '{tracker.sender_id}'."
            )
            if self.on_circuit_break:
                # call a registered callback
                self.on_circuit_break(tracker, output_channel, self.nlg)
@staticmethod
def should_predict_another_action(action_name: Text) -> bool:
"""Determine whether the processor should predict another action.
Args:
action_name: Name of the latest executed action.
Returns:
`False` if `action_name` is `ACTION_LISTEN_NAME` or
`ACTION_SESSION_START_NAME`, otherwise `True`.
"""
return action_name not in (ACTION_LISTEN_NAME, ACTION_SESSION_START_NAME)
    async def execute_side_effects(
        self,
        events: List[Event],
        tracker: DialogueStateTracker,
        output_channel: OutputChannel,
    ) -> None:
        """Send bot messages, schedule and cancel reminders that are logged
        in the events array.

        Args:
            events: Events produced by the last action run.
            tracker: Conversation state the events belong to.
            output_channel: Channel used for any bot messages.
        """
        await self._send_bot_messages(events, tracker, output_channel)
        await self._schedule_reminders(events, tracker, output_channel)
        await self._cancel_reminders(events, tracker)
@staticmethod
async def _send_bot_messages(
events: List[Event],
tracker: DialogueStateTracker,
output_channel: OutputChannel,
) -> None:
"""Send all the bot messages that are logged in the events array."""
for e in events:
if not isinstance(e, BotUttered):
continue
await output_channel.send_response(tracker.sender_id, e.message())
    async def _schedule_reminders(
        self,
        events: List[Event],
        tracker: DialogueStateTracker,
        output_channel: OutputChannel,
    ) -> None:
        """Uses the scheduler to time a job to trigger the passed reminder.

        Reminders with the same `id` property will overwrite one another
        (i.e. only one of them will eventually run).
        """
        for e in events:
            if not isinstance(e, ReminderScheduled):
                continue
            # `replace_existing=True` with the reminder name as job id is
            # what implements the overwrite semantics described above.
            (await jobs.scheduler()).add_job(
                self.handle_reminder,
                "date",
                run_date=e.trigger_date_time,
                args=[e, tracker.sender_id, output_channel],
                id=e.name,
                replace_existing=True,
                name=e.scheduled_job_name(tracker.sender_id),
            )
@staticmethod
async def _cancel_reminders(
events: List[Event], tracker: DialogueStateTracker
) -> None:
"""Cancel reminders that match the `ReminderCancelled` event."""
# All Reminders specified by ReminderCancelled events will be cancelled
for event in events:
if isinstance(event, ReminderCancelled):
scheduler = await jobs.scheduler()
for scheduled_job in scheduler.get_jobs():
if event.cancels_job_with_name(
scheduled_job.name, tracker.sender_id
):
scheduler.remove_job(scheduled_job.id)
    async def _run_action(
        self,
        action: rasa.core.actions.action.Action,
        tracker: DialogueStateTracker,
        output_channel: OutputChannel,
        nlg: NaturalLanguageGenerator,
        prediction: PolicyPrediction,
        metadata: Optional[Dict[Text, Any]] = None,
    ) -> bool:
        """Run `action`, apply its events, and report whether to keep predicting.

        Args:
            action: The action to execute.
            tracker: Conversation state, updated in place with the resulting
                events.
            output_channel: Channel for bot responses.
            nlg: Response generator passed to the action.
            prediction: The policy prediction that selected this action.
            metadata: Optional metadata forwarded to `action_session_start`.

        Returns:
            `True` if another action should be predicted afterwards.
        """
        # events and return values are used to update
        # the tracker state after an action has been taken
        try:
            # Here we set optional metadata to the ActionSessionStart, which will then
            # be passed to the SessionStart event. Otherwise the metadata will be lost.
            if action.name() == ACTION_SESSION_START_NAME:
                action.metadata = metadata
            # Use temporary tracker as we might need to discard the policy events in
            # case of a rejection.
            temporary_tracker = tracker.copy()
            temporary_tracker.update_with_events(prediction.events, self.domain)
            events = await action.run(
                output_channel, nlg, temporary_tracker, self.domain
            )
        except rasa.core.actions.action.ActionExecutionRejection:
            # The action refused to run: record only the rejection on the real
            # tracker and return early, skipping side effects entirely.
            events = [
                ActionExecutionRejected(
                    action.name(), prediction.policy_name, prediction.max_confidence
                )
            ]
            tracker.update(events[0])
            return self.should_predict_another_action(action.name())
        except Exception:
            # Best-effort: an action crash is logged but must not kill the
            # conversation; its events are simply dropped.
            logger.exception(
                f"Encountered an exception while running action '{action.name()}'."
                "Bot will continue, but the actions events are lost. "
                "Please check the logs of your action server for "
                "more information."
            )
            events = []
        self._log_action_on_tracker(tracker, action, events, prediction)
        if action.name() != ACTION_LISTEN_NAME and not action.name().startswith(
            UTTER_PREFIX
        ):
            self._log_slots(tracker)
        await self.execute_side_effects(events, tracker, output_channel)
        return self.should_predict_another_action(action.name())
def _warn_about_new_slots(self, tracker, action_name, events) -> None:
# these are the events from that action we have seen during training
if (
not self.policy_ensemble
or action_name not in self.policy_ensemble.action_fingerprints
):
return
fp = self.policy_ensemble.action_fingerprints[action_name]
slots_seen_during_train = fp.get(SLOTS, set())
for e in events:
if isinstance(e, SlotSet) and e.key not in slots_seen_during_train:
s = tracker.slots.get(e.key)
if s and s.has_features():
if e.key == REQUESTED_SLOT and tracker.active_loop:
pass
else:
rasa.shared.utils.io.raise_warning(
f"Action '{action_name}' set a slot type '{e.key}' which "
f"it never set during the training. This "
f"can throw off the prediction. Make sure to "
f"include training examples in your stories "
f"for the different types of slots this "
f"action can return. Remember: you need to "
f"set the slots manually in the stories by "
f"adding '- slot{{\"{e.key}\": {e.value}}}' "
f"after the action."
)
    def _log_action_on_tracker(
        self,
        tracker: DialogueStateTracker,
        action: Action,
        events: Optional[List[Event]],
        prediction: PolicyPrediction,
    ) -> None:
        """Apply the action's events (and the policy's) to the tracker.

        Args:
            tracker: Conversation state to update in place.
            action: The action that was just executed.
            events: Events returned by the action run; may be `None`.
            prediction: The policy prediction that selected the action.
        """
        # Ensures that the code still works even if a lazy programmer missed
        # to type `return []` at the end of an action or the run method
        # returns `None` for some other reason.
        if events is None:
            events = []
        self._warn_about_new_slots(tracker, action.name(), events)
        action_was_rejected_manually = any(
            isinstance(event, ActionExecutionRejected) for event in events
        )
        if not action_was_rejected_manually:
            # Only a successfully executed action gets the policy's events and
            # an execution event logged on the tracker.
            logger.debug(f"Policy prediction ended with events '{prediction.events}'.")
            tracker.update_with_events(prediction.events, self.domain)
            # log the action and its produced events
            tracker.update(action.event_for_successful_execution(prediction))
        logger.debug(f"Action '{action.name()}' ended with events '{events}'.")
        tracker.update_with_events(events, self.domain)
def _has_session_expired(self, tracker: DialogueStateTracker) -> bool:
"""Determine whether the latest session in `tracker` has expired.
Args:
tracker: Tracker to inspect.
Returns:
`True` if the session in `tracker` has expired, `False` otherwise.
"""
if not self.domain.session_config.are_sessions_enabled():
# tracker has never expired if sessions are disabled
return False
user_uttered_event: Optional[UserUttered] = tracker.get_last_event_for(
UserUttered
)
if not user_uttered_event:
# there is no user event so far so the session should not be considered
# expired
return False
time_delta_in_seconds = time.time() - user_uttered_event.timestamp
has_expired = (
time_delta_in_seconds / 60
> self.domain.session_config.session_expiration_time
)
if has_expired:
logger.debug(
f"The latest session for conversation ID '{tracker.sender_id}' has "
f"expired."
)
return has_expired
    def _save_tracker(self, tracker: DialogueStateTracker) -> None:
        """Persist `tracker` to the configured tracker store."""
        self.tracker_store.save(tracker)
    def _get_next_action_probabilities(
        self, tracker: DialogueStateTracker
    ) -> PolicyPrediction:
        """Collect predictions from ensemble and return action and predictions.

        A pending follow-up action on the tracker takes precedence over the
        policy ensemble; the ensemble is only consulted when there is no
        valid follow-up action.
        """
        followup_action = tracker.followup_action
        if followup_action:
            tracker.clear_followup_action()
            if followup_action in self.domain.action_names_or_texts:
                return PolicyPrediction.for_action_name(
                    self.domain, followup_action, FOLLOWUP_ACTION
                )
            # Unknown follow-up actions are ignored rather than crashing the
            # conversation; fall through to a regular ensemble prediction.
            logger.error(
                f"Trying to run unknown follow-up action '{followup_action}'. "
                "Instead of running that, Rasa Open Source will ignore the action "
                "and predict the next action."
            )
        prediction = self.policy_ensemble.probabilities_using_best_policy(
            tracker, self.domain, self.interpreter
        )
        if isinstance(prediction, PolicyPrediction):
            return prediction
        # Backwards compatibility: older ensembles returned a
        # (probabilities, policy_name) tuple instead of a `PolicyPrediction`.
        rasa.shared.utils.io.raise_deprecation_warning(
            f"Returning a tuple of probabilities and policy name for "
            f"`{PolicyEnsemble.probabilities_using_best_policy.__name__}` is "
            f"deprecated and will be removed in Rasa Open Source 3.0.0. Please return "
            f"a `{PolicyPrediction.__name__}` object instead."
        )
        probabilities, policy_name = prediction
        return PolicyPrediction(probabilities, policy_name)
|
en
| 0.83511
|
Handle a single message with this processor. # preprocess message if necessary # save tracker state to continue conversation from this state # save tracker state to continue conversation from this state Predict the next action for the current conversation state. Args: sender_id: Conversation ID. Returns: The prediction for the next action. `None` if no domain or policies loaded. # we have a Tracker instance for each user # which maintains conversation state # save tracker state to continue conversation from this state Predict the next action for a given conversation state. Args: tracker: A tracker representing a conversation state. verbosity: Verbosity for the returned conversation state. Returns: The prediction for the next action. `None` if no domain or policies loaded. # save tracker state to continue conversation from this state Check the current session in `tracker` and update it if expired. An 'action_session_start' is run if the latest tracker session has expired, or if the tracker does not yet contain any events (only those after the last restart are considered). Args: metadata: Data sent from client associated with the incoming user message. tracker: Tracker to inspect. output_channel: Output channel for potential utterances in a custom `ActionSessionStart`. Fetches tracker for `sender_id` and updates its conversation session. If a new tracker is created, `action_session_start` is run. Args: metadata: Data sent from client associated with the incoming user message. output_channel: Output channel associated with the incoming user message. sender_id: Conversation ID for which to fetch the tracker. Returns: Tracker for `sender_id`. Fetches tracker for `sender_id` and runs a session start if it's a new tracker. Args: metadata: Data sent from client associated with the incoming user message. output_channel: Output channel associated with the incoming user message. sender_id: Conversation ID for which to fetch the tracker. Returns: Tracker for `sender_id`. 
# run session start only if the tracker is empty Get the tracker for a conversation. In contrast to `fetch_tracker_and_update_session` this does not add any `action_session_start` or `session_start` events at the beginning of a conversation. Args: conversation_id: The ID of the conversation for which the history should be retrieved. Returns: Tracker for the conversation. Creates an empty tracker in case it's a new conversation. Fetches all trackers for a conversation. Individual trackers are returned for each conversation session found for `conversation_id`. Args: conversation_id: The ID of the conversation for which the trackers should be retrieved. Returns: Trackers for the conversation. Log `message` on tracker belonging to the message's conversation_id. Optionally save the tracker if `should_save_tracker` is `True`. Tracker saving can be skipped if the tracker returned by this method is used for further processing and saved at a later stage. # we have a Tracker instance for each user # which maintains conversation state # save tracker state to continue conversation from this state Execute an action for a conversation. Note that this might lead to unexpected bot behavior. Rather use an intent to execute certain behavior within a conversation (e.g. by using `trigger_external_user_uttered`). Args: sender_id: The ID of the conversation. action_name: The name of the action which should be executed. output_channel: The output channel which should be used for bot responses. nlg: The response generator. prediction: The prediction for the action. Returns: The new conversation state. Note that the new state is also persisted. # we have a Tracker instance for each user # which maintains conversation state # save tracker state to continue conversation from this state Predicts the next action the bot should take after seeing x. This should be overwritten by more advanced policies to use ML to predict the action. Returns the index of the next action. 
Check if the conversation has been restarted after reminder. # not found in applied events --> has been restarted Check if the user sent a message after the reminder. # tracker has probably been restarted Handle a reminder that is triggered asynchronously. Triggers an external message. Triggers an external message (like a user message, but invisible; used, e.g., by a reminder or the trigger_intent endpoint). Args: intent_name: Name of the intent to be triggered. entities: Entities to be passed on. tracker: The tracker to which the event should be added. output_channel: The output channel. # Allow for a short-hand notation {"ent1": "val1", "ent2": "val2", ...}. # Useful if properties like 'start', 'end', or 'extractor' are not given, # e.g. for external events. # Set the new event's input channel to the latest input channel, so # that we don't lose this property. # save tracker state to continue conversation from this state # Log currently set slots Warns the user if the NLU parse data contains unrecognized features. Checks intents and entities picked up by the NLU interpreter against the domain and warns the user of those that don't match. Also considers a list of default intents that are valid but don't need to be listed in the domain. Args: parse_data: NLUInterpreter parse data to check against the domain. Interprete the passed message using the NLU interpreter. Arguments: message: Message to handle emotional_matrix: Emotional matrix switch, will use Gabriel's Emotional Emulator if True tracker: Dialogue context of the message Returns: Parsed data extracted from the message. # preprocess message if necessary # process bot and user emotions # for testing - you can short-cut the NLU part with a message # in the format /intent{"entity1": val1, "entity2": val2} # parse_data is a dict of intent & entities # don't ever directly mutate the tracker # - instead pass its events to log Check whether the maximum number of predictions has been met. 
Args: num_predicted_actions: Number of predicted actions. should_predict_another_action: Whether the last executed action allows for more actions to be predicted or not. Returns: `True` if the limit of actions to predict has been reached. # keep taking actions decided by the policy until it chooses to 'listen' # action loop. predicts actions until we hit action listen # this actually just calls the policy's method by the same name # circuit breaker was tripped # call a registered callback Determine whether the processor should predict another action. Args: action_name: Name of the latest executed action. Returns: `False` if `action_name` is `ACTION_LISTEN_NAME` or `ACTION_SESSION_START_NAME`, otherwise `True`. Send bot messages, schedule and cancel reminders that are logged in the events array. Send all the bot messages that are logged in the events array. Uses the scheduler to time a job to trigger the passed reminder. Reminders with the same `id` property will overwrite one another (i.e. only one of them will eventually run). Cancel reminders that match the `ReminderCancelled` event. # All Reminders specified by ReminderCancelled events will be cancelled # events and return values are used to update # the tracker state after an action has been taken # Here we set optional metadata to the ActionSessionStart, which will then # be passed to the SessionStart event. Otherwise the metadata will be lost. # Use temporary tracker as we might need to discard the policy events in # case of a rejection. # these are the events from that action we have seen during training # Ensures that the code still works even if a lazy programmer missed # to type `return []` at the end of an action or the run method # returns `None` for some other reason. # log the action and its produced events Determine whether the latest session in `tracker` has expired. Args: tracker: Tracker to inspect. Returns: `True` if the session in `tracker` has expired, `False` otherwise. 
# tracker has never expired if sessions are disabled # there is no user event so far so the session should not be considered # expired Collect predictions from ensemble and return action and predictions.
| 1.75795
| 2
|
dechat/__init__.py
|
robobrobro/dechat
| 1
|
6629692
|
"""
Dechat - Distributed, Encrypted CHAT client
"""
from . import errors, messaging, user
|
"""
Dechat - Distributed, Encrypted CHAT client
"""
from . import errors, messaging, user
|
en
| 0.692319
|
Dechat - Distributed, Encrypted CHAT client
| 0.484286
| 0
|
pypower/makeYbus.py
|
Bengt/PYPOWER
| 221
|
6629693
|
# Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Builds the bus admittance matrix and branch admittance matrices.
"""
from sys import stderr
from numpy import ones, conj, nonzero, any, exp, pi, r_
from scipy.sparse import csr_matrix
from pypower.idx_bus import BUS_I, GS, BS
from pypower.idx_brch import F_BUS, T_BUS, BR_R, BR_X, BR_B, BR_STATUS, SHIFT, TAP
def makeYbus(baseMVA, bus, branch):
    """Builds the bus admittance matrix and branch admittance matrices.

    Returns the full bus admittance matrix (i.e. for all buses) and the
    matrices C{Yf} and C{Yt} which, when multiplied by a complex voltage
    vector, yield the vector currents injected into each line from the
    "from" and "to" buses respectively of each line. Does appropriate
    conversions to p.u.

    @see: L{makeSbus}
    @author: <NAME> (PSERC Cornell)
    """
    ## sizes
    n_bus = bus.shape[0]        ## number of buses
    n_line = branch.shape[0]    ## number of lines

    ## bus numbers must equal row indices for the indexing below to hold
    if any(bus[:, BUS_I] != list(range(n_bus))):
        stderr.write('buses must appear in order by bus number\n')

    ## per-branch admittance matrix elements:
    ##   | If |   | Yff  Yft |   | Vf |
    ##   |    | = |          | * |    |
    ##   | It |   | Ytf  Ytt |   | Vt |
    status = branch[:, BR_STATUS]                            ## ones at in-service branches
    Ys = status / (branch[:, BR_R] + 1j * branch[:, BR_X])   ## series admittance
    Bc = status * branch[:, BR_B]                            ## line charging susceptance
    tap = ones(n_line)                                       ## default tap ratio = 1
    tapped = nonzero(branch[:, TAP])                         ## branches with explicit taps
    tap[tapped] = branch[tapped, TAP]                        ## assign non-zero tap ratios
    tap = tap * exp(1j * pi / 180 * branch[:, SHIFT])        ## fold in phase shifters

    Ytt = Ys + 1j * Bc / 2
    Yff = Ytt / (tap * conj(tap))
    Yft = -Ys / conj(tap)
    Ytf = -Ys / tap

    ## shunt admittance: since Psh - j Qsh = conj(Ysh) = Gs - j Bs,
    ## Ysh = Psh + j Qsh, converted to per unit on baseMVA
    Ysh = (bus[:, GS] + 1j * bus[:, BS]) / baseMVA

    ## line-by-bus connection matrices for the "from" and "to" ends
    f = branch[:, F_BUS]
    t = branch[:, T_BUS]
    Cf = csr_matrix((ones(n_line), (range(n_line), f)), (n_line, n_bus))
    Ct = csr_matrix((ones(n_line), (range(n_line), t)), (n_line, n_bus))

    ## Yf * V and Yt * V give the complex branch currents injected at each
    ## branch's "from" and "to" bus ends respectively
    row_idx = r_[range(n_line), range(n_line)]               ## double set of row indices
    Yf = csr_matrix((r_[Yff, Yft], (row_idx, r_[f, t])), (n_line, n_bus))
    Yt = csr_matrix((r_[Ytf, Ytt], (row_idx, r_[f, t])), (n_line, n_bus))

    ## assemble the full bus admittance matrix
    Ybus = Cf.T * Yf + Ct.T * Yt + \
        csr_matrix((Ysh, (range(n_bus), range(n_bus))), (n_bus, n_bus))

    return Ybus, Yf, Yt
|
# Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Builds the bus admittance matrix and branch admittance matrices.
"""
from sys import stderr
from numpy import ones, conj, nonzero, any, exp, pi, r_
from scipy.sparse import csr_matrix
from pypower.idx_bus import BUS_I, GS, BS
from pypower.idx_brch import F_BUS, T_BUS, BR_R, BR_X, BR_B, BR_STATUS, SHIFT, TAP
def makeYbus(baseMVA, bus, branch):
    """Builds the bus admittance matrix and branch admittance matrices.

    Returns the full bus admittance matrix (i.e. for all buses) and the
    matrices C{Yf} and C{Yt} which, when multiplied by a complex voltage
    vector, yield the vector currents injected into each line from the
    "from" and "to" buses respectively of each line. Does appropriate
    conversions to p.u.

    @param baseMVA: system MVA base used to convert bus shunts to per unit
    @param bus: bus data matrix (columns addressed via BUS_I, GS, BS)
    @param branch: branch data matrix (columns addressed via F_BUS, T_BUS,
        BR_R, BR_X, BR_B, BR_STATUS, SHIFT, TAP)
    @return: tuple (Ybus, Yf, Yt) of sparse CSR matrices

    @see: L{makeSbus}
    @author: <NAME> (PSERC Cornell)
    """
    ## constants
    nb = bus.shape[0]          ## number of buses
    nl = branch.shape[0]          ## number of lines
    ## check that bus numbers are equal to indices to bus (one set of bus nums)
    if any(bus[:, BUS_I] != list(range(nb))):
        stderr.write('buses must appear in order by bus number\n')
    ## for each branch, compute the elements of the branch admittance matrix where
    ##
    ##      | If |   | Yff  Yft |   | Vf |
    ##      |    | = |          | * |    |
    ##      | It |   | Ytf  Ytt |   | Vt |
    ##
    stat = branch[:, BR_STATUS]              ## ones at in-service branches
    Ys = stat / (branch[:, BR_R] + 1j * branch[:, BR_X])  ## series admittance
    Bc = stat * branch[:, BR_B]              ## line charging susceptance
    tap = ones(nl)                           ## default tap ratio = 1
    i = nonzero(branch[:, TAP])              ## indices of non-zero tap ratios
    tap[i] = branch[i, TAP]                  ## assign non-zero tap ratios
    tap = tap * exp(1j * pi / 180 * branch[:, SHIFT])  ## add phase shifters
    Ytt = Ys + 1j * Bc / 2
    Yff = Ytt / (tap * conj(tap))
    Yft = - Ys / conj(tap)
    Ytf = - Ys / tap
    ## compute shunt admittance
    ## if Psh is the real power consumed by the shunt at V = 1.0 p.u.
    ## and Qsh is the reactive power injected by the shunt at V = 1.0 p.u.
    ## then Psh - j Qsh = V * conj(Ysh * V) = conj(Ysh) = Gs - j Bs,
    ## i.e. Ysh = Psh + j Qsh, so ...
    ## vector of shunt admittances
    Ysh = (bus[:, GS] + 1j * bus[:, BS]) / baseMVA
    ## build connection matrices
    f = branch[:, F_BUS]                           ## list of "from" buses
    t = branch[:, T_BUS]                           ## list of "to" buses
    ## connection matrix for line & from buses
    Cf = csr_matrix((ones(nl), (range(nl), f)), (nl, nb))
    ## connection matrix for line & to buses
    Ct = csr_matrix((ones(nl), (range(nl), t)), (nl, nb))
    ## build Yf and Yt such that Yf * V is the vector of complex branch currents injected
    ## at each branch's "from" bus, and Yt is the same for the "to" bus end
    i = r_[range(nl), range(nl)]                   ## double set of row indices
    Yf = csr_matrix((r_[Yff, Yft], (i, r_[f, t])), (nl, nb))
    Yt = csr_matrix((r_[Ytf, Ytt], (i, r_[f, t])), (nl, nb))
    # Yf = spdiags(Yff, 0, nl, nl) * Cf + spdiags(Yft, 0, nl, nl) * Ct
    # Yt = spdiags(Ytf, 0, nl, nl) * Cf + spdiags(Ytt, 0, nl, nl) * Ct
    ## build Ybus
    Ybus = Cf.T * Yf + Ct.T * Yt + \
        csr_matrix((Ysh, (range(nb), range(nb))), (nb, nb))
    return Ybus, Yf, Yt
|
en
| 0.831479
|
# Copyright (c) 1996-2015 PSERC. All rights reserved. # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. Builds the bus admittance matrix and branch admittance matrices. Builds the bus admittance matrix and branch admittance matrices. Returns the full bus admittance matrix (i.e. for all buses) and the matrices C{Yf} and C{Yt} which, when multiplied by a complex voltage vector, yield the vector currents injected into each line from the "from" and "to" buses respectively of each line. Does appropriate conversions to p.u. @see: L{makeSbus} @author: <NAME> (PSERC Cornell) ## constants ## number of buses ## number of lines ## check that bus numbers are equal to indices to bus (one set of bus nums) ## for each branch, compute the elements of the branch admittance matrix where ## ## | If | | Yff Yft | | Vf | ## | | = | | * | | ## | It | | Ytf Ytt | | Vt | ## ## ones at in-service branches ## series admittance ## line charging susceptance ## default tap ratio = 1 ## indices of non-zero tap ratios ## assign non-zero tap ratios ## add phase shifters ## compute shunt admittance ## if Psh is the real power consumed by the shunt at V = 1.0 p.u. ## and Qsh is the reactive power injected by the shunt at V = 1.0 p.u. ## then Psh - j Qsh = V * conj(Ysh * V) = conj(Ysh) = Gs - j Bs, ## i.e. Ysh = Psh + j Qsh, so ... ## vector of shunt admittances ## build connection matrices ## list of "from" buses ## list of "to" buses ## connection matrix for line & from buses ## connection matrix for line & to buses ## build Yf and Yt such that Yf * V is the vector of complex branch currents injected ## at each branch's "from" bus, and Yt is the same for the "to" bus end ## double set of row indices # Yf = spdiags(Yff, 0, nl, nl) * Cf + spdiags(Yft, 0, nl, nl) * Ct # Yt = spdiags(Ytf, 0, nl, nl) * Cf + spdiags(Ytt, 0, nl, nl) * Ct ## build Ybus
| 2.625239
| 3
|
synapse/util/distributor.py
|
khanof/jsynapse
| 1
|
6629694
|
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
from synapse.util.logcontext import (
PreserveLoggingContext, preserve_context_over_fn
)
from synapse.util import unwrapFirstError
import logging
logger = logging.getLogger(__name__)
def user_left_room(distributor, user, room_id):
    """Fire the "user_left_room" signal on `distributor`, preserving the
    current logging context across the dispatch."""
    signal_kwargs = {"user": user, "room_id": room_id}
    return preserve_context_over_fn(
        distributor.fire, "user_left_room", **signal_kwargs
    )
def user_joined_room(distributor, user, room_id):
    """Fire the "user_joined_room" signal on `distributor`, preserving the
    current logging context across the dispatch."""
    signal_kwargs = {"user": user, "room_id": room_id}
    return preserve_context_over_fn(
        distributor.fire, "user_joined_room", **signal_kwargs
    )
class Distributor(object):
    """A central dispatch point for loosely-connected pieces of code to
    register, observe, and fire signals.

    Signals are named simply by strings.

    TODO(paul): It would be nice to give signals stronger object identities,
      so we can attach metadata, docstrings, detect typoes, etc... But this
      model will do for today.
    """

    def __init__(self, suppress_failures=True):
        # Whether observer failures on our signals are swallowed or raised.
        self.suppress_failures = suppress_failures
        # name -> Signal for every declared signal.
        self.signals = {}
        # name -> [observer] for observations made before declaration.
        self.pre_registration = {}

    def declare(self, name):
        """Create the named signal, attaching any early-registered observers."""
        if name in self.signals:
            raise KeyError("%r already has a signal named %s" % (self, name))

        new_signal = Signal(
            name,
            suppress_failures=self.suppress_failures,
        )
        self.signals[name] = new_signal

        for early_observer in self.pre_registration.get(name, []):
            new_signal.observe(early_observer)

    def observe(self, name, observer):
        """Attach `observer` to the named signal, pre-registering if needed."""
        existing = self.signals.get(name)
        if existing is not None:
            existing.observe(observer)
            return
        # TODO: Avoid strong ordering dependency by allowing people to
        # pre-register observations on signals that don't exist yet.
        self.pre_registration.setdefault(name, []).append(observer)

    def fire(self, name, *args, **kwargs):
        """Fire the named signal, forwarding all arguments to its observers."""
        if name not in self.signals:
            raise KeyError("%r does not have a signal named %s" % (self, name))
        return self.signals[name].fire(*args, **kwargs)
class Signal(object):
    """A Signal is a dispatch point that stores a list of callables as
    observers of it.

    Signals can be "fired", meaning that every callable observing it is
    invoked. Firing a signal does not change its state; it can be fired again
    at any later point. Firing a signal passes any arguments from the fire
    method into all of the observers.
    """

    def __init__(self, name, suppress_failures):
        # Human-readable signal name, used in logs and repr.
        self.name = name
        # If True, observer failures are logged and swallowed; otherwise
        # they propagate to the caller of `fire`.
        self.suppress_failures = suppress_failures
        # Callables invoked (with the fire arguments) on every fire.
        self.observers = []

    def observe(self, observer):
        """Adds a new callable to the observer list which will be invoked by
        the 'fire' method.

        Each observer callable may return a Deferred."""
        self.observers.append(observer)

    @defer.inlineCallbacks
    def fire(self, *args, **kwargs):
        """Invokes every callable in the observer list, passing in the args and
        kwargs. Exceptions thrown by observers are logged but ignored. It is
        not an error to fire a signal with no observers.

        Returns a Deferred that will complete when all the observers have
        completed."""
        def do(observer):
            # Wrap one observer call: failures are logged, and only returned
            # (i.e. re-raised through the Deferred) when failures are not
            # suppressed.
            def eb(failure):
                logger.warning(
                    "%s signal observer %s failed: %r",
                    self.name, observer, failure,
                    exc_info=(
                        failure.type,
                        failure.value,
                        failure.getTracebackObject()))
                if not self.suppress_failures:
                    return failure
            return defer.maybeDeferred(observer, *args, **kwargs).addErrback(eb)

        # All observers are started within a preserved logging context so
        # their callbacks do not leak the caller's context.
        with PreserveLoggingContext():
            deferreds = [
                do(observer)
                for observer in self.observers
            ]
            res = yield defer.gatherResults(
                deferreds, consumeErrors=True
            ).addErrback(unwrapFirstError)
        defer.returnValue(res)

    def __repr__(self):
        return "<Signal name=%r>" % (self.name,)
|
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
from synapse.util.logcontext import (
PreserveLoggingContext, preserve_context_over_fn
)
from synapse.util import unwrapFirstError
import logging
logger = logging.getLogger(__name__)
def user_left_room(distributor, user, room_id):
    """Fire the "user_left_room" signal on *distributor*, preserving the
    caller's logging context across the deferred fire."""
    fire_kwargs = {"user": user, "room_id": room_id}
    return preserve_context_over_fn(
        distributor.fire, "user_left_room", **fire_kwargs
    )
def user_joined_room(distributor, user, room_id):
    """Fire the "user_joined_room" signal on *distributor*, preserving the
    caller's logging context across the deferred fire."""
    fire_kwargs = {"user": user, "room_id": room_id}
    return preserve_context_over_fn(
        distributor.fire, "user_joined_room", **fire_kwargs
    )
class Distributor(object):
    """A central dispatch point for loosely-connected pieces of code to
    register, observe, and fire signals.

    Signals are named simply by strings.

    TODO(paul): It would be nice to give signals stronger object identities,
    so we can attach metadata, docstrings, detect typoes, etc... But this
    model will do for today.
    """
    def __init__(self, suppress_failures=True):
        # forwarded to every Signal created by declare()
        self.suppress_failures = suppress_failures
        # name -> declared Signal
        self.signals = {}
        # name -> observers registered before the signal was declared
        self.pre_registration = {}

    def declare(self, name):
        """Create the signal *name*; raise KeyError if it already exists."""
        if name in self.signals:
            raise KeyError("%r already has a signal named %s" % (self, name))
        new_signal = Signal(name, suppress_failures=self.suppress_failures)
        self.signals[name] = new_signal
        # Wire up any observers that registered before the declaration.
        for observer in self.pre_registration.get(name, []):
            new_signal.observe(observer)

    def observe(self, name, observer):
        """Attach *observer* to *name*, parking it if not yet declared."""
        signal = self.signals.get(name)
        if signal is not None:
            signal.observe(observer)
        else:
            # TODO: Avoid strong ordering dependency by allowing people to
            # pre-register observations on signals that don't exist yet.
            self.pre_registration.setdefault(name, []).append(observer)

    def fire(self, name, *args, **kwargs):
        """Fire *name*, forwarding args; KeyError if never declared."""
        if name not in self.signals:
            raise KeyError("%r does not have a signal named %s" % (self, name))
        return self.signals[name].fire(*args, **kwargs)
class Signal(object):
    """A Signal is a dispatch point that stores a list of callables as
    observers of it.
    Signals can be "fired", meaning that every callable observing it is
    invoked. Firing a signal does not change its state; it can be fired again
    at any later point. Firing a signal passes any arguments from the fire
    method into all of the observers.
    """
    def __init__(self, name, suppress_failures):
        # name is used only in log messages and __repr__
        self.name = name
        # when True an observer failure is logged and dropped;
        # when False it propagates out of fire()
        self.suppress_failures = suppress_failures
        self.observers = []
    def observe(self, observer):
        """Adds a new callable to the observer list which will be invoked by
        the 'fire' method.
        Each observer callable may return a Deferred."""
        self.observers.append(observer)
    @defer.inlineCallbacks
    def fire(self, *args, **kwargs):
        """Invokes every callable in the observer list, passing in the args and
        kwargs. Exceptions thrown by observers are logged but ignored. It is
        not an error to fire a signal with no observers.
        Returns a Deferred that will complete when all the observers have
        completed."""
        def do(observer):
            # errback for each observer's Deferred: log the failure and,
            # unless failures are suppressed, re-raise it
            def eb(failure):
                logger.warning(
                    "%s signal observer %s failed: %r",
                    self.name, observer, failure,
                    exc_info=(
                        failure.type,
                        failure.value,
                        failure.getTracebackObject()))
                if not self.suppress_failures:
                    return failure
            # maybeDeferred also converts synchronous results/exceptions
            # from plain-function observers into a Deferred
            return defer.maybeDeferred(observer, *args, **kwargs).addErrback(eb)
        # run observers outside the caller's logging context so their
        # callbacks do not inherit/leak it
        with PreserveLoggingContext():
            deferreds = [
                do(observer)
                for observer in self.observers
            ]
            # unwrapFirstError converts gatherResults' FirstError wrapper
            # back into the underlying failure
            res = yield defer.gatherResults(
                deferreds, consumeErrors=True
            ).addErrback(unwrapFirstError)
        defer.returnValue(res)
    def __repr__(self):
        return "<Signal name=%r>" % (self.name,)
|
en
| 0.905815
|
# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. A central dispatch point for loosely-connected pieces of code to register, observe, and fire signals. Signals are named simply by strings. TODO(paul): It would be nice to give signals stronger object identities, so we can attach metadata, docstrings, detect typoes, etc... But this model will do for today. # TODO: Avoid strong ordering dependency by allowing people to # pre-register observations on signals that don't exist yet. A Signal is a dispatch point that stores a list of callables as observers of it. Signals can be "fired", meaning that every callable observing it is invoked. Firing a signal does not change its state; it can be fired again at any later point. Firing a signal passes any arguments from the fire method into all of the observers. Adds a new callable to the observer list which will be invoked by the 'fire' method. Each observer callable may return a Deferred. Invokes every callable in the observer list, passing in the args and kwargs. Exceptions thrown by observers are logged but ignored. It is not an error to fire a signal with no observers. Returns a Deferred that will complete when all the observers have completed.
| 1.882518
| 2
|
generate.py
|
netotz/p-dispersion-problem
| 1
|
6629695
|
<filename>generate.py
'''
Generate an instance from the command line and save it to a file.
'''
from generator import parse_arguments, generate_instance

# Parse the CLI arguments and hand them straight to the generator.
generate_instance(*parse_arguments())
|
<filename>generate.py
'''
Generate an instance from the command line and save it to a file.
'''
from generator import generate_instance, parse_arguments
# parse_arguments reads the instance parameters from the command line;
# generate_instance builds the instance and saves it to a file.
args = parse_arguments()
generate_instance(*args)
|
en
| 0.870743
|
Generate an instance from the command line and save it to a file.
| 2.785068
| 3
|
setup.py
|
Clinical-Genomics/chanjo-report
| 0
|
6629696
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Based on https://github.com/pypa/sampleproject/blob/master/setup.py."""
import codecs
import os
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys
# shortcut for building/publishing to Pypi
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist bdist_wheel upload')
sys.exit()
# this is a plug-in for setuptools that will invoke py.test
class PyTest(TestCommand):
    """Set up the py.test test runner."""
    user_options = [('pytest-args=', 'a', 'Arguments to pass to py.test')]
    def initialize_options(self):
        """Start with an empty py.test argument list."""
        TestCommand.initialize_options(self)
        self.pytest_args = []
    def finalize_options(self):
        """Set options for the command line."""
        TestCommand.finalize_options(self)
        self.test_args = []
        # tell setuptools we handle test discovery ourselves
        self.test_suite = True
    def run_tests(self):
        """Execute the test runner command."""
        # import here, because outside the required eggs aren't loaded yet
        import pytest
        # exit with py.test's status code so CI sees failures
        sys.exit(pytest.main(self.test_args))
# get the long description from the relevant file
HERE = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(HERE, 'README.md'), encoding='utf-8') as f:
    LONG_DESCRIPTION = f.read()
setup(name='chanjo-report',
      # versions should comply with PEP440
      version='4.7.0',
      # fixed typo: "ouput" -> "output"
      description='Automatically render coverage reports from Chanjo output',
      long_description=LONG_DESCRIPTION,
      # README.md is Markdown; tell PyPI how to render it
      long_description_content_type='text/markdown',
      # what does your project relate to?
      keywords='chanjo-report development',
      author='<NAME>',
      author_email='<EMAIL>',
      license='MIT',
      # the project's main homepage
      url='https://github.com/robinandeer/chanjo-report',
      packages=find_packages(exclude=('tests*', 'docs', 'examples')),
      # if there are data files included in your packages
      include_package_data=True,
      package_data={
          'chanjo_report': [
              'server/blueprints/report/static/*.css',
              'server/blueprints/report/static/vendor/*.css',
              'server/blueprints/report/templates/report/*.html',
              'server/blueprints/report/templates/report/layouts/*.html',
              'server/blueprints/report/templates/report/components/*.html',
              'server/translations/sv/LC_MESSAGES/*',
          ]
      },
      zip_safe=False,
      install_requires=[
          'setuptools',
          'chanjo>=4.1.0',
          'Flask-WeasyPrint',
          'cairocffi',
          'lxml>=3.0',
          'cffi',
          'Flask',
          'SQLAlchemy',
          'Flask-Babel',
          'tabulate',
          'Flask-Alchy',
          'Flask-SQLAlchemy==2.1',
          'pymysql',
      ],
      tests_require=['pytest'],
      cmdclass={'test': PyTest},
      # to provide executable scripts, use entry points
      entry_points={
          'chanjo.subcommands.4': ['report = chanjo_report.cli:report'],
      },
      # see: http://pypi.python.org/pypi?%3Aaction=list_classifiers
      classifiers=[
          'Development Status :: 3 - Alpha',
          'Intended Audience :: Developers',
          'Topic :: Software Development',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python :: 3.6',
          'Environment :: Console'
      ])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Based on https://github.com/pypa/sampleproject/blob/master/setup.py."""
import codecs
import os
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys
# shortcut for building/publishing to Pypi
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist bdist_wheel upload')
sys.exit()
# this is a plug-in for setuptools that will invoke py.test
class PyTest(TestCommand):
    """Set up the py.test test runner."""
    user_options = [('pytest-args=', 'a', 'Arguments to pass to py.test')]
    def initialize_options(self):
        """Start with an empty py.test argument list."""
        TestCommand.initialize_options(self)
        self.pytest_args = []
    def finalize_options(self):
        """Set options for the command line."""
        TestCommand.finalize_options(self)
        self.test_args = []
        # tell setuptools we handle test discovery ourselves
        self.test_suite = True
    def run_tests(self):
        """Execute the test runner command."""
        # import here, because outside the required eggs aren't loaded yet
        import pytest
        # exit with py.test's status code so CI sees failures
        sys.exit(pytest.main(self.test_args))
# get the long description from the relevant file
HERE = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(HERE, 'README.md'), encoding='utf-8') as f:
    LONG_DESCRIPTION = f.read()
setup(name='chanjo-report',
      # versions should comply with PEP440
      version='4.7.0',
      # fixed typo: "ouput" -> "output"
      description='Automatically render coverage reports from Chanjo output',
      long_description=LONG_DESCRIPTION,
      # README.md is Markdown; tell PyPI how to render it
      long_description_content_type='text/markdown',
      # what does your project relate to?
      keywords='chanjo-report development',
      author='<NAME>',
      author_email='<EMAIL>',
      license='MIT',
      # the project's main homepage
      url='https://github.com/robinandeer/chanjo-report',
      packages=find_packages(exclude=('tests*', 'docs', 'examples')),
      # if there are data files included in your packages
      include_package_data=True,
      package_data={
          'chanjo_report': [
              'server/blueprints/report/static/*.css',
              'server/blueprints/report/static/vendor/*.css',
              'server/blueprints/report/templates/report/*.html',
              'server/blueprints/report/templates/report/layouts/*.html',
              'server/blueprints/report/templates/report/components/*.html',
              'server/translations/sv/LC_MESSAGES/*',
          ]
      },
      zip_safe=False,
      install_requires=[
          'setuptools',
          'chanjo>=4.1.0',
          'Flask-WeasyPrint',
          'cairocffi',
          'lxml>=3.0',
          'cffi',
          'Flask',
          'SQLAlchemy',
          'Flask-Babel',
          'tabulate',
          'Flask-Alchy',
          'Flask-SQLAlchemy==2.1',
          'pymysql',
      ],
      tests_require=['pytest'],
      cmdclass={'test': PyTest},
      # to provide executable scripts, use entry points
      entry_points={
          'chanjo.subcommands.4': ['report = chanjo_report.cli:report'],
      },
      # see: http://pypi.python.org/pypi?%3Aaction=list_classifiers
      classifiers=[
          'Development Status :: 3 - Alpha',
          'Intended Audience :: Developers',
          'Topic :: Software Development',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python :: 3.6',
          'Environment :: Console'
      ])
|
en
| 0.846172
|
#!/usr/bin/env python # -*- coding: utf-8 -*- Based on https://github.com/pypa/sampleproject/blob/master/setup.py. # shortcut for building/publishing to Pypi # this is a plug-in for setuptools that will invoke py.test Set up the py.test test runner. Set options for the command line. Execute the test runner command. # import here, because outside the required eggs aren't loaded yet # get the long description from the relevant file # versions should comply with PEP440 # what does your project relate to? # the project's main homepage # if there are data files included in your packages # to provide executable scripts, use entry points # see: http://pypi.python.org/pypi?%3Aaction=list_classifiers
| 2.011851
| 2
|
tests/functional/test_deployments.py
|
Paperspace/paperspace-python
| 47
|
6629697
|
import mock
from click.testing import CliRunner
import paperspace.client
from paperspace.cli import cli
from paperspace.commands import deployments as deployments_commands
from tests import example_responses, MockResponse
EXPECTED_HEADERS = deployments_commands.default_headers
class TestDeploymentsCreate(object):
    """Tests for the ``deployments create`` CLI command.

    ``requests.post`` is patched in every test; assertions cover the URL,
    headers and JSON body sent to the API as well as the CLI output.
    """
    URL = "https://api.paperspace.io/deployments/createDeployment/"
    EXPECTED_HEADERS_WITH_CHANGED_API_KEY = paperspace.client.default_headers.copy()
    EXPECTED_HEADERS_WITH_CHANGED_API_KEY["X-API-Key"] = "some_key"
    BASIC_OPTIONS_COMMAND = [
        "deployments", "create",
        "--deploymentType", "tfserving",
        "--modelId", "some_model_id",
        "--name", "some_name",
        "--machineType", "G1",
        "--imageUrl", "https://www.latlmes.com/breaking/paperspace-now-has-a-100-bilion-valuation",
        "--instanceCount", "666",
    ]
    BASIC_OPTIONS_COMMAND_WITH_API_KEY = [
        "deployments", "create",
        "--deploymentType", "tfserving",
        "--modelId", "some_model_id",
        "--name", "some_name",
        "--machineType", "G1",
        "--imageUrl", "https://www.latlmes.com/breaking/paperspace-now-has-a-100-bilion-valuation",
        "--instanceCount", "666",
        "--apiKey", "some_key",
    ]
    BASIC_OPTIONS_REQUEST = {
        "machineType": u"G1",
        "name": u"some_name",
        "imageUrl": u"https://www.latlmes.com/breaking/paperspace-now-has-a-100-bilion-valuation",
        "deploymentType": "Tensorflow Serving on K8s",
        "instanceCount": 666,
        "modelId": u"some_model_id",
    }
    RESPONSE_JSON_200 = example_responses.CREATE_DEPLOYMENT_WITH_BASIC_OPTIONS_RESPONSE
    EXPECTED_STDOUT = "New deployment created with id: sadkfhlskdjh\n"
    RESPONSE_JSON_404_MODEL_NOT_FOUND = {"error": {"name": "Error", "status": 404, "message": "Unable to find model"}}
    RESPONSE_CONTENT_404_MODEL_NOT_FOUND = b'{"error":{"name":"Error","status":404,"message":"Unable to find model"}}\n'
    EXPECTED_STDOUT_MODEL_NOT_FOUND = "Unable to find model\n"
    @mock.patch("paperspace.cli.deployments.deployments_commands.client.requests.post")
    def test_should_send_proper_data_and_print_message_when_create_deployment_with_basic_options(self, post_patched):
        post_patched.return_value = MockResponse(self.RESPONSE_JSON_200, 200, "fake content")
        runner = CliRunner()
        result = runner.invoke(cli.cli, self.BASIC_OPTIONS_COMMAND)
        post_patched.assert_called_once_with(self.URL,
                                             headers=EXPECTED_HEADERS,
                                             json=self.BASIC_OPTIONS_REQUEST,
                                             params=None,
                                             files=None,
                                             data=None)
        assert result.output == self.EXPECTED_STDOUT
        assert result.exit_code == 0
    @mock.patch("paperspace.cli.deployments.deployments_commands.client.requests.post")
    def test_should_send_different_api_key_when_api_key_parameter_was_used(self, post_patched):
        post_patched.return_value = MockResponse(self.RESPONSE_JSON_200, 200, "fake content")
        runner = CliRunner()
        result = runner.invoke(cli.cli, self.BASIC_OPTIONS_COMMAND_WITH_API_KEY)
        post_patched.assert_called_once_with(self.URL,
                                             headers=self.EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
                                             json=self.BASIC_OPTIONS_REQUEST,
                                             params=None,
                                             files=None,
                                             data=None)
        assert result.output == self.EXPECTED_STDOUT
        assert result.exit_code == 0
    @mock.patch("paperspace.cli.deployments.deployments_commands.client.requests.post")
    def test_should_send_proper_data_and_print_message_when_create_wrong_model_id_was_given(self, post_patched):
        post_patched.return_value = MockResponse(self.RESPONSE_JSON_404_MODEL_NOT_FOUND, 404,
                                                 self.RESPONSE_CONTENT_404_MODEL_NOT_FOUND)
        runner = CliRunner()
        result = runner.invoke(cli.cli, self.BASIC_OPTIONS_COMMAND)
        post_patched.assert_called_once_with(self.URL,
                                             headers=EXPECTED_HEADERS,
                                             json=self.BASIC_OPTIONS_REQUEST,
                                             params=None,
                                             files=None,
                                             data=None)
        assert result.output == self.EXPECTED_STDOUT_MODEL_NOT_FOUND
        assert result.exit_code == 0
class TestDeploymentsList(object):
    """Tests for the ``deployments list`` CLI command: plain listing,
    API-key override, paging, ``--state`` filtering and error handling."""
    URL = "https://api.paperspace.io/deployments/getDeploymentList/"
    COMMAND = ["deployments", "list"]
    LIST_JSON = example_responses.LIST_DEPLOYMENTS
    COMMAND_WITH_API_KEY = ["deployments", "list", "--apiKey", "some_key"]
    EXPECTED_HEADERS_WITH_CHANGED_API_KEY = paperspace.client.default_headers.copy()
    EXPECTED_HEADERS_WITH_CHANGED_API_KEY["X-API-Key"] = "some_key"
    COMMAND_WITH_FILTER_WITH_STATE = ["deployments", "list", "--state", "Stopped"]
    LIST_WITH_FILTER_REQUEST_JSON = {"filter": {"where": {"and": [{"state": "Stopped"}]}}}
    LIST_WITH_FILTER_RESPONSE_JSON_WHEN_NO_DEPLOYMENTS_FOUND = {"deploymentList": [], "total": 17, "displayTotal": 0,
                                                                "runningTotal": 0}
    DETAILS_STDOUT = """+-----------+-----------------+----------------------------------------------------------------------------------+---------------+---------------------------+
| Name      | ID              | Endpoint                                                                         | Api Type      | Deployment Type           |
+-----------+-----------------+----------------------------------------------------------------------------------+---------------+---------------------------+
| some_name | dev61ity7lx232  | https://development-services.paperspace.io/model-serving/dev61ity7lx232:predict  | some_api_type | Tensorflow Serving on K8s |
| some_name | desanw1jptk7woh | https://development-services.paperspace.io/model-serving/desanw1jptk7woh:predict | REST          | Tensorflow Serving on K8s |
| some_name | desfnnrqt1v633v | https://development-services.paperspace.io/model-serving/desfnnrqt1v633v:predict | REST          | Tensorflow Serving on K8s |
| some_name | desdyn55d2e02su | https://development-services.paperspace.io/model-serving/desdyn55d2e02su:predict | REST          | Tensorflow Serving on K8s |
| some_name | des3tmqa3s627o9 | https://development-services.paperspace.io/model-serving/des3tmqa3s627o9:predict | REST          | Tensorflow Serving on K8s |
+-----------+-----------------+----------------------------------------------------------------------------------+---------------+---------------------------+
"""
    @mock.patch("paperspace.cli.deployments.deployments_commands.client.requests.get")
    def test_should_send_get_request_and_print_list_of_deployments(self, get_patched):
        get_patched.return_value = MockResponse(self.LIST_JSON, 200, "fake content")
        runner = CliRunner()
        result = runner.invoke(cli.cli, self.COMMAND)
        get_patched.assert_called_once_with(self.URL,
                                            headers=EXPECTED_HEADERS,
                                            json=None,
                                            params=None)
        assert result.output == self.DETAILS_STDOUT
    @mock.patch("paperspace.cli.deployments.deployments_commands.client.requests.get")
    def test_should_send_get_request_with_custom_api_key_when_api_key_parameter_was_provided(self, get_patched):
        get_patched.return_value = MockResponse(self.LIST_JSON, 200, "fake content")
        runner = CliRunner()
        result = runner.invoke(cli.cli, self.COMMAND_WITH_API_KEY)
        get_patched.assert_called_once_with(self.URL,
                                            headers=self.EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
                                            json=None,
                                            params=None)
        assert result.output == self.DETAILS_STDOUT
    @mock.patch("paperspace.cli.deployments.deployments_commands.common.pydoc")
    @mock.patch("paperspace.cli.deployments.deployments_commands.client.requests.get")
    def test_should_send_get_request_and_paginate_list_when_output_table_len_is_gt_lines_in_terminal(self, get_patched,
                                                                                                     pydoc_patched):
        # 200 rows guarantees the table exceeds the terminal height
        list_json = {"deploymentList": self.LIST_JSON["deploymentList"] * 40}
        get_patched.return_value = MockResponse(list_json, 200, "fake content")
        runner = CliRunner()
        result = runner.invoke(cli.cli, self.COMMAND)
        get_patched.assert_called_once_with(self.URL,
                                            headers=EXPECTED_HEADERS,
                                            json=None,
                                            params=None)
        pydoc_patched.pager.assert_called_once()
        assert result.exit_code == 0
    @mock.patch("paperspace.cli.deployments.deployments_commands.client.requests.get")
    def test_should_send_get_request_and_print_list_of_deployments_filtered_by_state(self, get_patched):
        get_patched.return_value = MockResponse(self.LIST_JSON, 200, "fake content")
        runner = CliRunner()
        result = runner.invoke(cli.cli, self.COMMAND_WITH_FILTER_WITH_STATE)
        get_patched.assert_called_with("https://api.paperspace.io/deployments/getDeploymentList/",
                                       headers=EXPECTED_HEADERS,
                                       json=self.LIST_WITH_FILTER_REQUEST_JSON,
                                       params=None)
        assert result.output == self.DETAILS_STDOUT
    @mock.patch("paperspace.cli.deployments.deployments_commands.client.requests.get")
    def test_should_send_get_request_and_print_list_of_deployments_filtered_with_state_but_none_found(
            self, get_patched):
        get_patched.return_value = MockResponse(self.LIST_WITH_FILTER_RESPONSE_JSON_WHEN_NO_DEPLOYMENTS_FOUND, 200,
                                                "fake content")
        runner = CliRunner()
        result = runner.invoke(cli.cli, self.COMMAND_WITH_FILTER_WITH_STATE)
        get_patched.assert_called_once_with(self.URL,
                                            headers=EXPECTED_HEADERS,
                                            json=self.LIST_WITH_FILTER_REQUEST_JSON,
                                            params=None)
        assert result.output == "No data found\n"
    @mock.patch("paperspace.cli.deployments.deployments_commands.client.requests.get")
    def test_should_print_proper_message_when_wrong_api_key_was_used(self, get_patched):
        get_patched.return_value = MockResponse({"status": 400, "message": "Invalid API token"},
                                                400)
        runner = CliRunner()
        result = runner.invoke(cli.cli, self.COMMAND)
        get_patched.assert_called_once_with(self.URL,
                                            headers=EXPECTED_HEADERS,
                                            json=None,
                                            params=None)
        assert result.output == "Invalid API token\n"
class TestStartDeployment(object):
    """Tests for ``deployments start`` — verifies the update request
    payload (``isRunning: True``) and the success message."""
    URL = "https://api.paperspace.io/deployments/updateDeployment/"
    COMMAND = ["deployments", "start",
               "--id", "some_id"]
    REQUEST_JSON = {"isRunning": True, "id": u"some_id"}
    EXPECTED_STDOUT = "Deployment started\n"
    @mock.patch("paperspace.cli.deployments.deployments_commands.client.requests.post")
    def test_should_send_proper_data_and_print_message_when_deployments_start_was_used(self, post_patched):
        post_patched.return_value = MockResponse(status_code=204)
        runner = CliRunner()
        result = runner.invoke(cli.cli, self.COMMAND)
        post_patched.assert_called_once_with(self.URL,
                                             headers=EXPECTED_HEADERS,
                                             json=self.REQUEST_JSON,
                                             params=None,
                                             files=None,
                                             data=None)
        assert result.output == self.EXPECTED_STDOUT
        assert result.exit_code == 0
class TestStopDeployment(object):
    """Tests for ``deployments stop`` — verifies the update request
    payload (``isRunning: False``), API-key override and error path."""
    URL = "https://api.paperspace.io/deployments/updateDeployment/"
    COMMAND = ["deployments", "stop",
               "--id", "some_id"]
    REQUEST_JSON = {"isRunning": False, "id": u"some_id"}
    EXPECTED_STDOUT = "Deployment stopped\n"
    COMMAND_WITH_API_KEY = [
        "deployments", "stop",
        "--id", "some_id",
        "--apiKey", "some_key",
    ]
    EXPECTED_HEADERS_WITH_CHANGED_API_KEY = paperspace.client.default_headers.copy()
    EXPECTED_HEADERS_WITH_CHANGED_API_KEY["X-API-Key"] = "some_key"
    RESPONSE_JSON_400 = {"error": {"name": "Error", "status": 400, "message": "Unable to access deployment"}}
    EXPECTED_STDOUT_WITH_WRONG_ID = "Unable to access deployment\n"
    @mock.patch("paperspace.cli.deployments.deployments_commands.client.requests.post")
    def test_should_send_proper_data_and_print_message_when_deployments_stop_was_used(self, post_patched):
        post_patched.return_value = MockResponse(status_code=204)
        runner = CliRunner()
        result = runner.invoke(cli.cli, self.COMMAND)
        post_patched.assert_called_once_with(self.URL,
                                             headers=EXPECTED_HEADERS,
                                             json=self.REQUEST_JSON,
                                             params=None,
                                             files=None,
                                             data=None)
        assert result.output == self.EXPECTED_STDOUT
        assert result.exit_code == 0
    @mock.patch("paperspace.cli.deployments.deployments_commands.client.requests.post")
    def test_should_send_proper_data_with_custom_api_key_when_api_key_parameter_was_provided(self, post_patched):
        post_patched.return_value = MockResponse(status_code=204)
        runner = CliRunner()
        result = runner.invoke(cli.cli, self.COMMAND_WITH_API_KEY)
        post_patched.assert_called_once_with(self.URL,
                                             headers=self.EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
                                             json=self.REQUEST_JSON,
                                             params=None,
                                             files=None,
                                             data=None)
        assert result.output == self.EXPECTED_STDOUT
        assert result.exit_code == 0
    @mock.patch("paperspace.cli.deployments.deployments_commands.client.requests.post")
    def test_should_send_proper_data_and_print_message_when_deployments_stop_used_with_wrong_id(self, post_patched):
        post_patched.return_value = MockResponse(self.RESPONSE_JSON_400, 400, "fake content")
        runner = CliRunner()
        result = runner.invoke(cli.cli, self.COMMAND)
        post_patched.assert_called_once_with(self.URL,
                                             headers=EXPECTED_HEADERS,
                                             json=self.REQUEST_JSON,
                                             params=None,
                                             files=None,
                                             data=None)
        assert result.output == self.EXPECTED_STDOUT_WITH_WRONG_ID
        assert result.exit_code == 0
|
import mock
from click.testing import CliRunner
import paperspace.client
from paperspace.cli import cli
from paperspace.commands import deployments as deployments_commands
from tests import example_responses, MockResponse
EXPECTED_HEADERS = deployments_commands.default_headers
class TestDeploymentsCreate(object):
    """Tests for the ``deployments create`` CLI command.

    ``requests.post`` is patched in every test; assertions cover the URL,
    headers and JSON body sent to the API as well as the CLI output.
    """
    URL = "https://api.paperspace.io/deployments/createDeployment/"
    EXPECTED_HEADERS_WITH_CHANGED_API_KEY = paperspace.client.default_headers.copy()
    EXPECTED_HEADERS_WITH_CHANGED_API_KEY["X-API-Key"] = "some_key"
    BASIC_OPTIONS_COMMAND = [
        "deployments", "create",
        "--deploymentType", "tfserving",
        "--modelId", "some_model_id",
        "--name", "some_name",
        "--machineType", "G1",
        "--imageUrl", "https://www.latlmes.com/breaking/paperspace-now-has-a-100-bilion-valuation",
        "--instanceCount", "666",
    ]
    BASIC_OPTIONS_COMMAND_WITH_API_KEY = [
        "deployments", "create",
        "--deploymentType", "tfserving",
        "--modelId", "some_model_id",
        "--name", "some_name",
        "--machineType", "G1",
        "--imageUrl", "https://www.latlmes.com/breaking/paperspace-now-has-a-100-bilion-valuation",
        "--instanceCount", "666",
        "--apiKey", "some_key",
    ]
    BASIC_OPTIONS_REQUEST = {
        "machineType": u"G1",
        "name": u"some_name",
        "imageUrl": u"https://www.latlmes.com/breaking/paperspace-now-has-a-100-bilion-valuation",
        "deploymentType": "Tensorflow Serving on K8s",
        "instanceCount": 666,
        "modelId": u"some_model_id",
    }
    RESPONSE_JSON_200 = example_responses.CREATE_DEPLOYMENT_WITH_BASIC_OPTIONS_RESPONSE
    EXPECTED_STDOUT = "New deployment created with id: sadkfhlskdjh\n"
    RESPONSE_JSON_404_MODEL_NOT_FOUND = {"error": {"name": "Error", "status": 404, "message": "Unable to find model"}}
    RESPONSE_CONTENT_404_MODEL_NOT_FOUND = b'{"error":{"name":"Error","status":404,"message":"Unable to find model"}}\n'
    EXPECTED_STDOUT_MODEL_NOT_FOUND = "Unable to find model\n"
    @mock.patch("paperspace.cli.deployments.deployments_commands.client.requests.post")
    def test_should_send_proper_data_and_print_message_when_create_deployment_with_basic_options(self, post_patched):
        post_patched.return_value = MockResponse(self.RESPONSE_JSON_200, 200, "fake content")
        runner = CliRunner()
        result = runner.invoke(cli.cli, self.BASIC_OPTIONS_COMMAND)
        post_patched.assert_called_once_with(self.URL,
                                             headers=EXPECTED_HEADERS,
                                             json=self.BASIC_OPTIONS_REQUEST,
                                             params=None,
                                             files=None,
                                             data=None)
        assert result.output == self.EXPECTED_STDOUT
        assert result.exit_code == 0
    @mock.patch("paperspace.cli.deployments.deployments_commands.client.requests.post")
    def test_should_send_different_api_key_when_api_key_parameter_was_used(self, post_patched):
        post_patched.return_value = MockResponse(self.RESPONSE_JSON_200, 200, "fake content")
        runner = CliRunner()
        result = runner.invoke(cli.cli, self.BASIC_OPTIONS_COMMAND_WITH_API_KEY)
        post_patched.assert_called_once_with(self.URL,
                                             headers=self.EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
                                             json=self.BASIC_OPTIONS_REQUEST,
                                             params=None,
                                             files=None,
                                             data=None)
        assert result.output == self.EXPECTED_STDOUT
        assert result.exit_code == 0
    @mock.patch("paperspace.cli.deployments.deployments_commands.client.requests.post")
    def test_should_send_proper_data_and_print_message_when_create_wrong_model_id_was_given(self, post_patched):
        post_patched.return_value = MockResponse(self.RESPONSE_JSON_404_MODEL_NOT_FOUND, 404,
                                                 self.RESPONSE_CONTENT_404_MODEL_NOT_FOUND)
        runner = CliRunner()
        result = runner.invoke(cli.cli, self.BASIC_OPTIONS_COMMAND)
        post_patched.assert_called_once_with(self.URL,
                                             headers=EXPECTED_HEADERS,
                                             json=self.BASIC_OPTIONS_REQUEST,
                                             params=None,
                                             files=None,
                                             data=None)
        assert result.output == self.EXPECTED_STDOUT_MODEL_NOT_FOUND
        assert result.exit_code == 0
class TestDeploymentsList(object):
URL = "https://api.paperspace.io/deployments/getDeploymentList/"
COMMAND = ["deployments", "list"]
LIST_JSON = example_responses.LIST_DEPLOYMENTS
COMMAND_WITH_API_KEY = ["deployments", "list", "--apiKey", "some_key"]
EXPECTED_HEADERS_WITH_CHANGED_API_KEY = paperspace.client.default_headers.copy()
EXPECTED_HEADERS_WITH_CHANGED_API_KEY["X-API-Key"] = "some_key"
COMMAND_WITH_FILTER_WITH_STATE = ["deployments", "list", "--state", "Stopped"]
LIST_WITH_FILTER_REQUEST_JSON = {"filter": {"where": {"and": [{"state": "Stopped"}]}}}
LIST_WITH_FILTER_RESPONSE_JSON_WHEN_NO_DEPLOYMENTS_FOUND = {"deploymentList": [], "total": 17, "displayTotal": 0,
"runningTotal": 0}
DETAILS_STDOUT = """+-----------+-----------------+----------------------------------------------------------------------------------+---------------+---------------------------+
| Name | ID | Endpoint | Api Type | Deployment Type |
+-----------+-----------------+----------------------------------------------------------------------------------+---------------+---------------------------+
| some_name | dev61ity7lx232 | https://development-services.paperspace.io/model-serving/dev61ity7lx232:predict | some_api_type | Tensorflow Serving on K8s |
| some_name | desanw1jptk7woh | https://development-services.paperspace.io/model-serving/desanw1jptk7woh:predict | REST | Tensorflow Serving on K8s |
| some_name | desfnnrqt1v633v | https://development-services.paperspace.io/model-serving/desfnnrqt1v633v:predict | REST | Tensorflow Serving on K8s |
| some_name | desdyn55d2e02su | https://development-services.paperspace.io/model-serving/desdyn55d2e02su:predict | REST | Tensorflow Serving on K8s |
| some_name | des3tmqa3s627o9 | https://development-services.paperspace.io/model-serving/des3tmqa3s627o9:predict | REST | Tensorflow Serving on K8s |
+-----------+-----------------+----------------------------------------------------------------------------------+---------------+---------------------------+
"""
@mock.patch("paperspace.cli.deployments.deployments_commands.client.requests.get")
def test_should_send_get_request_and_print_list_of_deployments(self, get_patched):
get_patched.return_value = MockResponse(self.LIST_JSON, 200, "fake content")
runner = CliRunner()
result = runner.invoke(cli.cli, self.COMMAND)
get_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS,
json=None,
params=None)
assert result.output == self.DETAILS_STDOUT
@mock.patch("paperspace.cli.deployments.deployments_commands.client.requests.get")
def test_should_send_get_request_with_custom_api_key_when_api_key_parameter_was_provided(self, get_patched):
get_patched.return_value = MockResponse(self.LIST_JSON, 200, "fake content")
runner = CliRunner()
result = runner.invoke(cli.cli, self.COMMAND_WITH_API_KEY)
get_patched.assert_called_once_with(self.URL,
headers=self.EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
json=None,
params=None)
assert result.output == self.DETAILS_STDOUT
@mock.patch("paperspace.cli.deployments.deployments_commands.common.pydoc")
@mock.patch("paperspace.cli.deployments.deployments_commands.client.requests.get")
def test_should_send_get_request_and_paginate_list_when_output_table_len_is_gt_lines_in_terminal(self, get_patched,
pydoc_patched):
list_json = {"deploymentList": self.LIST_JSON["deploymentList"] * 40}
get_patched.return_value = MockResponse(list_json, 200, "fake content")
runner = CliRunner()
result = runner.invoke(cli.cli, self.COMMAND)
get_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS,
json=None,
params=None)
pydoc_patched.pager.assert_called_once()
assert result.exit_code == 0
@mock.patch("paperspace.cli.deployments.deployments_commands.client.requests.get")
def test_should_send_get_request_and_print_list_of_deployments_filtered_by_state(self, get_patched):
get_patched.return_value = MockResponse(self.LIST_JSON, 200, "fake content")
runner = CliRunner()
result = runner.invoke(cli.cli, self.COMMAND_WITH_FILTER_WITH_STATE)
get_patched.assert_called_with("https://api.paperspace.io/deployments/getDeploymentList/",
headers=EXPECTED_HEADERS,
json=self.LIST_WITH_FILTER_REQUEST_JSON,
params=None)
assert result.output == self.DETAILS_STDOUT
@mock.patch("paperspace.cli.deployments.deployments_commands.client.requests.get")
def test_should_send_get_request_and_print_list_of_deployments_filtered_with_state_but_none_found(
self, get_patched):
get_patched.return_value = MockResponse(self.LIST_WITH_FILTER_RESPONSE_JSON_WHEN_NO_DEPLOYMENTS_FOUND, 200,
"fake content")
runner = CliRunner()
result = runner.invoke(cli.cli, self.COMMAND_WITH_FILTER_WITH_STATE)
get_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS,
json=self.LIST_WITH_FILTER_REQUEST_JSON,
params=None)
assert result.output == "No data found\n"
@mock.patch("paperspace.cli.deployments.deployments_commands.client.requests.get")
def test_should_print_proper_message_when_wrong_api_key_was_used(self, get_patched):
get_patched.return_value = MockResponse({"status": 400, "message": "Invalid API token"},
400)
runner = CliRunner()
result = runner.invoke(cli.cli, self.COMMAND)
get_patched.assert_called_once_with(self.URL,
headers=EXPECTED_HEADERS,
json=None,
params=None)
assert result.output == "Invalid API token\n"
class TestStartDeployment(object):
    """CLI tests for ``deployments start``."""

    URL = "https://api.paperspace.io/deployments/updateDeployment/"
    COMMAND = ["deployments", "start",
               "--id", "some_id"]
    REQUEST_JSON = {"isRunning": True, "id": u"some_id"}
    EXPECTED_STDOUT = "Deployment started\n"

    @mock.patch("paperspace.cli.deployments.deployments_commands.client.requests.post")
    def test_should_send_proper_data_and_print_message_when_deployments_start_was_used(self, post_patched):
        """Starting a deployment POSTs isRunning=True and confirms on stdout."""
        post_patched.return_value = MockResponse(status_code=204)

        invocation = CliRunner().invoke(cli.cli, self.COMMAND)

        post_patched.assert_called_once_with(
            self.URL,
            headers=EXPECTED_HEADERS,
            json=self.REQUEST_JSON,
            params=None,
            files=None,
            data=None,
        )
        assert invocation.output == self.EXPECTED_STDOUT
        assert invocation.exit_code == 0
class TestStopDeployment(object):
    """CLI tests for ``deployments stop``."""

    URL = "https://api.paperspace.io/deployments/updateDeployment/"
    COMMAND = ["deployments", "stop",
               "--id", "some_id"]
    REQUEST_JSON = {"isRunning": False, "id": u"some_id"}
    EXPECTED_STDOUT = "Deployment stopped\n"
    COMMAND_WITH_API_KEY = [
        "deployments", "stop",
        "--id", "some_id",
        "--apiKey", "some_key",
    ]
    EXPECTED_HEADERS_WITH_CHANGED_API_KEY = paperspace.client.default_headers.copy()
    EXPECTED_HEADERS_WITH_CHANGED_API_KEY["X-API-Key"] = "some_key"
    RESPONSE_JSON_400 = {"error": {"name": "Error", "status": 400, "message": "Unable to access deployment"}}
    EXPECTED_STDOUT_WITH_WRONG_ID = "Unable to access deployment\n"

    def _invoke(self, command):
        # Shared CLI invocation helper for the tests below.
        return CliRunner().invoke(cli.cli, command)

    @mock.patch("paperspace.cli.deployments.deployments_commands.client.requests.post")
    def test_should_send_proper_data_and_print_message_when_deployments_stop_was_used(self, post_patched):
        """Stopping a deployment POSTs isRunning=False and confirms on stdout."""
        post_patched.return_value = MockResponse(status_code=204)

        result = self._invoke(self.COMMAND)

        post_patched.assert_called_once_with(
            self.URL,
            headers=EXPECTED_HEADERS,
            json=self.REQUEST_JSON,
            params=None,
            files=None,
            data=None,
        )
        assert result.output == self.EXPECTED_STDOUT
        assert result.exit_code == 0

    @mock.patch("paperspace.cli.deployments.deployments_commands.client.requests.post")
    def test_should_send_proper_data_with_custom_api_key_when_api_key_parameter_was_provided(self, post_patched):
        """--apiKey must override the default key in the request headers."""
        post_patched.return_value = MockResponse(status_code=204)

        result = self._invoke(self.COMMAND_WITH_API_KEY)

        post_patched.assert_called_once_with(
            self.URL,
            headers=self.EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
            json=self.REQUEST_JSON,
            params=None,
            files=None,
            data=None,
        )
        assert result.output == self.EXPECTED_STDOUT
        assert result.exit_code == 0

    @mock.patch("paperspace.cli.deployments.deployments_commands.client.requests.post")
    def test_should_send_proper_data_and_print_message_when_deployments_stop_used_with_wrong_id(self, post_patched):
        """A 400 response must surface the API error message; exit code stays 0."""
        post_patched.return_value = MockResponse(self.RESPONSE_JSON_400, 400, "fake content")

        result = self._invoke(self.COMMAND)

        post_patched.assert_called_once_with(
            self.URL,
            headers=EXPECTED_HEADERS,
            json=self.REQUEST_JSON,
            params=None,
            files=None,
            data=None,
        )
        assert result.output == self.EXPECTED_STDOUT_WITH_WRONG_ID
        assert result.exit_code == 0
|
en
| 0.487106
|
+-----------+-----------------+----------------------------------------------------------------------------------+---------------+---------------------------+ | Name | ID | Endpoint | Api Type | Deployment Type | +-----------+-----------------+----------------------------------------------------------------------------------+---------------+---------------------------+ | some_name | dev61ity7lx232 | https://development-services.paperspace.io/model-serving/dev61ity7lx232:predict | some_api_type | Tensorflow Serving on K8s | | some_name | desanw1jptk7woh | https://development-services.paperspace.io/model-serving/desanw1jptk7woh:predict | REST | Tensorflow Serving on K8s | | some_name | desfnnrqt1v633v | https://development-services.paperspace.io/model-serving/desfnnrqt1v633v:predict | REST | Tensorflow Serving on K8s | | some_name | desdyn55d2e02su | https://development-services.paperspace.io/model-serving/desdyn55d2e02su:predict | REST | Tensorflow Serving on K8s | | some_name | des3tmqa3s627o9 | https://development-services.paperspace.io/model-serving/des3tmqa3s627o9:predict | REST | Tensorflow Serving on K8s | +-----------+-----------------+----------------------------------------------------------------------------------+---------------+---------------------------+
| 2.072421
| 2
|
easyCore/Utils/decorators.py
|
easyScience/easyCore
| 2
|
6629698
|
<gh_stars>1-10
# SPDX-FileCopyrightText: 2021 easyCore contributors <<EMAIL>>
# SPDX-License-Identifier: BSD-3-Clause
# © 2021 Contributors to the easyCore project <https://github.com/easyScience/easyCore>
__author__ = 'github.com/wardsimon'
__version__ = '0.1.0'
import collections
import functools
from time import time
from easyCore import borg
class memoized:
    """Decorator that caches a function's return value per argument tuple.

    Repeated calls with the same (hashable) positional arguments return the
    cached value instead of re-evaluating.  Calls whose arguments are not
    hashable (e.g. a list) are executed normally and simply not cached.

    Fixes over the previous version: ``collections.Hashable`` was removed in
    Python 3.10, and the old ``isinstance(args, Hashable)`` test was
    ineffective anyway -- ``args`` is always a tuple, which is an instance of
    ``Hashable`` even when it contains unhashable elements, so the dict lookup
    could still raise ``TypeError``.  The EAFP form below handles both cases.
    """

    def __init__(self, func):
        self.func = func      # the wrapped callable
        self.cache = {}       # args tuple -> cached return value

    def __call__(self, *args):
        try:
            return self.cache[args]
        except KeyError:
            # First call with these arguments: compute and cache.
            value = self.func(*args)
            self.cache[args] = value
            return value
        except TypeError:
            # Unhashable argument (e.g. a list): better to not cache than blow up.
            return self.func(*args)

    def __repr__(self):
        """Return the wrapped function's docstring."""
        return self.func.__doc__

    def __get__(self, obj, objtype):
        """Support instance methods by binding *obj* as the first argument."""
        return functools.partial(self.__call__, obj)
def counted(func):
    """Count invocations of *func* via a ``calls`` attribute on the wrapper.

    :param func: Function to be counted
    :return: Wrapper forwarding all arguments to *func*; ``wrapper.calls``
        holds the number of calls made so far (starting at 0).
    """
    @functools.wraps(func)  # fix: preserve the wrapped function's name/docstring
    def wrapped(*args, **kwargs):
        wrapped.calls += 1
        return func(*args, **kwargs)
    wrapped.calls = 0
    return wrapped
def time_it(func):
    """Report a function's execution time to a per-function timer logger.

    The logger name is ``timer.<module>.<function>``, obtained from the
    project-wide ``borg`` logging registry at decoration time.

    :param func: function to be timed
    :return: wrapped callable that logs its run time in milliseconds
    """
    qualified_name = func.__module__ + '.' + func.__name__
    time_logger = borg.log.getLogger('timer.' + qualified_name)

    @functools.wraps(func)
    def _time_it(*args, **kwargs):
        started_ms = int(round(time() * 1000))
        try:
            return func(*args, **kwargs)
        finally:
            # Logged even when the wrapped call raises.
            end_ = int(round(time() * 1000)) - started_ms
            time_logger.debug(f"\033[1;34;49mExecution time: {end_ if end_ > 0 else 0} ms\033[0m")

    return _time_it
|
# SPDX-FileCopyrightText: 2021 easyCore contributors <<EMAIL>>
# SPDX-License-Identifier: BSD-3-Clause
# © 2021 Contributors to the easyCore project <https://github.com/easyScience/easyCore>
__author__ = 'github.com/wardsimon'
__version__ = '0.1.0'
import collections
import functools
from time import time
from easyCore import borg
class memoized:
"""
Decorator. Caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned
(not reevaluated).
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
if not isinstance(args, collections.Hashable):
# uncacheable. a list, for instance.
# better to not cache than blow up.
return self.func(*args)
if args in self.cache:
return self.cache[args]
value = self.func(*args)
self.cache[args] = value
return value
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
def __get__(self, obj, objtype):
"""Support instance methods."""
return functools.partial(self.__call__, obj)
def counted(func):
"""
Counts how many times a function has been called and adds a `func.calls` to it's properties
:param func: Function to be counted
:return: Results from function call
"""
def wrapped(*args, **kwargs):
wrapped.calls += 1
return func(*args, **kwargs)
wrapped.calls = 0
return wrapped
def time_it(func):
"""
Times a function and reports the time either to the class' log or the base logger
:param func: function to be timed
:return: callable function with timer
"""
name = func.__module__ + '.' + func.__name__
time_logger = borg.log.getLogger('timer.' + name)
@functools.wraps(func)
def _time_it(*args, **kwargs):
start = int(round(time() * 1000))
try:
return func(*args, **kwargs)
finally:
end_ = int(round(time() * 1000)) - start
time_logger.debug(f"\033[1;34;49mExecution time: {end_ if end_ > 0 else 0} ms\033[0m")
return _time_it
|
en
| 0.818908
|
# SPDX-FileCopyrightText: 2021 easyCore contributors <<EMAIL>> # SPDX-License-Identifier: BSD-3-Clause # © 2021 Contributors to the easyCore project <https://github.com/easyScience/easyCore> Decorator. Caches a function's return value each time it is called. If called later with the same arguments, the cached value is returned (not reevaluated). # uncacheable. a list, for instance. # better to not cache than blow up. Return the function's docstring. Support instance methods. Counts how many times a function has been called and adds a `func.calls` to it's properties :param func: Function to be counted :return: Results from function call Times a function and reports the time either to the class' log or the base logger :param func: function to be timed :return: callable function with timer
| 2.505838
| 3
|
project/tests/apis/v1/test_customer.py
|
rvaccari/sbgo
| 0
|
6629699
|
import json
import pytest
import responses
from app.config import get_settings
class TestCustomerPost:
    """Tests for POST /api/v1/customers/."""

    def test_post_customer_success(self, test_app, customer_payload):
        payload = json.dumps(customer_payload)
        response = test_app.post("/api/v1/customers/", data=payload)
        response.raise_for_status()

        assert response.status_code == 201
        # Every submitted field must appear unchanged in the response body.
        assert customer_payload.items() <= response.json().items()

    def test_post_customer_no_payload(self, test_app):
        response = test_app.post("/api/v1/customers/")

        assert response.status_code == 422
class TestCustomerOffer:
    """Tests for GET /api/v1/customers/<id>/offers/."""

    @pytest.mark.asyncio
    @responses.activate
    def test_get_offers_from_customer(
        self, test_app, mocked_responses, customer, partner_offers_payload
    ):
        # Stub the outbound partner call so the endpoint never hits the network.
        partner_url = f"{get_settings().partner_host}/offers"
        responses.add(
            responses.POST,
            url=partner_url,
            json=partner_offers_payload,
            status=200,
        )

        response = test_app.get(f"/api/v1/customers/{customer.id}/offers/")
        response.raise_for_status()

        assert response.status_code == 200
|
import json
import pytest
import responses
from app.config import get_settings
class TestCustomerPost:
def test_post_customer_success(self, test_app, customer_payload):
response = test_app.post(
"/api/v1/customers/", data=json.dumps(customer_payload)
)
response.raise_for_status()
assert response.status_code == 201
assert customer_payload.items() <= response.json().items()
def test_post_customer_no_payload(self, test_app):
response = test_app.post("/api/v1/customers/")
assert response.status_code == 422
class TestCustomerOffer:
@pytest.mark.asyncio
@responses.activate
def test_get_offers_from_customer(
self, test_app, mocked_responses, customer, partner_offers_payload
):
settings = get_settings()
responses.add(
responses.POST,
url=f"{settings.partner_host}/offers",
json=partner_offers_payload,
status=200,
)
url = f"/api/v1/customers/{customer.id}/offers/"
response = test_app.get(url)
response.raise_for_status()
assert response.status_code == 200
|
none
| 1
| 2.27769
| 2
|
|
j1939/name.py
|
FilipZZZ/python-can-j1939
| 0
|
6629700
|
class Name:
    """The Name of one Controller Application.

    The Name consists of 64 bit:

    1-bit Arbitrary Address Capable
        Indicate the capability to solve address conflicts.
        Set to 1 if the device is Arbitrary Address Capable, set to 0 if
        it's Single Address Capable.
    3-bit Industry Group
        One of the predefined J1939 industry groups.
    4-bit Vehicle System Instance
        Instance number of a vehicle system to distinguish two or more
        device with the same Vehicle System number in the same J1939
        network.
        The first instance is assigned to the instance number 0.
    7-bit Vehicle System
        A subcomponent of a vehicle, that includes one or more J1939
        segments and may be connected or disconnected from the vehicle.
        A Vehicle System may be made of one or more functions. The Vehicle
        System depends on the Industry Group definition.
    1-bit Reserved
        This field is reserved for future use by SAE.
    8-bit Function
        One of the predefined J1939 functions. The same function value
        (upper 128 only) may mean different things for different Industry
        Groups or Vehicle Systems.
    5-bit Function Instance
        Instance number of a function to distinguish two or more devices
        with the same function number in the same J1939 network.
        The first instance is assigned to the instance number 0.
    3-bit ECU Instance
        Identify the ECU instance if multiple ECUs are involved in
        performing a single function. Normally set to 0.
    11-bit Manufacturer Code
        One of the predefined J1939 manufacturer codes.
    21-bit Identity Number
        A unique number which identifies the particular device in a
        manufacturer specific way.

    The fields are exposed as plain read/write attributes (the former
    pass-through property boilerplate added no logic); the packed forms are
    available through the :attr:`value` and :attr:`bytes` properties.
    """

    class IndustryGroup:
        # Predefined J1939 industry group codes.
        Global = 0
        OnHighway = 1
        AgriculturalAndForestry = 2
        Construction = 3
        Marine = 4
        Industrial = 5

    def __init__(self, **kwargs):
        """Build a Name from a packed representation or from individual fields.

        Exactly one construction mode is used, in this priority order:

        :param value: 64-bit integer the whole Name is extracted from.
        :param bytes: array of 8 bytes (little-endian) holding the Name.
        :param arbitrary_address_capable: 1-bit flag (see class docstring).
        :param industry_group: 3-bit industry group.
        :param vehicle_system_instance: 4-bit vehicle system instance.
        :param vehicle_system: 7-bit vehicle system.
        :param function: 8-bit function.
        :param function_instance: 5-bit function instance.
        :param ecu_instance: 3-bit ECU instance.
        :param manufacturer_code: 11-bit manufacturer code.
        :param identity_number: 21-bit identity number.
        :raises ValueError: if a field does not fit into its bit width.
        """
        if 'value' in kwargs:
            self.value = kwargs['value']
        elif 'bytes' in kwargs:
            self.bytes = kwargs['bytes']
        else:
            # Build from individual fields, validating each bit-field range.
            self.arbitrary_address_capable = self._checked(
                kwargs.get('arbitrary_address_capable', False), 1,
                "arbitrary address capable")
            self.industry_group = self._checked(
                kwargs.get('industry_group', Name.IndustryGroup.Global), 3,
                "industry group")
            self.vehicle_system_instance = self._checked(
                kwargs.get('vehicle_system_instance', 0), 4,
                "vehicle system instance")
            self.vehicle_system = self._checked(
                kwargs.get('vehicle_system', 0), 7, "vehicle system")
            self.function = self._checked(
                kwargs.get('function', 0), 8, "function")
            self.function_instance = self._checked(
                kwargs.get('function_instance', 0), 5, "function instance")
            self.ecu_instance = self._checked(
                kwargs.get('ecu_instance', 0), 3, "ecu instance")
            self.manufacturer_code = self._checked(
                kwargs.get('manufacturer_code', 0), 11, "manufacturer code")
            self.identity_number = self._checked(
                kwargs.get('identity_number', 0), 21, "identity number")
            self.reserved_bit = 0

    @staticmethod
    def _checked(value, bit_width, label):
        """Return *value* if it fits into *bit_width* bits, else raise ValueError.

        Raises with the same messages the original per-field checks produced.
        """
        if (value < 0) or (value > ((2 ** bit_width) - 1)):
            raise ValueError("Length of " + label + " incorrect")
        return value

    @property
    def value(self):
        """The Name packed into a single 64-bit integer."""
        retval = self.identity_number
        retval += (self.manufacturer_code << 21)
        retval += (self.ecu_instance << 32)
        retval += (self.function_instance << 35)
        retval += (self.function << 40)
        retval += (self.reserved_bit << 48)
        retval += (self.vehicle_system << 49)
        retval += (self.vehicle_system_instance << 56)
        retval += (self.industry_group << 60)
        retval += (self.arbitrary_address_capable << 63)
        return retval

    @value.setter
    def value(self, value):
        # Unpack every bit-field from the 64-bit integer.
        # (The stray debug print() of the previous version has been removed.)
        self.identity_number = value & ((2 ** 21) - 1)
        self.manufacturer_code = (value >> 21) & ((2 ** 11) - 1)
        self.ecu_instance = (value >> 32) & ((2 ** 3) - 1)
        self.function_instance = (value >> 35) & ((2 ** 5) - 1)
        self.function = (value >> 40) & ((2 ** 8) - 1)
        self.reserved_bit = (value >> 48) & 1
        self.vehicle_system = (value >> 49) & ((2 ** 7) - 1)
        self.vehicle_system_instance = (value >> 56) & ((2 ** 4) - 1)
        self.industry_group = (value >> 60) & ((2 ** 3) - 1)
        self.arbitrary_address_capable = (value >> 63) & 1

    @property
    def bytes(self):
        """Get the Name object as 8 Byte Data (little-endian list of ints)."""
        return list(self.value.to_bytes(8, byteorder='little'))

    @bytes.setter
    def bytes(self, value):
        self.value = int.from_bytes(value, byteorder='little', signed=False)
|
class Name:
"""The Name of one Controller Application.
The Name consists of 64 bit:
1-bit Arbitrary Address Capable
Indicate the capability to solve address conflicts.
Set to 1 if the device is Arbitrary Address Capable, set to 0 if
it's Single Address Capable.
3-bit Industry Group
One of the predefined J1939 industry groups.
4-bit Vehicle System Instance
Instance number of a vehicle system to distinguish two or more
device with the same Vehicle System number in the same J1939
network.
The first instance is assigned to the instance number 0.
7-bit Vehicle System
A subcomponent of a vehicle, that includes one or more J1939
segments and may be connected or disconnected from the vehicle.
A Vehicle System may be made of one or more functions. The Vehicle
System depends on the Industry Group definition.
1-bit Reserved
This field is reserved for future use by SAE.
8-bit Function
One of the predefined J1939 functions. The same function value
(upper 128 only) may mean different things for different Industry
Groups or Vehicle Systems.
5-bit Function Instance
Instance number of a function to distinguish two or more devices
with the same function number in the same J1939 network.
The first instance is assigned to the instance number 0.
3-bit ECU Instance
Identify the ECU instance if multiple ECUs are involved in
performing a single function. Normally set to 0.
11-bit Manufacturer Code
One of the predefined J1939 manufacturer codes.
21-bit Identity Number
A unique number which identifies the particular device in a
manufacturer specific way.
"""
class IndustryGroup:
Global = 0
OnHighway = 1
AgriculturalAndForestry = 2
Construction = 3
Marine = 4
Industrial = 5
def __init__(self, **kwargs):
"""
:param value:
64-bit value the address should be extracted from
:param bytes:
Array of 8 bytes containing the name object as binary representation.
:param arbitrary_address_capable:
1-bit Arbitrary Address Capable
Indicate the capability to solve address conflicts.
Set to 1 if the device is Arbitrary Address Capable, set to 0 if
it's Single Address Capable.
:param industry_group:
3-bit Industry Group
One of the predefined J1939 industry groups.
:param vehicle_system_instance:
4-bit Vehicle System Instance
Instance number of a vehicle system to distinguish two or more
device with the same Vehicle System number in the same J1939
network.
The first instance is assigned to the instance number 0.
:param vehicle_system:
7-bit Vehicle System
A subcomponent of a vehicle, that includes one or more J1939
segments and may be connected or disconnected from the vehicle.
A Vehicle System may be made of one or more functions. The Vehicle
System depends on the Industry Group definition.
:param function:
8-bit Function
One of the predefined J1939 functions. The same function value
(upper 128 only) may mean different things for different Industry
Groups or Vehicle Systems.
:param function_instance:
5-bit Function Instance
Instance number of a function to distinguish two or more devices
with the same function number in the same J1939 network.
The first instance is assigned to the instance number 0.
:param ecu_instance:
3-bit ECU Instance
Identify the ECU instance if multiple ECUs are involved in
performing a single function. Normally set to 0.
:param manufacturer_code:
11-bit Manufacturer Code
One of the predefined J1939 manufacturer codes.
:param identity_number:
21-bit Identity Number
A unique number which identifies the particular device in a
manufacturer specific way.
"""
if 'value' in kwargs:
self.value = kwargs['value']
elif 'bytes' in kwargs:
self.bytes = kwargs['bytes']
else:
self.arbitrary_address_capable = kwargs.get('arbitrary_address_capable', False)
if (self.arbitrary_address_capable < 0) or (self.arbitrary_address_capable > 1):
raise ValueError("Length of arbitrary address capable incorrect")
self.industry_group = kwargs.get('industry_group', Name.IndustryGroup.Global)
if (self.industry_group < 0) or (self.industry_group > ((2 ** 3) - 1)):
raise ValueError("Length of industry group incorrect")
self.vehicle_system_instance = kwargs.get('vehicle_system_instance', 0)
if (self.vehicle_system_instance < 0) or (self.vehicle_system_instance > ((2 ** 4) - 1)):
raise ValueError("Length of vehicle system instance incorrect")
self.vehicle_system = kwargs.get('vehicle_system', 0)
if (self.vehicle_system < 0) or (self.vehicle_system > ((2 ** 7) - 1)):
raise ValueError("Length of vehicle system incorrect")
self.function = kwargs.get('function', 0)
if (self.function < 0) or (self.function > ((2 ** 8) - 1)):
raise ValueError("Length of function incorrect")
self.function_instance = kwargs.get('function_instance', 0)
if (self.function_instance < 0) or (self.function_instance > ((2 ** 5) - 1)):
raise ValueError("Length of function instance incorrect")
self.ecu_instance = kwargs.get('ecu_instance', 0)
if (self.ecu_instance < 0) or (self.ecu_instance > ((2 ** 3) - 1)):
raise ValueError("Length of ecu instance incorrect")
self.manufacturer_code = kwargs.get('manufacturer_code', 0)
if (self.manufacturer_code < 0) or (self.manufacturer_code > ((2 ** 11) - 1)):
raise ValueError("Length of manufacturer code incorrect")
self.identity_number = kwargs.get('identity_number', 0)
if (self.identity_number < 0) or (self.identity_number > ((2 ** 21) - 1)):
raise ValueError("Length of identity number incorrect")
self.reserved_bit = 0
@property
def arbitrary_address_capable(self):
return self.__arbitrary_address_capable
@arbitrary_address_capable.setter
def arbitrary_address_capable(self, value):
self.__arbitrary_address_capable = value
@property
def industry_group(self):
return self.__industry_group
@industry_group.setter
def industry_group(self, value):
self.__industry_group = value
@property
def vehicle_system_instance(self):
return self.__vehicle_system_instance
@vehicle_system_instance.setter
def vehicle_system_instance(self, value):
self.__vehicle_system_instance = value
@property
def vehicle_system(self):
return self.__vehicle_system
@vehicle_system.setter
def vehicle_system(self, value):
self.__vehicle_system = value
@property
def reserved_bit(self):
return self.__reserved_bit
@reserved_bit.setter
def reserved_bit(self, value):
self.__reserved_bit = value
@property
def function(self):
return self.__function
@function.setter
def function(self, value):
self.__function = value
@property
def function_instance(self):
return self.__function_instance
@function_instance.setter
def function_instance(self, value):
self.__function_instance = value
@property
def ecu_instance(self):
return self.__ecu_instance
@ecu_instance.setter
def ecu_instance(self, value):
self.__ecu_instance = value
@property
def manufacturer_code(self):
return self.__manufacturer_code
@manufacturer_code.setter
def manufacturer_code(self, value):
self.__manufacturer_code = value
@property
def identity_number(self):
return self.__identity_number
@identity_number.setter
def identity_number(self, value):
self.__identity_number = value
@property
def value(self):
retval = self.identity_number
retval += (self.manufacturer_code << 21)
retval += (self.ecu_instance << 32)
retval += (self.function_instance << 35)
retval += (self.function << 40)
retval += (self.reserved_bit << 48)
retval += (self.vehicle_system << 49)
retval += (self.vehicle_system_instance << 56)
retval += (self.industry_group << 60)
retval += (self.arbitrary_address_capable << 63)
return retval
@value.setter
def value(self, value):
self.identity_number = value & ((2 ** 21) - 1)
print(value, self.identity_number )
self.manufacturer_code = (value >> 21) & ((2 ** 11) - 1)
self.ecu_instance = (value >> 32) & ((2 ** 3) - 1)
self.function_instance = (value >> 35) & ((2 ** 5) - 1)
self.function = (value >> 40) & ((2 ** 8) - 1)
self.reserved_bit = (value >> 48) & 1
self.vehicle_system = (value >> 49) & ((2 ** 7) - 1)
self.vehicle_system_instance = (value >> 56) & ((2 ** 4) - 1)
self.industry_group = (value >> 60) & ((2 ** 3) - 1)
self.arbitrary_address_capable = (value >> 63) & 1
@property
def bytes(self):
"""Get the Name object as 8 Byte Data"""
return [
((self.value >> 0) & 0xFF),
((self.value >> 8) & 0xFF),
((self.value >> 16) & 0xFF),
((self.value >> 24) & 0xFF),
((self.value >> 32) & 0xFF),
((self.value >> 40) & 0xFF),
((self.value >> 48) & 0xFF),
((self.value >> 56) & 0xFF)
]
    @bytes.setter
    def bytes(self, value):
        """Set all NAME fields from an 8-byte little-endian sequence."""
        self.value = int.from_bytes(value, byteorder='little', signed=False)
|
en
| 0.845114
|
The Name of one Controller Application. The Name consists of 64 bit: 1-bit Arbitrary Address Capable Indicate the capability to solve address conflicts. Set to 1 if the device is Arbitrary Address Capable, set to 0 if it's Single Address Capable. 3-bit Industry Group One of the predefined J1939 industry groups. 4-bit Vehicle System Instance Instance number of a vehicle system to distinguish two or more device with the same Vehicle System number in the same J1939 network. The first instance is assigned to the instance number 0. 7-bit Vehicle System A subcomponent of a vehicle, that includes one or more J1939 segments and may be connected or disconnected from the vehicle. A Vehicle System may be made of one or more functions. The Vehicle System depends on the Industry Group definition. 1-bit Reserved This field is reserved for future use by SAE. 8-bit Function One of the predefined J1939 functions. The same function value (upper 128 only) may mean different things for different Industry Groups or Vehicle Systems. 5-bit Function Instance Instance number of a function to distinguish two or more devices with the same function number in the same J1939 network. The first instance is assigned to the instance number 0. 3-bit ECU Instance Identify the ECU instance if multiple ECUs are involved in performing a single function. Normally set to 0. 11-bit Manufacturer Code One of the predefined J1939 manufacturer codes. 21-bit Identity Number A unique number which identifies the particular device in a manufacturer specific way. :param value: 64-bit value the address should be extracted from :param bytes: Array of 8 bytes containing the name object as binary representation. :param arbitrary_address_capable: 1-bit Arbitrary Address Capable Indicate the capability to solve address conflicts. Set to 1 if the device is Arbitrary Address Capable, set to 0 if it's Single Address Capable. :param industry_group: 3-bit Industry Group One of the predefined J1939 industry groups. 
:param vehicle_system_instance: 4-bit Vehicle System Instance Instance number of a vehicle system to distinguish two or more device with the same Vehicle System number in the same J1939 network. The first instance is assigned to the instance number 0. :param vehicle_system: 7-bit Vehicle System A subcomponent of a vehicle, that includes one or more J1939 segments and may be connected or disconnected from the vehicle. A Vehicle System may be made of one or more functions. The Vehicle System depends on the Industry Group definition. :param function: 8-bit Function One of the predefined J1939 functions. The same function value (upper 128 only) may mean different things for different Industry Groups or Vehicle Systems. :param function_instance: 5-bit Function Instance Instance number of a function to distinguish two or more devices with the same function number in the same J1939 network. The first instance is assigned to the instance number 0. :param ecu_instance: 3-bit ECU Instance Identify the ECU instance if multiple ECUs are involved in performing a single function. Normally set to 0. :param manufacturer_code: 11-bit Manufacturer Code One of the predefined J1939 manufacturer codes. :param identity_number: 21-bit Identity Number A unique number which identifies the particular device in a manufacturer specific way. Get the Name object as 8 Byte Data
| 2.953475
| 3
|
parser/cmds/predict.py
|
Jacob-Zhou/stack-parser
| 3
|
6629701
|
<filename>parser/cmds/predict.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from parser import BiaffineParser, Model
from parser.utils import Corpus
from parser.utils.data import TextDataset, batchify
import torch
class Predict(object):
    """CLI command that runs a trained parser over a dataset and writes the
    predicted tags, heads and relations back to a CoNLL-X file."""

    def add_subparser(self, name, parser):
        """Register the prediction sub-command on *parser* and return it."""
        sub = parser.add_parser(
            name, help='Use a trained model to make predictions.'
        )
        sub.add_argument('--batch-size', type=int, default=5000,
                         help='batch size')
        sub.add_argument('--fdata', default='data/conll09/test.conllx',
                         help='path to dataset')
        sub.add_argument('--fpred', default='pred.conllx',
                         help='path to predicted result')
        return sub

    def __call__(self, config):
        """Load model and data per *config*, predict, and save the result."""
        print("Load the model")
        vocab = torch.load(config.vocab)
        biaffine = BiaffineParser.load(config.model)
        model = Model(config, vocab, biaffine)
        print("Load the dataset")
        corpus = Corpus.load(config.fdata)
        fields = vocab.numericalize(corpus, True, False)
        # Wrap the numericalized fields and batch them for prediction.
        loader = batchify(TextDataset(fields), config.batch_size)
        print("Make predictions on the dataset")
        corpus.tags, corpus.heads, corpus.rels = model.predict(loader)
        print(f"Save the predicted result to {config.fpred}")
        corpus.save(config.fpred)
|
<filename>parser/cmds/predict.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from parser import BiaffineParser, Model
from parser.utils import Corpus
from parser.utils.data import TextDataset, batchify
import torch
class Predict(object):
def add_subparser(self, name, parser):
subparser = parser.add_parser(
name, help='Use a trained model to make predictions.'
)
subparser.add_argument('--batch-size', default=5000, type=int,
help='batch size')
subparser.add_argument('--fdata', default='data/conll09/test.conllx',
help='path to dataset')
subparser.add_argument('--fpred', default='pred.conllx',
help='path to predicted result')
return subparser
def __call__(self, config):
print("Load the model")
vocab = torch.load(config.vocab)
parser = BiaffineParser.load(config.model)
model = Model(config, vocab, parser)
print("Load the dataset")
corpus = Corpus.load(config.fdata)
dataset = TextDataset(vocab.numericalize(corpus, True, False))
# set the data loader
loader = batchify(dataset, config.batch_size)
print("Make predictions on the dataset")
corpus.tags, corpus.heads, corpus.rels = model.predict(loader)
print(f"Save the predicted result to {config.fpred}")
corpus.save(config.fpred)
|
en
| 0.690618
|
# -*- coding: utf-8 -*- # set the data loader
| 2.632547
| 3
|
app/blueprints/api/schemas/search.py
|
deb17/moneycare
| 0
|
6629702
|
<filename>app/blueprints/api/schemas/search.py
import calendar
import marshmallow as ma
from flask_jwt_extended import verify_jwt_in_request, get_jwt_identity
from .expense import ExpenseUpdateSchema
from app.models import PaymentMode, Budget
def get_mode(obj):
    """Serialize helper: return the mode string of *obj*'s payment mode."""
    mode_record = obj.payment_mode
    return mode_record.mode
def set_mode(data):
    """Deserialize helper: resolve a payment-mode name to its id for the
    authenticated user; raises a ValidationError when it is unknown."""
    verify_jwt_in_request()
    owner_id = get_jwt_identity()
    match = (PaymentMode.query
             .filter_by(user_id=owner_id, mode=data)
             .first())
    if not match:
        raise ma.ValidationError('Payment mode does not exist.')
    return match.id
def get_estimate(obj):
    """Serialize helper: return the budget item name, or None when the
    expense is not linked to a budget."""
    return obj.estimate.item if obj.budget_id else None
def set_estimate(data):
    """Deserialize helper: resolve a budget item name to its id for the
    authenticated user; falsy input passes through as None."""
    if not data:
        return None
    verify_jwt_in_request()
    owner_id = get_jwt_identity()
    match = (Budget.query
             .filter_by(user_id=owner_id, item=data)
             .first())
    if not match:
        raise ma.ValidationError('Estimate does not exist.')
    return match.id
def get_tags(obj):
    """Serialize helper: comma separated tag names of *obj*."""
    return ','.join(tag.tagname for tag in obj.tags)
def set_month(data):
    """Deserialize helper: convert comma separated abbreviated month names
    (e.g. 'Jan,Feb') into their month numbers ([1, 2])."""
    abbreviations = list(calendar.month_abbr)
    pieces = data.split(',')
    try:
        return [abbreviations.index(piece.title()) for piece in pieces]
    except Exception:
        raise ma.ValidationError('Month name should be abbreviated.')
def validate_year(year_str):
    """Marshmallow validator: accept only a string parsing to a year
    strictly between 1900 and 2100."""
    try:
        if not 1900 < int(year_str) < 2100:
            raise ValueError
    except Exception:
        raise ma.ValidationError('Year is invalid')
class SearchSchema(ExpenseUpdateSchema):
    """Schema for expense search queries.

    Extends ExpenseUpdateSchema with date-range, amount-range, month/year
    and free-text filters; schema-level validators reject contradictory
    combinations (exact amount vs. amount bounds, exact date vs. range).
    """
    # Serialized as the mode string; deserialized to a PaymentMode id.
    payment_mode = ma.fields.Function(
        serialize=get_mode,
        deserialize=set_mode
    )
    # Serialized as the budget item name; deserialized to a Budget id.
    estimate = ma.fields.Function(
        serialize=get_estimate,
        deserialize=set_estimate
    )
    # Dump-only: comma separated tag names.
    tags = ma.fields.Function(
        serialize=get_tags
    )
    # Exactly four characters and strictly between 1900 and 2100.
    year = ma.fields.Str(validate=[ma.validate.Length(min=4, max=4),
                                   validate_year])
    # Comma separated abbreviated month names -> list of month numbers.
    month = ma.fields.Function(deserialize=set_month)
    exact_date = ma.fields.Date()
    from_date = ma.fields.Date()
    to_date = ma.fields.Date()
    # Comparison operator applied to ``amount`` (defaults to equality).
    amt_cond = ma.fields.Str(missing='==')
    amt_min = ma.fields.Decimal(as_string=True)
    amt_max = ma.fields.Decimal(as_string=True)
    text = ma.fields.Str(validate=ma.validate.Length(max=512))
    simple_search = ma.fields.Bool(missing=False)
    @ma.validates_schema
    def validate_amt_min(self, data, **kwargs):
        """Reject a query giving both an exact amount and a lower bound."""
        if data.get('amount') and data.get('amt_min'):
            raise ma.ValidationError('Both amount and amt_min cannot be '
                                     'specified.')
    @ma.validates_schema
    def validate_amt_max(self, data, **kwargs):
        """Reject a query giving both an exact amount and an upper bound."""
        if data.get('amount') and data.get('amt_max'):
            raise ma.ValidationError('Both amount and amt_max cannot be '
                                     'specified.')
    @ma.validates_schema
    def validate_from_date(self, data, **kwargs):
        """Reject a query giving both an exact date and a range start."""
        if data.get('exact_date') and data.get('from_date'):
            raise ma.ValidationError('Both exact date and from-date cannot be '
                                     'specified.')
    @ma.validates_schema
    def validate_to_date(self, data, **kwargs):
        """Reject a query giving both an exact date and a range end."""
        if data.get('exact_date') and data.get('to_date'):
            raise ma.ValidationError('Both exact date and to-date cannot be '
                                     'specified.')
|
<filename>app/blueprints/api/schemas/search.py
import calendar
import marshmallow as ma
from flask_jwt_extended import verify_jwt_in_request, get_jwt_identity
from .expense import ExpenseUpdateSchema
from app.models import PaymentMode, Budget
def get_mode(obj):
return obj.payment_mode.mode
def set_mode(data):
verify_jwt_in_request()
user_id = get_jwt_identity()
mode = data
payment_mode = PaymentMode.query \
.filter_by(user_id=user_id, mode=mode).first()
if not payment_mode:
raise ma.ValidationError('Payment mode does not exist.')
return payment_mode.id
def get_estimate(obj):
if obj.budget_id:
return obj.estimate.item
return None
def set_estimate(data):
if not data:
return None
verify_jwt_in_request()
user_id = get_jwt_identity()
item = data
estimate = Budget.query \
.filter_by(user_id=user_id, item=item).first()
if not estimate:
raise ma.ValidationError('Estimate does not exist.')
return estimate.id
def get_tags(obj):
taglist = ','.join([tag.tagname for tag in obj.tags])
return taglist
def set_month(data):
month_list = list(calendar.month_abbr)
month_names = data.split(',')
try:
month_indexes = [month_list.index(name.title()) for name in
month_names]
except Exception:
raise ma.ValidationError('Month name should be abbreviated.')
return month_indexes
def validate_year(year_str):
try:
year = int(year_str)
if not (1900 < year < 2100):
raise ValueError
except Exception:
raise ma.ValidationError('Year is invalid')
class SearchSchema(ExpenseUpdateSchema):
payment_mode = ma.fields.Function(
serialize=get_mode,
deserialize=set_mode
)
estimate = ma.fields.Function(
serialize=get_estimate,
deserialize=set_estimate
)
tags = ma.fields.Function(
serialize=get_tags
)
year = ma.fields.Str(validate=[ma.validate.Length(min=4, max=4),
validate_year])
month = ma.fields.Function(deserialize=set_month)
exact_date = ma.fields.Date()
from_date = ma.fields.Date()
to_date = ma.fields.Date()
amt_cond = ma.fields.Str(missing='==')
amt_min = ma.fields.Decimal(as_string=True)
amt_max = ma.fields.Decimal(as_string=True)
text = ma.fields.Str(validate=ma.validate.Length(max=512))
simple_search = ma.fields.Bool(missing=False)
@ma.validates_schema
def validate_amt_min(self, data, **kwargs):
if data.get('amount') and data.get('amt_min'):
raise ma.ValidationError('Both amount and amt_min cannot be '
'specified.')
@ma.validates_schema
def validate_amt_max(self, data, **kwargs):
if data.get('amount') and data.get('amt_max'):
raise ma.ValidationError('Both amount and amt_max cannot be '
'specified.')
@ma.validates_schema
def validate_from_date(self, data, **kwargs):
if data.get('exact_date') and data.get('from_date'):
raise ma.ValidationError('Both exact date and from-date cannot be '
'specified.')
@ma.validates_schema
def validate_to_date(self, data, **kwargs):
if data.get('exact_date') and data.get('to_date'):
raise ma.ValidationError('Both exact date and to-date cannot be '
'specified.')
|
none
| 1
| 2.35167
| 2
|
|
neuro_extras/__init__.py
|
neuro-inc/neuro-extras
| 2
|
6629703
|
from neuro_sdk import PluginManager
from .main import main # noqa
from .version import __version__ # noqa
def setup_plugin(manager: PluginManager) -> None:
    """Neuro CLI plugin entry point: register the ``extra`` config section
    with its ``remote-project-dir`` string option."""
    manager.config.define_str("extra", "remote-project-dir")
|
from neuro_sdk import PluginManager
from .main import main # noqa
from .version import __version__ # noqa
def setup_plugin(manager: PluginManager) -> None:
manager.config.define_str("extra", "remote-project-dir")
|
uz
| 0.443564
|
# noqa # noqa
| 1.380608
| 1
|
evaluation_ifip_networking_2018/evaluation.py
|
vnep-approx/evaluation-ifip-networking-2018
| 2
|
6629704
|
# MIT License
#
# Copyright (c) 2016-2018 <NAME>, <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""This is the evaluation and plotting module.
This module handles all plotting related evaluation.
"""
import os
import pickle
import sys
from collections import namedtuple
from itertools import combinations, product
from time import gmtime, strftime
import matplotlib
import matplotlib.patheffects as PathEffects
from matplotlib import font_manager
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
import numpy as np
from alib import solutions, util
REQUIRED_FOR_PICKLE = solutions # this prevents pycharm from removing this import, which is required for unpickling solutions
OUTPUT_PATH = None
OUTPUT_FILETYPE = "png"
logger = util.get_logger(__name__, make_file=False, propagate=True)
class HeatmapPlotType(object):
    """Enumerates which result data a heatmap specification consumes."""
    Simple_MCF = 0  # a plot only for ClassicMCFResult data
    Simple_RRT = 1  # a plot only for RandomizedRoundingTriumvirate data
    Comparison_MCF_vs_RRT = 2  # a plot comparing ClassicMCFResult with RandomizedRoundingTriumvirate
    VALUE_RANGE = range(Simple_MCF, Comparison_MCF_vs_RRT+1)  # all valid type values
"""
Collection of heatmap plot specifications. Each specification corresponds to a specific plot and describes all essential
information:
- name: the title of the plot
- filename: prefix of the files to be generated
- plot_type: A HeatmapPlotType describing which data is required as input.
- vmin and vmax: minimum and maximum value for the heatmap
- cmap: the colormap that is to be used for the heatmap
- lookup_function: which of the values shall be plotted. the input is a tuple consisting of a baseline and a randomized rounding
solution. The function must return a numeric value or NaN
- metric filter: after having applied the lookup_function (returning a numeric value or NaN) the metric_filter is
applied (if given) and values not matching this function are discarded.
- rounding_function: the function that is applied for displaying the mean values in the heatmap plots
- colorbar_ticks: the tick values (numeric) for the heatmap plot
"""
heatmap_specification_obj = dict(
name="$\\mathrm{MIP}_{\\mathrm{MCF}}$: Objective Gap [%]",
filename="objective_gap",
vmin=0.0,
vmax=16.0,
colorbar_ticks=[x for x in range(0,17,2)],
cmap="Blues",
plot_type=HeatmapPlotType.Simple_MCF,
lookup_function=lambda mcf_result: mcf_result.status.objGap * 100,
metric_filter=lambda obj: (obj >= -0.00001)
)
heatmap_specification_runtime = dict(
name="$\\mathrm{MIP}_{\\mathrm{MCF}}$: Runtime [min]",
filename="runtime",
vmin=0,
vmax=180,
colorbar_ticks=[x for x in range(0,181,20)],
cmap="Greys",
plot_type=HeatmapPlotType.Simple_MCF,
lookup_function=lambda mcf_result: mcf_result.temporal_log.log_entries[-1].globaltime / 60.0,
rounding_function=lambda x: int(round(x))
)
heatmap_specification_embedding_ratio = dict(
name="$\\mathrm{MIP}_{\\mathrm{MCF}}$: Acceptance Ratio [%]",
filename="embedding_ratio",
vmin=0.0,
vmax=100.0,
colorbar_ticks=[x for x in range(0,101,20)],
cmap="Greens",
plot_type=HeatmapPlotType.Simple_MCF,
lookup_function=lambda mcf_result: mcf_result.embedding_ratio * 100.0,
)
heatmap_specification_embedding_ratio_cleaned = dict(
name="$\\mathrm{MIP}_{\\mathrm{MCF}}$: #Embedded / #Feasible [%] ",
filename="cleaned_embedding_ratio",
vmin=0.0,
vmax=100,
colorbar_ticks=[x for x in range(0,101,20)],
cmap="Greens",
plot_type=HeatmapPlotType.Simple_MCF,
lookup_function=(lambda mcf_result:
((mcf_result.embedding_ratio * mcf_result.original_number_requests / mcf_result.nu_real_req) * 100) if mcf_result.nu_real_req > 0.5
else np.NaN)
)
heatmap_specification_nu_real_req = dict(
name="$\\mathrm{MIP}_{\\mathrm{MCF}}$: #Feasible Requests",
filename="real_req",
vmin=0,
vmax=100,
colorbar_ticks=[x for x in range(0,101,20)],
cmap="Greens",
plot_type=HeatmapPlotType.Simple_MCF,
lookup_function=lambda mcf_result: mcf_result.nu_real_req,
)
heatmap_specification_average_node_load = dict(
name="$\\mathrm{MIP}_{\\mathrm{MCF}}$: Avg. Node Load [%]",
filename="avg_node_load",
vmin=0.0,
vmax=60,
colorbar_ticks=[x for x in range(0,61,10)],
cmap="Oranges",
plot_type=HeatmapPlotType.Simple_MCF,
lookup_function=lambda mcf_result: compute_average_node_load(mcf_result),
)
heatmap_specification_average_edge_load = dict(
name="$\\mathrm{MIP}_{\\mathrm{MCF}}$: Avg. Edge Load [%]",
filename="avg_edge_load",
vmin=0.0,
vmax=30,
colorbar_ticks=[x for x in range(0,31,5)],
cmap="Purples",
plot_type=HeatmapPlotType.Simple_MCF,
lookup_function=lambda mcf_result: compute_average_edge_load(mcf_result),
)
heatmap_specification_max_node_load = dict(
name="$\\mathrm{MIP}_{\\mathrm{MCF}}$: Max. Node Load [%]",
filename="max_node_load",
vmin=0.0,
vmax=100,
colorbar_ticks=[x for x in range(0,101,20)],
cmap="Oranges",
plot_type=HeatmapPlotType.Simple_MCF,
lookup_function=lambda mcf_result: compute_max_node_load(mcf_result),
)
heatmap_specification_max_edge_load = dict(
name="$\\mathrm{MIP}_{\\mathrm{MCF}}$: Max. Edge Load [%]",
filename="max_edge_load",
vmin=0.0,
vmax=100,
colorbar_ticks=[x for x in range(0,101,20)],
cmap="Purples",
plot_type=HeatmapPlotType.Simple_MCF,
lookup_function=lambda mcf_result: compute_max_edge_load(mcf_result)
)
heatmap_specification_max_load = dict(
name="$\\mathrm{MIP}_{\\mathrm{MCF}}$: MaxLoad (Edge and Node)",
filename="max_load",
vmin=0.0,
vmax=100,
colorbar_ticks=[x for x in range(0,101,20)],
cmap="Reds",
plot_type=HeatmapPlotType.Simple_MCF,
lookup_function=lambda mcf_result: compute_max_load(mcf_result),
)
heatmap_specification_avg_load = dict(
name="$\\mathrm{MIP}_{\\mathrm{MCF}}$: AvgLoad (Edge and Node)",
filename="avg_load",
vmin=0.0,
vmax=100,
colorbar_ticks=[x for x in range(0,101,20)],
cmap="Reds",
plot_type=HeatmapPlotType.Simple_MCF,
lookup_function=lambda mcf_result: compute_avg_load(mcf_result),
)
heatmap_specification_runtime_randround_preprocessing = dict(
name="$\\mathrm{LP}_{\\mathrm{novel}}$: Runtime Pre-Processing[s]",
filename="randround_runtime_pre",
vmin=0,
vmax=60,
colorbar_ticks=[x for x in range(0,61,10)],
cmap="Greys",
plot_type=HeatmapPlotType.Simple_RRT,
lookup_function=lambda rrt_result: rrt_result.meta_data.time_preprocessing
)
heatmap_specification_runtime_randround_optimization = dict(
name="$\\mathrm{LP}_{\\mathrm{novel}}$: Runtime LP [min]",
filename="randround_runtime_opt",
vmin=0,
vmax=180,
colorbar_ticks=[x for x in range(0,181,20)],
cmap="Greys",
lookup_function=lambda rrt_result: rrt_result.meta_data.time_optimization / 60.0,
plot_type=HeatmapPlotType.Simple_RRT,
rounding_function=lambda x: int(round(x))
)
heatmap_specification_runtime_randround_postprocessing = dict(
name="$\\mathrm{LP}_{\\mathrm{novel}}$: Runtime Post-Processing [s]",
filename="randround_runtime_post",
vmin=0,
vmax=60,
colorbar_ticks=[x for x in range(0,61,10)],
cmap="Greys",
plot_type=HeatmapPlotType.Simple_RRT,
lookup_function=lambda rrt_result: rrt_result.meta_data.time_postprocessing
)
heatmap_specification_runtime_randround_runtime = dict(
name="$\\mathrm{LP}_{\\mathrm{novel}}$: Total Runtime [min]",
filename="randround_runtime_total",
vmin=0,
vmax=10,
colorbar_ticks=[x for x in range(0,11,2)],
cmap="Greys",
plot_type=HeatmapPlotType.Simple_RRT,
lookup_function=lambda rrt_result: (rrt_result.meta_data.time_preprocessing +
rrt_result.meta_data.time_optimization +
rrt_result.meta_data.time_postprocessing) / 60.0,
rounding_function=lambda x: "{0:.2f}".format(x)
)
heatmap_specification_runtime_mdk_runtime = dict(
name="Runtime MDK [s]",
filename="mdk_runtime_total",
vmin=0,
vmax=60,
colorbar_ticks=[x for x in range(0, 61, 10)],
cmap="Greys",
plot_type=HeatmapPlotType.Simple_RRT,
lookup_function=lambda rrt_result: rrt_result.mdk_meta_data.time_preprocessing +
rrt_result.mdk_meta_data.time_optimization +
rrt_result.mdk_meta_data.time_postprocessing,
)
heatmap_specification_comparison_baseline_rr_mdk = dict(
name="Heuristic Rounding Performance \n$\mathrm{Profit}({\mathrm{RR}_{\mathrm{MDK}}}) / \mathrm{Profit}({\mathrm{MIP}_{\mathrm{MCF}}})$ [%] ",
filename="comparison_baseline_rr_mdk",
vmin=50.0,
vmax=100,
colorbar_ticks=[x for x in range(50,101,10)],
cmap="Reds",
plot_type=HeatmapPlotType.Comparison_MCF_vs_RRT,
lookup_function=lambda mcf_result, rrt_result: (
(rrt_result.mdk_result.profit / mcf_result.status.objValue) * 100 if mcf_result.status.objValue > 0.000001
else np.NaN)
)
heatmap_specification_comparison_baseline_rr_heuristic = dict(
name="Heuristic Rounding Performance \n$\mathrm{Profit}({\mathrm{RR}_{\mathrm{Heuristic}}}) / \mathrm{Profit}({\mathrm{MIP}_{\mathrm{MCF}}})$ [%] ",
filename="comparison_baseline_rr_heuristic",
vmin=50.0,
vmax=100,
colorbar_ticks=[x for x in range(50,101,10)],
cmap="Reds",
plot_type=HeatmapPlotType.Comparison_MCF_vs_RRT,
lookup_function=lambda mcf_result, rrt_result: (
(rrt_result.result_wo_violations.profit / mcf_result.status.objValue) * 100 if mcf_result.status.objValue > 0.000001
else np.NaN)
)
heatmap_specification_comparison_baseline_rr_min_load = dict(
name="Heuristic Rounding Performance \n$\mathrm{Profit}({\mathrm{RR}_{\mathrm{MinLoad}}}) / \mathrm{Profit}({\mathrm{MIP}_{\mathrm{MCF}}})$ [%] ",
filename="comparison_baseline_rr_min_load",
vmin=95.0,
vmax=145.0,
colorbar_ticks=[x for x in range(95,146,10)],
cmap="Reds",
plot_type=HeatmapPlotType.Comparison_MCF_vs_RRT,
lookup_function=lambda mcf_result, rrt_result: (
(rrt_result.collection_of_samples_with_violations[0].profit / mcf_result.status.objValue) * 100 if mcf_result.status.objValue > 0.000001
else np.NaN),
rounding_function=lambda x: int(round(x))
)
heatmap_specification_comparison_baseline_rr_max_profit = dict(
name="Heuristic Rounding Performance \n$\mathrm{Profit}({\mathrm{RR}_{\mathrm{MaxProfit}}}) / \mathrm{Profit}({\mathrm{MIP}_{\mathrm{MCF}}})$ [%] ",
filename="comparison_baseline_rr_max_profit",
vmin=95.0,
vmax=145.0,
colorbar_ticks=[x for x in range(95,146,10)],
cmap="Reds",
plot_type=HeatmapPlotType.Comparison_MCF_vs_RRT,
lookup_function=lambda mcf_result, rrt_result: (
(rrt_result.collection_of_samples_with_violations[1].profit / mcf_result.status.objValue) * 100 if mcf_result.status.objValue > 0.000001
else np.NaN),
rounding_function=lambda x: int(round(x))
)
# All heatmap plot specifications known to this module.
global_heatmap_specfications = [
    heatmap_specification_max_node_load,
    heatmap_specification_max_edge_load,
    heatmap_specification_obj,
    heatmap_specification_runtime,
    heatmap_specification_embedding_ratio,
    heatmap_specification_average_node_load,
    heatmap_specification_average_edge_load,
    heatmap_specification_max_load,
    heatmap_specification_avg_load,
    heatmap_specification_nu_real_req,
    heatmap_specification_embedding_ratio_cleaned,
    heatmap_specification_runtime_randround_preprocessing,
    heatmap_specification_runtime_randround_optimization,
    heatmap_specification_runtime_randround_postprocessing,
    heatmap_specification_comparison_baseline_rr_mdk,
    heatmap_specification_comparison_baseline_rr_heuristic,
    heatmap_specification_comparison_baseline_rr_min_load,
    heatmap_specification_comparison_baseline_rr_max_profit,
    heatmap_specification_runtime_randround_runtime,
    heatmap_specification_runtime_mdk_runtime,
]
# The same specifications, grouped by the HeatmapPlotType they require.
heatmap_specifications_per_type = {
    plot_type_item : [heatmap_specification for heatmap_specification in global_heatmap_specfications if heatmap_specification['plot_type'] == plot_type_item]
    for plot_type_item in [HeatmapPlotType.Simple_MCF, HeatmapPlotType.Simple_RRT, HeatmapPlotType.Comparison_MCF_vs_RRT]
}
"""
Axes specifications used for the heatmap plots.
Each specification contains the following elements:
- x_axis_parameter: the parameter name on the x-axis
- y_axis_parameter: the parameter name on the y-axis
- x_axis_title: the legend of the x-axis
- y_axis_title: the legend of the y-axis
- foldername: the folder to store the respective plots in
"""
# Heatmap axes: node resource factor (x) vs. edge resource factor (y).
heatmap_axes_specification_resources = dict(
    x_axis_parameter="node_resource_factor",
    y_axis_parameter="edge_resource_factor",
    x_axis_title="Node Resource Factor",
    y_axis_title="Edge Resource Factor",
    foldername="AXES_RESOURCES"
)
# Heatmap axes: number of requests (x) vs. edge resource factor (y).
heatmap_axes_specification_requests_edge_load = dict(
    x_axis_parameter="number_of_requests",
    y_axis_parameter="edge_resource_factor",
    x_axis_title="Number of Requests",
    y_axis_title="Edge Resource Factor",
    foldername="AXES_NO_REQ_vs_EDGE_RF"
)
# Heatmap axes: number of requests (x) vs. node resource factor (y).
heatmap_axes_specification_requests_node_load = dict(
    x_axis_parameter="number_of_requests",
    y_axis_parameter="node_resource_factor",
    x_axis_title="Number of Requests",
    y_axis_title="Node Resource Factor",
    foldername="AXES_NO_REQ_vs_NODE_RF"
)
# All axes combinations the heatmap plotter iterates over.
global_heatmap_axes_specifications = [heatmap_axes_specification_requests_edge_load,
                                      heatmap_axes_specification_resources,
                                      heatmap_axes_specification_requests_node_load]
def compute_average_node_load(result_summary):
    """Return the mean load over all node resources of *result_summary*.

    Node resources are identified by the resource type "universal" in the
    (resource_type, element) keys of ``result_summary.load``.
    """
    # warning() instead of the deprecated warn() alias.
    logger.warning("In the function compute_average_node_load the single universal node type 'univerval' is assumed."
                   "This should be fixed in the future and might yield wrong results when considering more general "
                   "resource types. Disregard this warning if you know what you are doing.")
    cum_loads = [load for (x, _), load in result_summary.load.items()
                 if x == "universal"]
    return np.mean(cum_loads)
def compute_average_edge_load(result_summary):
    """Return the mean load over all edge resources of *result_summary*.

    Edge resources are all (resource_type, element) keys whose type is not
    the single universal node type.
    """
    # warning() instead of the deprecated warn() alias.
    logger.warning("In the function compute_average_edge_load the single universal node type 'univerval' is assumed."
                   "This should be fixed in the future and might yield wrong results when considering more general "
                   "resource types. Disregard this warning if you know what you are doing.")
    cum_loads = [load for (x, _), load in result_summary.load.items()
                 if x != "universal"]
    return np.mean(cum_loads)
def compute_max_node_load(result_summary):
    """Return the maximum load over all node resources of *result_summary*.

    NOTE(review): raises ValueError when no "universal" node resource is
    present (max of an empty list) -- same as the original behavior.
    """
    # warning() instead of the deprecated warn() alias.
    logger.warning("In the function compute_max_node_load the single universal node type 'univerval' is assumed."
                   "This should be fixed in the future and might yield wrong results when considering more general "
                   "resource types. Disregard this warning if you know what you are doing.")
    cum_loads = [load for (x, _), load in result_summary.load.items()
                 if x == "universal"]
    return max(cum_loads)
def compute_max_edge_load(result_summary):
    """Return the maximum load over all edge resources of *result_summary*.

    NOTE(review): raises ValueError when only node resources are present
    (max of an empty list) -- same as the original behavior.
    """
    # warning() instead of the deprecated warn() alias.
    logger.warning("In the function compute_max_edge_load the single universal node type 'univerval' is assumed."
                   "This should be fixed in the future and might yield wrong results when considering more general "
                   "resource types. Disregard this warning if you know what you are doing.")
    cum_loads = [load for (x, _), load in result_summary.load.items()
                 if x != "universal"]
    return max(cum_loads)
def compute_avg_load(result_summary):
    """Mean load over every (resource, element) pair of *result_summary*."""
    return np.mean(list(result_summary.load.values()))
def compute_max_load(result_summary):
    """Maximum load over every (resource, element) pair of *result_summary*."""
    return max(result_summary.load.values())
def _print_scenario_generation_parameters(dc_baseline, scenario_id):
    # Helper: dump the generation parameters of one scenario.
    generation_parameters = extract_generation_parameters(
        dc_baseline.scenario_parameter_container.scenario_parameter_dict, scenario_id
    )
    for gen_param in generation_parameters:
        print("\t {}".format(gen_param))


def select_scenarios_with_high_objective_gap_or_zero_requests(dc_baseline, algorithm_name,
                                                              output_respective_generation_parameters=True):
    ''' Function to select scenarios with high objective gap (> 100) or no feasible requests.
        This function is not used anymore but is left here for future usage.

        Returns the list of matching scenario ids; a scenario matching both
        criteria appears twice (original behavior preserved).
        Fixed: the original used Python-2-only print statements and passed
        four arguments to a two-placeholder format string.
    '''
    scenario_ids = dc_baseline.algorithm_scenario_solution_dictionary[algorithm_name].keys()
    result = []
    for scenario_id in scenario_ids:
        scenario_solution = dc_baseline.get_solutions_by_scenario_index(scenario_id)[algorithm_name][0]
        scenario_status = scenario_solution.status
        if scenario_status.objGap > 100:
            result.append(scenario_id)
            if output_respective_generation_parameters:
                print("Scenario {} has a very high gap, i.e. a gap of {} due to the objective bound being {} and the objective value being {}".format(
                    scenario_id,
                    scenario_status.objGap,
                    scenario_status.objBound,
                    scenario_status.objValue
                ))
                print("The computation for this scenario took {} seconds.".format(scenario_solution.runtime))
                print("This scenario had the following generation parameters:")
                _print_scenario_generation_parameters(dc_baseline, scenario_id)
        if scenario_solution.nu_real_req < 0.5:
            result.append(scenario_id)
            if output_respective_generation_parameters:
                print("Scenario {} does not contain any feasible requests (objGap {}, objBound {}, objValue {})".format(
                    scenario_id,
                    scenario_status.objGap,
                    scenario_status.objBound,
                    scenario_status.objValue))
                print("The computation for this scenario took {} seconds.".format(scenario_solution.runtime))
                print("This scenario had the following generation parameters:")
                _print_scenario_generation_parameters(dc_baseline, scenario_id)
    print("{} many scenarios experienced a very, very high gap or contained 0 requests".format(len(result)))
    return result
def get_title_for_filter_specifications(filter_specifications):
    """Build a multi-line plot title with one 'param=value; ' entry per
    filter, trimming the trailing '; ' of the last line."""
    lines = ["{}={}; ".format(spec['parameter'], spec['value'])
             for spec in filter_specifications]
    return "\n".join(lines)[:-2]
def extract_parameter_range(scenario_parameter_space_dict, key):
    """Depth-first search for *key* in a scenario parameter space.

    Returns (path, values) where *path* is the list of keys leading to
    *key* (a literal 0 marks the descent into a single-element list) and
    *values* is the value stored there; returns None when *key* does not
    occur. Non-dict input yields None.
    """
    if not isinstance(scenario_parameter_space_dict, dict):
        return None
    # items() instead of the Python-2-only iteritems().
    for generator_name, value in scenario_parameter_space_dict.items():
        if generator_name == key:
            return [key], value
        if isinstance(value, list):
            # Only single-element lists are descended into.
            if len(value) != 1:
                continue
            result = extract_parameter_range(value[0], key)
            if result is not None:
                path, values = result
                return [generator_name, 0] + path, values
        elif isinstance(value, dict):
            result = extract_parameter_range(value, key)
            if result is not None:
                path, values = result
                return [generator_name] + path, values
    return None
def extract_generation_parameters(scenario_parameter_dict, scenario_id):
    """Collect all key paths in *scenario_parameter_dict* whose leaf set
    contains *scenario_id*.

    Returns a list of paths (each a list of keys) or None when the id is
    not found. The aggregate key "all" is skipped.
    """
    if not isinstance(scenario_parameter_dict, dict):
        return None
    results = []
    # items() instead of the Python-2-only iteritems().
    for generator_name, value in scenario_parameter_dict.items():
        if isinstance(value, set) and generator_name != "all" and scenario_id in value:
            # NOTE(review): mirrors the original behavior -- the first
            # matching set short-circuits the whole search.
            return [[generator_name]]
        if isinstance(value, list):
            # Only single-element lists are descended into.
            if len(value) != 1:
                continue
            result = extract_generation_parameters(value[0], scenario_id)
            if result is not None:
                for atomic_result in result:
                    results.append([generator_name] + atomic_result)
        elif isinstance(value, dict):
            result = extract_generation_parameters(value, scenario_id)
            if result is not None:
                for atomic_result in result:
                    results.append([generator_name] + atomic_result)
    return results if results else None
def lookup_scenarios_having_specific_values(scenario_parameter_space_dict, path, value):
    """Follow *path* into the scenario parameter dict and return the set of
    scenario ids stored under *value*.

    String path components select dictionary keys; a literal ``0`` (as produced
    by extract_parameter_range for single-element lists) is simply skipped,
    since the scenario parameter dict does not wrap sub-dicts in lists.
    """
    current_path = path[:]
    current_dict = scenario_parameter_space_dict
    while len(current_path) > 0:
        # str instead of the Python-2-only basestring: paths produced within
        # this module only ever contain plain strings and the literal 0
        if isinstance(current_path[0], str):
            current_dict = current_dict[current_path[0]]
            current_path.pop(0)
        elif current_path[0] == 0:
            current_path.pop(0)
    return current_dict[value]
def lookup_scenario_parameter_room_dicts_on_path(scenario_parameter_space_dict, path):
    """Return the list of containers encountered while following *path* through
    the scenario parameter room (starting with the root dict itself).

    String components index dicts, integer components index lists; anything
    else raises RuntimeError. One container is appended per path component.
    """
    current_path = path[:]
    current_dict_or_list = scenario_parameter_space_dict
    dicts_on_path = []
    while len(current_path) > 0:
        dicts_on_path.append(current_dict_or_list)
        # str instead of the Python-2-only basestring (see module port note)
        if isinstance(current_path[0], str):
            current_dict_or_list = current_dict_or_list[current_path[0]]
            current_path.pop(0)
        elif isinstance(current_path[0], int):
            current_dict_or_list = current_dict_or_list[int(current_path[0])]
            current_path.pop(0)
        else:
            raise RuntimeError("Could not lookup dicts.")
    return dicts_on_path
def load_reduced_pickle(reduced_pickle):
    """Deserialize and return the contents of the pickle file at the given path."""
    with open(reduced_pickle, "rb") as pickle_file:
        return pickle.load(pickle_file)
class AbstractPlotter(object):
    ''' Abstract Plotter interface providing functionality used by the majority of plotting classes of this module.

    Concrete subclasses implement plot_figure(); this base class provides
    output path construction, scenario filtering based on parameter values,
    and the show/save plumbing around matplotlib.
    '''

    def __init__(self,
                 output_path,
                 output_filetype,
                 scenario_solution_storage,
                 algorithm_id,
                 execution_id,
                 show_plot=False,
                 save_plot=True,
                 overwrite_existing_files=False,
                 forbidden_scenario_ids=None,
                 paper_mode=True
                 ):
        """Store plotting configuration and cache storage lookups.

        forbidden_scenario_ids: scenario ids excluded from all plots
                                (falsy -> empty set).
        paper_mode: when True, titles/legends are reduced for publication.
        """
        self.output_path = output_path
        self.output_filetype = output_filetype
        self.scenario_solution_storage = scenario_solution_storage
        self.algorithm_id = algorithm_id
        self.execution_id = execution_id
        # cached shortcuts into the solution storage's parameter container
        self.scenario_parameter_dict = self.scenario_solution_storage.scenario_parameter_container.scenario_parameter_dict
        self.scenarioparameter_room = self.scenario_solution_storage.scenario_parameter_container.scenarioparameter_room
        self.all_scenario_ids = set(scenario_solution_storage.algorithm_scenario_solution_dictionary[self.algorithm_id].keys())
        self.show_plot = show_plot
        self.save_plot = save_plot
        self.overwrite_existing_files = overwrite_existing_files
        if not forbidden_scenario_ids:
            self.forbidden_scenario_ids = set()
        else:
            self.forbidden_scenario_ids = forbidden_scenario_ids
        self.paper_mode = paper_mode

    def _construct_output_path_and_filename(self, title, filter_specifications=None):
        """Return (output_path, filename) for a general plot with the given title.

        NOTE(review): uses the module-level OUTPUT_PATH / OUTPUT_FILETYPE
        constants rather than self.output_path / self.output_filetype --
        confirm this is intended.
        """
        filter_spec_path = ""
        filter_filename = "no_filter.{}".format(OUTPUT_FILETYPE)
        if filter_specifications:
            filter_spec_path, filter_filename = self._construct_path_and_filename_for_filter_spec(filter_specifications)
        base = os.path.normpath(OUTPUT_PATH)
        date = strftime("%Y-%m-%d", gmtime())
        output_path = os.path.join(base, date, OUTPUT_FILETYPE, "general_plots", filter_spec_path)
        filename = os.path.join(output_path, title + "_" + filter_filename)
        return output_path, filename

    def _construct_path_and_filename_for_filter_spec(self, filter_specifications):
        """Encode the filter specifications into a sub-directory path and a file name."""
        filter_path = ""
        filter_filename = ""
        for spec in filter_specifications:
            filter_path = os.path.join(filter_path, (spec['parameter'] + "_" + str(spec['value'])))
            filter_filename += spec['parameter'] + "_" + str(spec['value']) + "_"
        # replace the trailing underscore with the file extension
        filter_filename = filter_filename[:-1] + "." + OUTPUT_FILETYPE
        return filter_path, filter_filename

    def _obtain_scenarios_based_on_filters(self, filter_specifications=None):
        """Return the ids of all scenarios that match every filter specification
        (the full id set when no filters are given)."""
        allowed_scenario_ids = set(self.all_scenario_ids)
        sps = self.scenarioparameter_room
        spd = self.scenario_parameter_dict
        if filter_specifications:
            for filter_specification in filter_specifications:
                filter_path, _ = extract_parameter_range(sps, filter_specification['parameter'])
                filter_indices = lookup_scenarios_having_specific_values(spd, filter_path,
                                                                         filter_specification['value'])
                allowed_scenario_ids = allowed_scenario_ids & filter_indices
        return allowed_scenario_ids

    def _obtain_scenarios_based_on_axis(self, axis_path, axis_value):
        """Return the ids of all scenarios having axis_value at axis_path."""
        spd = self.scenario_parameter_dict
        return lookup_scenarios_having_specific_values(spd, axis_path, axis_value)

    def _show_and_or_save_plots(self, output_path, filename):
        """Save and/or display the current matplotlib figure, then close it."""
        plt.tight_layout()
        if self.save_plot:
            if not os.path.exists(output_path):
                os.makedirs(output_path)
            # print() as a function call: the former py2 print statement is a
            # SyntaxError under Python 3; the single-argument call form
            # behaves identically under both interpreters.
            print("saving plot: {}".format(filename))
            plt.savefig(filename)
        if self.show_plot:
            plt.show()
        plt.close()

    def plot_figure(self, filter_specifications):
        """Abstract hook: subclasses must render their plots here."""
        raise RuntimeError("This is an abstract method")
class SingleHeatmapPlotter(AbstractPlotter):
    """Plots heatmaps for a single algorithm: one per (metric, axes) combination.

    Each cell of a heatmap holds the (rounded) mean of a metric over all
    scenarios matching the cell's x- and y-axis parameter values, after
    applying the optional filter specifications.
    """

    def __init__(self,
                 output_path,
                 output_filetype,
                 scenario_solution_storage,
                 algorithm_id,
                 execution_id,
                 heatmap_plot_type,
                 list_of_axes_specifications = global_heatmap_axes_specifications,
                 list_of_metric_specifications = None,
                 show_plot=False,
                 save_plot=True,
                 overwrite_existing_files=False,
                 forbidden_scenario_ids=None,
                 paper_mode=True
                 ):
        """Validate the plot type and select the metric specifications.

        heatmap_plot_type must lie in HeatmapPlotType.VALUE_RANGE; when no
        metric specifications are given, all specifications registered for
        that plot type are used.
        """
        super(SingleHeatmapPlotter, self).__init__(output_path, output_filetype, scenario_solution_storage,
                                                   algorithm_id, execution_id, show_plot, save_plot,
                                                   overwrite_existing_files, forbidden_scenario_ids, paper_mode)
        if heatmap_plot_type is None or heatmap_plot_type not in HeatmapPlotType.VALUE_RANGE:
            raise RuntimeError("heatmap_plot_type {} is not a valid input. Must be of type HeatmapPlotType.".format(heatmap_plot_type))
        self.heatmap_plot_type = heatmap_plot_type
        if not list_of_axes_specifications:
            raise RuntimeError("Axes need to be provided.")
        self.list_of_axes_specifications = list_of_axes_specifications
        if not list_of_metric_specifications:
            # default: every metric specification registered for this plot type
            self.list_of_metric_specifications = heatmap_specifications_per_type[self.heatmap_plot_type]
        else:
            # each explicitly given metric must agree with the chosen plot type
            for metric_specification in list_of_metric_specifications:
                if metric_specification.plot_type != self.heatmap_plot_type:
                    raise RuntimeError("The metric specification {} does not agree with the plot type {}.".format(metric_specification, self.heatmap_plot_type))
            self.list_of_metric_specifications = list_of_metric_specifications

    def _construct_output_path_and_filename(self, metric_specification, heatmap_axes_specification, filter_specifications=None):
        """Derive output directory and file name from metric, axes and filters.

        NOTE(review): uses the module-level OUTPUT_PATH / OUTPUT_FILETYPE
        constants rather than the instance attributes -- confirm intended.
        """
        filter_spec_path = ""
        filter_filename = "no_filter.{}".format(OUTPUT_FILETYPE)
        if filter_specifications:
            filter_spec_path, filter_filename = self._construct_path_and_filename_for_filter_spec(filter_specifications)
        base = os.path.normpath(OUTPUT_PATH)
        date = strftime("%Y-%m-%d", gmtime())
        axes_foldername = heatmap_axes_specification['foldername']
        output_path = os.path.join(base, date, OUTPUT_FILETYPE, axes_foldername, filter_spec_path)
        filename = os.path.join(output_path, metric_specification['filename'] + "_" + filter_filename)
        return output_path, filename

    def plot_figure(self, filter_specifications):
        """Generate one heatmap per (axes specification, metric specification) pair."""
        for axes_specification in self.list_of_axes_specifications:
            for metric_specfication in self.list_of_metric_specifications:
                self.plot_single_heatmap_general(metric_specfication, axes_specification, filter_specifications)

    def _lookup_solutions(self, scenario_ids):
        """Return 1-tuples containing this algorithm's solution for each scenario id."""
        return [(self.scenario_solution_storage.get_solutions_by_scenario_index(x)[self.algorithm_id][self.execution_id],) for x in scenario_ids]

    def plot_single_heatmap_general(self,
                                    heatmap_metric_specification,
                                    heatmap_axes_specification,
                                    filter_specifications=None):
        """Render one heatmap of the metric's mean over the two axis parameters.

        Skips generation when the target file already exists (unless
        overwrite_existing_files is set) or when a filter fixes one of the
        axis parameters.
        """
        # data extraction
        sps = self.scenarioparameter_room
        spd = self.scenario_parameter_dict
        output_path, filename = self._construct_output_path_and_filename(heatmap_metric_specification,
                                                                         heatmap_axes_specification,
                                                                         filter_specifications)
        logger.debug("output_path is {};\t filename is {}".format(output_path, filename))
        if not self.overwrite_existing_files and os.path.exists(filename):
            logger.info("Skipping generation of {} as this file already exists".format(filename))
            return
        # check if filter specification conflicts with axes specification
        if filter_specifications is not None:
            for filter_specification in filter_specifications:
                if (heatmap_axes_specification['x_axis_parameter'] == filter_specification['parameter'] or
                        heatmap_axes_specification['y_axis_parameter'] == filter_specification['parameter']):
                    logger.debug("Skipping generation of {} as the filter specification conflicts with the axes specification.")
                    return
        path_x_axis, xaxis_parameters = extract_parameter_range(sps, heatmap_axes_specification['x_axis_parameter'])
        path_y_axis, yaxis_parameters = extract_parameter_range(sps, heatmap_axes_specification['y_axis_parameter'])
        # for heatmap plot
        xaxis_parameters.sort()
        yaxis_parameters.sort()
        # all heatmap values will be stored in X
        X = np.zeros((len(yaxis_parameters), len(xaxis_parameters)))
        column_labels = yaxis_parameters
        row_labels = xaxis_parameters
        fig, ax = plt.subplots(figsize=(5, 4))
        # track how many observations fed each cell, for the non-paper title
        min_number_of_observed_values = 10000000000000
        max_number_of_observed_values = 0
        observed_values = np.empty(0)
        for x_index, x_val in enumerate(xaxis_parameters):
            # all scenario indices which has x_val as xaxis parameter (e.g. node_resource_factor = 0.5
            scenario_ids_matching_x_axis = lookup_scenarios_having_specific_values(spd, path_x_axis, x_val)
            for y_index, y_val in enumerate(yaxis_parameters):
                scenario_ids_matching_y_axis = lookup_scenarios_having_specific_values(spd, path_y_axis, y_val)
                filter_indices = self._obtain_scenarios_based_on_filters(filter_specifications)
                # cell = (matches x) AND (matches y) AND (passes filters) minus forbidden
                scenario_ids_to_consider = (scenario_ids_matching_x_axis &
                                            scenario_ids_matching_y_axis &
                                            filter_indices) - self.forbidden_scenario_ids
                solutions = self._lookup_solutions(scenario_ids_to_consider)
                values = [heatmap_metric_specification['lookup_function'](*solution) for solution in solutions]
                if 'metric_filter' in heatmap_metric_specification:
                    # optional per-value filter to discard outliers/invalid values
                    values = [value for value in values if heatmap_metric_specification['metric_filter'](value)]
                observed_values = np.append(observed_values, values)
                if len(values) < min_number_of_observed_values:
                    min_number_of_observed_values = len(values)
                if len(values) > max_number_of_observed_values:
                    max_number_of_observed_values = len(values)
                logger.debug("values are {}".format(values))
                m = np.nanmean(values)
                logger.debug("mean is {}".format(m))
                if 'rounding_function' in heatmap_metric_specification:
                    rounded_m = heatmap_metric_specification['rounding_function'](m)
                else:
                    rounded_m = float("{0:.1f}".format(round(m, 2)))
                # annotate the cell with its (rounded) mean value
                plt.text(x_index + .5,
                         y_index + .45,
                         rounded_m,
                         verticalalignment="center",
                         horizontalalignment="center",
                         fontsize=17.5,
                         fontname="Courier New",
                         # family="monospace",
                         color='w',
                         path_effects=[PathEffects.withStroke(linewidth=4, foreground="k")]
                         )
                X[y_index, x_index] = rounded_m
        if min_number_of_observed_values == max_number_of_observed_values:
            solution_count_string = "{} values per square".format(min_number_of_observed_values)
        else:
            solution_count_string = "between {} and {} values per square".format(min_number_of_observed_values,
                                                                                 max_number_of_observed_values)
        if self.paper_mode:
            ax.set_title(heatmap_metric_specification['name'], fontsize=17)
        else:
            # verbose title: filters, observation counts and min/mean/max stats
            title = heatmap_metric_specification['name'] + "\n"
            if filter_specifications:
                title += get_title_for_filter_specifications(filter_specifications) + "\n"
            title += solution_count_string + "\n"
            title += "min: {:.2f}; mean: {:.2f}; max: {:.2f}".format(np.nanmin(observed_values),
                                                                     np.nanmean(observed_values),
                                                                     np.nanmax(observed_values))
            ax.set_title(title)
        heatmap = ax.pcolor(X,
                            cmap=heatmap_metric_specification['cmap'],
                            vmin=heatmap_metric_specification['vmin'],
                            vmax=heatmap_metric_specification['vmax'])
        if not self.paper_mode:
            fig.colorbar(heatmap, label=heatmap_metric_specification['name'] + ' - mean in blue')
        else:
            ticks = heatmap_metric_specification['colorbar_ticks']
            tick_labels = [str(tick).ljust(3) for tick in ticks]
            cbar = fig.colorbar(heatmap)
            cbar.set_ticks(ticks)
            cbar.set_ticklabels(tick_labels)
            #for label in cbar.ax.get_yticklabels():
            #    label.set_fontproperties(font_manager.FontProperties(family="Courier New",weight='bold'))
            cbar.ax.tick_params(labelsize=15.5)
        # center the tick labels on the heatmap cells
        ax.set_yticks(np.arange(X.shape[0]) + 0.5, minor=False)
        ax.set_xticks(np.arange(X.shape[1]) + 0.5, minor=False)
        ax.set_xticklabels(row_labels, minor=False, fontsize=15.5)
        ax.set_xlabel(heatmap_axes_specification['x_axis_title'], fontsize=16)
        ax.set_ylabel(heatmap_axes_specification['y_axis_title'], fontsize=16)
        ax.set_yticklabels(column_labels, minor=False, fontsize=15.5)
        self._show_and_or_save_plots(output_path, filename)
class ComparisonHeatmapPlotter(SingleHeatmapPlotter):
    """Heatmap plotter that contrasts the solutions of two result storages.

    Per scenario, _lookup_solutions yields a pair (own solution, other
    solution), which the comparison metrics consume. Only the
    HeatmapPlotType.Comparison_MCF_vs_RRT plot type is supported.
    """

    def __init__(self,
                 output_path,
                 output_filetype,
                 scenario_solution_storage,
                 algorithm_id,
                 execution_id,
                 other_scenario_solution_storage,
                 other_algorithm_id,
                 other_execution_id,
                 heatmap_plot_type,
                 list_of_axes_specifications = global_heatmap_axes_specifications,
                 list_of_metric_specifications = None,
                 show_plot=False,
                 save_plot=True,
                 overwrite_existing_files=False,
                 forbidden_scenario_ids=None,
                 paper_mode=True
                 ):
        super(ComparisonHeatmapPlotter, self).__init__(
            output_path, output_filetype, scenario_solution_storage, algorithm_id,
            execution_id, heatmap_plot_type, list_of_axes_specifications,
            list_of_metric_specifications, show_plot, save_plot,
            overwrite_existing_files, forbidden_scenario_ids, paper_mode)
        self.other_scenario_solution_storage = other_scenario_solution_storage
        self.other_algorithm_id = other_algorithm_id
        self.other_execution_id = other_execution_id
        if heatmap_plot_type != HeatmapPlotType.Comparison_MCF_vs_RRT:
            raise RuntimeError("Only comparison heatmap plots are allowed")

    def _lookup_solutions(self, scenario_ids):
        """Return (own_solution, other_solution) pairs for the given scenario ids."""
        solution_pairs = []
        for scenario_id in scenario_ids:
            own_solutions = self.scenario_solution_storage.get_solutions_by_scenario_index(scenario_id)
            other_solutions = self.other_scenario_solution_storage.get_solutions_by_scenario_index(scenario_id)
            solution_pairs.append((own_solutions[self.algorithm_id][self.execution_id],
                                   other_solutions[self.other_algorithm_id][self.other_execution_id]))
        return solution_pairs
class ComparisonBaselineVsRRT_Scatter_and_ECDF(AbstractPlotter):
def __init__(self,
output_path,
output_filetype,
baseline_solution_storage,
baseline_algorithm_id,
baseline_execution_id,
randround_solution_storage,
randround_algorithm_id,
randround_execution_id,
show_plot=False,
save_plot=True,
overwrite_existing_files=False,
forbidden_scenario_ids=None,
paper_mode=True
):
super(ComparisonBaselineVsRRT_Scatter_and_ECDF, self).__init__(output_path, output_filetype, baseline_solution_storage,
baseline_algorithm_id, baseline_execution_id, show_plot, save_plot,
overwrite_existing_files, forbidden_scenario_ids, paper_mode)
if randround_algorithm_id != "RandomizedRoundingTriumvirate":
raise RuntimeError("The capacity violation plot can only be applied to RandomizedRoundingTriumvirate results.")
self.randround_solution_storage = randround_solution_storage
self.randround_algorithm_id = randround_algorithm_id
self.randround_execution_id = randround_execution_id
self._randround_data_names = ['min_aug', 'max_profit', 'wo_viol', 'mdk']
self._randround_data_names_with_baseline = ['min_aug', 'max_profit', 'wo_viol', 'mdk', "baseline"]
self.label_names = {'min_aug': "min. augmentation",
'max_profit': "max. profit",
'wo_viol': "rounding w/o augmentation",
'mdk': "multi-dimensional knapsack",
'baseline': "baseline"}
self.math_label_names = {'min_aug': "\mathrm{RR}_{\mathrm{MinLoad}}",
'max_profit': "\mathrm{RR}_{\mathrm{MaxProfit}}",
'wo_viol': "\mathrm{RR}_{\mathrm{Heuristic}}",
'mdk': "\mathrm{RR}_{\mathrm{MDK}}",
'baseline': "\mathrm{MIP}_{\mathrm{MCF}}"}
self.markers = {'min_aug': "o",
'max_profit': "v",
'wo_viol': "x",
'mdk': "+",
'baseline': "^"}
self.colors = {'min_aug': "salmon",
'max_profit': "darkred",
'wo_viol': "g",
'mdk': "b",
'baseline': "k"}
self._randround_data_lookups = {'min_aug': (lambda x: x.collection_of_samples_with_violations[0]),
'max_profit': (lambda x: x.collection_of_samples_with_violations[1]),
'wo_viol': (lambda x: x.result_wo_violations),
'mdk': (lambda x: x.mdk_result)}
filter_path_number_of_requests, list_number_of_requests = extract_parameter_range(self.scenarioparameter_room, "number_of_requests")
self._number_of_requests_list = list_number_of_requests
self._filter_path_number_of_requests = filter_path_number_of_requests
self._nan_dict = {randround_data_name : np.NaN for randround_data_name in self._randround_data_names}
self._profit_result_data_list = {randround_data_name: np.NaN for randround_data_name in
self._randround_data_names}
self._profit_result = self._nan_dict
self._load_result = {randround_data_name: [np.NaN, np.NaN] for randround_data_name in self._randround_data_names}
def _lookup_baseline_solution(self, scenario_id):
return self.scenario_solution_storage.get_solutions_by_scenario_index(scenario_id)[self.algorithm_id][self.execution_id]
def _lookup_randround_solution(self, scenario_id):
return self.randround_solution_storage.get_solutions_by_scenario_index(scenario_id)[self.randround_algorithm_id][self.randround_execution_id]
def _compute_profits_relative_to_baseline(self, baseline_solution, randround_solution):
baseline_objective = baseline_solution.status.objValue
if baseline_objective > 0.00001:
self._profit_result = self._profit_result_data_list
for randround_data_name in self._randround_data_names:
randround_solution_for_data_name = self._randround_data_lookups[randround_data_name](randround_solution)
self._profit_result[randround_data_name] = (randround_solution_for_data_name.profit / baseline_objective)*100.0
else:
logger.warn(
"The baseline objective of is zero. discarding value.")
self._profit_result = self._nan_dict
def _compute_maximal_load_for_randround(self, randround_solution):
for randround_data_name in self._randround_data_names:
randround_solution_for_data_name = self._randround_data_lookups[randround_data_name](randround_solution)
self._load_result[randround_data_name][0] = randround_solution_for_data_name.max_node_load * 100.0
self._load_result[randround_data_name][1] = randround_solution_for_data_name.max_edge_load * 100.0
def _extract_first_dual_bound_from_baseline_solution(self, baseline_solution):
log_time_root = 100000000000
root_entry = baseline_solution.temporal_log.root_relaxation_entry
root_entry_dual_bound = -(10 ** 80)
first_log_entry_dual_bound = -(10 ** 80)
if root_entry is not None:
root_entry_dual_bound = root_entry.data.objective_bound
else:
logger.debug("The root entry is none...")
first_log_entry = baseline_solution.temporal_log.log_entries[0]
if first_log_entry is not None:
first_log_entry_dual_bound = first_log_entry.data.objective_bound
else:
logger.debug("The first entry of the temporal log is none...")
result = max(root_entry_dual_bound, first_log_entry_dual_bound)
if result < -(10 **40):
logger.warn("The dual bound of the MIP is garbage. discarding it.")
return np.nan
else:
return result
def _extract_final_dual_bound_from_baseline_solution(self, baseline_solution):
best_bnd = (10 **80)
for log_entry in baseline_solution.temporal_log.log_entries:
if log_entry.data.objective_bound < best_bnd:
best_bnd = log_entry.data.objective_bound
if best_bnd > 10**70:
logger.warn("Best bound of MIP could not be determined.")
return np.NaN
else:
return best_bnd
def _compute_relative_dual_bound_to_randround_ROOT(self, baseline_solution, randround_solution):
baseline_dual_bound = self._extract_first_dual_bound_from_baseline_solution(baseline_solution)
randround_dual_bound = randround_solution.meta_data.status.objValue
if randround_dual_bound > 0.0001:
result = baseline_dual_bound / randround_dual_bound
if result > 1000:
logger.warn("The relative dual bound {} is very high. It's a result from {} {}. discarding it.".format(result, baseline_dual_bound, randround_dual_bound))
return np.nan
return result
else:
logger.warn(
"The randround dual bound is zero. discarding value.")
return np.NaN
def _compute_relative_dual_bound_to_randround_FINAL(self, baseline_solution, randround_solution):
baseline_dual_bound = self._extract_final_dual_bound_from_baseline_solution(baseline_solution)
randround_dual_bound = randround_solution.meta_data.status.objValue
if randround_dual_bound > 0.0001:
result = baseline_dual_bound / randround_dual_bound
if result > 1000:
logger.warn("The relative dual bound {} is very high. It's a result from {} {}. discarding it.".format(result, baseline_dual_bound, randround_dual_bound))
return np.nan
return result
else:
logger.warn(
"The randround dual bound is zero. discarding value.")
return np.NaN
def compute_relative_profits_arrays(self, list_of_scenarios):
number_of_entries = len(list_of_scenarios)
result = {randround_data_name: np.full(number_of_entries, np.nan) for randround_data_name in
self._randround_data_names}
for i, scenario_id in enumerate(list_of_scenarios):
baseline_solution = self._lookup_baseline_solution(scenario_id)
randround_solution = self._lookup_randround_solution(scenario_id)
self._compute_profits_relative_to_baseline(baseline_solution, randround_solution)
for randround_data_name in self._randround_data_names:
result[randround_data_name][i] = self._profit_result[randround_data_name]
return result
def compute_maximal_load_arrays(self, list_of_scenarios):
number_of_entries = len(list_of_scenarios)
result = {data_name: [np.full(number_of_entries, np.nan), np.full(number_of_entries, np.nan)]
for data_name in self._randround_data_names_with_baseline}
for i, scenario_id in enumerate(list_of_scenarios):
baseline_solution = self._lookup_baseline_solution(scenario_id)
randround_solution = self._lookup_randround_solution(scenario_id)
self._compute_maximal_load_for_randround(randround_solution)
for randround_data_name in self._randround_data_names:
result[randround_data_name][0][i] = self._load_result[randround_data_name][0]
result[randround_data_name][1][i] = self._load_result[randround_data_name][1]
result['baseline'][0][i] = compute_max_node_load(baseline_solution)
result['baseline'][1][i] = compute_max_edge_load(baseline_solution)
return result
def compute_dual_bound_array(self, list_of_scenarios):
result = {number_of_requests: None for number_of_requests in
self._number_of_requests_list}
for number_of_requests in self._number_of_requests_list:
scenario_ids_with_right_number_of_requests = self._obtain_scenarios_based_on_filters([{"parameter": "number_of_requests", "value": number_of_requests}])
scenario_ids_with_right_number_of_requests &= set(list_of_scenarios)
result[number_of_requests] = [np.full(len(scenario_ids_with_right_number_of_requests), np.nan), np.full(len(scenario_ids_with_right_number_of_requests), np.nan)]
for i, scenario_id in enumerate(scenario_ids_with_right_number_of_requests):
baseline_solution = self._lookup_baseline_solution(scenario_id)
randround_solution = self._lookup_randround_solution(scenario_id)
result[number_of_requests][0][i] = self._compute_relative_dual_bound_to_randround_ROOT(baseline_solution, randround_solution)
result[number_of_requests][1][i] = self._compute_relative_dual_bound_to_randround_FINAL(baseline_solution, randround_solution)
return result
def plot_figure(self, filter_specifications):
self.plot_figure_ecdf_load(filter_specifications)
self.plot_figure_ecdf_objective(filter_specifications)
self.plot_bound_ecdf(filter_specifications)
self.plot_scatter_obj_vs_load(filter_specifications)
    def plot_figure_ecdf_load(self, filter_specifications):
        """Plot the ECDF of maximal node and edge resource loads for every
        rounding variant and the baseline, then save/show the figure.

        Skipped when the target file already exists and overwriting is off.
        """
        output_filename = "ECDF_load"
        output_path, filename = self._construct_output_path_and_filename(output_filename,
                                                                         filter_specifications)
        logger.debug("output_path is {};\t filename is {}".format(output_path, filename))
        if not self.overwrite_existing_files and os.path.exists(filename):
            logger.info("Skipping generation of {} as this file already exists".format(filename))
            return
        scenario_ids = self._obtain_scenarios_based_on_filters(filter_specifications)
        if self.forbidden_scenario_ids:
            scenario_ids = scenario_ids - self.forbidden_scenario_ids
        result = self.compute_maximal_load_arrays(scenario_ids)
        fix, ax = plt.subplots(figsize=(5, 4))
        # gray proxy lines distinguish node (-.) from edge (-) loads in the legend
        #cum_line = matplotlib.lines.Line2D([], [], color='k', linestyle="-", label='total')
        node_line = matplotlib.lines.Line2D([], [], color='gray', linestyle="-.", label='node')
        edge_line = matplotlib.lines.Line2D([], [], color='gray', linestyle="-", label='edge')
        second_legend_handlers = []
        max_observed_value = 0
        for data_name in self._randround_data_names_with_baseline:
            #sorted_data_cum = np.sort(np.maximum(result[data_name][0], result[data_name][1]))
            sorted_data_node = np.sort(result[data_name][0])
            sorted_data_edge = np.sort(result[data_name][1])
            max_observed_value = np.maximum(max_observed_value, sorted_data_node[-1])
            max_observed_value = np.maximum(max_observed_value, sorted_data_edge[-1])
            # empirical CDF: the i-th smallest value is assigned probability i/n
            yvals = np.arange(1,len(sorted_data_node)+1) / float(len(sorted_data_node))
            second_legend_handlers.append(matplotlib.lines.Line2D([], [], color=self.colors[data_name], linestyle="-", label="${}$".format(self.math_label_names[data_name])))
            #ax.plot(sorted_data_cum, yvals, color=self.colors[data_name], linestyle="-")
            ax.plot(sorted_data_node, yvals, color=self.colors[data_name], linestyle="-.")
            ax.plot(sorted_data_edge, yvals, color=self.colors[data_name], linestyle="-")
        # two legends: resource type (node/edge) and algorithm variant
        first_legend = plt.legend(handles=[node_line, edge_line], loc=4, fontsize=14, title="Resource", handletextpad=.35, borderaxespad=0.175, borderpad=0.2)
        plt.setp(first_legend.get_title(), fontsize=14)
        plt.gca().add_artist(first_legend)
        second_legend = plt.legend(handles=second_legend_handlers, loc=2, fontsize=14, title="Algorithm", handletextpad=.35, borderaxespad=0.175, borderpad=0.2)
        plt.setp(second_legend.get_title(), fontsize=14)
        ax.set_xlim(10, max_observed_value * 1.1)
        ax.set_xscale("log", basex=10)
        ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
        ax.set_xticks([10, 50, 100, 200, 500], minor=False)
        ax.set_xticks([20,30,40,50,60,70,80,90, 300,400], minor=True)
        ax.set_title("ECDF of Resource Loads",fontsize=17)
        ax.set_xlabel("Maximum Resource Load [%]", fontsize=16)
        ax.set_ylabel("ECDF", fontsize=16)
        ax.grid(True, which="both")
        ax.tick_params(axis='both', which='major', labelsize=15.5)
        ax.tick_params(axis='x', which='minor', labelsize=15.5)
        plt.grid(True, which="both")
        plt.tight_layout()
        self._show_and_or_save_plots(output_path, filename)
    def plot_figure_ecdf_objective(self, filter_specifications):
        """Plot the ECDF of each rounding variant's profit relative to the
        baseline MIP objective (in percent), then save/show the figure.

        Skipped when the target file already exists and overwriting is off.
        """
        output_filename = "ECDF_objective"
        output_path, filename = self._construct_output_path_and_filename(output_filename,
                                                                         filter_specifications)
        logger.debug("output_path is {};\t filename is {}".format(output_path, filename))
        if not self.overwrite_existing_files and os.path.exists(filename):
            logger.info("Skipping generation of {} as this file already exists".format(filename))
            return
        scenario_ids = self._obtain_scenarios_based_on_filters(filter_specifications)
        if self.forbidden_scenario_ids:
            scenario_ids = scenario_ids - self.forbidden_scenario_ids
        result = self.compute_relative_profits_arrays(scenario_ids)
        fix, ax = plt.subplots(figsize=(5, 4))
        max_observed_value = 0
        for data_name in self._randround_data_names:
            sorted_data = np.sort(result[data_name])
            max_observed_value = np.maximum(max_observed_value, sorted_data[-1])
            # empirical CDF: the i-th smallest value is assigned probability i/n
            yvals = np.arange(1,len(sorted_data)+1) / float(len(sorted_data))
            ax.plot(sorted_data, yvals, color=self.colors[data_name], linestyle="-", label="${}$".format(self.math_label_names[data_name]))
        leg = plt.legend(loc=4, title="Algorithm", fontsize=14, handletextpad=.35, borderaxespad=0.175, borderpad=0.2)
        plt.setp(leg.get_title(), fontsize=14)
        ax.set_title("ECDF of Relative Achieved Profit", fontsize=17)
        ax.set_xlabel("$\mathrm{Profit}({\mathrm{RR}_{\mathrm{Alg}}}) / \mathrm{Profit}({\mathrm{MIP}_{\mathrm{MCF}}})$ [%] ", fontsize=16)
        ax.set_ylabel("ECDF", fontsize=16)
        ax.grid(True, which="both")
        ax.tick_params(axis='both', which='major', labelsize=15.5)
        #ax.set_xscale("log", basex=10)
        ax.set_xlim(20,max_observed_value*1.1)
        plt.tight_layout()
        self._show_and_or_save_plots(output_path, filename)
    def plot_bound_ecdf(self, filter_specifications):
        """Plot ECDFs of the MIP dual bound divided by the novel LP bound, one
        curve pair (initial/final MIP bound) per request count.

        Skipped when the target file exists, or when a filter already fixes
        number_of_requests (the curves are grouped by exactly that parameter).
        """
        output_filename = "ECDF_bound"
        output_path, filename = self._construct_output_path_and_filename(output_filename,
                                                                         filter_specifications)
        logger.debug("output_path is {};\t filename is {}".format(output_path, filename))
        if not self.overwrite_existing_files and os.path.exists(filename):
            logger.info("Skipping generation of {} as this file already exists".format(filename))
            return
        if filter_specifications:
            for filter_specification in filter_specifications:
                if filter_specification["parameter"] == "number_of_requests":
                    logger.info("Skipping generation of {} as this conflicts with the filter specification {}".format(output_filename, filter_specification))
                    return
        scenario_ids = self._obtain_scenarios_based_on_filters(filter_specifications)
        if self.forbidden_scenario_ids:
            scenario_ids = scenario_ids - self.forbidden_scenario_ids
        result = self.compute_dual_bound_array(scenario_ids)
        fix, ax = plt.subplots(figsize=(5, 4))
        #ax.set_xscale("log", basex=10)
        colors = ['k','g', 'b', 'r']
        max_observed_value = 0
        number_requests_legend_handlers = []
        for i, number_of_requests in enumerate(self._number_of_requests_list):
            # solid line: ratio based on the MIP's initial (root) bound; NaNs dropped
            result_for_requests = result[number_of_requests][0]
            sorted_data = np.sort(result_for_requests[~np.isnan(result_for_requests)])
            max_observed_value = np.maximum(max_observed_value, sorted_data[-1])
            yvals = np.arange(1,len(sorted_data)+1) / float(len(sorted_data))
            ax.plot(sorted_data, yvals, color=colors[i], linestyle="-", label="{}".format(number_of_requests), linewidth=1.8)
            # dotted line: ratio based on the MIP's final bound
            result_for_requests = result[number_of_requests][1]
            sorted_data = np.sort(result_for_requests[~np.isnan(result_for_requests)])
            max_observed_value = np.maximum(max_observed_value, sorted_data[-1])
            yvals = np.arange(1, len(sorted_data) + 1) / float(len(sorted_data))
            ax.plot(sorted_data, yvals, color=colors[i], linestyle=":",
                    linewidth=2.4)
            number_requests_legend_handlers.append(matplotlib.lines.Line2D([], [], color=colors[i], linestyle="-", label='{}'.format(number_of_requests)))
        # two legends: bound type (initial/final) and request count
        root_legend_handlers = [matplotlib.lines.Line2D([], [], color='gray', linestyle="-", label='initial'), matplotlib.lines.Line2D([], [], color='gray', linestyle=":", label='final')]
        first_legend = plt.legend(title="Bound($\mathrm{MIP}_{\mathrm{MCF}})$", handles=root_legend_handlers, loc=(0.225,0.0125), fontsize=14, handletextpad=0.35, borderaxespad=0.175, borderpad=0.2)
        plt.setp(first_legend.get_title(), fontsize='15')
        plt.gca().add_artist(first_legend)
        o_leg = plt.legend(handles=number_requests_legend_handlers, loc=4, title="#Requests", fontsize=14, handletextpad=.35, borderaxespad=0.175, borderpad=0.2)
        plt.setp(o_leg.get_title(), fontsize='15')
        ax.set_title("$\mathrm{LP}_{\mathrm{novel}}$: Formulation Strength", fontsize=17)
        ax.set_xlabel("Bound($\mathrm{MIP}_{\mathrm{MCF}}$) / Bound($\mathrm{LP}_{\mathrm{novel}}$)", fontsize=16)
        ax.set_ylabel("ECDF", fontsize=16)
        ax.set_xlim(0.65,max_observed_value*1.05)
        for tick in ax.xaxis.get_major_ticks():
            tick.label.set_fontsize(15.5)
        for tick in ax.yaxis.get_major_ticks():
            tick.label.set_fontsize(15.5)
        ax.set_xticks([ 1, 1.5, 2, 2.5, 3, 3.5], minor=False)
        ax.set_xticks([0.75, 1.25, 1.5, 1.75, 2.25, 2.5, 2.75, 3.25, 3.5], minor=True)
        ax.set_yticks([x*0.1 for x in range(1,10)], minor=True)
        ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
        ax.set_xticklabels([], minor=True)
        ax.grid(True, which="both", linestyle=":")
        # gridlines = ax.get_xgridlines() + ax.get_ygridlines()
        # for line in gridlines:
        #     line.set_linestyle(':')
        self._show_and_or_save_plots(output_path, filename)
def plot_scatter_obj_vs_load(self, filter_specifications):
    """Create one scatter plot per rounding heuristic: relative profit (x) vs. maximal load (y).

    For every combination of node and edge resource factors the matching scenarios are
    collected; each point corresponds to one scenario and is colored according to its
    edge resource factor (ERF). Points falling outside the per-heuristic bounding box
    are counted and -- in non-paper mode -- reported in the plot title.

    :param filter_specifications: list of {'parameter': ..., 'value': ...} dicts, used only
                                  for constructing the output path and filename
    :return: None
    """
    # axis limits ([x_min, x_max], [y_min, y_max]) per rounding heuristic
    bounding_boxes = {'min_aug': [[50,140],[85,235]],
                      'max_profit': [[95,210],[90,505]],
                      'wo_viol': [[30 ,105],[75,102]],
                      'mdk': [[15,105],[75,102]]}

    for data_to_plot in self._randround_data_names:
        bounding_box_x = bounding_boxes[data_to_plot][0]
        bounding_box_y = bounding_boxes[data_to_plot][1]

        output_filename = "SCATTER_obj_vs_load_{}".format(data_to_plot)
        output_path, filename = self._construct_output_path_and_filename(output_filename,
                                                                         filter_specifications)
        logger.debug("output_path is {};\t filename is {}".format(output_path, filename))
        if not self.overwrite_existing_files and os.path.exists(filename):
            logger.info("Skipping generation of {} as this file already exists".format(filename))
            # bugfix: 'continue' instead of 'return' -- an already existing file must only
            # skip this heuristic's plot, not abort the plots of all remaining heuristics
            continue

        fig, ax = plt.subplots(figsize=(5, 4))

        _, node_resource_factors = extract_parameter_range(self.scenarioparameter_room,
                                                           "node_resource_factor")
        _, edge_resource_factors = extract_parameter_range(self.scenarioparameter_room,
                                                           "edge_resource_factor")

        # map the edge-resource-factor index onto the 'inferno' colormap
        color_norm = matplotlib.colors.Normalize(vmin=0, vmax=6)
        scalar_map = matplotlib.cm.ScalarMappable(norm=color_norm, cmap='inferno')

        observed_values_relative_profit = np.empty(0)
        observed_values_load = np.empty(0)
        number_of_not_shown_values = 0

        for i, nrf in enumerate(node_resource_factors):
            for j, erf in enumerate(edge_resource_factors):
                scenario_ids = self._obtain_scenarios_based_on_filters([] +
                                                                       [
                                                                           {"parameter": "node_resource_factor",
                                                                            "value": nrf},
                                                                           {"parameter": "edge_resource_factor",
                                                                            "value": erf},
                                                                       ])
                if self.forbidden_scenario_ids:
                    scenario_ids = scenario_ids - self.forbidden_scenario_ids
                list_of_scenarios = list(scenario_ids)

                result_relative_profits = self.compute_relative_profits_arrays(list_of_scenarios)
                raw_result_loads = self.compute_maximal_load_arrays(list_of_scenarios)
                # maximal load is the elementwise maximum of node load ([0]) and edge load ([1])
                result_cum_loads = np.maximum(raw_result_loads[data_to_plot][0],
                                              raw_result_loads[data_to_plot][1])

                observed_values_load = np.append(observed_values_load, result_cum_loads)
                observed_values_relative_profit = np.append(observed_values_relative_profit,
                                                            result_relative_profits[data_to_plot])

                # count points that fall outside the fixed bounding box
                for y_value in np.nditer(result_cum_loads):
                    if y_value < bounding_box_y[0] or y_value > bounding_box_y[1]:
                        number_of_not_shown_values += 1
                for x_value in np.nditer(result_relative_profits[data_to_plot]):
                    if x_value < bounding_box_x[0] or x_value > bounding_box_x[1]:
                        number_of_not_shown_values += 1

                ax.scatter(result_relative_profits[data_to_plot],
                           result_cum_loads,
                           c=matplotlib.colors.to_hex(scalar_map.to_rgba(j)),
                           marker="s",
                           label="{}".format(erf),
                           s=6, linewidths=.1, alpha=.8)

            # add the ERF legend only once (after the first row of NRF values has been plotted)
            if i == 0:
                leg = plt.legend(fontsize=14, markerscale=2, title="ERF", handletextpad=0, borderaxespad=0.175, borderpad=0.2)
                for lh in leg.legendHandles:
                    lh.set_alpha(1.0)
                plt.setp(leg.get_title(), fontsize=14)

        ax.set_xlim(bounding_box_x)
        ax.set_ylim(bounding_box_y)

        ax.tick_params(axis='both', which='major', labelsize=15.5)
        ax.tick_params(axis='x', which='minor', labelsize=15.5)

        plt.grid(True, which="both")

        if self.paper_mode:
            ax.set_title("Vanilla Rounding Performance", fontsize=17)
        else:
            # verbose title: summary statistics plus the number of cropped points
            title = "Vanilla Rounding Performance\n"
            title += "profit: min: {:.2f}; mean: {:.2f}; max: {:.2f}\n".format(np.nanmin(observed_values_relative_profit),
                                                                               np.nanmean(observed_values_relative_profit),
                                                                               np.nanmax(observed_values_relative_profit))
            title += "loads: min: {:.2f}; mean: {:.2f}; max: {:.2f}\n".format(np.nanmin(observed_values_load),
                                                                              np.nanmean(observed_values_load),
                                                                              np.nanmax(observed_values_load))
            title += "{} of {} points lie outside the displayed area".format(number_of_not_shown_values, len(observed_values_relative_profit))
            ax.set_title(title, fontsize=10)

        xlabel = "$\mathrm{Profit}({" + self.math_label_names[
            data_to_plot] + "}) / \mathrm{Profit}({\mathrm{MIP}_{\mathrm{MCF}}})$ [%]"
        ax.set_xlabel(xlabel, fontsize=16)
        ylabel = "$\mathrm{Max\,Load}\,({" + self.math_label_names[data_to_plot] + "})$ [%]"
        ax.set_ylabel(ylabel, fontsize=16)

        ax.get_xaxis().set_major_formatter(matplotlib.ticker.FormatStrFormatter("%d"))
        ax.get_xaxis().set_minor_formatter(matplotlib.ticker.FormatStrFormatter("%d"))

        self._show_and_or_save_plots(output_path, filename)
def _construct_filter_specs(scenario_parameter_space_dict, parameter_filter_keys, maxdepth=3):
parameter_value_dic = dict()
for parameter in parameter_filter_keys:
_, parameter_values = extract_parameter_range(scenario_parameter_space_dict,
parameter)
parameter_value_dic[parameter] = parameter_values
# print parameter_value_dic.values()
result_list = [None]
for i in range(1, maxdepth + 1):
for combi in combinations(parameter_value_dic, i):
values = []
for element_of_combi in combi:
values.append(parameter_value_dic[element_of_combi])
for v in product(*values):
filter = []
for (parameter, value) in zip(combi, v):
filter.append({'parameter': parameter, 'value': value})
result_list.append(filter)
return result_list
def evaluate_baseline_and_randround(dc_baseline,
                                    baseline_algorithm_id,
                                    baseline_execution_config,
                                    dc_randround,
                                    randround_algorithm_id,
                                    randround_execution_config,
                                    exclude_generation_parameters=None,
                                    parameter_filter_keys=None,
                                    show_plot=False,
                                    save_plot=True,
                                    overwrite_existing_files=True,
                                    forbidden_scenario_ids=None,
                                    papermode=True,
                                    maxdepthfilter=2,
                                    output_path="./",
                                    output_filetype="png"):
    """ Main function for evaluation, creating plots and saving them in a specific directory hierarchy.
    A large variety of plots is created. For heatmaps, a generic plotter is used while for general
    comparison plots (ECDF and scatter) an own class is used. The plots that shall be generated cannot
    be controlled at the moment but the respective plotters can be easily adjusted.

    :param dc_baseline: unpickled datacontainer of baseline experiments (e.g. MIP)
    :param baseline_algorithm_id: algorithm id of the baseline algorithm
    :param baseline_execution_config: execution config (numeric) of the baseline algorithm execution
    :param dc_randround: unpickled datacontainer of randomized rounding experiments
    :param randround_algorithm_id: algorithm id of the randround algorithm
    :param randround_execution_config: execution config (numeric) of the randround algorithm execution
    :param exclude_generation_parameters: specific generation parameters that shall be excluded from the evaluation.
                                          These won't show in the plots and will also not be shown on axis labels etc.
    :param parameter_filter_keys: name of parameters according to which the results shall be filtered
    :param show_plot: Boolean: shall plots be shown
    :param save_plot: Boolean: shall the plots be saved
    :param overwrite_existing_files: shall existing files be overwritten?
    :param forbidden_scenario_ids: list / set of scenario ids that shall not be considered in the evaluation
    :param papermode: nicely layouted plots (papermode) or rather additional information?
    :param maxdepthfilter: length of filter permutations that shall be considered
    :param output_path: path to which the results shall be written
    :param output_filetype: filetype supported by matplotlib to export figures
    :return: None
    """
    if forbidden_scenario_ids is None:
        forbidden_scenario_ids = set()

    if exclude_generation_parameters is not None:
        # Python 2: dict.iteritems(). For each excluded parameter value, the matching scenarios
        # are added to the forbidden set and the value is removed from the parameter room.
        for key, values_to_exclude in exclude_generation_parameters.iteritems():
            parameter_filter_path, parameter_values = extract_parameter_range(
                dc_baseline.scenario_parameter_container.scenarioparameter_room, key)

            # the last dict on the path is the one holding the parameter's value list
            parameter_dicts_baseline = lookup_scenario_parameter_room_dicts_on_path(
                dc_baseline.scenario_parameter_container.scenarioparameter_room, parameter_filter_path)
            parameter_dicts_randround = lookup_scenario_parameter_room_dicts_on_path(
                dc_randround.scenario_parameter_container.scenarioparameter_room, parameter_filter_path)

            for value_to_exclude in values_to_exclude:

                if value_to_exclude not in parameter_values:
                    raise RuntimeError("The value {} is not contained in the list of parameter values {} for key {}".format(
                        value_to_exclude, parameter_values, key
                    ))

                # add respective scenario ids to the set of forbidden scenario ids
                forbidden_scenario_ids.update(set(lookup_scenarios_having_specific_values(
                    dc_baseline.scenario_parameter_container.scenario_parameter_dict, parameter_filter_path, value_to_exclude)))

            # remove the respective values from the scenario parameter room such that these are not considered when
            # constructing e.g. axes
            parameter_dicts_baseline[-1][key] = [value for value in parameter_dicts_baseline[-1][key] if
                                                 value not in values_to_exclude]
            parameter_dicts_randround[-1][key] = [value for value in parameter_dicts_randround[-1][key] if
                                                  value not in values_to_exclude]

    if parameter_filter_keys is not None:
        filter_specs = _construct_filter_specs(dc_baseline.scenario_parameter_container.scenarioparameter_room,
                                               parameter_filter_keys,
                                               maxdepth=maxdepthfilter)
    else:
        filter_specs = [None]

    # initialize plotters
    # NOTE(review): the plotters receive output_path/output_filetype here, but AbstractPlotter's
    # path construction reads the module-level OUTPUT_PATH/OUTPUT_FILETYPE globals -- confirm
    # both are kept in sync by the caller.
    baseline_plotter = SingleHeatmapPlotter(output_path=output_path,
                                            output_filetype=output_filetype,
                                            scenario_solution_storage=dc_baseline,
                                            algorithm_id=baseline_algorithm_id,
                                            execution_id=baseline_execution_config,
                                            heatmap_plot_type=HeatmapPlotType.Simple_MCF,
                                            show_plot=show_plot,
                                            save_plot=save_plot,
                                            overwrite_existing_files=overwrite_existing_files,
                                            forbidden_scenario_ids=forbidden_scenario_ids,
                                            paper_mode=papermode)

    randround_plotter = SingleHeatmapPlotter(output_path=output_path,
                                             output_filetype=output_filetype,
                                             scenario_solution_storage=dc_randround,
                                             algorithm_id=randround_algorithm_id,
                                             execution_id=randround_execution_config,
                                             heatmap_plot_type=HeatmapPlotType.Simple_RRT,
                                             show_plot=show_plot,
                                             save_plot=save_plot,
                                             overwrite_existing_files=overwrite_existing_files,
                                             forbidden_scenario_ids=forbidden_scenario_ids,
                                             paper_mode=papermode)

    comparison_plotter = ComparisonHeatmapPlotter(output_path=output_path,
                                                  output_filetype=output_filetype,
                                                  scenario_solution_storage=dc_baseline,
                                                  algorithm_id=baseline_algorithm_id,
                                                  execution_id=baseline_execution_config,
                                                  other_scenario_solution_storage=dc_randround,
                                                  other_algorithm_id=randround_algorithm_id,
                                                  other_execution_id=randround_execution_config,
                                                  heatmap_plot_type=HeatmapPlotType.Comparison_MCF_vs_RRT,
                                                  show_plot=show_plot,
                                                  save_plot=save_plot,
                                                  overwrite_existing_files=overwrite_existing_files,
                                                  forbidden_scenario_ids=forbidden_scenario_ids,
                                                  paper_mode=papermode)

    ecdf_capacity_violation_plotter = ComparisonBaselineVsRRT_Scatter_and_ECDF(output_path=output_path,
                                                                               output_filetype=output_filetype,
                                                                               baseline_solution_storage=dc_baseline,
                                                                               baseline_algorithm_id=baseline_algorithm_id,
                                                                               baseline_execution_id=baseline_execution_config,
                                                                               randround_solution_storage=dc_randround,
                                                                               randround_algorithm_id=randround_algorithm_id,
                                                                               randround_execution_id=randround_execution_config,
                                                                               show_plot=show_plot,
                                                                               save_plot=save_plot,
                                                                               overwrite_existing_files=overwrite_existing_files,
                                                                               forbidden_scenario_ids=forbidden_scenario_ids,
                                                                               paper_mode=papermode)

    plotters = [ecdf_capacity_violation_plotter, baseline_plotter, randround_plotter, comparison_plotter]

    # every plotter generates its whole plot family once per filter specification
    for filter_spec in filter_specs:
        for plotter in plotters:
            plotter.plot_figure(filter_spec)
|
# MIT License
#
# Copyright (c) 2016-2018 <NAME>, <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""This is the evaluation and plotting module.
This module handles all plotting related evaluation.
"""
import os
import pickle
import sys
from collections import namedtuple
from itertools import combinations, product
from time import gmtime, strftime
import matplotlib
import matplotlib.patheffects as PathEffects
from matplotlib import font_manager
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
import numpy as np
from alib import solutions, util
# Referencing the solutions module keeps the import alive: unpickling stored results requires it.
REQUIRED_FOR_PICKLE = solutions # this prevents pycharm from removing this import, which is required for unpickling solutions

# Root directory for all generated plots; presumably set by the caller before any plotting -- confirm.
OUTPUT_PATH = None
# matplotlib export format / file extension used for all generated figures
OUTPUT_FILETYPE = "png"

# module-level logger obtained from alib.util
logger = util.get_logger(__name__, make_file=False, propagate=True)
class HeatmapPlotType(object):
    """Plain int-constant enumeration of the supported heatmap plot kinds."""
    Simple_MCF = 0              # a plot only for ClassicMCFResult data
    Simple_RRT = 1              # a plot only for RandomizedRoundingTriumvirate data
    Comparison_MCF_vs_RRT = 2   # a plot comparing ClassicMCFResult with RandomizedRoundingTriumvirate
    # all valid plot type values
    VALUE_RANGE = range(Simple_MCF, Comparison_MCF_vs_RRT+1)
"""
Collection of heatmap plot specifications. Each specification corresponds to a specific plot and describes all essential
information:
- name: the title of the plot
- filename: prefix of the files to be generated
- plot_type: A HeatmapPlotType describing which data is required as input.
- vmin and vmax: minimum and maximum value for the heatmap
- cmap: the colormap that is to be used for the heatmap
- lookup_function: which of the values shall be plotted. the input is a tuple consisting of a baseline and a randomized rounding
solution. The function must return a numeric value or NaN
- metric filter: after having applied the lookup_function (returning a numeric value or NaN) the metric_filter is
applied (if given) and values not matching this function are discarded.
- rounding_function: the function that is applied for displaying the mean values in the heatmap plots
- colorbar_ticks: the tick values (numeric) for the heatmap plot
"""
heatmap_specification_obj = dict(
name="$\\mathrm{MIP}_{\\mathrm{MCF}}$: Objective Gap [%]",
filename="objective_gap",
vmin=0.0,
vmax=16.0,
colorbar_ticks=[x for x in range(0,17,2)],
cmap="Blues",
plot_type=HeatmapPlotType.Simple_MCF,
lookup_function=lambda mcf_result: mcf_result.status.objGap * 100,
metric_filter=lambda obj: (obj >= -0.00001)
)
heatmap_specification_runtime = dict(
name="$\\mathrm{MIP}_{\\mathrm{MCF}}$: Runtime [min]",
filename="runtime",
vmin=0,
vmax=180,
colorbar_ticks=[x for x in range(0,181,20)],
cmap="Greys",
plot_type=HeatmapPlotType.Simple_MCF,
lookup_function=lambda mcf_result: mcf_result.temporal_log.log_entries[-1].globaltime / 60.0,
rounding_function=lambda x: int(round(x))
)
heatmap_specification_embedding_ratio = dict(
name="$\\mathrm{MIP}_{\\mathrm{MCF}}$: Acceptance Ratio [%]",
filename="embedding_ratio",
vmin=0.0,
vmax=100.0,
colorbar_ticks=[x for x in range(0,101,20)],
cmap="Greens",
plot_type=HeatmapPlotType.Simple_MCF,
lookup_function=lambda mcf_result: mcf_result.embedding_ratio * 100.0,
)
heatmap_specification_embedding_ratio_cleaned = dict(
name="$\\mathrm{MIP}_{\\mathrm{MCF}}$: #Embedded / #Feasible [%] ",
filename="cleaned_embedding_ratio",
vmin=0.0,
vmax=100,
colorbar_ticks=[x for x in range(0,101,20)],
cmap="Greens",
plot_type=HeatmapPlotType.Simple_MCF,
lookup_function=(lambda mcf_result:
((mcf_result.embedding_ratio * mcf_result.original_number_requests / mcf_result.nu_real_req) * 100) if mcf_result.nu_real_req > 0.5
else np.NaN)
)
heatmap_specification_nu_real_req = dict(
name="$\\mathrm{MIP}_{\\mathrm{MCF}}$: #Feasible Requests",
filename="real_req",
vmin=0,
vmax=100,
colorbar_ticks=[x for x in range(0,101,20)],
cmap="Greens",
plot_type=HeatmapPlotType.Simple_MCF,
lookup_function=lambda mcf_result: mcf_result.nu_real_req,
)
heatmap_specification_average_node_load = dict(
name="$\\mathrm{MIP}_{\\mathrm{MCF}}$: Avg. Node Load [%]",
filename="avg_node_load",
vmin=0.0,
vmax=60,
colorbar_ticks=[x for x in range(0,61,10)],
cmap="Oranges",
plot_type=HeatmapPlotType.Simple_MCF,
lookup_function=lambda mcf_result: compute_average_node_load(mcf_result),
)
heatmap_specification_average_edge_load = dict(
name="$\\mathrm{MIP}_{\\mathrm{MCF}}$: Avg. Edge Load [%]",
filename="avg_edge_load",
vmin=0.0,
vmax=30,
colorbar_ticks=[x for x in range(0,31,5)],
cmap="Purples",
plot_type=HeatmapPlotType.Simple_MCF,
lookup_function=lambda mcf_result: compute_average_edge_load(mcf_result),
)
heatmap_specification_max_node_load = dict(
name="$\\mathrm{MIP}_{\\mathrm{MCF}}$: Max. Node Load [%]",
filename="max_node_load",
vmin=0.0,
vmax=100,
colorbar_ticks=[x for x in range(0,101,20)],
cmap="Oranges",
plot_type=HeatmapPlotType.Simple_MCF,
lookup_function=lambda mcf_result: compute_max_node_load(mcf_result),
)
heatmap_specification_max_edge_load = dict(
name="$\\mathrm{MIP}_{\\mathrm{MCF}}$: Max. Edge Load [%]",
filename="max_edge_load",
vmin=0.0,
vmax=100,
colorbar_ticks=[x for x in range(0,101,20)],
cmap="Purples",
plot_type=HeatmapPlotType.Simple_MCF,
lookup_function=lambda mcf_result: compute_max_edge_load(mcf_result)
)
heatmap_specification_max_load = dict(
name="$\\mathrm{MIP}_{\\mathrm{MCF}}$: MaxLoad (Edge and Node)",
filename="max_load",
vmin=0.0,
vmax=100,
colorbar_ticks=[x for x in range(0,101,20)],
cmap="Reds",
plot_type=HeatmapPlotType.Simple_MCF,
lookup_function=lambda mcf_result: compute_max_load(mcf_result),
)
heatmap_specification_avg_load = dict(
name="$\\mathrm{MIP}_{\\mathrm{MCF}}$: AvgLoad (Edge and Node)",
filename="avg_load",
vmin=0.0,
vmax=100,
colorbar_ticks=[x for x in range(0,101,20)],
cmap="Reds",
plot_type=HeatmapPlotType.Simple_MCF,
lookup_function=lambda mcf_result: compute_avg_load(mcf_result),
)
heatmap_specification_runtime_randround_preprocessing = dict(
name="$\\mathrm{LP}_{\\mathrm{novel}}$: Runtime Pre-Processing[s]",
filename="randround_runtime_pre",
vmin=0,
vmax=60,
colorbar_ticks=[x for x in range(0,61,10)],
cmap="Greys",
plot_type=HeatmapPlotType.Simple_RRT,
lookup_function=lambda rrt_result: rrt_result.meta_data.time_preprocessing
)
heatmap_specification_runtime_randround_optimization = dict(
name="$\\mathrm{LP}_{\\mathrm{novel}}$: Runtime LP [min]",
filename="randround_runtime_opt",
vmin=0,
vmax=180,
colorbar_ticks=[x for x in range(0,181,20)],
cmap="Greys",
lookup_function=lambda rrt_result: rrt_result.meta_data.time_optimization / 60.0,
plot_type=HeatmapPlotType.Simple_RRT,
rounding_function=lambda x: int(round(x))
)
heatmap_specification_runtime_randround_postprocessing = dict(
name="$\\mathrm{LP}_{\\mathrm{novel}}$: Runtime Post-Processing [s]",
filename="randround_runtime_post",
vmin=0,
vmax=60,
colorbar_ticks=[x for x in range(0,61,10)],
cmap="Greys",
plot_type=HeatmapPlotType.Simple_RRT,
lookup_function=lambda rrt_result: rrt_result.meta_data.time_postprocessing
)
heatmap_specification_runtime_randround_runtime = dict(
name="$\\mathrm{LP}_{\\mathrm{novel}}$: Total Runtime [min]",
filename="randround_runtime_total",
vmin=0,
vmax=10,
colorbar_ticks=[x for x in range(0,11,2)],
cmap="Greys",
plot_type=HeatmapPlotType.Simple_RRT,
lookup_function=lambda rrt_result: (rrt_result.meta_data.time_preprocessing +
rrt_result.meta_data.time_optimization +
rrt_result.meta_data.time_postprocessing) / 60.0,
rounding_function=lambda x: "{0:.2f}".format(x)
)
heatmap_specification_runtime_mdk_runtime = dict(
name="Runtime MDK [s]",
filename="mdk_runtime_total",
vmin=0,
vmax=60,
colorbar_ticks=[x for x in range(0, 61, 10)],
cmap="Greys",
plot_type=HeatmapPlotType.Simple_RRT,
lookup_function=lambda rrt_result: rrt_result.mdk_meta_data.time_preprocessing +
rrt_result.mdk_meta_data.time_optimization +
rrt_result.mdk_meta_data.time_postprocessing,
)
heatmap_specification_comparison_baseline_rr_mdk = dict(
name="Heuristic Rounding Performance \n$\mathrm{Profit}({\mathrm{RR}_{\mathrm{MDK}}}) / \mathrm{Profit}({\mathrm{MIP}_{\mathrm{MCF}}})$ [%] ",
filename="comparison_baseline_rr_mdk",
vmin=50.0,
vmax=100,
colorbar_ticks=[x for x in range(50,101,10)],
cmap="Reds",
plot_type=HeatmapPlotType.Comparison_MCF_vs_RRT,
lookup_function=lambda mcf_result, rrt_result: (
(rrt_result.mdk_result.profit / mcf_result.status.objValue) * 100 if mcf_result.status.objValue > 0.000001
else np.NaN)
)
heatmap_specification_comparison_baseline_rr_heuristic = dict(
name="Heuristic Rounding Performance \n$\mathrm{Profit}({\mathrm{RR}_{\mathrm{Heuristic}}}) / \mathrm{Profit}({\mathrm{MIP}_{\mathrm{MCF}}})$ [%] ",
filename="comparison_baseline_rr_heuristic",
vmin=50.0,
vmax=100,
colorbar_ticks=[x for x in range(50,101,10)],
cmap="Reds",
plot_type=HeatmapPlotType.Comparison_MCF_vs_RRT,
lookup_function=lambda mcf_result, rrt_result: (
(rrt_result.result_wo_violations.profit / mcf_result.status.objValue) * 100 if mcf_result.status.objValue > 0.000001
else np.NaN)
)
heatmap_specification_comparison_baseline_rr_min_load = dict(
name="Heuristic Rounding Performance \n$\mathrm{Profit}({\mathrm{RR}_{\mathrm{MinLoad}}}) / \mathrm{Profit}({\mathrm{MIP}_{\mathrm{MCF}}})$ [%] ",
filename="comparison_baseline_rr_min_load",
vmin=95.0,
vmax=145.0,
colorbar_ticks=[x for x in range(95,146,10)],
cmap="Reds",
plot_type=HeatmapPlotType.Comparison_MCF_vs_RRT,
lookup_function=lambda mcf_result, rrt_result: (
(rrt_result.collection_of_samples_with_violations[0].profit / mcf_result.status.objValue) * 100 if mcf_result.status.objValue > 0.000001
else np.NaN),
rounding_function=lambda x: int(round(x))
)
heatmap_specification_comparison_baseline_rr_max_profit = dict(
name="Heuristic Rounding Performance \n$\mathrm{Profit}({\mathrm{RR}_{\mathrm{MaxProfit}}}) / \mathrm{Profit}({\mathrm{MIP}_{\mathrm{MCF}}})$ [%] ",
filename="comparison_baseline_rr_max_profit",
vmin=95.0,
vmax=145.0,
colorbar_ticks=[x for x in range(95,146,10)],
cmap="Reds",
plot_type=HeatmapPlotType.Comparison_MCF_vs_RRT,
lookup_function=lambda mcf_result, rrt_result: (
(rrt_result.collection_of_samples_with_violations[1].profit / mcf_result.status.objValue) * 100 if mcf_result.status.objValue > 0.000001
else np.NaN),
rounding_function=lambda x: int(round(x))
)
# All heatmap specifications defined above; the order determines the plot generation order.
global_heatmap_specfications = [
    heatmap_specification_max_node_load,
    heatmap_specification_max_edge_load,
    heatmap_specification_obj,
    heatmap_specification_runtime,
    heatmap_specification_embedding_ratio,
    heatmap_specification_average_node_load,
    heatmap_specification_average_edge_load,
    heatmap_specification_max_load,
    heatmap_specification_avg_load,
    heatmap_specification_nu_real_req,
    heatmap_specification_embedding_ratio_cleaned,
    heatmap_specification_runtime_randround_preprocessing,
    heatmap_specification_runtime_randround_optimization,
    heatmap_specification_runtime_randround_postprocessing,
    heatmap_specification_comparison_baseline_rr_mdk,
    heatmap_specification_comparison_baseline_rr_heuristic,
    heatmap_specification_comparison_baseline_rr_min_load,
    heatmap_specification_comparison_baseline_rr_max_profit,
    heatmap_specification_runtime_randround_runtime,
    heatmap_specification_runtime_mdk_runtime,
]

# The same specifications grouped by their HeatmapPlotType, so each plotter only picks
# the plots whose input data it can provide.
heatmap_specifications_per_type = {
    plot_type_item : [heatmap_specification for heatmap_specification in global_heatmap_specfications if heatmap_specification['plot_type'] == plot_type_item]
    for plot_type_item in [HeatmapPlotType.Simple_MCF, HeatmapPlotType.Simple_RRT, HeatmapPlotType.Comparison_MCF_vs_RRT]
}
"""
Axes specifications used for the heatmap plots.
Each specification contains the following elements:
- x_axis_parameter: the parameter name on the x-axis
- y_axis_parameter: the parameter name on the y-axis
- x_axis_title: the legend of the x-axis
- y_axis_title: the legend of the y-axis
- foldername: the folder to store the respective plots in
"""
# x: node resource factor, y: edge resource factor
heatmap_axes_specification_resources = dict(
    x_axis_parameter="node_resource_factor",
    y_axis_parameter="edge_resource_factor",
    x_axis_title="Node Resource Factor",
    y_axis_title="Edge Resource Factor",
    foldername="AXES_RESOURCES"
)

# x: number of requests, y: edge resource factor
heatmap_axes_specification_requests_edge_load = dict(
    x_axis_parameter="number_of_requests",
    y_axis_parameter="edge_resource_factor",
    x_axis_title="Number of Requests",
    y_axis_title="Edge Resource Factor",
    foldername="AXES_NO_REQ_vs_EDGE_RF"
)

# x: number of requests, y: node resource factor
heatmap_axes_specification_requests_node_load = dict(
    x_axis_parameter="number_of_requests",
    y_axis_parameter="node_resource_factor",
    x_axis_title="Number of Requests",
    y_axis_title="Node Resource Factor",
    foldername="AXES_NO_REQ_vs_NODE_RF"
)

# every heatmap plot is generated once per axes specification listed here
global_heatmap_axes_specifications = [heatmap_axes_specification_requests_edge_load,
                                      heatmap_axes_specification_resources,
                                      heatmap_axes_specification_requests_node_load]
def compute_average_node_load(result_summary):
    """Return the mean load over all node resources.

    Node resources are identified by the resource type "universal" (the single assumed
    node type); every other resource type is treated as an edge resource.

    :param result_summary: result object exposing a ``load`` dict keyed by (resource_type, element) tuples
    :return: mean node load (np.nan, with a RuntimeWarning, if no node resource exists)
    """
    # fixed typo in the original message: 'univerval' -> 'universal'
    logger.warn("In the function compute_average_node_load the single universal node type 'universal' is assumed."
                "This should be fixed in the future and might yield wrong results when considering more general "
                "resource types. Disregard this warning if you know what you are doing.")
    node_loads = [load for (resource_type, _), load in result_summary.load.items()
                  if resource_type == "universal"]
    return np.mean(node_loads)
def compute_average_edge_load(result_summary):
    """Return the mean load over all edge resources.

    Edge resources are all resources whose type is not the (single assumed) node
    type "universal".

    :param result_summary: result object exposing a ``load`` dict keyed by (resource_type, element) tuples
    :return: mean edge load (np.nan, with a RuntimeWarning, if no edge resource exists)
    """
    # fixed typo in the original message: 'univerval' -> 'universal'
    logger.warn("In the function compute_average_edge_load the single universal node type 'universal' is assumed."
                "This should be fixed in the future and might yield wrong results when considering more general "
                "resource types. Disregard this warning if you know what you are doing.")
    edge_loads = [load for (resource_type, _), load in result_summary.load.items()
                  if resource_type != "universal"]
    return np.mean(edge_loads)
def compute_max_node_load(result_summary):
    """Return the maximal load over all node resources (resource type "universal").

    :param result_summary: result object exposing a ``load`` dict keyed by (resource_type, element) tuples
    :return: maximal node load; raises ValueError if no node resource exists (as the original did)
    """
    # fixed typo in the original message: 'univerval' -> 'universal'
    logger.warn("In the function compute_max_node_load the single universal node type 'universal' is assumed."
                "This should be fixed in the future and might yield wrong results when considering more general "
                "resource types. Disregard this warning if you know what you are doing.")
    node_loads = [load for (resource_type, _), load in result_summary.load.items()
                  if resource_type == "universal"]
    return max(node_loads)
def compute_max_edge_load(result_summary):
    """Return the maximal load over all edge resources (every type except "universal").

    :param result_summary: result object exposing a ``load`` dict keyed by (resource_type, element) tuples
    :return: maximal edge load; raises ValueError if no edge resource exists (as the original did)
    """
    # fixed typo in the original message: 'univerval' -> 'universal'
    logger.warn("In the function compute_max_edge_load the single universal node type 'universal' is assumed."
                "This should be fixed in the future and might yield wrong results when considering more general "
                "resource types. Disregard this warning if you know what you are doing.")
    edge_loads = [load for (resource_type, _), load in result_summary.load.items()
                  if resource_type != "universal"]
    return max(edge_loads)
def compute_avg_load(result_summary):
    """Return the mean load taken over every resource (node and edge resources alike)."""
    all_loads = [result_summary.load[resource_key] for resource_key in result_summary.load.keys()]
    return np.mean(all_loads)
def compute_max_load(result_summary):
    """Return the maximal load taken over every resource (node and edge resources alike)."""
    all_loads = [result_summary.load[resource_key] for resource_key in result_summary.load.keys()]
    return max(all_loads)
def select_scenarios_with_high_objective_gap_or_zero_requests(dc_baseline, algorithm_name,
                                                              output_respective_generation_parameters=True):
    ''' Function to select scenarios with high objective gap or no requests. This function is not used anymore but
        is left here for future usage.

    :param dc_baseline: data container holding the baseline (MIP) solutions
    :param algorithm_name: algorithm id whose solutions shall be inspected
    :param output_respective_generation_parameters: if True, diagnostic information is printed (Python 2 print
                                                    statements) for each selected scenario
    :return: list of scenario ids whose objective gap exceeds 100 or which contain no feasible request;
             a scenario matching both criteria appears twice
    '''
    scenario_ids = dc_baseline.algorithm_scenario_solution_dictionary[algorithm_name].keys()
    result = []
    for scenario_id in scenario_ids:
        scenario_solution = dc_baseline.get_solutions_by_scenario_index(scenario_id)[algorithm_name][0]
        scenario_status = scenario_solution.status
        # criterion 1: very high objective gap
        if scenario_status.objGap > 100:
            result.append(scenario_id)
            if output_respective_generation_parameters:
                print "Scenario {} has a very high gap, i.e. a gap of {} due to the objective bound being {} and the objective value being {}".format(
                    scenario_id,
                    scenario_status.objGap,
                    scenario_status.objBound,
                    scenario_status.objValue
                )
                print "The computation for this scenario took {} seconds.".format(scenario_solution.runtime)
                print "This scenario had the following generation parameters:"
                generation_parameters = extract_generation_parameters(
                    dc_baseline.scenario_parameter_container.scenario_parameter_dict, scenario_id
                )
                for gen_param in generation_parameters:
                    print "\t {}".format(gen_param)
        # criterion 2: no feasible request at all (nu_real_req is a count; < 0.5 means zero)
        if scenario_solution.nu_real_req < 0.5:
            result.append(scenario_id)
            if output_respective_generation_parameters:
                # NOTE(review): the format string below has only two placeholders but four
                # arguments are passed; str.format silently ignores the extra arguments
                print "Scenario {} has doesn't have any reasonable scenarios in it...{}".format(scenario_id,
                                                                                                scenario_status.objGap,
                                                                                                scenario_status.objBound,
                                                                                                scenario_status.objValue)
                print "The computation for this scenario took {} seconds.".format(scenario_solution.runtime)
                print "This scenario had the following generation parameters:"
                generation_parameters = extract_generation_parameters(
                    dc_baseline.scenario_parameter_container.scenario_parameter_dict, scenario_id
                )
                for gen_param in generation_parameters:
                    print "\t {}".format(gen_param)
    print "{} many scenarios experienced a very, very high gap or contained 0 requests".format(len(result))
    return result
def get_title_for_filter_specifications(filter_specifications):
    """Render a filter specification as a plot title: one 'parameter=value; ' line per filter,
    joined by newlines, with the trailing '; ' removed."""
    lines = [spec['parameter'] + "=" + str(spec['value']) + "; " for spec in filter_specifications]
    return "\n".join(lines)[:-2]
def extract_parameter_range(scenario_parameter_space_dict, key):
    """Recursively search the scenario parameter space for the generation parameter *key*.

    :param scenario_parameter_space_dict: (nested) scenario parameter space: dicts that may
                                          contain single-element lists of further dicts
    :param key: name of the generation parameter to look up
    :return: tuple (path, values) where path is the list of keys leading to *key* (with a
             literal 0 marking each list-descent step) and values is the parameter's value
             range; None if the key cannot be found
    """
    if not isinstance(scenario_parameter_space_dict, dict):
        return None
    # Python 2: dict.iteritems()
    for generator_name, value in scenario_parameter_space_dict.iteritems():
        if generator_name == key:
            return [key], value
        if isinstance(value, list):
            # only single-element lists are descended into; longer lists are skipped
            if len(value) != 1:
                continue
            value = value[0]
            result = extract_parameter_range(value, key)
            if result is not None:
                path, values = result
                # the 0 records the list-indexing step on the path
                return [generator_name, 0] + path, values
        elif isinstance(value, dict):
            result = extract_parameter_range(value, key)
            if result is not None:
                path, values = result
                return [generator_name] + path, values
    return None
def extract_generation_parameters(scenario_parameter_dict, scenario_id):
    """Recursively collect the generation-parameter key paths that apply to *scenario_id*.

    :param scenario_parameter_dict: (nested) scenario parameter dict whose leaves are sets
                                    of scenario ids
    :param scenario_id: id of the scenario of interest
    :return: list of key paths (each a list of generator names) under which the scenario id
             occurs, or None if the scenario id is not found
    """
    if not isinstance(scenario_parameter_dict, dict):
        return None
    results = []
    # Python 2: dict.iteritems()
    for generator_name, value in scenario_parameter_dict.iteritems():
        # a set of scenario ids marks a leaf; the catch-all entry "all" is skipped
        if isinstance(value, set) and generator_name != "all" and scenario_id in value:
            return [[generator_name]]
        if isinstance(value, list):
            # only single-element lists are descended into; longer lists are skipped
            if len(value) != 1:
                continue
            value = value[0]
            result = extract_generation_parameters(value, scenario_id)
            if result is not None:
                for atomic_result in result:
                    results.append([generator_name] + atomic_result)
        elif isinstance(value, dict):
            result = extract_generation_parameters(value, scenario_id)
            if result is not None:
                for atomic_result in result:
                    results.append([generator_name] + atomic_result)
    if results == []:
        return None
    else:
        return results
def lookup_scenarios_having_specific_values(scenario_parameter_space_dict, path, value):
    """Follow *path* through the scenario parameter dict and return the scenario ids stored for *value*.

    :param scenario_parameter_space_dict: nested scenario parameter dict (as produced per scenario container)
    :param path: list of keys as returned by extract_parameter_range; a literal 0 entry denotes a
                 list-descent step, which is skipped here because the parameter dict flattens those
                 levels (NOTE(review): only the literal index 0 is handled -- confirm paths never
                 contain other integers, otherwise this loops forever)
    :param value: the concrete parameter value whose scenario id set shall be returned
    :return: the collection of scenario ids stored under *value*
    """
    current_path = path[:]  # work on a copy; the caller's path stays untouched
    current_dict = scenario_parameter_space_dict
    while len(current_path) > 0:
        if isinstance(current_path[0], basestring):  # Python 2 string check
            current_dict = current_dict[current_path[0]]
            current_path.pop(0)
        elif current_path[0] == 0:
            # list-descent marker from extract_parameter_range; no lookup necessary here
            current_path.pop(0)
    return current_dict[value]
def lookup_scenario_parameter_room_dicts_on_path(scenario_parameter_space_dict, path):
    """Return the containers (dicts/lists) visited while following *path* through the parameter room.

    :param scenario_parameter_space_dict: (nested) scenario parameter room
    :param path: list of keys/indices as returned by extract_parameter_range
    :return: list of the containers encountered before each path step (one per step; the
             final value reached after the last step is not included)
    """
    current_path = path[:]  # work on a copy; the caller's path stays untouched
    current_dict_or_list = scenario_parameter_space_dict
    dicts_on_path = []
    while len(current_path) > 0:
        dicts_on_path.append(current_dict_or_list)
        if isinstance(current_path[0], basestring):  # Python 2 string check: a dict key
            current_dict_or_list = current_dict_or_list[current_path[0]]
            current_path.pop(0)
        elif isinstance(current_path[0], int):
            # an integer path entry indexes into a list
            current_dict_or_list = current_dict_or_list[int(current_path[0])]
            current_path.pop(0)
        else:
            raise RuntimeError("Could not lookup dicts.")
    return dicts_on_path
def load_reduced_pickle(reduced_pickle):
    """Unpickle and return the reduced result data stored at *reduced_pickle*.

    WARNING: pickle.load executes code embedded in the file -- only load trusted files.
    """
    with open(reduced_pickle, "rb") as pickle_file:
        return pickle.load(pickle_file)
class AbstractPlotter(object):
    ''' Abstract Plotter interface providing functionality used by the majority of plotting classes of this module.

    Subclasses must implement plot_figure(); this base class provides scenario
    filtering based on filter specifications, output path/filename construction,
    and showing/saving of the current matplotlib figure.
    '''
    def __init__(self,
                 output_path,
                 output_filetype,
                 scenario_solution_storage,
                 algorithm_id,
                 execution_id,
                 show_plot=False,
                 save_plot=True,
                 overwrite_existing_files=False,
                 forbidden_scenario_ids=None,
                 paper_mode=True
                 ):
        # NOTE(review): output_path/output_filetype are stored here but
        # _construct_output_path_and_filename below uses the module-level
        # OUTPUT_PATH/OUTPUT_FILETYPE globals instead — confirm this is intended.
        self.output_path = output_path
        self.output_filetype = output_filetype
        self.scenario_solution_storage = scenario_solution_storage
        self.algorithm_id = algorithm_id
        self.execution_id = execution_id
        # convenience handles into the scenario parameter container of the storage
        self.scenario_parameter_dict = self.scenario_solution_storage.scenario_parameter_container.scenario_parameter_dict
        self.scenarioparameter_room = self.scenario_solution_storage.scenario_parameter_container.scenarioparameter_room
        # all scenario ids for which solutions of algorithm_id exist
        self.all_scenario_ids = set(scenario_solution_storage.algorithm_scenario_solution_dictionary[self.algorithm_id].keys())
        self.show_plot = show_plot
        self.save_plot = save_plot
        self.overwrite_existing_files = overwrite_existing_files
        # scenario ids excluded from every plot produced by this plotter
        if not forbidden_scenario_ids:
            self.forbidden_scenario_ids = set()
        else:
            self.forbidden_scenario_ids = forbidden_scenario_ids
        # paper_mode: terse titles suitable for publication instead of debug info
        self.paper_mode=paper_mode

    def _construct_output_path_and_filename(self, title, filter_specifications=None):
        """Return (output directory, full filename) for a general plot named *title*,
        encoding the active filter specifications (if any) into path and filename."""
        filter_spec_path = ""
        filter_filename = "no_filter.{}".format(OUTPUT_FILETYPE)
        if filter_specifications:
            filter_spec_path, filter_filename = self._construct_path_and_filename_for_filter_spec(filter_specifications)
        base = os.path.normpath(OUTPUT_PATH)
        # plots are grouped by date so repeated runs do not overwrite each other
        date = strftime("%Y-%m-%d", gmtime())
        output_path = os.path.join(base, date, OUTPUT_FILETYPE, "general_plots", filter_spec_path)
        filename = os.path.join(output_path, title + "_" + filter_filename)
        return output_path, filename

    def _construct_path_and_filename_for_filter_spec(self, filter_specifications):
        """Encode a list of {'parameter':..., 'value':...} filters into a nested
        sub-path and a flat filename component; returns (path, filename)."""
        filter_path = ""
        filter_filename = ""
        for spec in filter_specifications:
            filter_path = os.path.join(filter_path, (spec['parameter'] + "_" + str(spec['value'])))
            filter_filename += spec['parameter'] + "_" + str(spec['value']) + "_"
        # drop the trailing underscore and append the output filetype extension
        filter_filename = filter_filename[:-1] + "." + OUTPUT_FILETYPE
        return filter_path, filter_filename

    def _obtain_scenarios_based_on_filters(self, filter_specifications=None):
        """Return the set of scenario ids matching *all* given filter
        specifications (intersection); all scenario ids if no filter is given."""
        allowed_scenario_ids = set(self.all_scenario_ids)
        sps = self.scenarioparameter_room
        spd = self.scenario_parameter_dict
        if filter_specifications:
            for filter_specification in filter_specifications:
                filter_path, _ = extract_parameter_range(sps, filter_specification['parameter'])
                filter_indices = lookup_scenarios_having_specific_values(spd, filter_path,
                                                                         filter_specification['value'])
                allowed_scenario_ids = allowed_scenario_ids & filter_indices
        return allowed_scenario_ids

    def _obtain_scenarios_based_on_axis(self, axis_path, axis_value):
        """Return the set of scenario ids whose parameter at axis_path equals axis_value."""
        spd = self.scenario_parameter_dict
        return lookup_scenarios_having_specific_values(spd, axis_path, axis_value)

    def _show_and_or_save_plots(self, output_path, filename):
        """Save and/or display the current matplotlib figure (depending on
        save_plot/show_plot), creating output_path if needed; always closes the figure."""
        plt.tight_layout()
        if self.save_plot:
            if not os.path.exists(output_path):
                os.makedirs(output_path)
            print "saving plot: {}".format(filename)
            plt.savefig(filename)
        if self.show_plot:
            plt.show()
        plt.close()

    def plot_figure(self, filter_specifications):
        """Produce the plot(s) for the given filter specifications; must be
        implemented by concrete subclasses."""
        raise RuntimeError("This is an abstract method")
class SingleHeatmapPlotter(AbstractPlotter):
    """Plot 2D heatmaps of a metric over two scenario parameters for a single
    algorithm's solutions. For every combination of axes specification and
    metric specification one heatmap is produced."""
    def __init__(self,
                 output_path,
                 output_filetype,
                 scenario_solution_storage,
                 algorithm_id,
                 execution_id,
                 heatmap_plot_type,
                 list_of_axes_specifications = global_heatmap_axes_specifications,
                 list_of_metric_specifications = None,
                 show_plot=False,
                 save_plot=True,
                 overwrite_existing_files=False,
                 forbidden_scenario_ids=None,
                 paper_mode=True
                 ):
        super(SingleHeatmapPlotter, self).__init__(output_path, output_filetype, scenario_solution_storage,
                                                   algorithm_id, execution_id, show_plot, save_plot,
                                                   overwrite_existing_files, forbidden_scenario_ids, paper_mode)
        # the plot type determines which metric specifications are applicable
        if heatmap_plot_type is None or heatmap_plot_type not in HeatmapPlotType.VALUE_RANGE:
            raise RuntimeError("heatmap_plot_type {} is not a valid input. Must be of type HeatmapPlotType.".format(heatmap_plot_type))
        self.heatmap_plot_type = heatmap_plot_type
        if not list_of_axes_specifications:
            raise RuntimeError("Axes need to be provided.")
        self.list_of_axes_specifications = list_of_axes_specifications
        # default to all metric specifications registered for this plot type;
        # otherwise validate that each given metric matches the plot type
        if not list_of_metric_specifications:
            self.list_of_metric_specifications = heatmap_specifications_per_type[self.heatmap_plot_type]
        else:
            for metric_specification in list_of_metric_specifications:
                if metric_specification.plot_type != self.heatmap_plot_type:
                    raise RuntimeError("The metric specification {} does not agree with the plot type {}.".format(metric_specification, self.heatmap_plot_type))
            self.list_of_metric_specifications = list_of_metric_specifications

    def _construct_output_path_and_filename(self, metric_specification, heatmap_axes_specification, filter_specifications=None):
        """Return (output directory, full filename) for a heatmap, grouping the
        output by axes specification folder and filter specification sub-path."""
        filter_spec_path = ""
        filter_filename = "no_filter.{}".format(OUTPUT_FILETYPE)
        if filter_specifications:
            filter_spec_path, filter_filename = self._construct_path_and_filename_for_filter_spec(filter_specifications)
        base = os.path.normpath(OUTPUT_PATH)
        date = strftime("%Y-%m-%d", gmtime())
        axes_foldername = heatmap_axes_specification['foldername']
        output_path = os.path.join(base, date, OUTPUT_FILETYPE, axes_foldername, filter_spec_path)
        filename = os.path.join(output_path, metric_specification['filename'] + "_" + filter_filename)
        return output_path, filename

    def plot_figure(self, filter_specifications):
        """Produce one heatmap per (axes specification, metric specification) pair."""
        for axes_specification in self.list_of_axes_specifications:
            for metric_specfication in self.list_of_metric_specifications:
                self.plot_single_heatmap_general(metric_specfication, axes_specification, filter_specifications)

    def _lookup_solutions(self, scenario_ids):
        """Return a 1-tuple of this algorithm's solution per scenario id; the
        tuples are unpacked as arguments of the metric lookup functions."""
        return [(self.scenario_solution_storage.get_solutions_by_scenario_index(x)[self.algorithm_id][self.execution_id],) for x in scenario_ids]

    def plot_single_heatmap_general(self,
                                    heatmap_metric_specification,
                                    heatmap_axes_specification,
                                    filter_specifications=None):
        """Render a single heatmap: for each (x, y) parameter combination the
        mean of the metric over all matching, non-forbidden scenarios is shown.

        Skips silently if the output file already exists (unless
        overwrite_existing_files) or if a filter conflicts with an axis parameter.
        """
        # data extraction
        sps = self.scenarioparameter_room
        spd = self.scenario_parameter_dict
        output_path, filename = self._construct_output_path_and_filename(heatmap_metric_specification,
                                                                         heatmap_axes_specification,
                                                                         filter_specifications)
        logger.debug("output_path is {};\t filename is {}".format(output_path, filename))
        if not self.overwrite_existing_files and os.path.exists(filename):
            logger.info("Skipping generation of {} as this file already exists".format(filename))
            return
        #check if filter specification conflicts with axes specification
        if filter_specifications is not None:
            for filter_specification in filter_specifications:
                if (heatmap_axes_specification['x_axis_parameter'] == filter_specification['parameter'] or
                        heatmap_axes_specification['y_axis_parameter'] == filter_specification['parameter']):
                    logger.debug("Skipping generation of {} as the filter specification conflicts with the axes specification.")
                    return
        path_x_axis, xaxis_parameters = extract_parameter_range(sps, heatmap_axes_specification['x_axis_parameter'])
        path_y_axis, yaxis_parameters = extract_parameter_range(sps, heatmap_axes_specification['y_axis_parameter'])
        # for heatmap plot
        xaxis_parameters.sort()
        yaxis_parameters.sort()
        # all heatmap values will be stored in X
        X = np.zeros((len(yaxis_parameters), len(xaxis_parameters)))
        column_labels = yaxis_parameters
        row_labels = xaxis_parameters
        fig, ax = plt.subplots(figsize=(5, 4))
        # track min/max sample counts per cell for the non-paper-mode title
        min_number_of_observed_values = 10000000000000
        max_number_of_observed_values = 0
        observed_values = np.empty(0)
        for x_index, x_val in enumerate(xaxis_parameters):
            # all scenario indices which has x_val as xaxis parameter (e.g. node_resource_factor = 0.5
            scenario_ids_matching_x_axis = lookup_scenarios_having_specific_values(spd, path_x_axis, x_val)
            for y_index, y_val in enumerate(yaxis_parameters):
                scenario_ids_matching_y_axis = lookup_scenarios_having_specific_values(spd, path_y_axis, y_val)
                filter_indices = self._obtain_scenarios_based_on_filters(filter_specifications)
                # cell = scenarios matching x, y and all filters, minus forbidden ones
                scenario_ids_to_consider = (scenario_ids_matching_x_axis &
                                            scenario_ids_matching_y_axis &
                                            filter_indices) - self.forbidden_scenario_ids
                solutions = self._lookup_solutions(scenario_ids_to_consider)
                values = [heatmap_metric_specification['lookup_function'](*solution) for solution in solutions]
                # optional per-metric filter to drop invalid values
                if 'metric_filter' in heatmap_metric_specification:
                    values = [value for value in values if heatmap_metric_specification['metric_filter'](value)]
                observed_values = np.append(observed_values, values)
                if len(values) < min_number_of_observed_values:
                    min_number_of_observed_values = len(values)
                if len(values) > max_number_of_observed_values:
                    max_number_of_observed_values = len(values)
                logger.debug("values are {}".format(values))
                m = np.nanmean(values)
                logger.debug("mean is {}".format(m))
                if 'rounding_function' in heatmap_metric_specification:
                    rounded_m = heatmap_metric_specification['rounding_function'](m)
                else:
                    rounded_m = float("{0:.1f}".format(round(m, 2)))
                # annotate the cell with its (rounded) mean value
                plt.text(x_index + .5,
                         y_index + .45,
                         rounded_m,
                         verticalalignment="center",
                         horizontalalignment="center",
                         fontsize=17.5,
                         fontname="Courier New",
                         # family="monospace",
                         color='w',
                         path_effects=[PathEffects.withStroke(linewidth=4, foreground="k")]
                         )
                X[y_index, x_index] = rounded_m
        if min_number_of_observed_values == max_number_of_observed_values:
            solution_count_string = "{} values per square".format(min_number_of_observed_values)
        else:
            solution_count_string = "between {} and {} values per square".format(min_number_of_observed_values,
                                                                                 max_number_of_observed_values)
        if self.paper_mode:
            ax.set_title(heatmap_metric_specification['name'], fontsize=17)
        else:
            # verbose title with filter info and sample statistics for debugging
            title = heatmap_metric_specification['name'] + "\n"
            if filter_specifications:
                title += get_title_for_filter_specifications(filter_specifications) + "\n"
            title += solution_count_string + "\n"
            title += "min: {:.2f}; mean: {:.2f}; max: {:.2f}".format(np.nanmin(observed_values),
                                                                     np.nanmean(observed_values),
                                                                     np.nanmax(observed_values))
            ax.set_title(title)
        heatmap = ax.pcolor(X,
                            cmap=heatmap_metric_specification['cmap'],
                            vmin=heatmap_metric_specification['vmin'],
                            vmax=heatmap_metric_specification['vmax'])
        if not self.paper_mode:
            fig.colorbar(heatmap, label=heatmap_metric_specification['name'] + ' - mean in blue')
        else:
            ticks = heatmap_metric_specification['colorbar_ticks']
            tick_labels = [str(tick).ljust(3) for tick in ticks]
            cbar = fig.colorbar(heatmap)
            cbar.set_ticks(ticks)
            cbar.set_ticklabels(tick_labels)
            #for label in cbar.ax.get_yticklabels():
            #    label.set_fontproperties(font_manager.FontProperties(family="Courier New",weight='bold'))
            cbar.ax.tick_params(labelsize=15.5)
        # center the tick labels on the heatmap cells
        ax.set_yticks(np.arange(X.shape[0]) + 0.5, minor=False)
        ax.set_xticks(np.arange(X.shape[1]) + 0.5, minor=False)
        ax.set_xticklabels(row_labels, minor=False, fontsize=15.5)
        ax.set_xlabel(heatmap_axes_specification['x_axis_title'], fontsize=16)
        ax.set_ylabel(heatmap_axes_specification['y_axis_title'], fontsize=16)
        ax.set_yticklabels(column_labels, minor=False, fontsize=15.5)
        self._show_and_or_save_plots(output_path, filename)
class ComparisonHeatmapPlotter(SingleHeatmapPlotter):
    """Heatmap plotter comparing the solutions of two algorithms.

    Extends SingleHeatmapPlotter with a second solution storage; for every
    scenario the metric lookup functions receive a pair of solutions, one from
    each storage. Only HeatmapPlotType.Comparison_MCF_vs_RRT is supported.
    """
    def __init__(self,
                 output_path,
                 output_filetype,
                 scenario_solution_storage,
                 algorithm_id,
                 execution_id,
                 other_scenario_solution_storage,
                 other_algorithm_id,
                 other_execution_id,
                 heatmap_plot_type,
                 list_of_axes_specifications = global_heatmap_axes_specifications,
                 list_of_metric_specifications = None,
                 show_plot=False,
                 save_plot=True,
                 overwrite_existing_files=False,
                 forbidden_scenario_ids=None,
                 paper_mode=True
                 ):
        super(ComparisonHeatmapPlotter, self).__init__(
            output_path, output_filetype, scenario_solution_storage,
            algorithm_id, execution_id, heatmap_plot_type,
            list_of_axes_specifications, list_of_metric_specifications,
            show_plot, save_plot, overwrite_existing_files,
            forbidden_scenario_ids, paper_mode)
        # only the comparison plot type makes sense for this plotter
        if heatmap_plot_type != HeatmapPlotType.Comparison_MCF_vs_RRT:
            raise RuntimeError("Only comparison heatmap plots are allowed")
        self.other_scenario_solution_storage = other_scenario_solution_storage
        self.other_algorithm_id = other_algorithm_id
        self.other_execution_id = other_execution_id

    def _lookup_solutions(self, scenario_ids):
        """Return, per scenario id, the pair (primary solution, other solution)."""
        primary_storage = self.scenario_solution_storage
        other_storage = self.other_scenario_solution_storage
        solution_pairs = []
        for scenario_id in scenario_ids:
            primary_solution = primary_storage.get_solutions_by_scenario_index(scenario_id)[self.algorithm_id][self.execution_id]
            other_solution = other_storage.get_solutions_by_scenario_index(scenario_id)[self.other_algorithm_id][self.other_execution_id]
            solution_pairs.append((primary_solution, other_solution))
        return solution_pairs
class ComparisonBaselineVsRRT_Scatter_and_ECDF(AbstractPlotter):
    """Compare a baseline (MIP) solution storage with randomized-rounding
    (RandomizedRoundingTriumvirate) results via ECDF plots of loads, relative
    profits and dual bounds, plus profit-vs-load scatter plots."""
    def __init__(self,
                 output_path,
                 output_filetype,
                 baseline_solution_storage,
                 baseline_algorithm_id,
                 baseline_execution_id,
                 randround_solution_storage,
                 randround_algorithm_id,
                 randround_execution_id,
                 show_plot=False,
                 save_plot=True,
                 overwrite_existing_files=False,
                 forbidden_scenario_ids=None,
                 paper_mode=True
                 ):
        super(ComparisonBaselineVsRRT_Scatter_and_ECDF, self).__init__(output_path, output_filetype, baseline_solution_storage,
                                                                       baseline_algorithm_id, baseline_execution_id, show_plot, save_plot,
                                                                       overwrite_existing_files, forbidden_scenario_ids, paper_mode)
        if randround_algorithm_id != "RandomizedRoundingTriumvirate":
            raise RuntimeError("The capacity violation plot can only be applied to RandomizedRoundingTriumvirate results.")
        self.randround_solution_storage = randround_solution_storage
        self.randround_algorithm_id = randround_algorithm_id
        self.randround_execution_id = randround_execution_id
        # the four randomized-rounding result variants contained in a triumvirate result
        self._randround_data_names = ['min_aug', 'max_profit', 'wo_viol', 'mdk']
        self._randround_data_names_with_baseline = ['min_aug', 'max_profit', 'wo_viol', 'mdk', "baseline"]
        # plain-text legend labels per data series
        self.label_names = {'min_aug': "min. augmentation",
                            'max_profit': "max. profit",
                            'wo_viol': "rounding w/o augmentation",
                            'mdk': "multi-dimensional knapsack",
                            'baseline': "baseline"}
        # LaTeX legend labels per data series
        self.math_label_names = {'min_aug': "\mathrm{RR}_{\mathrm{MinLoad}}",
                                 'max_profit': "\mathrm{RR}_{\mathrm{MaxProfit}}",
                                 'wo_viol': "\mathrm{RR}_{\mathrm{Heuristic}}",
                                 'mdk': "\mathrm{RR}_{\mathrm{MDK}}",
                                 'baseline': "\mathrm{MIP}_{\mathrm{MCF}}"}
        self.markers = {'min_aug': "o",
                        'max_profit': "v",
                        'wo_viol': "x",
                        'mdk': "+",
                        'baseline': "^"}
        self.colors = {'min_aug': "salmon",
                       'max_profit': "darkred",
                       'wo_viol': "g",
                       'mdk': "b",
                       'baseline': "k"}
        # accessors extracting each result variant from a triumvirate solution
        self._randround_data_lookups = {'min_aug': (lambda x: x.collection_of_samples_with_violations[0]),
                                        'max_profit': (lambda x: x.collection_of_samples_with_violations[1]),
                                        'wo_viol': (lambda x: x.result_wo_violations),
                                        'mdk': (lambda x: x.mdk_result)}
        filter_path_number_of_requests, list_number_of_requests = extract_parameter_range(self.scenarioparameter_room, "number_of_requests")
        self._number_of_requests_list = list_number_of_requests
        self._filter_path_number_of_requests = filter_path_number_of_requests
        # NOTE(review): these template dicts are *aliased* (not copied) by
        # _compute_profits_relative_to_baseline below, so their contents are
        # mutated across calls — confirm this reuse is intended.
        self._nan_dict = {randround_data_name : np.NaN for randround_data_name in self._randround_data_names}
        self._profit_result_data_list = {randround_data_name: np.NaN for randround_data_name in
                                         self._randround_data_names}
        self._profit_result = self._nan_dict
        self._load_result = {randround_data_name: [np.NaN, np.NaN] for randround_data_name in self._randround_data_names}

    def _lookup_baseline_solution(self, scenario_id):
        """Return the baseline algorithm's solution for the given scenario id."""
        return self.scenario_solution_storage.get_solutions_by_scenario_index(scenario_id)[self.algorithm_id][self.execution_id]

    def _lookup_randround_solution(self, scenario_id):
        """Return the randomized-rounding solution for the given scenario id."""
        return self.randround_solution_storage.get_solutions_by_scenario_index(scenario_id)[self.randround_algorithm_id][self.randround_execution_id]

    def _compute_profits_relative_to_baseline(self, baseline_solution, randround_solution):
        """Store in self._profit_result, per rounding variant, the profit
        relative to the baseline objective in percent; NaNs if the baseline
        objective is (close to) zero."""
        baseline_objective = baseline_solution.status.objValue
        if baseline_objective > 0.00001:
            self._profit_result = self._profit_result_data_list
            for randround_data_name in self._randround_data_names:
                randround_solution_for_data_name = self._randround_data_lookups[randround_data_name](randround_solution)
                self._profit_result[randround_data_name] = (randround_solution_for_data_name.profit / baseline_objective)*100.0
        else:
            logger.warn(
                "The baseline objective of is zero. discarding value.")
            self._profit_result = self._nan_dict

    def _compute_maximal_load_for_randround(self, randround_solution):
        """Store in self._load_result, per rounding variant, the maximal node
        load ([0]) and edge load ([1]) in percent."""
        for randround_data_name in self._randround_data_names:
            randround_solution_for_data_name = self._randround_data_lookups[randround_data_name](randround_solution)
            self._load_result[randround_data_name][0] = randround_solution_for_data_name.max_node_load * 100.0
            self._load_result[randround_data_name][1] = randround_solution_for_data_name.max_edge_load * 100.0

    def _extract_first_dual_bound_from_baseline_solution(self, baseline_solution):
        """Return the best of the root-relaxation bound and the first temporal-log
        bound of the baseline MIP; NaN if both are missing/garbage."""
        log_time_root = 100000000000
        root_entry = baseline_solution.temporal_log.root_relaxation_entry
        # sentinel defaults so max() below picks any real bound that exists
        root_entry_dual_bound = -(10 ** 80)
        first_log_entry_dual_bound = -(10 ** 80)
        if root_entry is not None:
            root_entry_dual_bound = root_entry.data.objective_bound
        else:
            logger.debug("The root entry is none...")
        first_log_entry = baseline_solution.temporal_log.log_entries[0]
        if first_log_entry is not None:
            first_log_entry_dual_bound = first_log_entry.data.objective_bound
        else:
            logger.debug("The first entry of the temporal log is none...")
        result = max(root_entry_dual_bound, first_log_entry_dual_bound)
        if result < -(10 **40):
            logger.warn("The dual bound of the MIP is garbage. discarding it.")
            return np.nan
        else:
            return result

    def _extract_final_dual_bound_from_baseline_solution(self, baseline_solution):
        """Return the smallest objective bound found in the baseline MIP's
        temporal log; NaN if no usable bound exists."""
        best_bnd = (10 **80)
        for log_entry in baseline_solution.temporal_log.log_entries:
            if log_entry.data.objective_bound < best_bnd:
                best_bnd = log_entry.data.objective_bound
        if best_bnd > 10**70:
            logger.warn("Best bound of MIP could not be determined.")
            return np.NaN
        else:
            return best_bnd

    def _compute_relative_dual_bound_to_randround_ROOT(self, baseline_solution, randround_solution):
        """Return (first MIP dual bound) / (randround LP bound), or NaN when the
        denominator is ~0 or the ratio is implausibly large (>1000)."""
        baseline_dual_bound = self._extract_first_dual_bound_from_baseline_solution(baseline_solution)
        randround_dual_bound = randround_solution.meta_data.status.objValue
        if randround_dual_bound > 0.0001:
            result = baseline_dual_bound / randround_dual_bound
            if result > 1000:
                logger.warn("The relative dual bound {} is very high. It's a result from {} {}. discarding it.".format(result, baseline_dual_bound, randround_dual_bound))
                return np.nan
            return result
        else:
            logger.warn(
                "The randround dual bound is zero. discarding value.")
            return np.NaN

    def _compute_relative_dual_bound_to_randround_FINAL(self, baseline_solution, randround_solution):
        """Return (final MIP dual bound) / (randround LP bound), or NaN when the
        denominator is ~0 or the ratio is implausibly large (>1000)."""
        baseline_dual_bound = self._extract_final_dual_bound_from_baseline_solution(baseline_solution)
        randround_dual_bound = randround_solution.meta_data.status.objValue
        if randround_dual_bound > 0.0001:
            result = baseline_dual_bound / randround_dual_bound
            if result > 1000:
                logger.warn("The relative dual bound {} is very high. It's a result from {} {}. discarding it.".format(result, baseline_dual_bound, randround_dual_bound))
                return np.nan
            return result
        else:
            logger.warn(
                "The randround dual bound is zero. discarding value.")
            return np.NaN

    def compute_relative_profits_arrays(self, list_of_scenarios):
        """Return {variant: np.array of relative profits [%]} over the scenarios."""
        number_of_entries = len(list_of_scenarios)
        result = {randround_data_name: np.full(number_of_entries, np.nan) for randround_data_name in
                  self._randround_data_names}
        for i, scenario_id in enumerate(list_of_scenarios):
            baseline_solution = self._lookup_baseline_solution(scenario_id)
            randround_solution = self._lookup_randround_solution(scenario_id)
            self._compute_profits_relative_to_baseline(baseline_solution, randround_solution)
            for randround_data_name in self._randround_data_names:
                result[randround_data_name][i] = self._profit_result[randround_data_name]
        return result

    def compute_maximal_load_arrays(self, list_of_scenarios):
        """Return {variant: [node-load array, edge-load array]} (in percent) over
        the scenarios, including the 'baseline' entry."""
        number_of_entries = len(list_of_scenarios)
        result = {data_name: [np.full(number_of_entries, np.nan), np.full(number_of_entries, np.nan)]
                  for data_name in self._randround_data_names_with_baseline}
        for i, scenario_id in enumerate(list_of_scenarios):
            baseline_solution = self._lookup_baseline_solution(scenario_id)
            randround_solution = self._lookup_randround_solution(scenario_id)
            self._compute_maximal_load_for_randround(randround_solution)
            for randround_data_name in self._randround_data_names:
                result[randround_data_name][0][i] = self._load_result[randround_data_name][0]
                result[randround_data_name][1][i] = self._load_result[randround_data_name][1]
            result['baseline'][0][i] = compute_max_node_load(baseline_solution)
            result['baseline'][1][i] = compute_max_edge_load(baseline_solution)
        return result

    def compute_dual_bound_array(self, list_of_scenarios):
        """Return {number_of_requests: [root-bound ratios, final-bound ratios]}
        restricted to the given scenarios."""
        result = {number_of_requests: None for number_of_requests in
                  self._number_of_requests_list}
        for number_of_requests in self._number_of_requests_list:
            scenario_ids_with_right_number_of_requests = self._obtain_scenarios_based_on_filters([{"parameter": "number_of_requests", "value": number_of_requests}])
            scenario_ids_with_right_number_of_requests &= set(list_of_scenarios)
            result[number_of_requests] = [np.full(len(scenario_ids_with_right_number_of_requests), np.nan), np.full(len(scenario_ids_with_right_number_of_requests), np.nan)]
            for i, scenario_id in enumerate(scenario_ids_with_right_number_of_requests):
                baseline_solution = self._lookup_baseline_solution(scenario_id)
                randround_solution = self._lookup_randround_solution(scenario_id)
                result[number_of_requests][0][i] = self._compute_relative_dual_bound_to_randround_ROOT(baseline_solution, randround_solution)
                result[number_of_requests][1][i] = self._compute_relative_dual_bound_to_randround_FINAL(baseline_solution, randround_solution)
        return result

    def plot_figure(self, filter_specifications):
        """Produce all four comparison plots for the given filter specifications."""
        self.plot_figure_ecdf_load(filter_specifications)
        self.plot_figure_ecdf_objective(filter_specifications)
        self.plot_bound_ecdf(filter_specifications)
        self.plot_scatter_obj_vs_load(filter_specifications)

    def plot_figure_ecdf_load(self, filter_specifications):
        """Plot the ECDF of maximal node and edge resource loads per algorithm."""
        output_filename = "ECDF_load"
        output_path, filename = self._construct_output_path_and_filename(output_filename,
                                                                         filter_specifications)
        logger.debug("output_path is {};\t filename is {}".format(output_path, filename))
        if not self.overwrite_existing_files and os.path.exists(filename):
            logger.info("Skipping generation of {} as this file already exists".format(filename))
            return
        scenario_ids = self._obtain_scenarios_based_on_filters(filter_specifications)
        if self.forbidden_scenario_ids:
            scenario_ids = scenario_ids - self.forbidden_scenario_ids
        result = self.compute_maximal_load_arrays(scenario_ids)
        fix, ax = plt.subplots(figsize=(5, 4))
        # line-style legend: dash-dot = node load, solid = edge load
        #cum_line = matplotlib.lines.Line2D([], [], color='k', linestyle="-", label='total')
        node_line = matplotlib.lines.Line2D([], [], color='gray', linestyle="-.", label='node')
        edge_line = matplotlib.lines.Line2D([], [], color='gray', linestyle="-", label='edge')
        second_legend_handlers = []
        max_observed_value = 0
        for data_name in self._randround_data_names_with_baseline:
            #sorted_data_cum = np.sort(np.maximum(result[data_name][0], result[data_name][1]))
            sorted_data_node = np.sort(result[data_name][0])
            sorted_data_edge = np.sort(result[data_name][1])
            max_observed_value = np.maximum(max_observed_value, sorted_data_node[-1])
            max_observed_value = np.maximum(max_observed_value, sorted_data_edge[-1])
            yvals = np.arange(1,len(sorted_data_node)+1) / float(len(sorted_data_node))
            second_legend_handlers.append(matplotlib.lines.Line2D([], [], color=self.colors[data_name], linestyle="-", label="${}$".format(self.math_label_names[data_name])))
            #ax.plot(sorted_data_cum, yvals, color=self.colors[data_name], linestyle="-")
            ax.plot(sorted_data_node, yvals, color=self.colors[data_name], linestyle="-.")
            ax.plot(sorted_data_edge, yvals, color=self.colors[data_name], linestyle="-")
        first_legend = plt.legend(handles=[node_line, edge_line], loc=4, fontsize=14, title="Resource", handletextpad=.35, borderaxespad=0.175, borderpad=0.2)
        plt.setp(first_legend.get_title(), fontsize=14)
        plt.gca().add_artist(first_legend)
        second_legend = plt.legend(handles=second_legend_handlers, loc=2, fontsize=14, title="Algorithm", handletextpad=.35, borderaxespad=0.175, borderpad=0.2)
        plt.setp(second_legend.get_title(), fontsize=14)
        ax.set_xlim(10, max_observed_value * 1.1)
        ax.set_xscale("log", basex=10)
        ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
        ax.set_xticks([10, 50, 100, 200, 500], minor=False)
        ax.set_xticks([20,30,40,50,60,70,80,90, 300,400], minor=True)
        ax.set_title("ECDF of Resource Loads",fontsize=17)
        ax.set_xlabel("Maximum Resource Load [%]", fontsize=16)
        ax.set_ylabel("ECDF", fontsize=16)
        ax.grid(True, which="both")
        ax.tick_params(axis='both', which='major', labelsize=15.5)
        ax.tick_params(axis='x', which='minor', labelsize=15.5)
        plt.grid(True, which="both")
        plt.tight_layout()
        self._show_and_or_save_plots(output_path, filename)

    def plot_figure_ecdf_objective(self, filter_specifications):
        """Plot the ECDF of profits relative to the baseline per rounding variant."""
        output_filename = "ECDF_objective"
        output_path, filename = self._construct_output_path_and_filename(output_filename,
                                                                         filter_specifications)
        logger.debug("output_path is {};\t filename is {}".format(output_path, filename))
        if not self.overwrite_existing_files and os.path.exists(filename):
            logger.info("Skipping generation of {} as this file already exists".format(filename))
            return
        scenario_ids = self._obtain_scenarios_based_on_filters(filter_specifications)
        if self.forbidden_scenario_ids:
            scenario_ids = scenario_ids - self.forbidden_scenario_ids
        result = self.compute_relative_profits_arrays(scenario_ids)
        fix, ax = plt.subplots(figsize=(5, 4))
        max_observed_value = 0
        for data_name in self._randround_data_names:
            sorted_data = np.sort(result[data_name])
            max_observed_value = np.maximum(max_observed_value, sorted_data[-1])
            yvals = np.arange(1,len(sorted_data)+1) / float(len(sorted_data))
            ax.plot(sorted_data, yvals, color=self.colors[data_name], linestyle="-", label="${}$".format(self.math_label_names[data_name]))
        leg = plt.legend(loc=4, title="Algorithm", fontsize=14, handletextpad=.35, borderaxespad=0.175, borderpad=0.2)
        plt.setp(leg.get_title(), fontsize=14)
        ax.set_title("ECDF of Relative Achieved Profit", fontsize=17)
        ax.set_xlabel("$\mathrm{Profit}({\mathrm{RR}_{\mathrm{Alg}}}) / \mathrm{Profit}({\mathrm{MIP}_{\mathrm{MCF}}})$ [%] ", fontsize=16)
        ax.set_ylabel("ECDF", fontsize=16)
        ax.grid(True, which="both")
        ax.tick_params(axis='both', which='major', labelsize=15.5)
        #ax.set_xscale("log", basex=10)
        ax.set_xlim(20,max_observed_value*1.1)
        plt.tight_layout()
        self._show_and_or_save_plots(output_path, filename)

    def plot_bound_ecdf(self, filter_specifications):
        """Plot the ECDF of MIP-bound / LP-bound ratios, grouped by request count.

        Skipped when a filter on number_of_requests is active, as that conflicts
        with the per-request-count grouping of this plot.
        """
        output_filename = "ECDF_bound"
        output_path, filename = self._construct_output_path_and_filename(output_filename,
                                                                         filter_specifications)
        logger.debug("output_path is {};\t filename is {}".format(output_path, filename))
        if not self.overwrite_existing_files and os.path.exists(filename):
            logger.info("Skipping generation of {} as this file already exists".format(filename))
            return
        if filter_specifications:
            for filter_specification in filter_specifications:
                if filter_specification["parameter"] == "number_of_requests":
                    logger.info("Skipping generation of {} as this conflicts with the filter specification {}".format(output_filename, filter_specification))
                    return
        scenario_ids = self._obtain_scenarios_based_on_filters(filter_specifications)
        if self.forbidden_scenario_ids:
            scenario_ids = scenario_ids - self.forbidden_scenario_ids
        result = self.compute_dual_bound_array(scenario_ids)
        fix, ax = plt.subplots(figsize=(5, 4))
        #ax.set_xscale("log", basex=10)
        colors = ['k','g', 'b', 'r']
        max_observed_value = 0
        number_requests_legend_handlers = []
        for i, number_of_requests in enumerate(self._number_of_requests_list):
            # solid line: initial (root) bound ratio; dotted: final bound ratio
            result_for_requests = result[number_of_requests][0]
            sorted_data = np.sort(result_for_requests[~np.isnan(result_for_requests)])
            max_observed_value = np.maximum(max_observed_value, sorted_data[-1])
            yvals = np.arange(1,len(sorted_data)+1) / float(len(sorted_data))
            ax.plot(sorted_data, yvals, color=colors[i], linestyle="-", label="{}".format(number_of_requests), linewidth=1.8)
            result_for_requests = result[number_of_requests][1]
            sorted_data = np.sort(result_for_requests[~np.isnan(result_for_requests)])
            max_observed_value = np.maximum(max_observed_value, sorted_data[-1])
            yvals = np.arange(1, len(sorted_data) + 1) / float(len(sorted_data))
            ax.plot(sorted_data, yvals, color=colors[i], linestyle=":",
                    linewidth=2.4)
            number_requests_legend_handlers.append(matplotlib.lines.Line2D([], [], color=colors[i], linestyle="-", label='{}'.format(number_of_requests)))
        root_legend_handlers = [matplotlib.lines.Line2D([], [], color='gray', linestyle="-", label='initial'), matplotlib.lines.Line2D([], [], color='gray', linestyle=":", label='final')]
        first_legend = plt.legend(title="Bound($\mathrm{MIP}_{\mathrm{MCF}})$", handles=root_legend_handlers, loc=(0.225,0.0125), fontsize=14, handletextpad=0.35, borderaxespad=0.175, borderpad=0.2)
        plt.setp(first_legend.get_title(), fontsize='15')
        plt.gca().add_artist(first_legend)
        o_leg = plt.legend(handles=number_requests_legend_handlers, loc=4, title="#Requests", fontsize=14, handletextpad=.35, borderaxespad=0.175, borderpad=0.2)
        plt.setp(o_leg.get_title(), fontsize='15')
        ax.set_title("$\mathrm{LP}_{\mathrm{novel}}$: Formulation Strength", fontsize=17)
        ax.set_xlabel("Bound($\mathrm{MIP}_{\mathrm{MCF}}$) / Bound($\mathrm{LP}_{\mathrm{novel}}$)", fontsize=16)
        ax.set_ylabel("ECDF", fontsize=16)
        ax.set_xlim(0.65,max_observed_value*1.05)
        for tick in ax.xaxis.get_major_ticks():
            tick.label.set_fontsize(15.5)
        for tick in ax.yaxis.get_major_ticks():
            tick.label.set_fontsize(15.5)
        ax.set_xticks([ 1, 1.5, 2, 2.5, 3, 3.5], minor=False)
        ax.set_xticks([0.75, 1.25, 1.5, 1.75, 2.25, 2.5, 2.75, 3.25, 3.5], minor=True)
        ax.set_yticks([x*0.1 for x in range(1,10)], minor=True)
        ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
        ax.set_xticklabels([], minor=True)
        ax.grid(True, which="both", linestyle=":")
        # gridlines = ax.get_xgridlines() + ax.get_ygridlines()
        # for line in gridlines:
        #     line.set_linestyle(':')
        self._show_and_or_save_plots(output_path, filename)

    def plot_scatter_obj_vs_load(self, filter_specifications):
        """Scatter-plot relative profit vs. maximal load per rounding variant,
        one figure per variant, colored by edge resource factor (ERF)."""
        # hand-tuned display windows [x-range, y-range] per rounding variant
        bounding_boxes = {'min_aug': [[50,140],[85,235]],
                          'max_profit': [[95,210],[90,505]],
                          'wo_viol': [[30 ,105],[75,102]],
                          'mdk': [[15,105],[75,102]]}
        for data_to_plot in self._randround_data_names:
            bounding_box_x = bounding_boxes[data_to_plot][0]
            bounding_box_y = bounding_boxes[data_to_plot][1]
            output_filename = "SCATTER_obj_vs_load_{}".format(data_to_plot)
            output_path, filename = self._construct_output_path_and_filename(output_filename,
                                                                             filter_specifications)
            logger.debug("output_path is {};\t filename is {}".format(output_path, filename))
            if not self.overwrite_existing_files and os.path.exists(filename):
                logger.info("Skipping generation of {} as this file already exists".format(filename))
                # NOTE(review): this `return` aborts the remaining variants as
                # well; presumably `continue` was intended — confirm.
                return
            fix, ax = plt.subplots(figsize=(5, 4))
            colors = list(self.colors.values())
            markers = list(self.markers.values())
            filter_path_NRF, node_resource_factors = extract_parameter_range(self.scenarioparameter_room,
                                                                             "node_resource_factor")
            filter_path_ERF, edge_resource_factors = extract_parameter_range(self.scenarioparameter_room,
                                                                             "edge_resource_factor")
            color_norm = matplotlib.colors.Normalize(vmin=0, vmax=6)
            scalar_map = matplotlib.cm.ScalarMappable(norm=color_norm, cmap='inferno')
            observed_values_relative_profit = np.empty(0)
            observed_values_load = np.empty(0)
            number_of_not_shown_values = 0
            for i, nrf in enumerate(node_resource_factors):
                for j, erf in enumerate(edge_resource_factors):
                    scenario_ids = self._obtain_scenarios_based_on_filters([] +
                                                                           [
                                                                               {"parameter": "node_resource_factor",
                                                                                "value": nrf},
                                                                               {"parameter": "edge_resource_factor",
                                                                                "value": erf},
                                                                           ])
                    if self.forbidden_scenario_ids:
                        scenario_ids = scenario_ids - self.forbidden_scenario_ids
                    list_of_scenarios = list(scenario_ids)
                    result_relative_profits = self.compute_relative_profits_arrays(list_of_scenarios)
                    raw_result_loads = self.compute_maximal_load_arrays(list_of_scenarios)
                    # cumulative load = max of node and edge load per scenario
                    result_cum_loads = np.maximum(raw_result_loads[data_to_plot][0],
                                                  raw_result_loads[data_to_plot][1])
                    observed_values_load = np.append(observed_values_load, result_cum_loads)
                    observed_values_relative_profit = np.append(observed_values_relative_profit,
                                                                result_relative_profits[data_to_plot])
                    # count points clipped away by the display bounding box
                    for y_value in np.nditer(result_cum_loads):
                        if y_value < bounding_box_y[0] or y_value > bounding_box_y[1]:
                            number_of_not_shown_values += 1
                    for x_value in np.nditer(result_relative_profits[data_to_plot]):
                        if x_value < bounding_box_x[0] or x_value > bounding_box_x[1]:
                            number_of_not_shown_values += 1
                    ax.scatter(result_relative_profits[data_to_plot],
                               result_cum_loads,
                               c=matplotlib.colors.to_hex(scalar_map.to_rgba(j)),
                               marker="s",
                               label="{}".format(erf),
                               s=6, linewidths=.1, alpha=.8)
                if i == 0:
                    # legend only once (after the first NRF pass over all ERFs)
                    leg = plt.legend(fontsize=14, markerscale=2, title="ERF", handletextpad=0, borderaxespad=0.175, borderpad=0.2)
                    for lh in leg.legendHandles:
                        lh.set_alpha(1.0)
                    plt.setp(leg.get_title(), fontsize=14)
            ax.set_xlim(bounding_box_x)
            ax.set_ylim(bounding_box_y)
            ax.tick_params(axis='both', which='major', labelsize=15.5)
            ax.tick_params(axis='x', which='minor', labelsize=15.5)
            plt.grid(True, which="both")
            if self.paper_mode:
                ax.set_title("Vanilla Rounding Performance", fontsize=17)
            else:
                title = "Vanilla Rounding Performance\n"
                #print observed_values_relative_profit
                title += "profit: min: {:.2f}; mean: {:.2f}; max: {:.2f}\n".format(np.nanmin(observed_values_relative_profit),
                                                                                   np.nanmean(observed_values_relative_profit),
                                                                                   np.nanmax(observed_values_relative_profit))
                title += "loads: min: {:.2f}; mean: {:.2f}; max: {:.2f}\n".format(np.nanmin(observed_values_load),
                                                                                  np.nanmean(observed_values_load),
                                                                                  np.nanmax(observed_values_load))
                title += "{} of {} points lie outside the displayed area".format(number_of_not_shown_values, len(observed_values_relative_profit))
                ax.set_title(title, fontsize=10)
            xlabel = "$\mathrm{Profit}({" + self.math_label_names[
                data_to_plot] + "}) / \mathrm{Profit}({\mathrm{MIP}_{\mathrm{MCF}}})$ [%]"
            ax.set_xlabel(xlabel, fontsize=16)
            ylabel = "$\mathrm{Max\,Load}\,({" + self.math_label_names[data_to_plot] + "})$ [%]"
            ax.set_ylabel(ylabel, fontsize=16)
            ax.get_xaxis().set_major_formatter(matplotlib.ticker.FormatStrFormatter("%d"))
            ax.get_xaxis().set_minor_formatter(matplotlib.ticker.FormatStrFormatter("%d"))
            self._show_and_or_save_plots(output_path, filename)
def _construct_filter_specs(scenario_parameter_space_dict, parameter_filter_keys, maxdepth=3):
parameter_value_dic = dict()
for parameter in parameter_filter_keys:
_, parameter_values = extract_parameter_range(scenario_parameter_space_dict,
parameter)
parameter_value_dic[parameter] = parameter_values
# print parameter_value_dic.values()
result_list = [None]
for i in range(1, maxdepth + 1):
for combi in combinations(parameter_value_dic, i):
values = []
for element_of_combi in combi:
values.append(parameter_value_dic[element_of_combi])
for v in product(*values):
filter = []
for (parameter, value) in zip(combi, v):
filter.append({'parameter': parameter, 'value': value})
result_list.append(filter)
return result_list
def evaluate_baseline_and_randround(dc_baseline,
                                    baseline_algorithm_id,
                                    baseline_execution_config,
                                    dc_randround,
                                    randround_algorithm_id,
                                    randround_execution_config,
                                    exclude_generation_parameters=None,
                                    parameter_filter_keys=None,
                                    show_plot=False,
                                    save_plot=True,
                                    overwrite_existing_files=True,
                                    forbidden_scenario_ids=None,
                                    papermode=True,
                                    maxdepthfilter=2,
                                    output_path="./",
                                    output_filetype="png"):
    """ Main function for evaluation, creating plots and saving them in a specific directory hierarchy.

    A large variety of plots is created. For heatmaps, a generic plotter is used while for general
    comparison plots (ECDF and scatter) an own class is used. The plots that shall be generated cannot
    be controlled at the moment but the respective plotters can be easily adjusted.

    :param dc_baseline: unpickled datacontainer of baseline experiments (e.g. MIP)
    :param baseline_algorithm_id: algorithm id of the baseline algorithm
    :param baseline_execution_config: execution config (numeric) of the baseline algorithm execution
    :param dc_randround: unpickled datacontainer of randomized rounding experiments
    :param randround_algorithm_id: algorithm id of the randround algorithm
    :param randround_execution_config: execution config (numeric) of the randround algorithm execution
    :param exclude_generation_parameters: specific generation parameters that shall be excluded from the evaluation.
                                          These won't show in the plots and will also not be shown on axis labels etc.
    :param parameter_filter_keys: name of parameters according to which the results shall be filtered
    :param show_plot: Boolean: shall plots be shown
    :param save_plot: Boolean: shall the plots be saved
    :param overwrite_existing_files: shall existing files be overwritten?
    :param forbidden_scenario_ids: set of scenario ids that shall not be considered in the evaluation
                                   (NOTE(review): must support .update(), i.e. effectively a set — confirm callers)
    :param papermode: nicely layouted plots (papermode) or rather additional information?
    :param maxdepthfilter: length of filter permutations that shall be considered
    :param output_path: path to which the results shall be written
    :param output_filetype: filetype supported by matplotlib to export figures
    :return: None
    """
    if forbidden_scenario_ids is None:
        forbidden_scenario_ids = set()

    if exclude_generation_parameters is not None:
        # .items() instead of the Python-2-only .iteritems(): works on both 2 and 3
        for key, values_to_exclude in exclude_generation_parameters.items():
            parameter_filter_path, parameter_values = extract_parameter_range(
                dc_baseline.scenario_parameter_container.scenarioparameter_room, key)

            parameter_dicts_baseline = lookup_scenario_parameter_room_dicts_on_path(
                dc_baseline.scenario_parameter_container.scenarioparameter_room, parameter_filter_path)
            parameter_dicts_randround = lookup_scenario_parameter_room_dicts_on_path(
                dc_randround.scenario_parameter_container.scenarioparameter_room, parameter_filter_path)

            for value_to_exclude in values_to_exclude:
                if value_to_exclude not in parameter_values:
                    raise RuntimeError("The value {} is not contained in the list of parameter values {} for key {}".format(
                        value_to_exclude, parameter_values, key
                    ))

                # add respective scenario ids to the set of forbidden scenario ids
                forbidden_scenario_ids.update(set(lookup_scenarios_having_specific_values(
                    dc_baseline.scenario_parameter_container.scenario_parameter_dict, parameter_filter_path, value_to_exclude)))

            # remove the respective values from the scenario parameter room such that these are not considered when
            # constructing e.g. axes
            parameter_dicts_baseline[-1][key] = [value for value in parameter_dicts_baseline[-1][key] if
                                                 value not in values_to_exclude]
            parameter_dicts_randround[-1][key] = [value for value in parameter_dicts_randround[-1][key] if
                                                  value not in values_to_exclude]

    if parameter_filter_keys is not None:
        filter_specs = _construct_filter_specs(dc_baseline.scenario_parameter_container.scenarioparameter_room,
                                               parameter_filter_keys,
                                               maxdepth=maxdepthfilter)
    else:
        # a single None entry means "no filtering"
        filter_specs = [None]

    # initialize plotters: one heatmap plotter per algorithm, one heatmap
    # comparison plotter, and one scatter/ECDF comparison plotter
    baseline_plotter = SingleHeatmapPlotter(output_path=output_path,
                                            output_filetype=output_filetype,
                                            scenario_solution_storage=dc_baseline,
                                            algorithm_id=baseline_algorithm_id,
                                            execution_id=baseline_execution_config,
                                            heatmap_plot_type=HeatmapPlotType.Simple_MCF,
                                            show_plot=show_plot,
                                            save_plot=save_plot,
                                            overwrite_existing_files=overwrite_existing_files,
                                            forbidden_scenario_ids=forbidden_scenario_ids,
                                            paper_mode=papermode)
    randround_plotter = SingleHeatmapPlotter(output_path=output_path,
                                             output_filetype=output_filetype,
                                             scenario_solution_storage=dc_randround,
                                             algorithm_id=randround_algorithm_id,
                                             execution_id=randround_execution_config,
                                             heatmap_plot_type=HeatmapPlotType.Simple_RRT,
                                             show_plot=show_plot,
                                             save_plot=save_plot,
                                             overwrite_existing_files=overwrite_existing_files,
                                             forbidden_scenario_ids=forbidden_scenario_ids,
                                             paper_mode=papermode)
    comparison_plotter = ComparisonHeatmapPlotter(output_path=output_path,
                                                  output_filetype=output_filetype,
                                                  scenario_solution_storage=dc_baseline,
                                                  algorithm_id=baseline_algorithm_id,
                                                  execution_id=baseline_execution_config,
                                                  other_scenario_solution_storage=dc_randround,
                                                  other_algorithm_id=randround_algorithm_id,
                                                  other_execution_id=randround_execution_config,
                                                  heatmap_plot_type=HeatmapPlotType.Comparison_MCF_vs_RRT,
                                                  show_plot=show_plot,
                                                  save_plot=save_plot,
                                                  overwrite_existing_files=overwrite_existing_files,
                                                  forbidden_scenario_ids=forbidden_scenario_ids,
                                                  paper_mode=papermode)
    ecdf_capacity_violation_plotter = ComparisonBaselineVsRRT_Scatter_and_ECDF(output_path=output_path,
                                                                               output_filetype=output_filetype,
                                                                               baseline_solution_storage=dc_baseline,
                                                                               baseline_algorithm_id=baseline_algorithm_id,
                                                                               baseline_execution_id=baseline_execution_config,
                                                                               randround_solution_storage=dc_randround,
                                                                               randround_algorithm_id=randround_algorithm_id,
                                                                               randround_execution_id=randround_execution_config,
                                                                               show_plot=show_plot,
                                                                               save_plot=save_plot,
                                                                               overwrite_existing_files=overwrite_existing_files,
                                                                               forbidden_scenario_ids=forbidden_scenario_ids,
                                                                               paper_mode=papermode)

    plotters = [ecdf_capacity_violation_plotter, baseline_plotter, randround_plotter, comparison_plotter]

    # every plotter produces its full set of figures for every filter spec
    for filter_spec in filter_specs:
        for plotter in plotters:
            plotter.plot_figure(filter_spec)
|
en
| 0.68324
|
# MIT License # # Copyright (c) 2016-2018 <NAME>, <NAME>, <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # This is the evaluation and plotting module. This module handles all plotting related evaluation. # this prevents pycharm from removing this import, which is required for unpickling solutions #a plot only for ClassicMCFResult data #a plot only for RandomizedRoundingTriumvirate data #a plot comparing ClassicMCFResult with RandomizedRoundingTriumvirate Collection of heatmap plot specifications. Each specification corresponds to a specific plot and describes all essential information: - name: the title of the plot - filename: prefix of the files to be generated - plot_type: A HeatmapPlotType describing which data is required as input. - vmin and vmax: minimum and maximum value for the heatmap - cmap: the colormap that is to be used for the heatmap - lookup_function: which of the values shall be plotted. 
the input is a tuple consisting of a baseline and a randomized rounding solution. The function must return a numeric value or NaN - metric filter: after having applied the lookup_function (returning a numeric value or NaN) the metric_filter is applied (if given) and values not matching this function are discarded. - rounding_function: the function that is applied for displaying the mean values in the heatmap plots - colorbar_ticks: the tick values (numeric) for the heatmap plot #Embedded / #Feasible [%] ", #Feasible Requests", Axes specifications used for the heatmap plots. Each specification contains the following elements: - x_axis_parameter: the parameter name on the x-axis - y_axis_parameter: the parameter name on the y-axis - x_axis_title: the legend of the x-axis - y_axis_title: the legend of the y-axis - foldername: the folder to store the respective plots in Function to select scenarios with high objective gap or no requests. This function is not used anymore but is left here for future usage. # print "returning {}".format(results) # print current_dict Abstract Plotter interface providing functionality used by the majority of plotting classes of this module. # data extraction #check if filter specification conflicts with axes specification # for heatmap plot # all heatmap values will be stored in X # all scenario indices which has x_val as xaxis parameter (e.g. 
node_resource_factor = 0.5 # family="monospace", #for label in cbar.ax.get_yticklabels(): # label.set_fontproperties(font_manager.FontProperties(family="Courier New",weight='bold')) #cum_line = matplotlib.lines.Line2D([], [], color='k', linestyle="-", label='total') #sorted_data_cum = np.sort(np.maximum(result[data_name][0], result[data_name][1])) #ax.plot(sorted_data_cum, yvals, color=self.colors[data_name], linestyle="-") #ax.set_xscale("log", basex=10) #ax.set_xscale("log", basex=10) # gridlines = ax.get_xgridlines() + ax.get_ygridlines() # for line in gridlines: # line.set_linestyle(':') #print observed_values_relative_profit # print parameter_value_dic.values() Main function for evaluation, creating plots and saving them in a specific directory hierarchy. A large variety of plots is created. For heatmaps, a generic plotter is used while for general comparison plots (ECDF and scatter) an own class is used. The plots that shall be generated cannot be controlled at the moment but the respective plotters can be easily adjusted. :param dc_baseline: unpickled datacontainer of baseline experiments (e.g. MIP) :param baseline_algorithm_id: algorithm id of the baseline algorithm :param baseline_execution_config: execution config (numeric) of the baseline algorithm execution :param dc_randround: unpickled datacontainer of randomized rounding experiments :param randround_algorithm_id: algorithm id of the randround algorithm :param randround_execution_config: execution config (numeric) of the randround algorithm execution :param exclude_generation_parameters: specific generation parameters that shall be excluded from the evaluation. These won't show in the plots and will also not be shown on axis labels etc. :param parameter_filter_keys: name of parameters according to which the results shall be filtered :param show_plot: Boolean: shall plots be shown :param save_plot: Boolean: shall the plots be saved :param overwrite_existing_files: shall existing files be overwritten? 
:param forbidden_scenario_ids: list / set of scenario ids that shall not be considered in the evaluation :param papermode: nicely layouted plots (papermode) or rather additional information? :param maxdepthfilter: length of filter permutations that shall be considered :param output_path: path to which the results shall be written :param output_filetype: filetype supported by matplotlib to export figures :return: None #add respective scenario ids to the set of forbidden scenario ids #remove the respective values from the scenario parameter room such that these are not considered when #constructing e.g. axes #initialize plotters
| 1.680435
| 2
|
python/events/lib/python3.4/weakref.py
|
classmember/proof_of_concept
| 0
|
6629705
|
/usr/lib64/python3.4/weakref.py
|
/usr/lib64/python3.4/weakref.py
|
none
| 1
| 1.034186
| 1
|
|
roadslink/overrides/contact.py
|
premkanish/rl
| 0
|
6629706
|
import frappe
from frappe.contacts.doctype.contact.contact import Contact


class CustomContact(Contact):
    """Contact subclass that stores phone numbers with an explicit country
    dial code and pushes primary-contact details to linked Customer/Supplier
    records on update.
    """

    def add_phone(self, dial_country, phone, is_primary_phone=0, is_primary_mobile_no=0, autosave=False):
        """Append a phone row built from a country's dial code plus the raw number.

        NOTE(review): the duplicate check queries "Contact Phone" by the raw
        ``phone`` value, while the stored ``phone`` field is the concatenation
        ``dial_code + phone`` — confirm whether the check should use the
        concatenated number instead.
        """
        dial_code = frappe.get_value("Country",dial_country, "dial_code")
        # full number = dial code + subscriber number, whitespace-stripped
        req_phone= dial_code.strip()+phone.strip()
        if not frappe.db.exists("Contact Phone", {"phone": phone, "parent": self.name}):
            self.append("phone_nos", {
                "dial_country": dial_country,
                "dial_code":dial_code,
                "mobile_number":phone,
                "phone": req_phone,
                "is_primary_phone": is_primary_phone,
                "is_primary_mobile_no": is_primary_mobile_no
            })
            if autosave:
                self.save(ignore_permissions=True)

    def make_phone(self):
        """Recompute the combined ``phone`` field for every phone row that has
        both a dial code and a mobile number."""
        for d in self.get("phone_nos"):
            if d.dial_code and d.mobile_number:
                d.phone= d.dial_code.strip()+d.mobile_number.strip()

    def validate(self):
        """Frappe validate hook: normalize phone rows, then run the standard
        Contact validation."""
        self.make_phone()
        #if self.user:
            #roles = frappe.get_roles(self.user)
            #if self.is_new:
                #if 'Customer' in roles:
        super(CustomContact, self).validate()

    def on_update(self):
        """Frappe on_update hook: if this is the primary contact, propagate
        name/email/phone to every linked Customer or Supplier record."""
        if self.is_primary_contact:
            for d in self.get("links"):
                if d.link_doctype == "Customer":
                    frappe.db.set_value(d.link_doctype, d.link_name, {
                        'customer_primary_contact': self.name,
                        'email_id': self.email_id,
                        'mobile_no' : self.phone
                    })
                elif d.link_doctype == "Supplier":
                    frappe.db.set_value(d.link_doctype, d.link_name, {
                        'supplier_primary_contact': self.name,
                        'email_id': self.email_id,
                        'mobile_no' : self.phone
                    })
|
import frappe
from frappe.contacts.doctype.contact.contact import Contact
class CustomContact(Contact):
def add_phone(self, dial_country, phone, is_primary_phone=0, is_primary_mobile_no=0, autosave=False):
dial_code = frappe.get_value("Country",dial_country, "dial_code")
req_phone= dial_code.strip()+phone.strip()
if not frappe.db.exists("Contact Phone", {"phone": phone, "parent": self.name}):
self.append("phone_nos", {
"dial_country": dial_country,
"dial_code":dial_code,
"mobile_number":phone,
"phone": req_phone,
"is_primary_phone": is_primary_phone,
"is_primary_mobile_no": is_primary_mobile_no
})
if autosave:
self.save(ignore_permissions=True)
def make_phone(self):
for d in self.get("phone_nos"):
if d.dial_code and d.mobile_number:
d.phone= d.dial_code.strip()+d.mobile_number.strip()
def validate(self):
self.make_phone()
#if self.user:
#roles = frappe.get_roles(self.user)
#if self.is_new:
#if 'Customer' in roles:
super(CustomContact, self).validate()
def on_update(self):
if self.is_primary_contact:
for d in self.get("links"):
if d.link_doctype == "Customer":
frappe.db.set_value(d.link_doctype, d.link_name, {
'customer_primary_contact': self.name,
'email_id': self.email_id,
'mobile_no' : self.phone
})
elif d.link_doctype == "Supplier":
frappe.db.set_value(d.link_doctype, d.link_name, {
'supplier_primary_contact': self.name,
'email_id': self.email_id,
'mobile_no' : self.phone
})
|
en
| 0.252475
|
#if self.user: #roles = frappe.get_roles(self.user) #if self.is_new: #if 'Customer' in roles:
| 2.314802
| 2
|
code/deeplens-lambda.py
|
maysax/lone-worker-safety-deepAI
| 22
|
6629707
|
#*****************************************************
# *
# Copyright 2018 Amazon.com, Inc. or its affiliates. *
# All Rights Reserved. *
# *
#*****************************************************
""" A sample lambda for object detection"""
from threading import Thread, Event
import os
import json
import numpy as np
import awscam
import cv2
import greengrasssdk
import time
import base64
import urllib
import zipfile
import sys
import datetime
# boto3 is not installed on device by default, so a pre-built distribution is
# downloaded and unpacked into /tmp at import time.
# NOTE(review): this download runs on every cold start of the lambda and uses
# the Python-2-only urllib.urlretrieve API; also, fetching executable code
# from a fixed S3 URL is a supply-chain risk — confirm the source is trusted.
boto_dir = '/tmp/boto_dir'
if not os.path.exists(boto_dir):
    os.mkdir(boto_dir)
urllib.urlretrieve("https://s3.amazonaws.com/dear-demo/boto_3_dist.zip", "/tmp/boto_3_dist.zip")
with zipfile.ZipFile("/tmp/boto_3_dist.zip", "r") as zip_ref:
    zip_ref.extractall(boto_dir)
sys.path.append(boto_dir)
import boto3

# Destination bucket for captured frames; must be replaced before deployment.
bucket_name = "REPLACE-WITH-NAME-OF-YOUR-S3-BUCKET"

# Create an IoT client for sending to messages to the cloud.
client = greengrasssdk.client('iot-data')
iot_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])
class LocalDisplay(Thread):
    """ Class for facilitating the local display of inference results
        (as images). The class is designed to run on its own thread. In
        particular the class dumps the inference results into a FIFO
        located in the tmp directory (which lambda has access to). The
        results can be rendered using mplayer by typing:
        mplayer -demuxer lavf -lavfdopts format=mjpeg:probesize=32 /tmp/results.mjpeg
    """
    def __init__(self, resolution):
        """ resolution - Desired resolution of the project stream
            Raises Exception if resolution is not one of '1080p'/'720p'/'480p'.
        """
        # Initialize the base class, so that the object can run on its own
        # thread.
        super(LocalDisplay, self).__init__()
        # List of valid resolutions
        RESOLUTION = {'1080p' : (1920, 1080), '720p' : (1280, 720), '480p' : (858, 480)}
        if resolution not in RESOLUTION:
            raise Exception("Invalid resolution")
        self.resolution = RESOLUTION[resolution]
        # Initialize the default image to be a white canvas. Clients
        # will update the image when ready.
        # NOTE(review): 255*np.ones(...) is float64 and sized 640x480 rather
        # than the chosen resolution — presumably just a placeholder frame;
        # confirm cv2.imencode accepts the float array on the target device.
        self.frame = cv2.imencode('.jpg', 255*np.ones([640, 480, 3]))[1]
        # Event used to signal run() to stop dumping frames.
        self.stop_request = Event()

    def run(self):
        """ Overridden method that continually dumps images to the desired
            FIFO file.
        """
        # Path to the FIFO file. The lambda only has permissions to the tmp
        # directory. Pointing to a FIFO file in another directory
        # will cause the lambda to crash.
        result_path = '/tmp/results.mjpeg'
        # Create the FIFO file if it doesn't exist.
        if not os.path.exists(result_path):
            os.mkfifo(result_path)
        # This call will block until a consumer is available
        with open(result_path, 'w') as fifo_file:
            while not self.stop_request.isSet():
                try:
                    # Write the data to the FIFO file. This call will block
                    # meaning the code will come to a halt here until a consumer
                    # is available.
                    fifo_file.write(self.frame.tobytes())
                except IOError:
                    # No consumer attached (broken pipe) — retry until stopped.
                    continue

    def set_frame_data(self, frame):
        """ Method updates the image data. This currently encodes the
            numpy array to jpg but can be modified to support other encodings.
            frame - Numpy array containing the image data of the next frame
            in the project stream.
            Raises Exception if JPEG encoding fails.
        """
        ret, jpeg = cv2.imencode('.jpg', cv2.resize(frame, self.resolution))
        if not ret:
            raise Exception('Failed to set frame data')
        self.frame = jpeg

    def join(self):
        # NOTE(review): this override only signals the stop event and does NOT
        # call Thread.join(), so the caller does not actually wait for run()
        # to finish. This is presumably deliberate, since run() can block
        # indefinitely on the FIFO write when no consumer is attached —
        # confirm before "fixing" by delegating to super().join().
        self.stop_request.set()
def push_to_s3(img):
    """Encode *img* as JPEG and upload it to the configured S3 bucket.

    The object key encodes the capture time. Success and failure are both
    reported on the module-level IoT topic; any exception is caught so the
    caller's inference loop keeps running.

    img - numpy array holding the frame to upload
    """
    try:
        index = 0
        timestamp = int(time.time())
        now = datetime.datetime.now()
        # Key layout: persons/<month>_<day>/<hour>_<minute>/<timestamp>_<index>.jpg
        key = "persons/{}_{}/{}_{}/{}_{}.jpg".format(now.month, now.day,
                                                     now.hour, now.minute,
                                                     timestamp, index)
        # Encode the frame as JPEG at 90% quality before uploading.
        jpeg_params = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
        _, encoded_frame = cv2.imencode('.jpg', img, jpeg_params)
        s3_client = boto3.client('s3')
        put_response = s3_client.put_object(ACL='private',
                                            Body=encoded_frame.tostring(),
                                            Bucket=bucket_name,
                                            Key=key)
        # Report the outcome on the IoT topic.
        client.publish(topic=iot_topic, payload="Response: {}".format(put_response))
        client.publish(topic=iot_topic, payload="Frame pushed to S3")
    except Exception as e:
        client.publish(topic=iot_topic, payload="Pushing to S3 failed: " + str(e))
def greengrass_infinite_infer_run():
    """ Entry point of the lambda function.

    Runs an infinite SSD-based object-detection loop on the camera stream:
    frames containing a 'person' above the detection threshold are uploaded
    to S3, annotated with bounding boxes, rendered to the local FIFO display,
    and the label/probability results are published over MQTT.
    """
    try:
        # This object detection model is implemented as single shot detector (ssd), since
        # the number of labels is small we create a dictionary that will help us convert
        # the machine labels to human readable labels.
        model_type = 'ssd'
        output_map = {1: 'aeroplane', 2: 'bicycle', 3: 'bird', 4: 'boat', 5: 'bottle', 6: 'bus',
                      7 : 'car', 8 : 'cat', 9 : 'chair', 10 : 'cow', 11 : 'dinning table',
                      12 : 'dog', 13 : 'horse', 14 : 'motorbike', 15 : 'person',
                      16 : 'pottedplant', 17 : 'sheep', 18 : 'sofa', 19 : 'train',
                      20 : 'tvmonitor'}
        # Create an IoT client for sending to messages to the cloud.
        # (Uses the module-level client/iot_topic; the local versions below
        # were deliberately disabled.)
        ###client = greengrasssdk.client('iot-data')
        ###iot_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])
        # Create a local display instance that will dump the image bytes to a FIFO
        # file that the image can be rendered locally.
        local_display = LocalDisplay('480p')
        local_display.start()
        # The sample projects come with optimized artifacts, hence only the artifact
        # path is required.
        model_path = '/opt/awscam/artifacts/mxnet_deploy_ssd_resnet50_300_FP16_FUSED.xml'
        # Load the model onto the GPU.
        client.publish(topic=iot_topic, payload='Loading object detection model')
        model = awscam.Model(model_path, {'GPU': 1})
        client.publish(topic=iot_topic, payload='Object detection model loaded')
        # Set the threshold for detection
        detection_threshold = 0.25
        # The height and width of the training set images
        input_height = 300
        input_width = 300
        # Do inference until the lambda is killed.
        while True:
            detectedPerson = False
            # Get a frame from the video stream
            ret, frame = awscam.getLastFrame()
            if not ret:
                raise Exception('Failed to get frame from the stream')
            # Resize frame to the same size as the training set.
            frame_resize = cv2.resize(frame, (input_height, input_width))
            # Run the images through the inference engine and parse the results using
            # the parser API, note it is possible to get the output of doInference
            # and do the parsing manually, but since it is a ssd model,
            # a simple API is provided.
            parsed_inference_results = model.parseResult(model_type,
                                                         model.doInference(frame_resize))
            # Compute the scale in order to draw bounding boxes on the full resolution
            # image.
            yscale = float(frame.shape[0]/input_height)
            xscale = float(frame.shape[1]/input_width)
            # Dictionary to be filled with labels and probabilities for MQTT
            cloud_output = {}
            # Get the detected objects and probabilities
            # NOTE(review): detectedPerson is reset a second time here — the
            # assignment at the top of the loop is redundant.
            detectedPerson = False
            # First pass: only decide whether any person is present, so the
            # RAW frame (no boxes yet) can be pushed to S3 below.
            for obj in parsed_inference_results[model_type]:
                if obj['prob'] > detection_threshold:
                    if(output_map[obj['label']] == 'person'):
                        detectedPerson = True
                        break
            if(detectedPerson):
                # Push a downscaled copy of the unannotated frame to S3.
                rfr = cv2.resize(frame, (672, 380))
                push_to_s3(rfr)
                #fr2 = cv2.resize(frame, (1344, 760))
                #_, jpg_data = cv2.imencode('.jpg', fr2)
                #push_to_s3(jpg_data)
            # Second pass: annotate person detections on the full-resolution
            # frame and collect the cloud output.
            for obj in parsed_inference_results[model_type]:
                if obj['prob'] > detection_threshold:
                    if(output_map[obj['label']] == 'person'):
                        detectedPerson = True
                        # Add bounding boxes to full resolution frame
                        # NOTE(review): the second term reduces to
                        # int(obj['xmin']) (the -w/2 and +w/2 cancel), so
                        # the raw model coordinate is added on top of the
                        # scaled one — looks unintended; TODO confirm against
                        # the original AWS sample.
                        xmin = int(xscale * obj['xmin']) \
                               + int((obj['xmin'] - input_width/2) + input_width/2)
                        ymin = int(yscale * obj['ymin'])
                        xmax = int(xscale * obj['xmax']) \
                               + int((obj['xmax'] - input_width/2) + input_width/2)
                        ymax = int(yscale * obj['ymax'])
                        # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
                        # for more information about the cv2.rectangle method.
                        # Method signature: image, point1, point2, color, and tickness.
                        cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (255, 165, 20), 10)
                        # Amount to offset the label/probability text above the bounding box.
                        text_offset = 15
                        # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
                        # for more information about the cv2.putText method.
                        # Method signature: image, text, origin, font face, font scale, color,
                        # and tickness
                        cv2.putText(frame, "{}: {:.2f}%".format(output_map[obj['label']],
                                                                obj['prob'] * 100),
                                    (xmin, ymin-text_offset),
                                    cv2.FONT_HERSHEY_SIMPLEX, 2.5, (255, 165, 20), 6)
                        # Store label and probability to send to cloud
                        cloud_output[output_map[obj['label']]] = obj['prob']
            # Set the next frame in the local display stream.
            local_display.set_frame_data(frame)
            # Send results to the cloud
            client.publish(topic=iot_topic, payload=json.dumps(cloud_output))
    except Exception as ex:
        # Any unhandled failure is reported over MQTT; the lambda then exits.
        client.publish(topic=iot_topic, payload='Error in object detection lambda: {}'.format(ex))
greengrass_infinite_infer_run()
|
#*****************************************************
# *
# Copyright 2018 Amazon.com, Inc. or its affiliates. *
# All Rights Reserved. *
# *
#*****************************************************
""" A sample lambda for object detection"""
from threading import Thread, Event
import os
import json
import numpy as np
import awscam
import cv2
import greengrasssdk
import time
import base64
import urllib
import zipfile
import sys
import datetime
#boto3 is not installed on device by default.
boto_dir = '/tmp/boto_dir'
if not os.path.exists(boto_dir):
os.mkdir(boto_dir)
urllib.urlretrieve("https://s3.amazonaws.com/dear-demo/boto_3_dist.zip", "/tmp/boto_3_dist.zip")
with zipfile.ZipFile("/tmp/boto_3_dist.zip", "r") as zip_ref:
zip_ref.extractall(boto_dir)
sys.path.append(boto_dir)
import boto3
bucket_name = "REPLACE-WITH-NAME-OF-YOUR-S3-BUCKET"
# Create an IoT client for sending to messages to the cloud.
client = greengrasssdk.client('iot-data')
iot_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])
class LocalDisplay(Thread):
""" Class for facilitating the local display of inference results
(as images). The class is designed to run on its own thread. In
particular the class dumps the inference results into a FIFO
located in the tmp directory (which lambda has access to). The
results can be rendered using mplayer by typing:
mplayer -demuxer lavf -lavfdopts format=mjpeg:probesize=32 /tmp/results.mjpeg
"""
def __init__(self, resolution):
""" resolution - Desired resolution of the project stream """
# Initialize the base class, so that the object can run on its own
# thread.
super(LocalDisplay, self).__init__()
# List of valid resolutions
RESOLUTION = {'1080p' : (1920, 1080), '720p' : (1280, 720), '480p' : (858, 480)}
if resolution not in RESOLUTION:
raise Exception("Invalid resolution")
self.resolution = RESOLUTION[resolution]
# Initialize the default image to be a white canvas. Clients
# will update the image when ready.
self.frame = cv2.imencode('.jpg', 255*np.ones([640, 480, 3]))[1]
self.stop_request = Event()
def run(self):
""" Overridden method that continually dumps images to the desired
FIFO file.
"""
# Path to the FIFO file. The lambda only has permissions to the tmp
# directory. Pointing to a FIFO file in another directory
# will cause the lambda to crash.
result_path = '/tmp/results.mjpeg'
# Create the FIFO file if it doesn't exist.
if not os.path.exists(result_path):
os.mkfifo(result_path)
# This call will block until a consumer is available
with open(result_path, 'w') as fifo_file:
while not self.stop_request.isSet():
try:
# Write the data to the FIFO file. This call will block
# meaning the code will come to a halt here until a consumer
# is available.
fifo_file.write(self.frame.tobytes())
except IOError:
continue
def set_frame_data(self, frame):
""" Method updates the image data. This currently encodes the
numpy array to jpg but can be modified to support other encodings.
frame - Numpy array containing the image data of the next frame
in the project stream.
"""
ret, jpeg = cv2.imencode('.jpg', cv2.resize(frame, self.resolution))
if not ret:
raise Exception('Failed to set frame data')
self.frame = jpeg
def join(self):
self.stop_request.set()
def push_to_s3(img):
try:
index = 0
timestamp = int(time.time())
now = datetime.datetime.now()
key = "persons/{}_{}/{}_{}/{}_{}.jpg".format(now.month, now.day,
now.hour, now.minute,
timestamp, index)
s3 = boto3.client('s3')
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
_, jpg_data = cv2.imencode('.jpg', img, encode_param)
response = s3.put_object(ACL='private',
Body=jpg_data.tostring(),
Bucket=bucket_name,
Key=key)
client.publish(topic=iot_topic, payload="Response: {}".format(response))
client.publish(topic=iot_topic, payload="Frame pushed to S3")
except Exception as e:
msg = "Pushing to S3 failed: " + str(e)
client.publish(topic=iot_topic, payload=msg)
def greengrass_infinite_infer_run():
""" Entry point of the lambda function"""
try:
# This object detection model is implemented as single shot detector (ssd), since
# the number of labels is small we create a dictionary that will help us convert
# the machine labels to human readable labels.
model_type = 'ssd'
output_map = {1: 'aeroplane', 2: 'bicycle', 3: 'bird', 4: 'boat', 5: 'bottle', 6: 'bus',
7 : 'car', 8 : 'cat', 9 : 'chair', 10 : 'cow', 11 : 'dinning table',
12 : 'dog', 13 : 'horse', 14 : 'motorbike', 15 : 'person',
16 : 'pottedplant', 17 : 'sheep', 18 : 'sofa', 19 : 'train',
20 : 'tvmonitor'}
# Create an IoT client for sending to messages to the cloud.
###client = greengrasssdk.client('iot-data')
###iot_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])
# Create a local display instance that will dump the image bytes to a FIFO
# file that the image can be rendered locally.
local_display = LocalDisplay('480p')
local_display.start()
# The sample projects come with optimized artifacts, hence only the artifact
# path is required.
model_path = '/opt/awscam/artifacts/mxnet_deploy_ssd_resnet50_300_FP16_FUSED.xml'
# Load the model onto the GPU.
client.publish(topic=iot_topic, payload='Loading object detection model')
model = awscam.Model(model_path, {'GPU': 1})
client.publish(topic=iot_topic, payload='Object detection model loaded')
# Set the threshold for detection
detection_threshold = 0.25
# The height and width of the training set images
input_height = 300
input_width = 300
# Do inference until the lambda is killed.
while True:
detectedPerson = False
# Get a frame from the video stream
ret, frame = awscam.getLastFrame()
if not ret:
raise Exception('Failed to get frame from the stream')
# Resize frame to the same size as the training set.
frame_resize = cv2.resize(frame, (input_height, input_width))
# Run the images through the inference engine and parse the results using
# the parser API, note it is possible to get the output of doInference
# and do the parsing manually, but since it is a ssd model,
# a simple API is provided.
parsed_inference_results = model.parseResult(model_type,
model.doInference(frame_resize))
# Compute the scale in order to draw bounding boxes on the full resolution
# image.
yscale = float(frame.shape[0]/input_height)
xscale = float(frame.shape[1]/input_width)
# Dictionary to be filled with labels and probabilities for MQTT
cloud_output = {}
# Get the detected objects and probabilities
detectedPerson = False
for obj in parsed_inference_results[model_type]:
if obj['prob'] > detection_threshold:
if(output_map[obj['label']] == 'person'):
detectedPerson = True
break
if(detectedPerson):
rfr = cv2.resize(frame, (672, 380))
push_to_s3(rfr)
#fr2 = cv2.resize(frame, (1344, 760))
#_, jpg_data = cv2.imencode('.jpg', fr2)
#push_to_s3(jpg_data)
for obj in parsed_inference_results[model_type]:
if obj['prob'] > detection_threshold:
if(output_map[obj['label']] == 'person'):
detectedPerson = True
# Add bounding boxes to full resolution frame
xmin = int(xscale * obj['xmin']) \
+ int((obj['xmin'] - input_width/2) + input_width/2)
ymin = int(yscale * obj['ymin'])
xmax = int(xscale * obj['xmax']) \
+ int((obj['xmax'] - input_width/2) + input_width/2)
ymax = int(yscale * obj['ymax'])
# See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
# for more information about the cv2.rectangle method.
# Method signature: image, point1, point2, color, and tickness.
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (255, 165, 20), 10)
# Amount to offset the label/probability text above the bounding box.
text_offset = 15
# See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
# for more information about the cv2.putText method.
# Method signature: image, text, origin, font face, font scale, color,
# and tickness
cv2.putText(frame, "{}: {:.2f}%".format(output_map[obj['label']],
obj['prob'] * 100),
(xmin, ymin-text_offset),
cv2.FONT_HERSHEY_SIMPLEX, 2.5, (255, 165, 20), 6)
# Store label and probability to send to cloud
cloud_output[output_map[obj['label']]] = obj['prob']
# Set the next frame in the local display stream.
local_display.set_frame_data(frame)
# Send results to the cloud
client.publish(topic=iot_topic, payload=json.dumps(cloud_output))
except Exception as ex:
client.publish(topic=iot_topic, payload='Error in object detection lambda: {}'.format(ex))
greengrass_infinite_infer_run()
|
en
| 0.799843
|
#***************************************************** # * # Copyright 2018 Amazon.com, Inc. or its affiliates. * # All Rights Reserved. * # * #***************************************************** A sample lambda for object detection #boto3 is not installed on device by default. # Create an IoT client for sending to messages to the cloud. Class for facilitating the local display of inference results (as images). The class is designed to run on its own thread. In particular the class dumps the inference results into a FIFO located in the tmp directory (which lambda has access to). The results can be rendered using mplayer by typing: mplayer -demuxer lavf -lavfdopts format=mjpeg:probesize=32 /tmp/results.mjpeg resolution - Desired resolution of the project stream # Initialize the base class, so that the object can run on its own # thread. # List of valid resolutions # Initialize the default image to be a white canvas. Clients # will update the image when ready. Overridden method that continually dumps images to the desired FIFO file. # Path to the FIFO file. The lambda only has permissions to the tmp # directory. Pointing to a FIFO file in another directory # will cause the lambda to crash. # Create the FIFO file if it doesn't exist. # This call will block until a consumer is available # Write the data to the FIFO file. This call will block # meaning the code will come to a halt here until a consumer # is available. Method updates the image data. This currently encodes the numpy array to jpg but can be modified to support other encodings. frame - Numpy array containing the image data of the next frame in the project stream. Entry point of the lambda function # This object detection model is implemented as single shot detector (ssd), since # the number of labels is small we create a dictionary that will help us convert # the machine labels to human readable labels. # Create an IoT client for sending to messages to the cloud. 
###client = greengrasssdk.client('iot-data') ###iot_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME']) # Create a local display instance that will dump the image bytes to a FIFO # file that the image can be rendered locally. # The sample projects come with optimized artifacts, hence only the artifact # path is required. # Load the model onto the GPU. # Set the threshold for detection # The height and width of the training set images # Do inference until the lambda is killed. # Get a frame from the video stream # Resize frame to the same size as the training set. # Run the images through the inference engine and parse the results using # the parser API, note it is possible to get the output of doInference # and do the parsing manually, but since it is a ssd model, # a simple API is provided. # Compute the scale in order to draw bounding boxes on the full resolution # image. # Dictionary to be filled with labels and probabilities for MQTT # Get the detected objects and probabilities #fr2 = cv2.resize(frame, (1344, 760)) #_, jpg_data = cv2.imencode('.jpg', fr2) #push_to_s3(jpg_data) # Add bounding boxes to full resolution frame # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html # for more information about the cv2.rectangle method. # Method signature: image, point1, point2, color, and tickness. # Amount to offset the label/probability text above the bounding box. # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html # for more information about the cv2.putText method. # Method signature: image, text, origin, font face, font scale, color, # and tickness # Store label and probability to send to cloud # Set the next frame in the local display stream. # Send results to the cloud
| 2.249857
| 2
|
src/awkward/_v2/operations/structure/ak_argcartesian.py
|
BioGeek/awkward-1.0
| 0
|
6629708
|
<filename>src/awkward/_v2/operations/structure/ak_argcartesian.py
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
from __future__ import absolute_import
import awkward as ak
np = ak.nplike.NumpyMetadata.instance()
def argcartesian(
arrays,
axis=1,
nested=None,
parameters=None,
with_name=None,
highlevel=True,
behavior=None,
):
raise NotImplementedError
# """
# Args:
# arrays (dict or iterable of arrays): Arrays on which to compute the
# Cartesian product.
# axis (int): The dimension at which this operation is applied. The
# outermost dimension is `0`, followed by `1`, etc., and negative
# values count backward from the innermost: `-1` is the innermost
# dimension, `-2` is the next level up, etc.
# nested (None, True, False, or iterable of str or int): If None or
# False, all combinations of elements from the `arrays` are
# produced at the same level of nesting; if True, they are grouped
# in nested lists by combinations that share a common item from
# each of the `arrays`; if an iterable of str or int, group common
# items for a chosen set of keys from the `array` dict or slots
# of the `array` iterable.
# parameters (None or dict): Parameters for the new
# #ak.layout.RecordArray node that is created by this operation.
# with_name (None or str): Assigns a `"__record__"` name to the new
# #ak.layout.RecordArray node that is created by this operation
# (overriding `parameters`, if necessary).
# highlevel (bool): If True, return an #ak.Array; otherwise, return
# a low-level #ak.layout.Content subclass.
# behavior (None or dict): Custom #ak.behavior for the output array, if
# high-level.
# Computes a Cartesian product (i.e. cross product) of data from a set of
# `arrays`, like #ak.cartesian, but returning integer indexes for
# #ak.Array.__getitem__.
# For example, the Cartesian product of
# >>> one = ak.Array([1.1, 2.2, 3.3])
# >>> two = ak.Array(["a", "b"])
# is
# >>> ak.to_list(ak.cartesian([one, two], axis=0))
# [(1.1, 'a'), (1.1, 'b'), (2.2, 'a'), (2.2, 'b'), (3.3, 'a'), (3.3, 'b')]
# But with argcartesian, only the indexes are returned.
# >>> ak.to_list(ak.argcartesian([one, two], axis=0))
# [(0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1)]
# These are the indexes that can select the items that go into the actual
# Cartesian product.
# >>> one_index, two_index = ak.unzip(ak.argcartesian([one, two], axis=0))
# >>> one[one_index]
# <Array [1.1, 1.1, 2.2, 2.2, 3.3, 3.3] type='6 * float64'>
# >>> two[two_index]
# <Array ['a', 'b', 'a', 'b', 'a', 'b'] type='6 * string'>
# All of the parameters for #ak.cartesian apply equally to #ak.argcartesian,
# so see the #ak.cartesian documentation for a more complete description.
# """
# if axis < 0:
# raise ValueError(
# "the 'axis' of argcartesian must be non-negative"
#
# )
# else:
# if isinstance(arrays, dict):
# behavior = ak._v2._util.behaviorof(*arrays.values(), behavior=behavior)
# layouts = dict(
# (
# n,
# ak._v2.operations.convert.to_layout(
# x, allow_record=False, allow_other=False
# ).localindex(axis),
# )
# for n, x in arrays.items()
# )
# else:
# behavior = ak._v2._util.behaviorof(*arrays, behavior=behavior)
# layouts = [
# ak._v2.operations.convert.to_layout(
# x, allow_record=False, allow_other=False
# ).localindex(axis)
# for x in arrays
# ]
# if with_name is not None:
# if parameters is None:
# parameters = {}
# else:
# parameters = dict(parameters)
# parameters["__record__"] = with_name
# result = cartesian(
# layouts, axis=axis, nested=nested, parameters=parameters, highlevel=False
# )
# return ak._v2._util.maybe_wrap(result, behavior, highlevel)
|
<filename>src/awkward/_v2/operations/structure/ak_argcartesian.py
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
from __future__ import absolute_import
import awkward as ak
np = ak.nplike.NumpyMetadata.instance()
def argcartesian(
arrays,
axis=1,
nested=None,
parameters=None,
with_name=None,
highlevel=True,
behavior=None,
):
raise NotImplementedError
# """
# Args:
# arrays (dict or iterable of arrays): Arrays on which to compute the
# Cartesian product.
# axis (int): The dimension at which this operation is applied. The
# outermost dimension is `0`, followed by `1`, etc., and negative
# values count backward from the innermost: `-1` is the innermost
# dimension, `-2` is the next level up, etc.
# nested (None, True, False, or iterable of str or int): If None or
# False, all combinations of elements from the `arrays` are
# produced at the same level of nesting; if True, they are grouped
# in nested lists by combinations that share a common item from
# each of the `arrays`; if an iterable of str or int, group common
# items for a chosen set of keys from the `array` dict or slots
# of the `array` iterable.
# parameters (None or dict): Parameters for the new
# #ak.layout.RecordArray node that is created by this operation.
# with_name (None or str): Assigns a `"__record__"` name to the new
# #ak.layout.RecordArray node that is created by this operation
# (overriding `parameters`, if necessary).
# highlevel (bool): If True, return an #ak.Array; otherwise, return
# a low-level #ak.layout.Content subclass.
# behavior (None or dict): Custom #ak.behavior for the output array, if
# high-level.
# Computes a Cartesian product (i.e. cross product) of data from a set of
# `arrays`, like #ak.cartesian, but returning integer indexes for
# #ak.Array.__getitem__.
# For example, the Cartesian product of
# >>> one = ak.Array([1.1, 2.2, 3.3])
# >>> two = ak.Array(["a", "b"])
# is
# >>> ak.to_list(ak.cartesian([one, two], axis=0))
# [(1.1, 'a'), (1.1, 'b'), (2.2, 'a'), (2.2, 'b'), (3.3, 'a'), (3.3, 'b')]
# But with argcartesian, only the indexes are returned.
# >>> ak.to_list(ak.argcartesian([one, two], axis=0))
# [(0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1)]
# These are the indexes that can select the items that go into the actual
# Cartesian product.
# >>> one_index, two_index = ak.unzip(ak.argcartesian([one, two], axis=0))
# >>> one[one_index]
# <Array [1.1, 1.1, 2.2, 2.2, 3.3, 3.3] type='6 * float64'>
# >>> two[two_index]
# <Array ['a', 'b', 'a', 'b', 'a', 'b'] type='6 * string'>
# All of the parameters for #ak.cartesian apply equally to #ak.argcartesian,
# so see the #ak.cartesian documentation for a more complete description.
# """
# if axis < 0:
# raise ValueError(
# "the 'axis' of argcartesian must be non-negative"
#
# )
# else:
# if isinstance(arrays, dict):
# behavior = ak._v2._util.behaviorof(*arrays.values(), behavior=behavior)
# layouts = dict(
# (
# n,
# ak._v2.operations.convert.to_layout(
# x, allow_record=False, allow_other=False
# ).localindex(axis),
# )
# for n, x in arrays.items()
# )
# else:
# behavior = ak._v2._util.behaviorof(*arrays, behavior=behavior)
# layouts = [
# ak._v2.operations.convert.to_layout(
# x, allow_record=False, allow_other=False
# ).localindex(axis)
# for x in arrays
# ]
# if with_name is not None:
# if parameters is None:
# parameters = {}
# else:
# parameters = dict(parameters)
# parameters["__record__"] = with_name
# result = cartesian(
# layouts, axis=axis, nested=nested, parameters=parameters, highlevel=False
# )
# return ak._v2._util.maybe_wrap(result, behavior, highlevel)
|
en
| 0.598567
|
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE # """ # Args: # arrays (dict or iterable of arrays): Arrays on which to compute the # Cartesian product. # axis (int): The dimension at which this operation is applied. The # outermost dimension is `0`, followed by `1`, etc., and negative # values count backward from the innermost: `-1` is the innermost # dimension, `-2` is the next level up, etc. # nested (None, True, False, or iterable of str or int): If None or # False, all combinations of elements from the `arrays` are # produced at the same level of nesting; if True, they are grouped # in nested lists by combinations that share a common item from # each of the `arrays`; if an iterable of str or int, group common # items for a chosen set of keys from the `array` dict or slots # of the `array` iterable. # parameters (None or dict): Parameters for the new # #ak.layout.RecordArray node that is created by this operation. # with_name (None or str): Assigns a `"__record__"` name to the new # #ak.layout.RecordArray node that is created by this operation # (overriding `parameters`, if necessary). # highlevel (bool): If True, return an #ak.Array; otherwise, return # a low-level #ak.layout.Content subclass. # behavior (None or dict): Custom #ak.behavior for the output array, if # high-level. # Computes a Cartesian product (i.e. cross product) of data from a set of # `arrays`, like #ak.cartesian, but returning integer indexes for # #ak.Array.__getitem__. # For example, the Cartesian product of # >>> one = ak.Array([1.1, 2.2, 3.3]) # >>> two = ak.Array(["a", "b"]) # is # >>> ak.to_list(ak.cartesian([one, two], axis=0)) # [(1.1, 'a'), (1.1, 'b'), (2.2, 'a'), (2.2, 'b'), (3.3, 'a'), (3.3, 'b')] # But with argcartesian, only the indexes are returned. 
# >>> ak.to_list(ak.argcartesian([one, two], axis=0)) # [(0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1)] # These are the indexes that can select the items that go into the actual # Cartesian product. # >>> one_index, two_index = ak.unzip(ak.argcartesian([one, two], axis=0)) # >>> one[one_index] # <Array [1.1, 1.1, 2.2, 2.2, 3.3, 3.3] type='6 * float64'> # >>> two[two_index] # <Array ['a', 'b', 'a', 'b', 'a', 'b'] type='6 * string'> # All of the parameters for #ak.cartesian apply equally to #ak.argcartesian, # so see the #ak.cartesian documentation for a more complete description. # """ # if axis < 0: # raise ValueError( # "the 'axis' of argcartesian must be non-negative" # # ) # else: # if isinstance(arrays, dict): # behavior = ak._v2._util.behaviorof(*arrays.values(), behavior=behavior) # layouts = dict( # ( # n, # ak._v2.operations.convert.to_layout( # x, allow_record=False, allow_other=False # ).localindex(axis), # ) # for n, x in arrays.items() # ) # else: # behavior = ak._v2._util.behaviorof(*arrays, behavior=behavior) # layouts = [ # ak._v2.operations.convert.to_layout( # x, allow_record=False, allow_other=False # ).localindex(axis) # for x in arrays # ] # if with_name is not None: # if parameters is None: # parameters = {} # else: # parameters = dict(parameters) # parameters["__record__"] = with_name # result = cartesian( # layouts, axis=axis, nested=nested, parameters=parameters, highlevel=False # ) # return ak._v2._util.maybe_wrap(result, behavior, highlevel)
| 2.598128
| 3
|
remove_soln.py
|
jonathonfletcher/BiteSizeBayes
| 116
|
6629709
|
import nbformat as nbf
from glob import glob
# Collect a list of all notebooks in the content folder
filenames = glob("[01]*.ipynb")
text = '# Solution'
replacement = '# Solution goes here'
# Search through each notebook
for filename in sorted(filenames):
print('Removing solutions from', filename)
ntbk = nbf.read(filename, nbf.NO_CONVERT)
for cell in ntbk.cells:
# remove tags
if 'tags' in cell['metadata']:
cell['metadata']['tags'] = []
# remove output
if 'outputs' in cell:
cell['outputs'] = []
# remove solutions
if cell['source'].startswith(text):
cell['source'] = replacement
nbf.write(ntbk, filename)
|
import nbformat as nbf
from glob import glob
# Collect a list of all notebooks in the content folder
filenames = glob("[01]*.ipynb")
text = '# Solution'
replacement = '# Solution goes here'
# Search through each notebook
for filename in sorted(filenames):
print('Removing solutions from', filename)
ntbk = nbf.read(filename, nbf.NO_CONVERT)
for cell in ntbk.cells:
# remove tags
if 'tags' in cell['metadata']:
cell['metadata']['tags'] = []
# remove output
if 'outputs' in cell:
cell['outputs'] = []
# remove solutions
if cell['source'].startswith(text):
cell['source'] = replacement
nbf.write(ntbk, filename)
|
en
| 0.57633
|
# Collect a list of all notebooks in the content folder # Search through each notebook # remove tags # remove output # remove solutions
| 2.958391
| 3
|
src/pretalx/common/models/__init__.py
|
xhub/pretalx
| 0
|
6629710
|
<filename>src/pretalx/common/models/__init__.py
from .log import ActivityLog
from .settings import GlobalSettings
__all__ = ['ActivityLog', 'GlobalSettings']
|
<filename>src/pretalx/common/models/__init__.py
from .log import ActivityLog
from .settings import GlobalSettings
__all__ = ['ActivityLog', 'GlobalSettings']
|
none
| 1
| 1.188449
| 1
|
|
app/migrations/0001_initial.py
|
Thoma1999/exe_orientation_Q
| 1
|
6629711
|
<filename>app/migrations/0001_initial.py
# Generated by Django 2.1 on 2020-03-07 17:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Developers',
fields=[
('devID', models.AutoField(primary_key=True, serialize=False)),
],
),
migrations.CreateModel(
name='Gamecode',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('groupcode', models.CharField(max_length=250)),
('questionNum', models.IntegerField(default=1)),
],
),
migrations.CreateModel(
name='gameMaster',
fields=[
('GMID', models.AutoField(primary_key=True, serialize=False)),
],
),
migrations.CreateModel(
name='Groups',
fields=[
('GroupID', models.AutoField(primary_key=True, serialize=False)),
('GroupName', models.CharField(max_length=45)),
],
),
migrations.CreateModel(
name='Hints',
fields=[
('HintText', models.CharField(max_length=100, primary_key=True, serialize=False)),
('HintNo', models.IntegerField()),
('Routes_NodeID', models.IntegerField()),
],
),
migrations.CreateModel(
name='Players',
fields=[
('playerID', models.AutoField(primary_key=True, serialize=False)),
],
),
migrations.CreateModel(
name='Questions',
fields=[
('auto_increment_id', models.AutoField(primary_key=True, serialize=False)),
('questions', models.CharField(max_length=100)),
('answers', models.CharField(max_length=100)),
('node_num', models.IntegerField()),
('hints', models.CharField(default='', max_length=100)),
],
),
migrations.CreateModel(
name='Routes',
fields=[
('routeID', models.IntegerField(primary_key=True, serialize=False)),
('Node', models.CharField(max_length=45)),
('NodeID', models.IntegerField()),
('RouteName', models.CharField(max_length=45)),
('gameMaster_GMID', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='app.gameMaster')),
],
),
migrations.CreateModel(
name='User',
fields=[
('userID', models.AutoField(primary_key=True, serialize=False)),
('username', models.CharField(max_length=45)),
],
),
migrations.AddField(
model_name='players',
name='user_userID',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='app.User'),
),
migrations.AddField(
model_name='hints',
name='Routes_routeID',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='app.Routes'),
),
migrations.AddField(
model_name='groups',
name='Players_playerID',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='app.Players'),
),
migrations.AddField(
model_name='gamemaster',
name='user_userID',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.User'),
),
migrations.AddField(
model_name='developers',
name='user_userID',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.User'),
),
]
|
<filename>app/migrations/0001_initial.py
# Generated by Django 2.1 on 2020-03-07 17:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Developers',
fields=[
('devID', models.AutoField(primary_key=True, serialize=False)),
],
),
migrations.CreateModel(
name='Gamecode',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('groupcode', models.CharField(max_length=250)),
('questionNum', models.IntegerField(default=1)),
],
),
migrations.CreateModel(
name='gameMaster',
fields=[
('GMID', models.AutoField(primary_key=True, serialize=False)),
],
),
migrations.CreateModel(
name='Groups',
fields=[
('GroupID', models.AutoField(primary_key=True, serialize=False)),
('GroupName', models.CharField(max_length=45)),
],
),
migrations.CreateModel(
name='Hints',
fields=[
('HintText', models.CharField(max_length=100, primary_key=True, serialize=False)),
('HintNo', models.IntegerField()),
('Routes_NodeID', models.IntegerField()),
],
),
migrations.CreateModel(
name='Players',
fields=[
('playerID', models.AutoField(primary_key=True, serialize=False)),
],
),
migrations.CreateModel(
name='Questions',
fields=[
('auto_increment_id', models.AutoField(primary_key=True, serialize=False)),
('questions', models.CharField(max_length=100)),
('answers', models.CharField(max_length=100)),
('node_num', models.IntegerField()),
('hints', models.CharField(default='', max_length=100)),
],
),
migrations.CreateModel(
name='Routes',
fields=[
('routeID', models.IntegerField(primary_key=True, serialize=False)),
('Node', models.CharField(max_length=45)),
('NodeID', models.IntegerField()),
('RouteName', models.CharField(max_length=45)),
('gameMaster_GMID', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='app.gameMaster')),
],
),
migrations.CreateModel(
name='User',
fields=[
('userID', models.AutoField(primary_key=True, serialize=False)),
('username', models.CharField(max_length=45)),
],
),
migrations.AddField(
model_name='players',
name='user_userID',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='app.User'),
),
migrations.AddField(
model_name='hints',
name='Routes_routeID',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='app.Routes'),
),
migrations.AddField(
model_name='groups',
name='Players_playerID',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='app.Players'),
),
migrations.AddField(
model_name='gamemaster',
name='user_userID',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.User'),
),
migrations.AddField(
model_name='developers',
name='user_userID',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.User'),
),
]
|
en
| 0.850402
|
# Generated by Django 2.1 on 2020-03-07 17:33
| 1.801783
| 2
|
backend/tancho/config/config.py
|
DocDuck/PetRescue
| 0
|
6629712
|
<gh_stars>0
# backend/tancho/config/config.yml
from motor.motor_asyncio import AsyncIOMotorClient
import yaml
def load_config() -> dict:
with open('config/config.yml') as yaml_file:
conf = yaml.load(yaml_file.read(), Loader=yaml.SafeLoader)
return conf
CONF = load_config()
DB_CLIENT = AsyncIOMotorClient(
host=CONF.get("databases", dict())["default"]["HOST"],
port=CONF.get("databases", dict())["default"]["PORT"],
username=CONF.get("databases", dict())["default"]["USER"],
password=CONF.get("databases", dict())["default"]["PASSWORD"],
)
DB = DB_CLIENT[CONF.get("databases", dict())["default"]["NAME"]]
def close_db_client():
DB_CLIENT.close()
|
# backend/tancho/config/config.yml
from motor.motor_asyncio import AsyncIOMotorClient
import yaml
def load_config() -> dict:
with open('config/config.yml') as yaml_file:
conf = yaml.load(yaml_file.read(), Loader=yaml.SafeLoader)
return conf
CONF = load_config()
DB_CLIENT = AsyncIOMotorClient(
host=CONF.get("databases", dict())["default"]["HOST"],
port=CONF.get("databases", dict())["default"]["PORT"],
username=CONF.get("databases", dict())["default"]["USER"],
password=CONF.get("databases", dict())["default"]["PASSWORD"],
)
DB = DB_CLIENT[CONF.get("databases", dict())["default"]["NAME"]]
def close_db_client():
DB_CLIENT.close()
|
en
| 0.109215
|
# backend/tancho/config/config.yml
| 2.175358
| 2
|
pysot/models/backbone/__init__.py
|
eldercrow/tracking-pytorch
| 0
|
6629713
|
<reponame>eldercrow/tracking-pytorch<filename>pysot/models/backbone/__init__.py
# Copyright (c) SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from pysot.models.backbone.resnet import resnet18, resnet34, resnet50
from pysot.models.backbone.mobilenetv2 import mobilenet_v2
BACKBONES = {
'resnet18': resnet18,
'resnet34': resnet34,
'resnet50': resnet50,
'mobilenetv2': mobilenet_v2
}
def get_backbone(name, **kwargs):
return BACKBONES[name](**kwargs)
|
# Copyright (c) SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from pysot.models.backbone.resnet import resnet18, resnet34, resnet50
from pysot.models.backbone.mobilenetv2 import mobilenet_v2
BACKBONES = {
'resnet18': resnet18,
'resnet34': resnet34,
'resnet50': resnet50,
'mobilenetv2': mobilenet_v2
}
def get_backbone(name, **kwargs):
return BACKBONES[name](**kwargs)
|
en
| 0.792863
|
# Copyright (c) SenseTime. All Rights Reserved.
| 1.511229
| 2
|
tests/test_version_action.py
|
venairus/pynairus
| 2
|
6629714
|
<filename>tests/test_version_action.py
# coding: utf-8
"""Unit tests module for actions.version module."""
import unittest
import argparse
from pynairus.actions.version_action import VersionAction
class VersionActionTest(unittest.TestCase):
"""Unit test class for VersionAction."""
def test_inheritance(self):
"""Test the inheritance of the class."""
self.assertIn(argparse.Action, VersionAction.__bases__)
|
<filename>tests/test_version_action.py
# coding: utf-8
"""Unit tests module for actions.version module."""
import unittest
import argparse
from pynairus.actions.version_action import VersionAction
class VersionActionTest(unittest.TestCase):
"""Unit test class for VersionAction."""
def test_inheritance(self):
"""Test the inheritance of the class."""
self.assertIn(argparse.Action, VersionAction.__bases__)
|
en
| 0.650662
|
# coding: utf-8 Unit tests module for actions.version module. Unit test class for VersionAction. Test the inheritance of the class.
| 2.449563
| 2
|
lib/django-0.96/django/utils/timesince.py
|
MiCHiLU/google_appengine_sdk
| 790
|
6629715
|
<gh_stars>100-1000
import datetime, math, time
from django.utils.tzinfo import LocalTimezone
from django.utils.translation import ngettext
def timesince(d, now=None):
"""
Takes two datetime objects and returns the time between then and now
as a nicely formatted string, e.g "10 minutes"
Adapted from http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
"""
chunks = (
(60 * 60 * 24 * 365, lambda n: ngettext('year', 'years', n)),
(60 * 60 * 24 * 30, lambda n: ngettext('month', 'months', n)),
(60 * 60 * 24 * 7, lambda n : ngettext('week', 'weeks', n)),
(60 * 60 * 24, lambda n : ngettext('day', 'days', n)),
(60 * 60, lambda n: ngettext('hour', 'hours', n)),
(60, lambda n: ngettext('minute', 'minutes', n))
)
# Convert datetime.date to datetime.datetime for comparison
if d.__class__ is not datetime.datetime:
d = datetime.datetime(d.year, d.month, d.day)
if now:
t = now.timetuple()
else:
t = time.localtime()
if d.tzinfo:
tz = LocalTimezone(d)
else:
tz = None
now = datetime.datetime(t[0], t[1], t[2], t[3], t[4], t[5], tzinfo=tz)
# ignore microsecond part of 'd' since we removed it from 'now'
delta = now - (d - datetime.timedelta(0, 0, d.microsecond))
since = delta.days * 24 * 60 * 60 + delta.seconds
for i, (seconds, name) in enumerate(chunks):
count = since / seconds
if count != 0:
break
if count < 0:
return '%d milliseconds' % math.floor((now - d).microseconds / 1000)
s = '%d %s' % (count, name(count))
if i + 1 < len(chunks):
# Now get the second item
seconds2, name2 = chunks[i + 1]
count2 = (since - (seconds * count)) / seconds2
if count2 != 0:
s += ', %d %s' % (count2, name2(count2))
return s
def timeuntil(d, now=None):
"""
Like timesince, but returns a string measuring the time until
the given time.
"""
if now == None:
now = datetime.datetime.now()
return timesince(now, d)
|
import datetime, math, time
from django.utils.tzinfo import LocalTimezone
from django.utils.translation import ngettext
def timesince(d, now=None):
"""
Takes two datetime objects and returns the time between then and now
as a nicely formatted string, e.g "10 minutes"
Adapted from http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
"""
chunks = (
(60 * 60 * 24 * 365, lambda n: ngettext('year', 'years', n)),
(60 * 60 * 24 * 30, lambda n: ngettext('month', 'months', n)),
(60 * 60 * 24 * 7, lambda n : ngettext('week', 'weeks', n)),
(60 * 60 * 24, lambda n : ngettext('day', 'days', n)),
(60 * 60, lambda n: ngettext('hour', 'hours', n)),
(60, lambda n: ngettext('minute', 'minutes', n))
)
# Convert datetime.date to datetime.datetime for comparison
if d.__class__ is not datetime.datetime:
d = datetime.datetime(d.year, d.month, d.day)
if now:
t = now.timetuple()
else:
t = time.localtime()
if d.tzinfo:
tz = LocalTimezone(d)
else:
tz = None
now = datetime.datetime(t[0], t[1], t[2], t[3], t[4], t[5], tzinfo=tz)
# ignore microsecond part of 'd' since we removed it from 'now'
delta = now - (d - datetime.timedelta(0, 0, d.microsecond))
since = delta.days * 24 * 60 * 60 + delta.seconds
for i, (seconds, name) in enumerate(chunks):
count = since / seconds
if count != 0:
break
if count < 0:
return '%d milliseconds' % math.floor((now - d).microseconds / 1000)
s = '%d %s' % (count, name(count))
if i + 1 < len(chunks):
# Now get the second item
seconds2, name2 = chunks[i + 1]
count2 = (since - (seconds * count)) / seconds2
if count2 != 0:
s += ', %d %s' % (count2, name2(count2))
return s
def timeuntil(d, now=None):
"""
Like timesince, but returns a string measuring the time until
the given time.
"""
if now == None:
now = datetime.datetime.now()
return timesince(now, d)
|
en
| 0.789008
|
Takes two datetime objects and returns the time between then and now as a nicely formatted string, e.g "10 minutes" Adapted from http://blog.natbat.co.uk/archive/2003/Jun/14/time_since # Convert datetime.date to datetime.datetime for comparison # ignore microsecond part of 'd' since we removed it from 'now' # Now get the second item Like timesince, but returns a string measuring the time until the given time.
| 2.81892
| 3
|
Django/Django11.py
|
bosichong/17python.com
| 9
|
6629716
|
#codeing=utf-8
# @Time : 2017-12-11
# @Author : py.sky
# @Mail : <EMAIL>
# @Site : www.17python.com
# @Title : # 实战:利用Django开发部署自己的个人博客(11)博客分类页及搜索页的实现
# @Url : http://www.17python.com/blog/62
# @Details : # 实战:利用Django开发部署自己的个人博客(11)博客分类页及搜索页的实现
# @Other : OS X 10.11.6
# Python 3.6.1
# VSCode 1.15.1
###################################
# 实战:利用Django开发部署自己的个人博客(11)博客分类页及搜索页的实现
###################################
'''
上节我们通过博客的模板继承及上下文沉浸器大大简化了模板的代码量,这节我们继续实现分类页及搜索结果页的展示。
## 缩短网址
之后我们访问博客地址是`http://127.0.0.1:8000/blog/home`,如果我们想直接访问地址也就是域名后直接打开博客首页怎么办?
首先修改'blog01.urls.py'中的代码:
url(r'^', include('myblog.urls',namespace='')),#修改为直接为域名根目录,缩短网址。
去掉`r'^blog'`中的`blog`,
再修改'myblog.urls.py'中的代码:
注释掉`# url(r'^list$', v.list, name='list'),`修改为`url(r'^$', v.list, name=''),#项目首页`
运行服务器,直接访问:`http://127.0.0.1:8000`,这时直接打开的是博客列表页展示的首页。
## Django实现博客分类页展示
首页展示的是所有文章页,我想点击分类显示为分类下边的相关文章,这个如何实现?
首先我们看下点击分类后,url的格式`http://127.0.0.1:8000/?c=2`这个分类页的连接中包括一个典型的get参数,
我们可以在视图中获得这个参数2,然后再搜索所有分类`id=2`的文章,然后返回给模板即可展示出来结果了。
编写视图文件的代码
c = request.GET.get('c', '')
if c:
#搜索分类ID为c的所有文章,如果分类id为空,这里就返回所有文章。
articles = articles.filter(category=c,).order_by('-create_time')
这里的变量c,通过`request.GET.get('c', '')`获取页面上的C的`key`,`request.GET['c']`也可以访问,但不安全。
然后通过`Django`中提供数据过滤`.filter`搜索出相关的文章,然后排序后返回给模板,模板那边的代码都不用修改即可正常显示。
好了,我们访问`http://127.0.0.1:8000/?c=2` 所有分类`id=2`文章就搜索出来了。
## 搜索结果页面
在当前的页面上边有个搜索框,这是一个搜索的入口,我们希望用户通过这个搜索框进行一些模糊搜索,比如根据关键字搜索标题或正文中是否包含关键字,然后返回搜索结果展示。
说到搜索,其实首先想到的是应该对这个搜索关键字做一些限制,比如长度,还有就是安全sql注入等。
我们的搜索结果url地址`http://127.0.0.1:8000/?s=Python`,应该是这个相似。
这里我们采用django提供的一个表单框架来实现当前的搜索功能,接下来开始编写代码。
创建myblog.forms.py:
#coding=utf-8
from django import forms
class Searchform(forms.Form):
#搜索表单定义
s = forms.CharField(max_length=20)
就这点代码,定义了一个表单。
在视图`myblog.views.py`中使用,先引用
from .forms import Searchform
然后编写代码
s = ''#搜索关键字
#以下判断表单是否验证成功,如果验证成功返回一个字符串s
if request.method == 'GET':
form = Searchform(request.GET)
if form.is_valid():
s = request.GET.get('s')
if s :
articles = articles.filter(Q(title__contains=s)|Q(content__contains=s)).order_by('-create_time')
这里有个Q函数记得引用一下`from django.db.models import Q#模糊查询多个字段使用`,这个Q函数是关键字查询中经常使用到的,详细了解请百度。
然后模板里修改一下模板`base.html`中搜索框的代码:
<form class="am-topbar-form am-topbar-left am-form-inline am-topbar-right" action="{% url '' %}">
<div class="am-form-group">
<input type="text" class="am-form-field am-input-sm" placeholder="搜索文章" name="s">
</div>
<button type="submit" class="am-btn am-btn-default am-btn-sm">搜索</button>
</form>
`{% url '' %}`这是模板中的标签,Django中提供了很多模板标签,如果需要详细了解可以查看官方的文档。
好了,我们刷新首页,然后在搜索框里输入:10 就会搜索标题有含有10的文章了,
![]()
到此我们的分类页及搜索结果页面的功能就实现了。
如果在修改中发现了错误,可以复制留言中的错误回复,站长看到会帮助解答的。另外可以查看源文件核对,看看自己哪里改的不对,也能发现错误。
'''
|
#codeing=utf-8
# @Time : 2017-12-11
# @Author : py.sky
# @Mail : <EMAIL>
# @Site : www.17python.com
# @Title : # 实战:利用Django开发部署自己的个人博客(11)博客分类页及搜索页的实现
# @Url : http://www.17python.com/blog/62
# @Details : # 实战:利用Django开发部署自己的个人博客(11)博客分类页及搜索页的实现
# @Other : OS X 10.11.6
# Python 3.6.1
# VSCode 1.15.1
###################################
# 实战:利用Django开发部署自己的个人博客(11)博客分类页及搜索页的实现
###################################
'''
上节我们通过博客的模板继承及上下文沉浸器大大简化了模板的代码量,这节我们继续实现分类页及搜索结果页的展示。
## 缩短网址
之后我们访问博客地址是`http://127.0.0.1:8000/blog/home`,如果我们想直接访问地址也就是域名后直接打开博客首页怎么办?
首先修改'blog01.urls.py'中的代码:
url(r'^', include('myblog.urls',namespace='')),#修改为直接为域名根目录,缩短网址。
去掉`r'^blog'`中的`blog`,
再修改'myblog.urls.py'中的代码:
注释掉`# url(r'^list$', v.list, name='list'),`修改为`url(r'^$', v.list, name=''),#项目首页`
运行服务器,直接访问:`http://127.0.0.1:8000`,这时直接打开的是博客列表页展示的首页。
## Django实现博客分类页展示
首页展示的是所有文章页,我想点击分类显示为分类下边的相关文章,这个如何实现?
首先我们看下点击分类后,url的格式`http://127.0.0.1:8000/?c=2`这个分类页的连接中包括一个典型的get参数,
我们可以在视图中获得这个参数2,然后再搜索所有分类`id=2`的文章,然后返回给模板即可展示出来结果了。
编写视图文件的代码
c = request.GET.get('c', '')
if c:
#搜索分类ID为c的所有文章,如果分类id为空,这里就返回所有文章。
articles = articles.filter(category=c,).order_by('-create_time')
这里的变量c,通过`request.GET.get('c', '')`获取页面上的C的`key`,`request.GET['c']`也可以访问,但不安全。
然后通过`Django`中提供数据过滤`.filter`搜索出相关的文章,然后排序后返回给模板,模板那边的代码都不用修改即可正常显示。
好了,我们访问`http://127.0.0.1:8000/?c=2` 所有分类`id=2`文章就搜索出来了。
## 搜索结果页面
在当前的页面上边有个搜索框,这是一个搜索的入口,我们希望用户通过这个搜索框进行一些模糊搜索,比如根据关键字搜索标题或正文中是否包含关键字,然后返回搜索结果展示。
说到搜索,其实首先想到的是应该对这个搜索关键字做一些限制,比如长度,还有就是安全sql注入等。
我们的搜索结果url地址`http://127.0.0.1:8000/?s=Python`,应该是这个相似。
这里我们采用django提供的一个表单框架来实现当前的搜索功能,接下来开始编写代码。
创建myblog.forms.py:
#coding=utf-8
from django import forms
class Searchform(forms.Form):
#搜索表单定义
s = forms.CharField(max_length=20)
就这点代码,定义了一个表单。
在视图`myblog.views.py`中使用,先引用
from .forms import Searchform
然后编写代码
s = ''#搜索关键字
#以下判断表单是否验证成功,如果验证成功返回一个字符串s
if request.method == 'GET':
form = Searchform(request.GET)
if form.is_valid():
s = request.GET.get('s')
if s :
articles = articles.filter(Q(title__contains=s)|Q(content__contains=s)).order_by('-create_time')
这里有个Q函数记得引用一下`from django.db.models import Q#模糊查询多个字段使用`,这个Q函数是关键字查询中经常使用到的,详细了解请百度。
然后模板里修改一下模板`base.html`中搜索框的代码:
<form class="am-topbar-form am-topbar-left am-form-inline am-topbar-right" action="{% url '' %}">
<div class="am-form-group">
<input type="text" class="am-form-field am-input-sm" placeholder="搜索文章" name="s">
</div>
<button type="submit" class="am-btn am-btn-default am-btn-sm">搜索</button>
</form>
`{% url '' %}`这是模板中的标签,Django中提供了很多模板标签,如果需要详细了解可以查看官方的文档。
好了,我们刷新首页,然后在搜索框里输入:10 就会搜索标题有含有10的文章了,
![]()
到此我们的分类页及搜索结果页面的功能就实现了。
如果在修改中发现了错误,可以复制留言中的错误回复,站长看到会帮助解答的。另外可以查看源文件核对,看看自己哪里改的不对,也能发现错误。
'''
|
zh
| 0.897582
|
#codeing=utf-8 # @Time : 2017-12-11 # @Author : py.sky # @Mail : <EMAIL> # @Site : www.17python.com # @Title : # 实战:利用Django开发部署自己的个人博客(11)博客分类页及搜索页的实现 # @Url : http://www.17python.com/blog/62 # @Details : # 实战:利用Django开发部署自己的个人博客(11)博客分类页及搜索页的实现 # @Other : OS X 10.11.6 # Python 3.6.1 # VSCode 1.15.1 ################################### # 实战:利用Django开发部署自己的个人博客(11)博客分类页及搜索页的实现 ################################### 上节我们通过博客的模板继承及上下文沉浸器大大简化了模板的代码量,这节我们继续实现分类页及搜索结果页的展示。 ## 缩短网址 之后我们访问博客地址是`http://127.0.0.1:8000/blog/home`,如果我们想直接访问地址也就是域名后直接打开博客首页怎么办? 首先修改'blog01.urls.py'中的代码: url(r'^', include('myblog.urls',namespace='')),#修改为直接为域名根目录,缩短网址。 去掉`r'^blog'`中的`blog`, 再修改'myblog.urls.py'中的代码: 注释掉`# url(r'^list$', v.list, name='list'),`修改为`url(r'^$', v.list, name=''),#项目首页` 运行服务器,直接访问:`http://127.0.0.1:8000`,这时直接打开的是博客列表页展示的首页。 ## Django实现博客分类页展示 首页展示的是所有文章页,我想点击分类显示为分类下边的相关文章,这个如何实现? 首先我们看下点击分类后,url的格式`http://127.0.0.1:8000/?c=2`这个分类页的连接中包括一个典型的get参数, 我们可以在视图中获得这个参数2,然后再搜索所有分类`id=2`的文章,然后返回给模板即可展示出来结果了。 编写视图文件的代码 c = request.GET.get('c', '') if c: #搜索分类ID为c的所有文章,如果分类id为空,这里就返回所有文章。 articles = articles.filter(category=c,).order_by('-create_time') 这里的变量c,通过`request.GET.get('c', '')`获取页面上的C的`key`,`request.GET['c']`也可以访问,但不安全。 然后通过`Django`中提供数据过滤`.filter`搜索出相关的文章,然后排序后返回给模板,模板那边的代码都不用修改即可正常显示。 好了,我们访问`http://127.0.0.1:8000/?c=2` 所有分类`id=2`文章就搜索出来了。 ## 搜索结果页面 在当前的页面上边有个搜索框,这是一个搜索的入口,我们希望用户通过这个搜索框进行一些模糊搜索,比如根据关键字搜索标题或正文中是否包含关键字,然后返回搜索结果展示。 说到搜索,其实首先想到的是应该对这个搜索关键字做一些限制,比如长度,还有就是安全sql注入等。 我们的搜索结果url地址`http://127.0.0.1:8000/?s=Python`,应该是这个相似。 这里我们采用django提供的一个表单框架来实现当前的搜索功能,接下来开始编写代码。 创建myblog.forms.py: #coding=utf-8 from django import forms class Searchform(forms.Form): #搜索表单定义 s = forms.CharField(max_length=20) 就这点代码,定义了一个表单。 在视图`myblog.views.py`中使用,先引用 from .forms import Searchform 然后编写代码 s = ''#搜索关键字 #以下判断表单是否验证成功,如果验证成功返回一个字符串s if request.method == 'GET': form = Searchform(request.GET) if form.is_valid(): s = request.GET.get('s') if s : articles = 
articles.filter(Q(title__contains=s)|Q(content__contains=s)).order_by('-create_time') 这里有个Q函数记得引用一下`from django.db.models import Q#模糊查询多个字段使用`,这个Q函数是关键字查询中经常使用到的,详细了解请百度。 然后模板里修改一下模板`base.html`中搜索框的代码: <form class="am-topbar-form am-topbar-left am-form-inline am-topbar-right" action="{% url '' %}"> <div class="am-form-group"> <input type="text" class="am-form-field am-input-sm" placeholder="搜索文章" name="s"> </div> <button type="submit" class="am-btn am-btn-default am-btn-sm">搜索</button> </form> `{% url '' %}`这是模板中的标签,Django中提供了很多模板标签,如果需要详细了解可以查看官方的文档。 好了,我们刷新首页,然后在搜索框里输入:10 就会搜索标题有含有10的文章了, ![]() 到此我们的分类页及搜索结果页面的功能就实现了。 如果在修改中发现了错误,可以复制留言中的错误回复,站长看到会帮助解答的。另外可以查看源文件核对,看看自己哪里改的不对,也能发现错误。
| 2.099963
| 2
|
tests/test_extends.py
|
mefolder/extends
| 0
|
6629717
|
<gh_stars>0
from extends import __version__
from extends import extends
def test_version():
assert __version__ == '0.3.0'
def test_extension():
class Person:
def __init__(self, name: str, age: int):
self.name = name
self.age = age
@extends(Person)
def __str__(self: Person) -> str:
return f'{self.name}, {self.age} years old.'
assert str(Person('Alice', 22)) == 'Alice, 22 years old.'
|
from extends import __version__
from extends import extends
def test_version():
assert __version__ == '0.3.0'
def test_extension():
class Person:
def __init__(self, name: str, age: int):
self.name = name
self.age = age
@extends(Person)
def __str__(self: Person) -> str:
return f'{self.name}, {self.age} years old.'
assert str(Person('Alice', 22)) == 'Alice, 22 years old.'
|
none
| 1
| 3.108433
| 3
|
|
2012/q1-primefactorisation.py
|
OojAmit/british-informatics-olympiad
| 11
|
6629718
|
# A solution to the British Informatics Olympiad 2012 Question 1
# Scores 24/24 Marks
from math import sqrt
n = int(input())
a = int(sqrt(n))
numbers = set(range(2, a))
primes = set()
while numbers:
curr = min(numbers)
primes.add(curr)
numbers.discard(curr)
for i in range(curr*2, a, curr):
numbers.discard(i)
factors = 1
for prime in primes:
if n % prime == 0:
factors = factors * prime
if factors == 1:
factors = n
print factors
|
# A solution to the British Informatics Olympiad 2012 Question 1
# Scores 24/24 Marks
from math import sqrt
n = int(input())
a = int(sqrt(n))
numbers = set(range(2, a))
primes = set()
while numbers:
curr = min(numbers)
primes.add(curr)
numbers.discard(curr)
for i in range(curr*2, a, curr):
numbers.discard(i)
factors = 1
for prime in primes:
if n % prime == 0:
factors = factors * prime
if factors == 1:
factors = n
print factors
|
en
| 0.6541
|
# A solution to the British Informatics Olympiad 2012 Question 1 # Scores 24/24 Marks
| 3.480016
| 3
|
tests/zoomus/components/rooms/test_update.py
|
Crack-The-Code-PE/zoomus
| 178
|
6629719
|
<reponame>Crack-The-Code-PE/zoomus<gh_stars>100-1000
import unittest
from zoomus import components, util
import responses
def suite():
"""Define all the tests of the module."""
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(UpdateV2TestCase))
return suite
class UpdateV2TestCase(unittest.TestCase):
def setUp(self):
self.component = components.room.RoomComponentV2(
base_uri="http://foo.com",
config={
"token": "token",
"version": util.API_VERSION_2,
},
)
@responses.activate
def test_can_update(self):
responses.add(responses.PATCH, "http://foo.com/rooms/42")
response = self.component.update(id="42")
self.assertEqual(response.request.body, '{"id": "42"}')
def test_requires_room_id(self):
with self.assertRaisesRegexp(ValueError, "'id' must be set"):
self.component.update()
if __name__ == "__main__":
unittest.main()
|
import unittest
from zoomus import components, util
import responses
def suite():
"""Define all the tests of the module."""
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(UpdateV2TestCase))
return suite
class UpdateV2TestCase(unittest.TestCase):
def setUp(self):
self.component = components.room.RoomComponentV2(
base_uri="http://foo.com",
config={
"token": "token",
"version": util.API_VERSION_2,
},
)
@responses.activate
def test_can_update(self):
responses.add(responses.PATCH, "http://foo.com/rooms/42")
response = self.component.update(id="42")
self.assertEqual(response.request.body, '{"id": "42"}')
def test_requires_room_id(self):
with self.assertRaisesRegexp(ValueError, "'id' must be set"):
self.component.update()
if __name__ == "__main__":
unittest.main()
|
en
| 0.4845
|
Define all the tests of the module.
| 2.838437
| 3
|
chintai-scrape/D001_feature_check_and_shrink.py
|
GINK03/itmedia-scraping
| 16
|
6629720
|
<reponame>GINK03/itmedia-scraping<filename>chintai-scrape/D001_feature_check_and_shrink.py
import MeCab
import pandas as pd
df = pd.read_csv('./lexical_parsed.csv')
m = MeCab.Tagger('-Owakati')
m.parse("")
feat_freq = {}
for obj in df.to_dict('record'):
# print(obj)
com = obj['shuunou']
try:
for feat in set(m.parse(com).strip().split()):
if feat_freq.get(feat) is None:
feat_freq[feat] = 0
feat_freq[feat] += 1
except:
...
for feat, freq in sorted(feat_freq.items(), key=lambda x: x[1]):
print(feat, freq)
('communication', 'インターネット接続,BS,CATV,地上デジタル,無料,光ファイバー')
('kitchen', '別,ガスコンロ,洗面化粧台,衛生的,温水洗浄便座')
('other', 'エアコン,フローリング,バルコニー,置き場,ベランダ')
('secure', 'モニター,フォン,オートロック,インターホン,宅配ボックス')
|
import MeCab
import pandas as pd
df = pd.read_csv('./lexical_parsed.csv')
m = MeCab.Tagger('-Owakati')
m.parse("")
feat_freq = {}
for obj in df.to_dict('record'):
# print(obj)
com = obj['shuunou']
try:
for feat in set(m.parse(com).strip().split()):
if feat_freq.get(feat) is None:
feat_freq[feat] = 0
feat_freq[feat] += 1
except:
...
for feat, freq in sorted(feat_freq.items(), key=lambda x: x[1]):
print(feat, freq)
('communication', 'インターネット接続,BS,CATV,地上デジタル,無料,光ファイバー')
('kitchen', '別,ガスコンロ,洗面化粧台,衛生的,温水洗浄便座')
('other', 'エアコン,フローリング,バルコニー,置き場,ベランダ')
('secure', 'モニター,フォン,オートロック,インターホン,宅配ボックス')
|
en
| 0.187129
|
# print(obj)
| 2.726207
| 3
|
test/converter/test_bool.py
|
thombashi/pytypeutil
| 18
|
6629721
|
"""
.. codeauthor:: <NAME> <<EMAIL>>
"""
import pytest
import typepy
from ._common import convert_wrapper
class Test_Bool:
@pytest.mark.parametrize(
["method", "strict_level", "value", "expected"],
[
["convert", 0, True, True],
["convert", 0, False, False],
["convert", 0, "true", True],
["convert", 0, "FALSE", False],
["convert", 0, 1, True],
["convert", 0, 1.1, "E"],
["convert", 0, None, "E"],
["convert", 1, True, True],
["convert", 1, "true", True],
["convert", 1, "FALSE", False],
["convert", 1, 1, "E"],
["convert", 1, 1.1, "E"],
["convert", 1, None, "E"],
["convert", 2, True, True],
["convert", 2, "true", "E"],
["convert", 2, "FALSE", "E"],
["convert", 2, 1, "E"],
["convert", 2, 1.1, "E"],
["convert", 2, None, "E"],
["try_convert", 0, True, True],
["try_convert", 0, "true", True],
["try_convert", 0, "FALSE", False],
["try_convert", 0, 1, True],
["try_convert", 0, 1.1, None],
["try_convert", 0, None, None],
["try_convert", 1, True, True],
["try_convert", 1, "true", True],
["try_convert", 1, "FALSE", False],
["try_convert", 1, 1, None],
["try_convert", 1, 1.1, None],
["try_convert", 1, None, None],
["try_convert", 2, True, True],
["try_convert", 2, "true", None],
["try_convert", 2, "FALSE", None],
["try_convert", 2, 1, None],
["try_convert", 2, 1.1, None],
["try_convert", 2, None, None],
["force_convert", 0, True, True],
["force_convert", 0, "true", True],
["force_convert", 0, "FALSE", False],
["force_convert", 0, 1, True],
["force_convert", 0, 1.1, "E"],
["force_convert", 0, None, "E"],
["force_convert", 1, True, True],
["force_convert", 1, "true", True],
["force_convert", 1, "FALSE", False],
["force_convert", 1, 1, True],
["force_convert", 1, 1.1, "E"],
["force_convert", 1, None, "E"],
["force_convert", 2, True, True],
["force_convert", 2, "true", True],
["force_convert", 2, "FALSE", False],
["force_convert", 2, 1, True],
["force_convert", 2, 1.1, "E"],
["force_convert", 2, None, "E"],
],
)
def test_normal(self, method, strict_level, value, expected):
assert convert_wrapper(typepy.Bool(value, strict_level), method) == expected
|
"""
.. codeauthor:: <NAME> <<EMAIL>>
"""
import pytest
import typepy
from ._common import convert_wrapper
class Test_Bool:
@pytest.mark.parametrize(
["method", "strict_level", "value", "expected"],
[
["convert", 0, True, True],
["convert", 0, False, False],
["convert", 0, "true", True],
["convert", 0, "FALSE", False],
["convert", 0, 1, True],
["convert", 0, 1.1, "E"],
["convert", 0, None, "E"],
["convert", 1, True, True],
["convert", 1, "true", True],
["convert", 1, "FALSE", False],
["convert", 1, 1, "E"],
["convert", 1, 1.1, "E"],
["convert", 1, None, "E"],
["convert", 2, True, True],
["convert", 2, "true", "E"],
["convert", 2, "FALSE", "E"],
["convert", 2, 1, "E"],
["convert", 2, 1.1, "E"],
["convert", 2, None, "E"],
["try_convert", 0, True, True],
["try_convert", 0, "true", True],
["try_convert", 0, "FALSE", False],
["try_convert", 0, 1, True],
["try_convert", 0, 1.1, None],
["try_convert", 0, None, None],
["try_convert", 1, True, True],
["try_convert", 1, "true", True],
["try_convert", 1, "FALSE", False],
["try_convert", 1, 1, None],
["try_convert", 1, 1.1, None],
["try_convert", 1, None, None],
["try_convert", 2, True, True],
["try_convert", 2, "true", None],
["try_convert", 2, "FALSE", None],
["try_convert", 2, 1, None],
["try_convert", 2, 1.1, None],
["try_convert", 2, None, None],
["force_convert", 0, True, True],
["force_convert", 0, "true", True],
["force_convert", 0, "FALSE", False],
["force_convert", 0, 1, True],
["force_convert", 0, 1.1, "E"],
["force_convert", 0, None, "E"],
["force_convert", 1, True, True],
["force_convert", 1, "true", True],
["force_convert", 1, "FALSE", False],
["force_convert", 1, 1, True],
["force_convert", 1, 1.1, "E"],
["force_convert", 1, None, "E"],
["force_convert", 2, True, True],
["force_convert", 2, "true", True],
["force_convert", 2, "FALSE", False],
["force_convert", 2, 1, True],
["force_convert", 2, 1.1, "E"],
["force_convert", 2, None, "E"],
],
)
def test_normal(self, method, strict_level, value, expected):
assert convert_wrapper(typepy.Bool(value, strict_level), method) == expected
|
ml
| 0.117521
|
.. codeauthor:: <NAME> <<EMAIL>>
| 2.232873
| 2
|
apps/tests/log_esquery/test_esquery.py
|
kiritoscs/bk-log
| 0
|
6629722
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-LOG 蓝鲸日志平台 is licensed under the MIT License.
License for BK-LOG 蓝鲸日志平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
We undertake not to change the open source license (MIT license) applicable to the current version of
the project delivered to anyone in the future.
"""
import copy
from unittest.mock import patch
from django.test import TestCase
from apps.log_databus.models import CollectorConfig
from apps.log_esquery.esquery.esquery import EsQuery
from apps.log_search.exceptions import (
ScenarioNotSupportedException,
ScenarioQueryIndexFailException,
IndexResultTableApiException,
)
from apps.log_search.models import Scenario
from django_fakeredis import FakeRedis
BK_BIZ_ID = 2
STORAGE_CLUSTER_NAME = "cluster_name"
RESULT_TABLE_ID = "2_bklog.test3333"
RESULT_TABLE_NAME_ALIAS = "test3333"
SEARCH_DICT = {
"scenario_id": Scenario.LOG,
"indices": "2_bklog.search",
"start_time": "2020-03-21 07:00:00",
"end_time": "2020-03-22 23:59:59",
"time_zone": "Asia/Shanghai",
"query_string": "*",
"filter": [
{"field": "key1", "operator": "is", "value": "127.0.0.1", "condition": "and"},
{"field": "key2", "operator": "is one of", "value": "val2", "condition": "or"},
{"field": "key3", "operator": "is not", "value": "val3", "condition": "and"},
],
"sort_list": [],
"size": 1,
"start": 0,
"aggs": None,
"highlight": None,
"debug": True,
}
SCROLL_DICT = {
"scenario_id": Scenario.LOG,
"indices": "2_bklog_search_20200320*,2_bklog_search_20200321*,2_bklog_search_20200322*",
"scroll_id": "123213",
"scroll": "1m",
}
DSL_DICT = {
"scenario_id": Scenario.LOG,
"indices": "2_bklog_search_20200320*,2_bklog_search_20200321*,2_bklog_search_20200322*",
}
MAPPING_DICT = {
"scenario_id": Scenario.LOG,
"indices": "2_bklog_search_20200320*,2_bklog_search_20200321*,2_bklog_search_20200322*",
}
MAPPING_DICT_TIME = {
"scenario_id": Scenario.LOG,
"indices": "2_bklog_search",
"start_time": "2020-12-29 16:19:47",
"end_time": "2020-12-29 20:19:47",
"time_zone": "Asia/Shanghai",
}
INDICES_DICT = {"scenario_id": Scenario.LOG, "indices": "2_bklog.test3333", "bk_biz_id": BK_BIZ_ID}
GET_CLUSTER_INFO_DICT = {"scenario_id": Scenario.LOG, "indices": "2_bklog.test3333", "bk_biz_id": BK_BIZ_ID}
GET_CLUSTER_INFO_EXCEPTION_DICT = {"scenario_id": Scenario.LOG, "indices": "2_bklog.test4444", "bk_biz_id": BK_BIZ_ID}
SEARCH_RESULT = {
"scenario": Scenario.LOG,
"indices": "2_bklog_search_20200322*,2_bklog_search_20200320*,2_bklog_search_20200321*",
"body": {
"from": 0,
"size": 1,
"query": {
"bool": {
"filter": [
{"query_string": {"query": "*", "analyze_wildcard": True}},
{"range": {"": {"gte": 1584745200, "lte": 1584892799, "format": "epoch_second"}}},
{
"bool": {
"should": [
{
"bool": {
"must": [{"match_phrase": {"key1": {"query": "127.0.0.1"}}}],
"must_not": [],
}
},
{
"bool": {
"must": [
{
"bool": {
"should": [
{"match_phrase": {"key2": "v"}},
{"match_phrase": {"key2": "a"}},
{"match_phrase": {"key2": "l"}},
{"match_phrase": {"key2": "2"}},
]
}
}
],
"must_not": [{"match_phrase": {"key3": {"query": "val3"}}}],
}
},
]
}
},
]
}
},
},
}
ES_QUERY_LOG_INDICES = "2_bklog_search_20200320*,2_bklog_search_20200321*,2_bklog_search_20200322*"
ES_QUERY_BKDATA_INDICES = "2_bklog.search_20200321*,2_bklog.search_20200322*"
ES_QUERY_ES_INDICES = "2_bklog.search"
ES_QUERY_INDICES = {
Scenario.LOG: ES_QUERY_LOG_INDICES,
Scenario.BKDATA: ES_QUERY_BKDATA_INDICES,
Scenario.ES: ES_QUERY_ES_INDICES,
}
CLUSTER_INFOS = {
"2_bklog.test3333": {
"cluster_config": {
"domain_name": "1.1.1.1",
"port": 10000,
"version": "1.0",
"cluster_id": 231,
"cluster_name": STORAGE_CLUSTER_NAME,
},
"auth_info": {"username": "admin", "password": "<PASSWORD>"},
}
}
SCROLL_RESULT_LOG = {}
SCROLL_RESULT_ES = {}
QUERY_RESULT_LOG = {}
QUERY_RESULT_BKDATA = {}
QUERY_RESULT_ES = {}
DSL_RESULT_LOG = {"dsl": "{}"}
DSL_RESULT_BKDATA = {"dsl": "{}"}
DSL_RESULT_ES = {"dsl": "{}"}
MAPPING_RESULT_LOG = []
MAPPING_RESULT_BKDATA = []
MAPPING_RESULT_ES = []
INDICES_RESULT_WITHOUT_STORAGE = [
{
"result_table_id": RESULT_TABLE_ID,
"result_table_name_alias": RESULT_TABLE_NAME_ALIAS,
"bk_biz_id": BK_BIZ_ID,
"collector_config_id": 231,
}
]
INDICES_RESULT_WITH_STORAGE = [
{
"bk_biz_id": BK_BIZ_ID,
"collector_config_id": 231,
"result_table_id": RESULT_TABLE_ID,
"result_table_name_alias": RESULT_TABLE_NAME_ALIAS,
"storage_cluster_id": 231,
"storage_cluster_name": STORAGE_CLUSTER_NAME,
}
]
CONFIG_DATA = {
"result_table_config": {"bk_biz_id": BK_BIZ_ID},
"result_table_storage": {
"2_bklog.test3333": {"cluster_config": {"cluster_id": 231, "cluster_name": STORAGE_CLUSTER_NAME}}
},
}
GET_CLUSTER_INFO_EXCEPTION_CONFIG_DATA = {
"result_table_config": {"bk_biz_id": BK_BIZ_ID},
"result_table_storage": {
"2_bklog.test444": {"cluster_config": {"cluster_id": 231, "cluster_name": STORAGE_CLUSTER_NAME}}
},
}
GET_CLUSTER_INFO_RESULT = {
"bk_biz_id": BK_BIZ_ID,
"storage_cluster_id": 231,
"storage_cluster_name": STORAGE_CLUSTER_NAME,
}
@FakeRedis("apps.utils.cache.cache")
class TestEsquery(TestCase):
def test_search_debug(self):
"""
测试 esquery.search()
"""
# 原生ES bkdata 第三方ES
scenario_list = [Scenario.LOG, Scenario.BKDATA, Scenario.ES]
for scenario in scenario_list:
assert_result = copy.deepcopy(SEARCH_RESULT)
params = copy.deepcopy(SEARCH_DICT)
params.update({"scenario_id": scenario})
assert_result.update({"scenario": scenario})
res = EsQuery(params).search()
# response indices排序
res["indices"] = ",".join(sorted(res["indices"].split(",")))
# assert value indices排序
assert_result["indices"] = ",".join(sorted(ES_QUERY_INDICES[scenario].split(",")))
self.maxDiff = 10000
self.assertEqual(res, assert_result)
@patch("apps.log_esquery.esquery.client.QueryClientLog.QueryClientLog.query", return_value=QUERY_RESULT_LOG)
@patch("apps.log_esquery.esquery.client.QueryClientEs.QueryClientEs.query", return_value=QUERY_RESULT_ES)
@patch(
"apps.log_esquery.esquery.client.QueryClientBkData.QueryClientBkData.query", return_value=QUERY_RESULT_BKDATA
)
def test_search(self, *args, **kwargs):
# 原生ES
params = copy.deepcopy(SEARCH_DICT)
params.pop("debug")
res = EsQuery(params).search()
if res.get("indices"):
# response indices排序
res["indices"] = ",".join(sorted(res["indices"].split(",")))
self.assertEqual(res, {})
# BKDATA
params = copy.deepcopy(SEARCH_DICT)
params.pop("debug")
params.update({"scenario_id": Scenario.BKDATA})
res = EsQuery(params).search()
if res.get("indices"):
res["indices"] = ",".join(sorted(res["indices"].split(",")))
self.assertEqual(res, {})
# 第三方ES
params = copy.deepcopy(SEARCH_DICT)
params.pop("debug")
params.update({"scenario_id": Scenario.ES})
res = EsQuery(params).search()
if res.get("indices"):
res["indices"] = ",".join(sorted(res["indices"].split(",")))
self.assertEqual(res, {})
@patch("apps.log_esquery.esquery.client.QueryClientEs.QueryClientEs.scroll", return_value=SCROLL_RESULT_ES)
@patch("apps.log_esquery.esquery.client.QueryClientLog.QueryClientLog.scroll", return_value=SCROLL_RESULT_LOG)
def test_scroll(self, *args, **kwargs):
"""
测试 esquery.scroll()
"""
# 原生ES
params = copy.deepcopy(SCROLL_DICT)
result = EsQuery(params).scroll()
self.assertEqual(result, SCROLL_RESULT_LOG)
# 第三方ES
params = copy.deepcopy(SCROLL_DICT)
params.update({"scenario_id": Scenario.ES})
result = EsQuery(params).scroll()
self.assertEqual(result, SCROLL_RESULT_ES)
def test_scroll_exception(self):
"""
测试 scroll不支持bkdata场景 ScenarioNotSupportedException
"""
params = copy.deepcopy(SCROLL_DICT)
params.update({"scenario_id": Scenario.BKDATA})
with self.assertRaises(ScenarioNotSupportedException):
EsQuery(params).scroll()
@patch("apps.log_esquery.esquery.client.QueryClientEs.QueryClientEs.query", return_value={})
@patch("apps.log_esquery.esquery.client.QueryClientBkData.QueryClientBkData.query", return_value={})
@patch("apps.log_esquery.esquery.client.QueryClientLog.QueryClientLog.query", return_value={})
def test_dsl(self, *args, **kwargs):
"""
测试 esquery.dsl()
"""
# 原生ES
params = copy.deepcopy(DSL_DICT)
result = EsQuery(params).dsl()
self.assertEqual(result, DSL_RESULT_LOG)
# bkdata
params = copy.deepcopy(DSL_DICT)
params.update({"scenario_id": Scenario.BKDATA})
result = EsQuery(params).dsl()
self.assertEqual(result, DSL_RESULT_BKDATA)
# 第三方ES
params = copy.deepcopy(DSL_DICT)
params.update({"scenario_id": Scenario.ES})
result = EsQuery(params).dsl()
self.assertEqual(result, DSL_RESULT_ES)
@patch("apps.log_esquery.esquery.client.QueryClientEs.QueryClientEs.mapping", return_value={})
@patch("apps.log_esquery.esquery.client.QueryClientBkData.QueryClientBkData.mapping", return_value={})
@patch("apps.log_esquery.esquery.client.QueryClientLog.QueryClientLog.mapping", return_value={})
def test_mapping(self, *args, **kwargs):
# 原生ES
params = copy.deepcopy(MAPPING_DICT)
result = EsQuery(params).mapping()
self.assertEqual(result, MAPPING_RESULT_LOG)
# bkdata
params = copy.deepcopy(MAPPING_DICT)
params.update({"scenario_id": Scenario.BKDATA})
result = EsQuery(params).mapping()
self.assertEqual(result, MAPPING_RESULT_BKDATA)
# 第三方ES
params = copy.deepcopy(MAPPING_DICT)
params.update({"scenario_id": Scenario.ES})
result = EsQuery(params).mapping()
self.assertEqual(result, MAPPING_RESULT_ES)
@patch("apps.log_esquery.esquery.client.QueryClientEs.QueryClientEs.mapping", return_value={})
@patch("apps.log_esquery.esquery.client.QueryClientBkData.QueryClientBkData.mapping", return_value={})
@patch("apps.log_esquery.esquery.client.QueryClientLog.QueryClientLog.mapping", return_value={})
def test_mapping_time(self, *args, **kwargs):
# 自定义时间
params = copy.deepcopy(MAPPING_DICT_TIME)
params.update({"scenario_id": Scenario.LOG})
result = EsQuery(params).mapping()
self.assertEqual(result, MAPPING_RESULT_LOG)
@patch("apps.api.TransferApi.get_result_table_storage", lambda _: CLUSTER_INFOS)
def test_indices(self):
"""
测试 EsQuery.indices()
"""
# 测试数据库添加一条CollectorConfig数据
CollectorConfig.objects.create(
collector_config_id=231,
collector_config_name="test3333",
bk_app_code="bk_log_search",
collector_scenario_id="row",
bk_biz_id=BK_BIZ_ID,
category_id="os",
target_object_type="HOST",
target_node_type="TOPO",
target_nodes=[{"bk_inst_id": 52, "bk_obj_id": "module"}],
target_subscription_diff={},
description="test3333",
is_active=True,
bk_data_id=1500586,
table_id="2_bklog.test3333",
subscription_id=2103,
task_id_list=["1331697"],
)
params = copy.deepcopy(INDICES_DICT)
result = EsQuery(params).indices()
self.assertEqual(result, INDICES_RESULT_WITHOUT_STORAGE)
params.update({"with_storage": True})
result = EsQuery(params).indices()
self.assertEqual(result, INDICES_RESULT_WITH_STORAGE)
def test_indices_exception(self):
"""
测试 ScenarioQueryIndexFailException
"""
# bkdata
params = copy.deepcopy(INDICES_DICT)
params.update({"scenario_id": Scenario.BKDATA})
# 删除 bk_biz_id 键值对
params.pop("bk_biz_id")
with self.assertRaises(ScenarioQueryIndexFailException):
EsQuery(params).indices()
@patch("apps.utils.thread.MultiExecuteFunc.append", return_value=[])
@patch("apps.utils.thread.MultiExecuteFunc.run", return_value=CONFIG_DATA)
def test_get_cluster_info(self, *args, **kwargs):
"""
测试 EsQuery.get_cluster_info()
"""
params = copy.deepcopy(GET_CLUSTER_INFO_DICT)
result = EsQuery(params).get_cluster_info()
self.assertEqual(result, GET_CLUSTER_INFO_RESULT)
@patch("apps.utils.thread.MultiExecuteFunc.append", return_value=[])
@patch("apps.utils.thread.MultiExecuteFunc.run")
def test_get_cluster_info_exception(self, config_data, *args, **kwargs):
"""
测试 IndexResultTableApiException
"""
params = copy.deepcopy(GET_CLUSTER_INFO_EXCEPTION_DICT)
config_data.return_value = copy.deepcopy(GET_CLUSTER_INFO_EXCEPTION_CONFIG_DATA).pop("result_table_config")
with self.assertRaises(IndexResultTableApiException):
EsQuery(params).get_cluster_info()
config_data.return_value = copy.deepcopy(GET_CLUSTER_INFO_EXCEPTION_CONFIG_DATA).pop("result_table_storage")
with self.assertRaises(IndexResultTableApiException):
EsQuery(params).get_cluster_info()
GET_CLUSTER_INFO_EXCEPTION_CONFIG_DATA.update({"result_table_storage": {}})
config_data.return_value = GET_CLUSTER_INFO_EXCEPTION_CONFIG_DATA
with self.assertRaises(IndexResultTableApiException):
EsQuery(params).get_cluster_info()
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-LOG 蓝鲸日志平台 is licensed under the MIT License.
License for BK-LOG 蓝鲸日志平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
We undertake not to change the open source license (MIT license) applicable to the current version of
the project delivered to anyone in the future.
"""
import copy
from unittest.mock import patch
from django.test import TestCase
from apps.log_databus.models import CollectorConfig
from apps.log_esquery.esquery.esquery import EsQuery
from apps.log_search.exceptions import (
ScenarioNotSupportedException,
ScenarioQueryIndexFailException,
IndexResultTableApiException,
)
from apps.log_search.models import Scenario
from django_fakeredis import FakeRedis
BK_BIZ_ID = 2
STORAGE_CLUSTER_NAME = "cluster_name"
RESULT_TABLE_ID = "2_bklog.test3333"
RESULT_TABLE_NAME_ALIAS = "test3333"
SEARCH_DICT = {
"scenario_id": Scenario.LOG,
"indices": "2_bklog.search",
"start_time": "2020-03-21 07:00:00",
"end_time": "2020-03-22 23:59:59",
"time_zone": "Asia/Shanghai",
"query_string": "*",
"filter": [
{"field": "key1", "operator": "is", "value": "127.0.0.1", "condition": "and"},
{"field": "key2", "operator": "is one of", "value": "val2", "condition": "or"},
{"field": "key3", "operator": "is not", "value": "val3", "condition": "and"},
],
"sort_list": [],
"size": 1,
"start": 0,
"aggs": None,
"highlight": None,
"debug": True,
}
SCROLL_DICT = {
"scenario_id": Scenario.LOG,
"indices": "2_bklog_search_20200320*,2_bklog_search_20200321*,2_bklog_search_20200322*",
"scroll_id": "123213",
"scroll": "1m",
}
DSL_DICT = {
"scenario_id": Scenario.LOG,
"indices": "2_bklog_search_20200320*,2_bklog_search_20200321*,2_bklog_search_20200322*",
}
MAPPING_DICT = {
"scenario_id": Scenario.LOG,
"indices": "2_bklog_search_20200320*,2_bklog_search_20200321*,2_bklog_search_20200322*",
}
MAPPING_DICT_TIME = {
"scenario_id": Scenario.LOG,
"indices": "2_bklog_search",
"start_time": "2020-12-29 16:19:47",
"end_time": "2020-12-29 20:19:47",
"time_zone": "Asia/Shanghai",
}
INDICES_DICT = {"scenario_id": Scenario.LOG, "indices": "2_bklog.test3333", "bk_biz_id": BK_BIZ_ID}
GET_CLUSTER_INFO_DICT = {"scenario_id": Scenario.LOG, "indices": "2_bklog.test3333", "bk_biz_id": BK_BIZ_ID}
GET_CLUSTER_INFO_EXCEPTION_DICT = {"scenario_id": Scenario.LOG, "indices": "2_bklog.test4444", "bk_biz_id": BK_BIZ_ID}
SEARCH_RESULT = {
"scenario": Scenario.LOG,
"indices": "2_bklog_search_20200322*,2_bklog_search_20200320*,2_bklog_search_20200321*",
"body": {
"from": 0,
"size": 1,
"query": {
"bool": {
"filter": [
{"query_string": {"query": "*", "analyze_wildcard": True}},
{"range": {"": {"gte": 1584745200, "lte": 1584892799, "format": "epoch_second"}}},
{
"bool": {
"should": [
{
"bool": {
"must": [{"match_phrase": {"key1": {"query": "127.0.0.1"}}}],
"must_not": [],
}
},
{
"bool": {
"must": [
{
"bool": {
"should": [
{"match_phrase": {"key2": "v"}},
{"match_phrase": {"key2": "a"}},
{"match_phrase": {"key2": "l"}},
{"match_phrase": {"key2": "2"}},
]
}
}
],
"must_not": [{"match_phrase": {"key3": {"query": "val3"}}}],
}
},
]
}
},
]
}
},
},
}
ES_QUERY_LOG_INDICES = "2_bklog_search_20200320*,2_bklog_search_20200321*,2_bklog_search_20200322*"
ES_QUERY_BKDATA_INDICES = "2_bklog.search_20200321*,2_bklog.search_20200322*"
ES_QUERY_ES_INDICES = "2_bklog.search"
ES_QUERY_INDICES = {
Scenario.LOG: ES_QUERY_LOG_INDICES,
Scenario.BKDATA: ES_QUERY_BKDATA_INDICES,
Scenario.ES: ES_QUERY_ES_INDICES,
}
CLUSTER_INFOS = {
"2_bklog.test3333": {
"cluster_config": {
"domain_name": "1.1.1.1",
"port": 10000,
"version": "1.0",
"cluster_id": 231,
"cluster_name": STORAGE_CLUSTER_NAME,
},
"auth_info": {"username": "admin", "password": "<PASSWORD>"},
}
}
SCROLL_RESULT_LOG = {}
SCROLL_RESULT_ES = {}
QUERY_RESULT_LOG = {}
QUERY_RESULT_BKDATA = {}
QUERY_RESULT_ES = {}
DSL_RESULT_LOG = {"dsl": "{}"}
DSL_RESULT_BKDATA = {"dsl": "{}"}
DSL_RESULT_ES = {"dsl": "{}"}
MAPPING_RESULT_LOG = []
MAPPING_RESULT_BKDATA = []
MAPPING_RESULT_ES = []
INDICES_RESULT_WITHOUT_STORAGE = [
{
"result_table_id": RESULT_TABLE_ID,
"result_table_name_alias": RESULT_TABLE_NAME_ALIAS,
"bk_biz_id": BK_BIZ_ID,
"collector_config_id": 231,
}
]
INDICES_RESULT_WITH_STORAGE = [
{
"bk_biz_id": BK_BIZ_ID,
"collector_config_id": 231,
"result_table_id": RESULT_TABLE_ID,
"result_table_name_alias": RESULT_TABLE_NAME_ALIAS,
"storage_cluster_id": 231,
"storage_cluster_name": STORAGE_CLUSTER_NAME,
}
]
CONFIG_DATA = {
"result_table_config": {"bk_biz_id": BK_BIZ_ID},
"result_table_storage": {
"2_bklog.test3333": {"cluster_config": {"cluster_id": 231, "cluster_name": STORAGE_CLUSTER_NAME}}
},
}
GET_CLUSTER_INFO_EXCEPTION_CONFIG_DATA = {
"result_table_config": {"bk_biz_id": BK_BIZ_ID},
"result_table_storage": {
"2_bklog.test444": {"cluster_config": {"cluster_id": 231, "cluster_name": STORAGE_CLUSTER_NAME}}
},
}
GET_CLUSTER_INFO_RESULT = {
"bk_biz_id": BK_BIZ_ID,
"storage_cluster_id": 231,
"storage_cluster_name": STORAGE_CLUSTER_NAME,
}
@FakeRedis("apps.utils.cache.cache")
class TestEsquery(TestCase):
    """Unit tests for ``EsQuery``: search, scroll, dsl, mapping, indices and cluster info.

    Backend clients (log / bkdata / third-party ES) are patched per test;
    redis access is faked for the whole class.
    """

    def test_search_debug(self):
        """
        Test esquery.search() in debug mode (the built request is returned for inspection).
        """
        # Cover all three scenarios: native ES log, bkdata, third-party ES.
        scenario_list = [Scenario.LOG, Scenario.BKDATA, Scenario.ES]
        for scenario in scenario_list:
            assert_result = copy.deepcopy(SEARCH_RESULT)
            params = copy.deepcopy(SEARCH_DICT)
            params.update({"scenario_id": scenario})
            assert_result.update({"scenario": scenario})
            res = EsQuery(params).search()
            # Sort the response indices for a stable comparison.
            res["indices"] = ",".join(sorted(res["indices"].split(",")))
            # Sort the expected indices the same way.
            assert_result["indices"] = ",".join(sorted(ES_QUERY_INDICES[scenario].split(",")))
            self.maxDiff = 10000
            self.assertEqual(res, assert_result)

    @patch("apps.log_esquery.esquery.client.QueryClientLog.QueryClientLog.query", return_value=QUERY_RESULT_LOG)
    @patch("apps.log_esquery.esquery.client.QueryClientEs.QueryClientEs.query", return_value=QUERY_RESULT_ES)
    @patch(
        "apps.log_esquery.esquery.client.QueryClientBkData.QueryClientBkData.query", return_value=QUERY_RESULT_BKDATA
    )
    def test_search(self, *args, **kwargs):
        """Test esquery.search() with every backend client's query() mocked out."""
        # Native ES log scenario.
        params = copy.deepcopy(SEARCH_DICT)
        params.pop("debug")
        res = EsQuery(params).search()
        if res.get("indices"):
            # Sort the response indices for a stable comparison.
            res["indices"] = ",".join(sorted(res["indices"].split(",")))
        self.assertEqual(res, {})
        # bkdata scenario.
        params = copy.deepcopy(SEARCH_DICT)
        params.pop("debug")
        params.update({"scenario_id": Scenario.BKDATA})
        res = EsQuery(params).search()
        if res.get("indices"):
            res["indices"] = ",".join(sorted(res["indices"].split(",")))
        self.assertEqual(res, {})
        # Third-party ES scenario.
        params = copy.deepcopy(SEARCH_DICT)
        params.pop("debug")
        params.update({"scenario_id": Scenario.ES})
        res = EsQuery(params).search()
        if res.get("indices"):
            res["indices"] = ",".join(sorted(res["indices"].split(",")))
        self.assertEqual(res, {})

    @patch("apps.log_esquery.esquery.client.QueryClientEs.QueryClientEs.scroll", return_value=SCROLL_RESULT_ES)
    @patch("apps.log_esquery.esquery.client.QueryClientLog.QueryClientLog.scroll", return_value=SCROLL_RESULT_LOG)
    def test_scroll(self, *args, **kwargs):
        """
        Test esquery.scroll() for the scenarios that support scrolling.
        """
        # Native ES log scenario.
        params = copy.deepcopy(SCROLL_DICT)
        result = EsQuery(params).scroll()
        self.assertEqual(result, SCROLL_RESULT_LOG)
        # Third-party ES scenario.
        params = copy.deepcopy(SCROLL_DICT)
        params.update({"scenario_id": Scenario.ES})
        result = EsQuery(params).scroll()
        self.assertEqual(result, SCROLL_RESULT_ES)

    def test_scroll_exception(self):
        """
        scroll() does not support the bkdata scenario: expect ScenarioNotSupportedException.
        """
        params = copy.deepcopy(SCROLL_DICT)
        params.update({"scenario_id": Scenario.BKDATA})
        with self.assertRaises(ScenarioNotSupportedException):
            EsQuery(params).scroll()

    @patch("apps.log_esquery.esquery.client.QueryClientEs.QueryClientEs.query", return_value={})
    @patch("apps.log_esquery.esquery.client.QueryClientBkData.QueryClientBkData.query", return_value={})
    @patch("apps.log_esquery.esquery.client.QueryClientLog.QueryClientLog.query", return_value={})
    def test_dsl(self, *args, **kwargs):
        """
        Test esquery.dsl() (returns the serialized query per scenario).
        """
        # Native ES log scenario.
        params = copy.deepcopy(DSL_DICT)
        result = EsQuery(params).dsl()
        self.assertEqual(result, DSL_RESULT_LOG)
        # bkdata scenario.
        params = copy.deepcopy(DSL_DICT)
        params.update({"scenario_id": Scenario.BKDATA})
        result = EsQuery(params).dsl()
        self.assertEqual(result, DSL_RESULT_BKDATA)
        # Third-party ES scenario.
        params = copy.deepcopy(DSL_DICT)
        params.update({"scenario_id": Scenario.ES})
        result = EsQuery(params).dsl()
        self.assertEqual(result, DSL_RESULT_ES)

    @patch("apps.log_esquery.esquery.client.QueryClientEs.QueryClientEs.mapping", return_value={})
    @patch("apps.log_esquery.esquery.client.QueryClientBkData.QueryClientBkData.mapping", return_value={})
    @patch("apps.log_esquery.esquery.client.QueryClientLog.QueryClientLog.mapping", return_value={})
    def test_mapping(self, *args, **kwargs):
        """Test esquery.mapping() with every backend client's mapping() mocked out."""
        # Native ES log scenario.
        params = copy.deepcopy(MAPPING_DICT)
        result = EsQuery(params).mapping()
        self.assertEqual(result, MAPPING_RESULT_LOG)
        # bkdata scenario.
        params = copy.deepcopy(MAPPING_DICT)
        params.update({"scenario_id": Scenario.BKDATA})
        result = EsQuery(params).mapping()
        self.assertEqual(result, MAPPING_RESULT_BKDATA)
        # Third-party ES scenario.
        params = copy.deepcopy(MAPPING_DICT)
        params.update({"scenario_id": Scenario.ES})
        result = EsQuery(params).mapping()
        self.assertEqual(result, MAPPING_RESULT_ES)

    @patch("apps.log_esquery.esquery.client.QueryClientEs.QueryClientEs.mapping", return_value={})
    @patch("apps.log_esquery.esquery.client.QueryClientBkData.QueryClientBkData.mapping", return_value={})
    @patch("apps.log_esquery.esquery.client.QueryClientLog.QueryClientLog.mapping", return_value={})
    def test_mapping_time(self, *args, **kwargs):
        """Test esquery.mapping() with an explicit custom time range."""
        # Custom time range.
        params = copy.deepcopy(MAPPING_DICT_TIME)
        params.update({"scenario_id": Scenario.LOG})
        result = EsQuery(params).mapping()
        self.assertEqual(result, MAPPING_RESULT_LOG)

    @patch("apps.api.TransferApi.get_result_table_storage", lambda _: CLUSTER_INFOS)
    def test_indices(self):
        """
        Test EsQuery.indices(), with and without storage details.
        """
        # Seed the test database with one CollectorConfig row.
        CollectorConfig.objects.create(
            collector_config_id=231,
            collector_config_name="test3333",
            bk_app_code="bk_log_search",
            collector_scenario_id="row",
            bk_biz_id=BK_BIZ_ID,
            category_id="os",
            target_object_type="HOST",
            target_node_type="TOPO",
            target_nodes=[{"bk_inst_id": 52, "bk_obj_id": "module"}],
            target_subscription_diff={},
            description="test3333",
            is_active=True,
            bk_data_id=1500586,
            table_id="2_bklog.test3333",
            subscription_id=2103,
            task_id_list=["1331697"],
        )
        params = copy.deepcopy(INDICES_DICT)
        result = EsQuery(params).indices()
        self.assertEqual(result, INDICES_RESULT_WITHOUT_STORAGE)
        params.update({"with_storage": True})
        result = EsQuery(params).indices()
        self.assertEqual(result, INDICES_RESULT_WITH_STORAGE)

    def test_indices_exception(self):
        """
        Test ScenarioQueryIndexFailException.
        """
        # bkdata scenario.
        params = copy.deepcopy(INDICES_DICT)
        params.update({"scenario_id": Scenario.BKDATA})
        # Drop the bk_biz_id entry so index resolution must fail.
        params.pop("bk_biz_id")
        with self.assertRaises(ScenarioQueryIndexFailException):
            EsQuery(params).indices()

    @patch("apps.utils.thread.MultiExecuteFunc.append", return_value=[])
    @patch("apps.utils.thread.MultiExecuteFunc.run", return_value=CONFIG_DATA)
    def test_get_cluster_info(self, *args, **kwargs):
        """
        Test EsQuery.get_cluster_info().
        """
        params = copy.deepcopy(GET_CLUSTER_INFO_DICT)
        result = EsQuery(params).get_cluster_info()
        self.assertEqual(result, GET_CLUSTER_INFO_RESULT)

    @patch("apps.utils.thread.MultiExecuteFunc.append", return_value=[])
    @patch("apps.utils.thread.MultiExecuteFunc.run")
    def test_get_cluster_info_exception(self, config_data, *args, **kwargs):
        """
        Test IndexResultTableApiException.
        """
        params = copy.deepcopy(GET_CLUSTER_INFO_EXCEPTION_DICT)
        # Missing result-table config -> API exception.
        config_data.return_value = copy.deepcopy(GET_CLUSTER_INFO_EXCEPTION_CONFIG_DATA).pop("result_table_config")
        with self.assertRaises(IndexResultTableApiException):
            EsQuery(params).get_cluster_info()
        # Missing storage entry for the requested index -> API exception.
        config_data.return_value = copy.deepcopy(GET_CLUSTER_INFO_EXCEPTION_CONFIG_DATA).pop("result_table_storage")
        with self.assertRaises(IndexResultTableApiException):
            EsQuery(params).get_cluster_info()
        # Empty storage mapping -> API exception.
        GET_CLUSTER_INFO_EXCEPTION_CONFIG_DATA.update({"result_table_storage": {}})
        config_data.return_value = GET_CLUSTER_INFO_EXCEPTION_CONFIG_DATA
        with self.assertRaises(IndexResultTableApiException):
            EsQuery(params).get_cluster_info()
|
en
| 0.607295
|
# -*- coding: utf-8 -*- Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available. Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. BK-LOG 蓝鲸日志平台 is licensed under the MIT License. License for BK-LOG 蓝鲸日志平台: -------------------------------------------------------------------- Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. We undertake not to change the open source license (MIT license) applicable to the current version of the project delivered to anyone in the future. 测试 esquery.search() # 原生ES bkdata 第三方ES # response indices排序 # assert value indices排序 # 原生ES # response indices排序 # BKDATA # 第三方ES 测试 esquery.scroll() # 原生ES # 第三方ES 测试 scroll不支持bkdata场景 ScenarioNotSupportedException 测试 esquery.dsl() # 原生ES # bkdata # 第三方ES # 原生ES # bkdata # 第三方ES # 自定义时间 测试 EsQuery.indices() # 测试数据库添加一条CollectorConfig数据 测试 ScenarioQueryIndexFailException # bkdata # 删除 bk_biz_id 键值对 测试 EsQuery.get_cluster_info() 测试 IndexResultTableApiException
| 1.240489
| 1
|
postprocessor.py
|
tushartushar/designite_util
| 0
|
6629723
|
# This script reads the output generated by DesigniteJava tool
# and emits the output indexed by filepath i.e.,
# Filepath, smell, project, package, type, method, cause, start_line_no
import os.path
import sys
from model import Type, Method, ImplSmell, DesignSmell, ArchSmell
def _get_type_list(out_path):
    """Parse TypeMetrics.csv under *out_path* into a list of Type records."""
    metrics_path = os.path.join(out_path, 'TypeMetrics.csv')
    types = []
    if not os.path.exists(metrics_path):
        return types
    with open(metrics_path, 'r', encoding='utf8', errors='ignore') as fh:
        next(fh, None)  # skip the CSV header row
        for row in fh:
            fields = row.split(',')
            if len(fields) > 15:
                types.append(Type(fields[0], fields[1], fields[2],
                                  fields[14], fields[15]))
    return types
def _get_method_list(out_path):
    """Parse MethodMetrics.csv under *out_path* into a list of Method records."""
    metrics_path = os.path.join(out_path, 'MethodMetrics.csv')
    methods = []
    if not os.path.exists(metrics_path):
        return methods
    with open(metrics_path, 'r', encoding='utf8', errors='ignore') as fh:
        next(fh, None)  # skip the CSV header row
        for row in fh:
            fields = row.split(',')
            if len(fields) > 7:
                methods.append(Method(fields[0], fields[1], fields[2],
                                      fields[3], fields[7]))
    return methods
def _get_impl_smells_list(out_path):
    """Parse ImplementationSmells.csv under *out_path* into ImplSmell records."""
    smells_path = os.path.join(out_path, 'ImplementationSmells.csv')
    smells = []
    if not os.path.exists(smells_path):
        return smells
    with open(smells_path, 'r', encoding='utf8', errors='ignore') as fh:
        next(fh, None)  # skip the CSV header row
        for row in fh:
            fields = row.split(',')
            if len(fields) > 6:
                smells.append(ImplSmell(fields[0], fields[1], fields[2],
                                        fields[3], fields[4], fields[5],
                                        fields[6]))
    return smells
def _process_impl_smells(type_list, method_list, impl_smell_list, out_path):
    """Write one row per implementation smell to postprocessed.csv.

    Creates/truncates the output file ('w' mode) and writes the header line;
    the design- and architecture-smell processors append after this.
    """
    out_file = os.path.join(out_path, 'postprocessed.csv')
    with open(out_file, 'w', encoding='utf8', errors='ignore') as file:
        file.write('Filepath,smell,project,package,type,method,cause,start_line_no\n')
        for smell in impl_smell_list:
            # Match the smell to its method on the full
            # (project, package, type, method, start line) key.
            methods = [item for item in method_list if
                       smell.project_name == item.project_name and
                       smell.package_name == item.package_name and
                       smell.type_name == item.type_name and
                       smell.method_name == item.method_name and
                       smell.m_start_line_no == item.start_line_no]
            if len(methods) > 1:
                print('overridden methods detected')
            if len(methods) == 0:
                print('method not found')
                continue
            target_method = methods[0]
            # Resolve the enclosing type to recover the source file path.
            types = [item for item in type_list if
                     smell.project_name == item.project_name and
                     smell.package_name == item.package_name and
                     smell.type_name == item.type_name]
            if len(types) > 1:
                print('more than one classes found')
            if len(types) == 0:
                print('type not found')
                continue
            target_type = types[0]
            # NOTE(review): no explicit '\n' is appended; this relies on
            # target_method.start_line_no retaining the trailing newline of the
            # source CSV row -- confirm for rows with extra columns.
            line = target_type.file_path + ',' + smell.smell_name + ',' +\
                target_type.project_name + ',' + target_type.package_name + ',' +\
                target_type.type_name + ',' + target_method.method_name + ',' +\
                smell.cause + ',' + target_method.start_line_no
            file.write(line)
def _get_design_smells_list(out_path):
    """Parse DesignSmells.csv under *out_path* into DesignSmell records."""
    smells_path = os.path.join(out_path, 'DesignSmells.csv')
    smells = []
    if not os.path.exists(smells_path):
        return smells
    with open(smells_path, 'r', encoding='utf8', errors='ignore') as fh:
        next(fh, None)  # skip the CSV header row
        for row in fh:
            fields = row.split(',')
            if len(fields) > 4:
                smells.append(DesignSmell(fields[0], fields[1], fields[2],
                                          fields[3], fields[4]))
    return smells
def _process_design_smells(type_list, design_smell_list, out_path):
    """Append one row per design smell to postprocessed.csv.

    Uses 'a' mode so rows land after the header/rows already written by
    _process_impl_smells.  The method column is left empty (',,') because
    design smells are type-level.
    """
    out_file = os.path.join(out_path, 'postprocessed.csv')
    with open(out_file, 'a', encoding='utf8', errors='ignore') as file:
        for smell in design_smell_list:
            # Resolve the smelly type to recover its file path and start line.
            types = [item for item in type_list if
                     smell.project_name == item.project_name and
                     smell.package_name == item.package_name and
                     smell.type_name == item.type_name]
            if len(types) > 1:
                print('more than one classes found')
            if len(types) == 0:
                print('type not found')
                continue
            target_type = types[0]
            # NOTE(review): no explicit '\n' is written; relies on
            # target_type.start_line_no keeping the trailing newline of the
            # source CSV row -- confirm.
            line = target_type.file_path + ',' + smell.smell_name + ',' + \
                target_type.project_name + ',' + target_type.package_name + ',' + \
                target_type.type_name + ',,' + \
                smell.cause + ',' + target_type.start_line_no
            file.write(line)
def _get_arch_smells_list(out_path):
    """Parse ArchitectureSmells.csv under *out_path* into ArchSmell records."""
    smells_path = os.path.join(out_path, 'ArchitectureSmells.csv')
    smells = []
    if not os.path.exists(smells_path):
        return smells
    with open(smells_path, 'r', encoding='utf8', errors='ignore') as fh:
        next(fh, None)  # skip the CSV header row
        for row in fh:
            fields = row.split(',')
            if len(fields) > 3:
                smells.append(ArchSmell(fields[0], fields[1], fields[2],
                                        fields[3]))
    return smells
def _process_arch_smells(type_list, arch_smell_list, out_path):
    """Append one row per type affected by each architecture smell.

    Architecture smells are package-level, so every type in the matching
    package gets a row; the method column is left empty (',,').  Appends
    ('a' mode) after the impl/design rows in postprocessed.csv.
    """
    out_file = os.path.join(out_path, 'postprocessed.csv')
    with open(out_file, 'a', encoding='utf8', errors='ignore') as file:
        for smell in arch_smell_list:
            types = [item for item in type_list if
                     smell.project_name == item.project_name and
                     smell.package_name == item.package_name]
            if len(types) == 0:
                # A 'dense structure' smell marks every package of the
                # project as smelly, so fall back to all project types.
                if smell.package_name == '<All packages>':
                    types = [item for item in type_list if
                             smell.project_name == item.project_name]
            if len(types) == 0:
                print('no matching types found')
                continue
            # Loop variable renamed from 'type' to avoid shadowing the builtin.
            for smelly_type in types:
                # NOTE(review): no explicit '\n' is written; relies on
                # start_line_no keeping the source row's trailing newline.
                line = smelly_type.file_path + ',' + smell.smell_name + ',' + \
                    smelly_type.project_name + ',' + smelly_type.package_name + ',' + \
                    smelly_type.type_name + ',,' + \
                    smell.cause + ',' + smelly_type.start_line_no
                file.write(line)
def process(out_path):
    """Read all DesigniteJava CSV outputs under *out_path* and emit postprocessed.csv.

    The implementation-smell pass runs first (it creates the file and writes
    the header); design- and architecture-smell passes append to it.
    """
    types = _get_type_list(out_path)
    methods = _get_method_list(out_path)
    _process_impl_smells(types, methods, _get_impl_smells_list(out_path), out_path)
    _process_design_smells(types, _get_design_smells_list(out_path), out_path)
    _process_arch_smells(types, _get_arch_smells_list(out_path), out_path)
if __name__ == '__main__':
    # A single argument is expected: the DesigniteJava output directory.
    if len(sys.argv) < 2:
        print('Arg error: specify designite output folder path as the parameter.')
    else:
        process(sys.argv[1])
|
# This script reads the output generated by DesigniteJava tool
# and emits the output indexed by filepath i.e.,
# Filepath, smell, project, package, type, method, cause, start_line_no
import os.path
import sys
from model import Type, Method, ImplSmell, DesignSmell, ArchSmell
def _get_type_list(out_path):
type_file = os.path.join(out_path, 'TypeMetrics.csv')
type_list = list()
if os.path.exists(type_file):
is_first_line = True
with open(type_file, 'r', encoding='utf8', errors='ignore') as file:
for line in file:
if is_first_line:
is_first_line = False
continue
tokens = line.split(',')
if len(tokens) > 15:
type_list.append(Type(tokens[0], tokens[1],
tokens[2], tokens[14], tokens[15]))
return type_list
def _get_method_list(out_path):
method_file = os.path.join(out_path, 'MethodMetrics.csv')
method_list = list()
if os.path.exists(method_file):
is_first_line = True
with open(method_file, 'r', encoding='utf8', errors='ignore') as file:
for line in file:
if is_first_line:
is_first_line = False
continue
tokens = line.split(',')
if len(tokens) > 7:
method_list.append(Method(tokens[0], tokens[1],
tokens[2], tokens[3], tokens[7]))
return method_list
def _get_impl_smells_list(out_path):
impl_file = os.path.join(out_path, 'ImplementationSmells.csv')
smell_list = list()
if os.path.exists(impl_file):
is_first_line = True
with open(impl_file, 'r', encoding='utf8', errors='ignore') as file:
for line in file:
if is_first_line:
is_first_line = False
continue
tokens = line.split(',')
if len(tokens) > 6:
smell_list.append(ImplSmell(tokens[0], tokens[1],
tokens[2], tokens[3], tokens[4], tokens[5], tokens[6]))
return smell_list
def _process_impl_smells(type_list, method_list, impl_smell_list, out_path):
out_file = os.path.join(out_path, 'postprocessed.csv')
with open(out_file, 'w', encoding='utf8', errors='ignore') as file:
file.write('Filepath,smell,project,package,type,method,cause,start_line_no\n')
for smell in impl_smell_list:
methods = [item for item in method_list if
smell.project_name == item.project_name and
smell.package_name == item.package_name and
smell.type_name == item.type_name and
smell.method_name == item.method_name and
smell.m_start_line_no == item.start_line_no]
if len(methods) > 1:
print('overridden methods detected')
if len(methods) == 0:
print('method not found')
continue
target_method = methods[0]
types = [item for item in type_list if
smell.project_name == item.project_name and
smell.package_name == item.package_name and
smell.type_name == item.type_name]
if len(types) > 1:
print('more than one classes found')
if len(types) == 0:
print('type not found')
continue
target_type = types[0]
line = target_type.file_path + ',' + smell.smell_name + ',' +\
target_type.project_name + ',' + target_type.package_name + ',' +\
target_type.type_name + ',' + target_method.method_name + ',' +\
smell.cause + ',' + target_method.start_line_no
file.write(line)
def _get_design_smells_list(out_path):
design_file = os.path.join(out_path, 'DesignSmells.csv')
smell_list = list()
if os.path.exists(design_file):
is_first_line = True
with open(design_file, 'r', encoding='utf8', errors='ignore') as file:
for line in file:
if is_first_line:
is_first_line = False
continue
tokens = line.split(',')
if len(tokens) > 4:
smell_list.append(DesignSmell(tokens[0], tokens[1],
tokens[2], tokens[3], tokens[4]))
return smell_list
def _process_design_smells(type_list, design_smell_list, out_path):
out_file = os.path.join(out_path, 'postprocessed.csv')
with open(out_file, 'a', encoding='utf8', errors='ignore') as file:
for smell in design_smell_list:
types = [item for item in type_list if
smell.project_name == item.project_name and
smell.package_name == item.package_name and
smell.type_name == item.type_name]
if len(types) > 1:
print('more than one classes found')
if len(types) == 0:
print('type not found')
continue
target_type = types[0]
line = target_type.file_path + ',' + smell.smell_name + ',' + \
target_type.project_name + ',' + target_type.package_name + ',' + \
target_type.type_name + ',,' + \
smell.cause + ',' + target_type.start_line_no
file.write(line)
def _get_arch_smells_list(out_path):
arch_file = os.path.join(out_path, 'ArchitectureSmells.csv')
smell_list = list()
if os.path.exists(arch_file):
is_first_line = True
with open(arch_file, 'r', encoding='utf8', errors='ignore') as file:
for line in file:
if is_first_line:
is_first_line = False
continue
tokens = line.split(',')
if len(tokens) > 3:
smell_list.append(ArchSmell(tokens[0], tokens[1],
tokens[2], tokens[3]))
return smell_list
def _process_arch_smells(type_list, arch_smell_list, out_path):
out_file = os.path.join(out_path, 'postprocessed.csv')
with open(out_file, 'a', encoding='utf8', errors='ignore') as file:
for smell in arch_smell_list:
types = [item for item in type_list if
smell.project_name == item.project_name and
smell.package_name == item.package_name]
if len(types) == 0:
# check if it 'dense structure' smell. In this case, all packages are marked as smelly.
if smell.package_name == '<All packages>':
types = [item for item in type_list if
smell.project_name == item.project_name]
if len(types) == 0:
print('no matching types found')
continue
for type in types:
line = type.file_path + ',' + smell.smell_name + ',' + \
type.project_name + ',' + type.package_name + ',' + \
type.type_name + ',,' + \
smell.cause + ',' + type.start_line_no
file.write(line)
def process(out_path):
type_list = _get_type_list(out_path)
method_list = _get_method_list(out_path)
impl_smell_list = _get_impl_smells_list(out_path)
_process_impl_smells(type_list, method_list, impl_smell_list, out_path)
design_smell_list = _get_design_smells_list(out_path)
_process_design_smells(type_list, design_smell_list, out_path)
arch_smell_list = _get_arch_smells_list(out_path)
_process_arch_smells(type_list, arch_smell_list, out_path)
if __name__ == '__main__':
if len(sys.argv) > 1:
process(sys.argv[1])
else:
print('Arg error: specify designite output folder path as the parameter.')
|
en
| 0.873235
|
# This script reads the output generated by DesigniteJava tool # and emits the output indexed by filepath i.e., # Filepath, smell, project, package, type, method, cause, start_line_no # check if it 'dense structure' smell. In this case, all packages are marked as smelly.
| 2.466852
| 2
|
tests/bootstrap/__init__.py
|
fedaykin/salt-bootstrap
| 1
|
6629724
|
<filename>tests/bootstrap/__init__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
bootstrap
~~~~~~~~~
salt-bootstrap script unittesting
:codeauthor: :email:`<NAME> (<EMAIL>)`
:copyright: © 2013 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
"""
import os
import sys
import fcntl
import signal
import tempfile
import subprocess
from datetime import datetime, timedelta
# support python < 2.7 via unittest2
if sys.version_info < (2, 7):
try:
from unittest2 import (
TestLoader,
TextTestRunner,
TestCase,
expectedFailure,
TestSuite,
skipIf,
)
except ImportError:
raise SystemExit('You need to install unittest2 to run the salt tests')
else:
from unittest import (
TestLoader,
TextTestRunner,
TestCase,
expectedFailure,
TestSuite,
skipIf,
)
from bootstrap.ext.os_data import GRAINS
TEST_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
EXT_DIR = os.path.join(TEST_DIR, 'ext')
PARENT_DIR = os.path.dirname(TEST_DIR)
BOOTSTRAP_SCRIPT_PATH = os.path.join(PARENT_DIR, 'bootstrap-salt.sh')
def non_block_read(output):
    """Read whatever is currently available from *output* without blocking.

    Switches the underlying file descriptor to non-blocking mode (the change
    is permanent for the stream) and returns the data read, or '' when
    nothing is available yet.
    """
    fd = output.fileno()
    fl = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
    try:
        return output.read()
    except (IOError, OSError):
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.  EAGAIN/EWOULDBLOCK on an empty
        # non-blocking pipe surfaces as IOError/OSError.
        return ''
class BootstrapTestCase(TestCase):
    """TestCase base that runs the bootstrap shell script and asserts on its result."""

    def run_script(self,
                   script=BOOTSTRAP_SCRIPT_PATH,
                   args=(),
                   cwd=PARENT_DIR,
                   timeout=None,
                   executable='/bin/sh',
                   stream_stds=False):
        """Execute *script* with *args* through a shell, draining its output.

        Returns ``(returncode, stdout_lines, stderr_lines)``.  When *timeout*
        (seconds) elapses, the child's process group is first interrupted
        (SIGINT) and, on the next expiry check, killed (SIGKILL); a synthetic
        ``(1, [...], [...])`` result describing the timeout is returned.
        With *stream_stds* the child's output is also echoed live.
        """
        # Removed a dead `cmd = [script] + list(args)` assignment that was
        # unconditionally overwritten by the ' '.join(...) below.
        out = err = ''
        popen_kwargs = {
            'cwd': cwd,
            'shell': True,
            'stderr': subprocess.PIPE,
            'stdout': subprocess.PIPE,
            'close_fds': True,
            'executable': executable,

            # detach from parent group (no more inherited signals!)
            'preexec_fn': os.setpgrp
        }
        cmd = ' '.join(filter(None, [script] + list(args)))
        process = subprocess.Popen(cmd, **popen_kwargs)
        if timeout is not None:
            stop_at = datetime.now() + timedelta(seconds=timeout)
            term_sent = False
        while True:
            process.poll()
            if process.returncode is not None:
                break
            # Drain both pipes without blocking so a full pipe buffer cannot
            # deadlock the child.
            rout = non_block_read(process.stdout)
            if rout:
                out += rout
                if stream_stds:
                    sys.stdout.write(rout)
            rerr = non_block_read(process.stderr)
            if rerr:
                err += rerr
                if stream_stds:
                    sys.stderr.write(rerr)
            if timeout is not None:
                now = datetime.now()
                if now > stop_at:
                    if term_sent is False:
                        # Kill the process group since sending the term signal
                        # would only terminate the shell, not the command
                        # executed in the shell
                        os.killpg(os.getpgid(process.pid), signal.SIGINT)
                        term_sent = True
                        continue

                    # As a last resort, kill the process group
                    os.killpg(os.getpgid(process.pid), signal.SIGKILL)
                    return 1, [
                        'Process took more than {0} seconds to complete. '
                        'Process Killed! Current STDOUT: \n{1}'.format(
                            timeout, out
                        )
                    ], [
                        'Process took more than {0} seconds to complete. '
                        'Process Killed! Current STDERR: \n{1}'.format(
                            timeout, err
                        )
                    ]

        process.communicate()
        try:
            return process.returncode, out.splitlines(), err.splitlines()
        finally:
            try:
                process.terminate()
            except OSError:
                # process already terminated
                pass

    def assert_script_result(self, fail_msg, expected_rc, process_details):
        """Fail with *fail_msg* plus captured STDOUT/STDERR unless rc matches."""
        rc, out, err = process_details
        if rc != expected_rc:
            err_msg = '{0}:\n'.format(fail_msg)
            if out:
                err_msg = '{0}STDOUT:\n{1}\n'.format(err_msg, '\n'.join(out))
            if err:
                err_msg = '{0}STDERR:\n{1}\n'.format(err_msg, '\n'.join(err))
            raise AssertionError(err_msg.rstrip())
|
<filename>tests/bootstrap/__init__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
bootstrap
~~~~~~~~~
salt-bootstrap script unittesting
:codeauthor: :email:`<NAME> (<EMAIL>)`
:copyright: © 2013 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
"""
import os
import sys
import fcntl
import signal
import tempfile
import subprocess
from datetime import datetime, timedelta
# support python < 2.7 via unittest2
if sys.version_info < (2, 7):
try:
from unittest2 import (
TestLoader,
TextTestRunner,
TestCase,
expectedFailure,
TestSuite,
skipIf,
)
except ImportError:
raise SystemExit('You need to install unittest2 to run the salt tests')
else:
from unittest import (
TestLoader,
TextTestRunner,
TestCase,
expectedFailure,
TestSuite,
skipIf,
)
from bootstrap.ext.os_data import GRAINS
TEST_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
EXT_DIR = os.path.join(TEST_DIR, 'ext')
PARENT_DIR = os.path.dirname(TEST_DIR)
BOOTSTRAP_SCRIPT_PATH = os.path.join(PARENT_DIR, 'bootstrap-salt.sh')
def non_block_read(output):
fd = output.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
try:
return output.read()
except:
return ''
class BootstrapTestCase(TestCase):
def run_script(self,
script=BOOTSTRAP_SCRIPT_PATH,
args=(),
cwd=PARENT_DIR,
timeout=None,
executable='/bin/sh',
stream_stds=False):
cmd = [script] + list(args)
out = err = ''
popen_kwargs = {
'cwd': cwd,
'shell': True,
'stderr': subprocess.PIPE,
'stdout': subprocess.PIPE,
'close_fds': True,
'executable': executable,
# detach from parent group (no more inherited signals!)
'preexec_fn': os.setpgrp
}
cmd = ' '.join(filter(None, [script] + list(args)))
process = subprocess.Popen(cmd, **popen_kwargs)
if timeout is not None:
stop_at = datetime.now() + timedelta(seconds=timeout)
term_sent = False
while True:
process.poll()
if process.returncode is not None:
break
rout = non_block_read(process.stdout)
if rout:
out += rout
if stream_stds:
sys.stdout.write(rout)
rerr = non_block_read(process.stderr)
if rerr:
err += rerr
if stream_stds:
sys.stderr.write(rerr)
if timeout is not None:
now = datetime.now()
if now > stop_at:
if term_sent is False:
# Kill the process group since sending the term signal
# would only terminate the shell, not the command
# executed in the shell
os.killpg(os.getpgid(process.pid), signal.SIGINT)
term_sent = True
continue
# As a last resort, kill the process group
os.killpg(os.getpgid(process.pid), signal.SIGKILL)
return 1, [
'Process took more than {0} seconds to complete. '
'Process Killed! Current STDOUT: \n{1}'.format(
timeout, out
)
], [
'Process took more than {0} seconds to complete. '
'Process Killed! Current STDERR: \n{1}'.format(
timeout, err
)
]
process.communicate()
try:
return process.returncode, out.splitlines(), err.splitlines()
finally:
try:
process.terminate()
except OSError:
# process already terminated
pass
def assert_script_result(self, fail_msg, expected_rc, process_details):
rc, out, err = process_details
if rc != expected_rc:
err_msg = '{0}:\n'.format(fail_msg)
if out:
err_msg = '{0}STDOUT:\n{1}\n'.format(err_msg, '\n'.join(out))
if err:
err_msg = '{0}STDERR:\n{1}\n'.format(err_msg, '\n'.join(err))
raise AssertionError(err_msg.rstrip())
|
en
| 0.796495
|
#!/usr/bin/env python # -*- coding: utf-8 -*- bootstrap ~~~~~~~~~ salt-bootstrap script unittesting :codeauthor: :email:`<NAME> (<EMAIL>)` :copyright: © 2013 by the SaltStack Team, see AUTHORS for more details. :license: Apache 2.0, see LICENSE for more details. # support python < 2.7 via unittest2 # detach from parent group (no more inherited signals!) # Kill the process group since sending the term signal # would only terminate the shell, not the command # executed in the shell # As a last resort, kill the process group # process already terminated
| 2.209046
| 2
|
NASA SPACEAPPS CHALLENGE/Solution/Software part/Astronomical Data and Python Libraries/Astropy/astropy-1.1.2/astropy/utils/iers/tests/test_iers.py
|
sahirsharma/Martian
| 0
|
6629725
|
<filename>NASA SPACEAPPS CHALLENGE/Solution/Software part/Astronomical Data and Python Libraries/Astropy/astropy-1.1.2/astropy/utils/iers/tests/test_iers.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import numpy as np
from ....tests.helper import pytest, assert_quantity_allclose
from .. import iers
from .... import units as u
from ....table import QTable
from ....time import Time
from ....extern.six.moves import urllib
FILE_NOT_FOUND_ERROR = getattr(__builtins__, 'FileNotFoundError', IOError)
try:
iers.IERS_A.open() # check if IERS_A is available
except IOError:
HAS_IERS_A = False
else:
HAS_IERS_A = True
IERS_A_EXCERPT = os.path.join(os.path.dirname(__file__), 'iers_a_excerpt')
class TestBasic():
"""Basic tests that IERS_B returns correct values"""
def test_simple(self):
iers.IERS.close()
assert iers.IERS.iers_table is None
iers_tab = iers.IERS.open()
assert iers.IERS.iers_table is not None
assert isinstance(iers.IERS.iers_table, QTable)
assert iers_tab['UT1_UTC'].unit is u.second
assert iers_tab['PM_x'].unit is u.arcsecond
assert iers_tab['PM_y'].unit is u.arcsecond
jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])
jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0., 0.5])
ut1_utc = iers_tab.ut1_utc(jd1, jd2)
assert isinstance(ut1_utc, u.Quantity)
assert ut1_utc.unit is u.second
assert_quantity_allclose(ut1_utc, [-0.5868211, -0.5868184, -0.5868184,
0.4131816, 0.41328895] * u.s,
atol=1.*u.ns)
# should be future-proof; surely we've moved to another planet by then
with pytest.raises(IndexError):
ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0.)
# also check it returns the right status
ut1_utc2, status2 = iers_tab.ut1_utc(jd1, jd2, return_status=True)
assert np.all(status2 == iers.FROM_IERS_B)
ut1_utc4, status4 = iers_tab.ut1_utc(1e11, 0., return_status=True)
assert status4 == iers.TIME_BEYOND_IERS_RANGE
# check it works via Time too
t = Time(jd1, jd2, format='jd', scale='utc')
ut1_utc3 = iers_tab.ut1_utc(t)
assert_quantity_allclose(ut1_utc3, [-0.5868211, -0.5868184, -0.5868184,
0.4131816, 0.41328895] *u.s,
atol=1.*u.ns)
def test_open_filename(self):
iers.IERS.close()
iers.IERS.open(iers.IERS_B_FILE)
assert iers.IERS.iers_table is not None
assert isinstance(iers.IERS.iers_table, QTable)
iers.IERS.close()
with pytest.raises(FILE_NOT_FOUND_ERROR):
iers.IERS.open('surely this does not exist')
def test_open_network_url(self):
iers.IERS_A.close()
iers.IERS_A.open("file:" + urllib.request.pathname2url(IERS_A_EXCERPT))
assert iers.IERS_A.iers_table is not None
assert isinstance(iers.IERS_A.iers_table, QTable)
iers.IERS_A.close()
class TestIERS_AExcerpt():
def test_simple(self):
iers_tab = iers.IERS_A.open(IERS_A_EXCERPT)
assert iers_tab['UT1_UTC'].unit is u.second
assert 'P' in iers_tab['UT1Flag']
assert 'I' in iers_tab['UT1Flag']
assert 'B' in iers_tab['UT1Flag']
assert np.all((iers_tab['UT1Flag'] == 'I') |
(iers_tab['UT1Flag'] == 'P') |
(iers_tab['UT1Flag'] == 'B'))
assert iers_tab['PM_x'].unit is u.arcsecond
assert iers_tab['PM_y'].unit is u.arcsecond
assert 'P' in iers_tab['PolPMFlag']
assert 'I' in iers_tab['PolPMFlag']
assert 'B' in iers_tab['PolPMFlag']
assert np.all((iers_tab['PolPMFlag'] == 'P') |
(iers_tab['PolPMFlag'] == 'I') |
(iers_tab['PolPMFlag'] == 'B'))
t = Time([57053., 57054., 57055.], format='mjd')
ut1_utc, status = iers_tab.ut1_utc(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
assert_quantity_allclose(ut1_utc,
[-0.4916557, -0.4925323, -0.4934373] * u.s,
atol=1.*u.ns)
pm_x, pm_y, status = iers_tab.pm_xy(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
assert_quantity_allclose(pm_x,
[0.003734, 0.004581, 0.004623] * u.arcsec,
atol=1.*u.narcsec)
assert_quantity_allclose(pm_y,
[0.310824, 0.313150, 0.315517] * u.arcsec,
atol=1.*u.narcsec)
@pytest.mark.skipif(str('not HAS_IERS_A'))
class TestIERS_A():
def test_simple(self):
iers_tab = iers.IERS_A.open()
jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])
jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0., 0.5])
ut1_utc, status = iers_tab.ut1_utc(jd1, jd2, return_status=True)
assert np.all(status == iers.FROM_IERS_B)
assert_quantity_allclose(ut1_utc, [-0.5868211, -0.5868184, -0.5868184,
0.4131816, 0.41328895] * u.s,
atol=1.*u.ns)
ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0., return_status=True)
assert status2 == iers.TIME_BEYOND_IERS_RANGE
tnow = Time.now()
ut1_utc3, status3 = iers_tab.ut1_utc(tnow, return_status=True)
assert status3 == iers.FROM_IERS_A_PREDICTION
assert ut1_utc3 != 0.
|
<filename>NASA SPACEAPPS CHALLENGE/Solution/Software part/Astronomical Data and Python Libraries/Astropy/astropy-1.1.2/astropy/utils/iers/tests/test_iers.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import numpy as np
from ....tests.helper import pytest, assert_quantity_allclose
from .. import iers
from .... import units as u
from ....table import QTable
from ....time import Time
from ....extern.six.moves import urllib
FILE_NOT_FOUND_ERROR = getattr(__builtins__, 'FileNotFoundError', IOError)
try:
iers.IERS_A.open() # check if IERS_A is available
except IOError:
HAS_IERS_A = False
else:
HAS_IERS_A = True
IERS_A_EXCERPT = os.path.join(os.path.dirname(__file__), 'iers_a_excerpt')
class TestBasic():
"""Basic tests that IERS_B returns correct values"""
def test_simple(self):
iers.IERS.close()
assert iers.IERS.iers_table is None
iers_tab = iers.IERS.open()
assert iers.IERS.iers_table is not None
assert isinstance(iers.IERS.iers_table, QTable)
assert iers_tab['UT1_UTC'].unit is u.second
assert iers_tab['PM_x'].unit is u.arcsecond
assert iers_tab['PM_y'].unit is u.arcsecond
jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])
jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0., 0.5])
ut1_utc = iers_tab.ut1_utc(jd1, jd2)
assert isinstance(ut1_utc, u.Quantity)
assert ut1_utc.unit is u.second
assert_quantity_allclose(ut1_utc, [-0.5868211, -0.5868184, -0.5868184,
0.4131816, 0.41328895] * u.s,
atol=1.*u.ns)
# should be future-proof; surely we've moved to another planet by then
with pytest.raises(IndexError):
ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0.)
# also check it returns the right status
ut1_utc2, status2 = iers_tab.ut1_utc(jd1, jd2, return_status=True)
assert np.all(status2 == iers.FROM_IERS_B)
ut1_utc4, status4 = iers_tab.ut1_utc(1e11, 0., return_status=True)
assert status4 == iers.TIME_BEYOND_IERS_RANGE
# check it works via Time too
t = Time(jd1, jd2, format='jd', scale='utc')
ut1_utc3 = iers_tab.ut1_utc(t)
assert_quantity_allclose(ut1_utc3, [-0.5868211, -0.5868184, -0.5868184,
0.4131816, 0.41328895] *u.s,
atol=1.*u.ns)
def test_open_filename(self):
iers.IERS.close()
iers.IERS.open(iers.IERS_B_FILE)
assert iers.IERS.iers_table is not None
assert isinstance(iers.IERS.iers_table, QTable)
iers.IERS.close()
with pytest.raises(FILE_NOT_FOUND_ERROR):
iers.IERS.open('surely this does not exist')
def test_open_network_url(self):
iers.IERS_A.close()
iers.IERS_A.open("file:" + urllib.request.pathname2url(IERS_A_EXCERPT))
assert iers.IERS_A.iers_table is not None
assert isinstance(iers.IERS_A.iers_table, QTable)
iers.IERS_A.close()
class TestIERS_AExcerpt():
def test_simple(self):
iers_tab = iers.IERS_A.open(IERS_A_EXCERPT)
assert iers_tab['UT1_UTC'].unit is u.second
assert 'P' in iers_tab['UT1Flag']
assert 'I' in iers_tab['UT1Flag']
assert 'B' in iers_tab['UT1Flag']
assert np.all((iers_tab['UT1Flag'] == 'I') |
(iers_tab['UT1Flag'] == 'P') |
(iers_tab['UT1Flag'] == 'B'))
assert iers_tab['PM_x'].unit is u.arcsecond
assert iers_tab['PM_y'].unit is u.arcsecond
assert 'P' in iers_tab['PolPMFlag']
assert 'I' in iers_tab['PolPMFlag']
assert 'B' in iers_tab['PolPMFlag']
assert np.all((iers_tab['PolPMFlag'] == 'P') |
(iers_tab['PolPMFlag'] == 'I') |
(iers_tab['PolPMFlag'] == 'B'))
t = Time([57053., 57054., 57055.], format='mjd')
ut1_utc, status = iers_tab.ut1_utc(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
assert_quantity_allclose(ut1_utc,
[-0.4916557, -0.4925323, -0.4934373] * u.s,
atol=1.*u.ns)
pm_x, pm_y, status = iers_tab.pm_xy(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
assert_quantity_allclose(pm_x,
[0.003734, 0.004581, 0.004623] * u.arcsec,
atol=1.*u.narcsec)
assert_quantity_allclose(pm_y,
[0.310824, 0.313150, 0.315517] * u.arcsec,
atol=1.*u.narcsec)
@pytest.mark.skipif(str('not HAS_IERS_A'))
class TestIERS_A():
def test_simple(self):
iers_tab = iers.IERS_A.open()
jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])
jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0., 0.5])
ut1_utc, status = iers_tab.ut1_utc(jd1, jd2, return_status=True)
assert np.all(status == iers.FROM_IERS_B)
assert_quantity_allclose(ut1_utc, [-0.5868211, -0.5868184, -0.5868184,
0.4131816, 0.41328895] * u.s,
atol=1.*u.ns)
ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0., return_status=True)
assert status2 == iers.TIME_BEYOND_IERS_RANGE
tnow = Time.now()
ut1_utc3, status3 = iers_tab.ut1_utc(tnow, return_status=True)
assert status3 == iers.FROM_IERS_A_PREDICTION
assert ut1_utc3 != 0.
|
en
| 0.760592
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst # check if IERS_A is available Basic tests that IERS_B returns correct values # should be future-proof; surely we've moved to another planet by then # also check it returns the right status # check it works via Time too
| 2.332959
| 2
|
tests/integration/standard/test_custom_payload.py
|
fatelei/python-driver
| 0
|
6629726
|
<reponame>fatelei/python-driver
# Copyright 2013-2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest
import six
from cassandra.query import (SimpleStatement, BatchStatement, BatchType)
from cassandra.cluster import Cluster
from tests.integration import use_singledc, PROTOCOL_VERSION
def setup_module():
use_singledc()
class CustomPayloadTests(unittest.TestCase):
def setUp(self):
if PROTOCOL_VERSION < 4:
raise unittest.SkipTest(
"Native protocol 4,0+ is required for custom payloads, currently using %r"
% (PROTOCOL_VERSION,))
self.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
self.session = self.cluster.connect()
def tearDown(self):
self.cluster.shutdown()
def test_custom_query_basic(self):
"""
Test to validate that custom payloads work with simple queries
creates a simple query and ensures that custom payloads are passed to C*. A custom
query provider is used with C* so we can validate that same custom payloads are sent back
with the results
@since 2.6
@jira_ticket PYTHON-280
@expected_result valid custom payloads should be sent and received
@test_category queries:custom_payload
"""
# Create a simple query statement a
query = "SELECT * FROM system.local"
statement = SimpleStatement(query)
# Validate that various types of custom payloads are sent and received okay
self.validate_various_custom_payloads(statement=statement)
def test_custom_query_batching(self):
"""
Test to validate that custom payloads work with batch queries
creates a batch query and ensures that custom payloads are passed to C*. A custom
query provider is used with C* so we can validate that same custom payloads are sent back
with the results
@since 2.6
@jira_ticket PYTHON-280
@expected_result valid custom payloads should be sent and received
@test_category queries:custom_payload
"""
# Construct Batch Statement
batch = BatchStatement(BatchType.LOGGED)
for i in range(10):
batch.add(SimpleStatement("INSERT INTO test3rf.test (k, v) VALUES (%s, %s)"), (i, i))
# Validate that various types of custom payloads are sent and received okay
self.validate_various_custom_payloads(statement=batch)
def test_custom_query_prepared(self):
"""
Test to validate that custom payloads work with prepared queries
creates a batch query and ensures that custom payloads are passed to C*. A custom
query provider is used with C* so we can validate that same custom payloads are sent back
with the results
@since 2.6
@jira_ticket PYTHON-280
@expected_result valid custom payloads should be sent and received
@test_category queries:custom_payload
"""
# Construct prepared statement
prepared = self.session.prepare(
"""
INSERT INTO test3rf.test (k, v) VALUES (?, ?)
""")
bound = prepared.bind((1, None))
# Validate that various custom payloads are validated correctly
self.validate_various_custom_payloads(statement=bound)
def validate_various_custom_payloads(self, statement):
"""
This is a utility method that given a statement will attempt
to submit the statement with various custom payloads. It will
validate that the custom payloads are sent and received correctly.
@param statement The statement to validate the custom queries in conjunction with
"""
# Simple key value
custom_payload = {'test': b'test_return'}
self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload)
# no key value
custom_payload = {'': b''}
self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload)
# Space value
custom_payload = {' ': b' '}
self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload)
# Long key value pair
key_value = "x" * 10
custom_payload = {key_value: six.b(key_value)}
self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload)
# Max supported value key pairs according C* binary protocol v4 should be 65534 (unsigned short max value)
for i in range(65534):
custom_payload[str(i)] = six.b('x')
self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload)
# Add one custom payload to this is too many key value pairs and should fail
custom_payload[str(65535)] = six.b('x')
with self.assertRaises(ValueError):
self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload)
def execute_async_validate_custom_payload(self, statement, custom_payload):
"""
This is just a simple method that submits a statement with a payload, and validates
that the custom payload we submitted matches the one that we got back
@param statement The statement to execute
@param custom_payload The custom payload to submit with
"""
# Submit the statement with our custom payload. Validate the one
# we receive from the server matches
response_future = self.session.execute_async(statement, custom_payload=custom_payload)
response_future.result()
returned_custom_payload = response_future.custom_payload
self.assertEqual(custom_payload, returned_custom_payload)
|
# Copyright 2013-2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest
import six
from cassandra.query import (SimpleStatement, BatchStatement, BatchType)
from cassandra.cluster import Cluster
from tests.integration import use_singledc, PROTOCOL_VERSION
def setup_module():
use_singledc()
class CustomPayloadTests(unittest.TestCase):
def setUp(self):
if PROTOCOL_VERSION < 4:
raise unittest.SkipTest(
"Native protocol 4,0+ is required for custom payloads, currently using %r"
% (PROTOCOL_VERSION,))
self.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
self.session = self.cluster.connect()
def tearDown(self):
self.cluster.shutdown()
def test_custom_query_basic(self):
"""
Test to validate that custom payloads work with simple queries
creates a simple query and ensures that custom payloads are passed to C*. A custom
query provider is used with C* so we can validate that same custom payloads are sent back
with the results
@since 2.6
@jira_ticket PYTHON-280
@expected_result valid custom payloads should be sent and received
@test_category queries:custom_payload
"""
# Create a simple query statement a
query = "SELECT * FROM system.local"
statement = SimpleStatement(query)
# Validate that various types of custom payloads are sent and received okay
self.validate_various_custom_payloads(statement=statement)
def test_custom_query_batching(self):
"""
Test to validate that custom payloads work with batch queries
creates a batch query and ensures that custom payloads are passed to C*. A custom
query provider is used with C* so we can validate that same custom payloads are sent back
with the results
@since 2.6
@jira_ticket PYTHON-280
@expected_result valid custom payloads should be sent and received
@test_category queries:custom_payload
"""
# Construct Batch Statement
batch = BatchStatement(BatchType.LOGGED)
for i in range(10):
batch.add(SimpleStatement("INSERT INTO test3rf.test (k, v) VALUES (%s, %s)"), (i, i))
# Validate that various types of custom payloads are sent and received okay
self.validate_various_custom_payloads(statement=batch)
def test_custom_query_prepared(self):
"""
Test to validate that custom payloads work with prepared queries
creates a batch query and ensures that custom payloads are passed to C*. A custom
query provider is used with C* so we can validate that same custom payloads are sent back
with the results
@since 2.6
@jira_ticket PYTHON-280
@expected_result valid custom payloads should be sent and received
@test_category queries:custom_payload
"""
# Construct prepared statement
prepared = self.session.prepare(
"""
INSERT INTO test3rf.test (k, v) VALUES (?, ?)
""")
bound = prepared.bind((1, None))
# Validate that various custom payloads are validated correctly
self.validate_various_custom_payloads(statement=bound)
def validate_various_custom_payloads(self, statement):
"""
This is a utility method that given a statement will attempt
to submit the statement with various custom payloads. It will
validate that the custom payloads are sent and received correctly.
@param statement The statement to validate the custom queries in conjunction with
"""
# Simple key value
custom_payload = {'test': b'test_return'}
self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload)
# no key value
custom_payload = {'': b''}
self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload)
# Space value
custom_payload = {' ': b' '}
self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload)
# Long key value pair
key_value = "x" * 10
custom_payload = {key_value: six.b(key_value)}
self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload)
# Max supported value key pairs according C* binary protocol v4 should be 65534 (unsigned short max value)
for i in range(65534):
custom_payload[str(i)] = six.b('x')
self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload)
# Add one custom payload to this is too many key value pairs and should fail
custom_payload[str(65535)] = six.b('x')
with self.assertRaises(ValueError):
self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload)
def execute_async_validate_custom_payload(self, statement, custom_payload):
"""
This is just a simple method that submits a statement with a payload, and validates
that the custom payload we submitted matches the one that we got back
@param statement The statement to execute
@param custom_payload The custom payload to submit with
"""
# Submit the statement with our custom payload. Validate the one
# we receive from the server matches
response_future = self.session.execute_async(statement, custom_payload=custom_payload)
response_future.result()
returned_custom_payload = response_future.custom_payload
self.assertEqual(custom_payload, returned_custom_payload)
|
en
| 0.875396
|
# Copyright 2013-2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Test to validate that custom payloads work with simple queries creates a simple query and ensures that custom payloads are passed to C*. A custom query provider is used with C* so we can validate that same custom payloads are sent back with the results @since 2.6 @jira_ticket PYTHON-280 @expected_result valid custom payloads should be sent and received @test_category queries:custom_payload # Create a simple query statement a # Validate that various types of custom payloads are sent and received okay Test to validate that custom payloads work with batch queries creates a batch query and ensures that custom payloads are passed to C*. A custom query provider is used with C* so we can validate that same custom payloads are sent back with the results @since 2.6 @jira_ticket PYTHON-280 @expected_result valid custom payloads should be sent and received @test_category queries:custom_payload # Construct Batch Statement # Validate that various types of custom payloads are sent and received okay Test to validate that custom payloads work with prepared queries creates a batch query and ensures that custom payloads are passed to C*. 
A custom query provider is used with C* so we can validate that same custom payloads are sent back with the results @since 2.6 @jira_ticket PYTHON-280 @expected_result valid custom payloads should be sent and received @test_category queries:custom_payload # Construct prepared statement INSERT INTO test3rf.test (k, v) VALUES (?, ?) # Validate that various custom payloads are validated correctly This is a utility method that given a statement will attempt to submit the statement with various custom payloads. It will validate that the custom payloads are sent and received correctly. @param statement The statement to validate the custom queries in conjunction with # Simple key value # no key value # Space value # Long key value pair # Max supported value key pairs according C* binary protocol v4 should be 65534 (unsigned short max value) # Add one custom payload to this is too many key value pairs and should fail This is just a simple method that submits a statement with a payload, and validates that the custom payload we submitted matches the one that we got back @param statement The statement to execute @param custom_payload The custom payload to submit with # Submit the statement with our custom payload. Validate the one # we receive from the server matches
| 2.197431
| 2
|
pyxedit/xedit/object_classes/ACHR.py
|
leontristain/pyxedit
| 0
|
6629727
|
from pyxedit.xedit.attribute import XEditAttribute
from pyxedit.xedit.generic import XEditGenericObject
class XEditActor(XEditGenericObject):
SIGNATURE = 'ACHR'
data = XEditAttribute('DATA')
position_x = XEditAttribute('DATA\\Position\\X')
position_y = XEditAttribute('DATA\\Position\\X')
position_z = XEditAttribute('DATA\\Position\\Z')
rotation_x = XEditAttribute('DATA\\Rotation\\X')
rotation_y = XEditAttribute('DATA\\Rotation\\Y')
rotation_z = XEditAttribute('DATA\\Rotation\\Z')
@property
def position(self):
return (self.position_x, self.position_y, self.position_z)
@position.setter
def position(self, value):
self.position_x, self.position_y, self.position_z = map(float, value)
@property
def rotation(self):
return (self.rotation_x, self.rotation_y, self.rotation_z)
@rotation.setter
def rotation(self, value):
self.rotation_x, self.rotation_y, self.rotation_z = map(float, value)
|
from pyxedit.xedit.attribute import XEditAttribute
from pyxedit.xedit.generic import XEditGenericObject
class XEditActor(XEditGenericObject):
SIGNATURE = 'ACHR'
data = XEditAttribute('DATA')
position_x = XEditAttribute('DATA\\Position\\X')
position_y = XEditAttribute('DATA\\Position\\X')
position_z = XEditAttribute('DATA\\Position\\Z')
rotation_x = XEditAttribute('DATA\\Rotation\\X')
rotation_y = XEditAttribute('DATA\\Rotation\\Y')
rotation_z = XEditAttribute('DATA\\Rotation\\Z')
@property
def position(self):
return (self.position_x, self.position_y, self.position_z)
@position.setter
def position(self, value):
self.position_x, self.position_y, self.position_z = map(float, value)
@property
def rotation(self):
return (self.rotation_x, self.rotation_y, self.rotation_z)
@rotation.setter
def rotation(self, value):
self.rotation_x, self.rotation_y, self.rotation_z = map(float, value)
|
none
| 1
| 2.265981
| 2
|
|
tests/test_cli.py
|
s-weigand/verbose_version_info
| 2
|
6629728
|
<reponame>s-weigand/verbose_version_info
"""Tests for the CLI"""
import re
import sys
import pytest
from _pytest.monkeypatch import MonkeyPatch
from typer.testing import CliRunner
from verbose_version_info.cli import cli
def test_missing_cli_extra_requires(monkeypatch: MonkeyPatch):
"""Exception raised if cli extra_requires is missing"""
monkeypatch.delitem(sys.modules, "verbose_version_info.cli")
monkeypatch.setitem(sys.modules, "typer", None)
with pytest.raises(ImportError, match=r"pip install verbose-version-info\[cli\]"):
import verbose_version_info.cli # noqa: F401
def test_command_line_interface():
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(cli)
assert result.exit_code == 0
assert "Not yet Implemented!" in result.output
help_result = runner.invoke(cli, ["--help"])
assert help_result.exit_code == 0
assert re.search(r"--help\s+Show this message and exit\.", help_result.output) is not None
|
"""Tests for the CLI"""
import re
import sys
import pytest
from _pytest.monkeypatch import MonkeyPatch
from typer.testing import CliRunner
from verbose_version_info.cli import cli
def test_missing_cli_extra_requires(monkeypatch: MonkeyPatch):
"""Exception raised if cli extra_requires is missing"""
monkeypatch.delitem(sys.modules, "verbose_version_info.cli")
monkeypatch.setitem(sys.modules, "typer", None)
with pytest.raises(ImportError, match=r"pip install verbose-version-info\[cli\]"):
import verbose_version_info.cli # noqa: F401
def test_command_line_interface():
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(cli)
assert result.exit_code == 0
assert "Not yet Implemented!" in result.output
help_result = runner.invoke(cli, ["--help"])
assert help_result.exit_code == 0
assert re.search(r"--help\s+Show this message and exit\.", help_result.output) is not None
|
en
| 0.684343
|
Tests for the CLI Exception raised if cli extra_requires is missing # noqa: F401 Test the CLI.
| 2.37765
| 2
|
estruturas_controle_projetos/fibonacci_v4.py
|
pethersonmoreno/python-study
| 0
|
6629729
|
<reponame>pethersonmoreno/python-study
#!/usr/local/bin/python3
# 0, 1, 1, 2, 3, 5, 8, 13, 21...
def fibonacci(limite):
resultado = [0, 1]
while resultado[-1] < limite:
resultado.append(resultado[-2] + resultado[-1])
return resultado
if __name__ == '__main__':
for fib in fibonacci(10000):
print(fib)
|
#!/usr/local/bin/python3
# 0, 1, 1, 2, 3, 5, 8, 13, 21...
def fibonacci(limite):
resultado = [0, 1]
while resultado[-1] < limite:
resultado.append(resultado[-2] + resultado[-1])
return resultado
if __name__ == '__main__':
for fib in fibonacci(10000):
print(fib)
|
en
| 0.428027
|
#!/usr/local/bin/python3 # 0, 1, 1, 2, 3, 5, 8, 13, 21...
| 3.89453
| 4
|
core/controllers/email_dashboard_test.py
|
yash10019coder/oppia
| 5
|
6629730
|
<filename>core/controllers/email_dashboard_test.py
# Copyright 2016 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for email dashboard handler."""
from __future__ import annotations
from core import feconf
from core.domain import user_query_services
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
(user_models, email_models) = models.Registry.import_models(
[models.NAMES.user, models.NAMES.email])
class EmailDashboardDataHandlerTests(test_utils.GenericTestBase):
    """Tests for the email dashboard page, the query-status-check handler
    and the email dashboard data handler.
    """

    SUBMITTER_EMAIL = '<EMAIL>'
    SUBMITTER_USERNAME = 'submit'
    USER_A_EMAIL = '<EMAIL>'
    USER_A_USERNAME = 'a'
    # Payload used when saving a sample user query via
    # user_query_services.save_new_user_query.
    SAMPLE_QUERY_PARAM = {
        'inactive_in_last_n_days': 10,
        'created_at_least_n_exps': 5,
        'has_not_logged_in_for_n_days': 30
    }

    def setUp(self):
        """Signs up a query submitter (made curriculum admin) and one
        regular user.
        """
        super(EmailDashboardDataHandlerTests, self).setUp()
        self.signup(self.SUBMITTER_EMAIL, self.SUBMITTER_USERNAME)
        self.submitter_id = self.get_user_id_from_email(
            self.SUBMITTER_EMAIL)
        self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME)
        self.user_a_id = self.get_user_id_from_email(
            self.USER_A_EMAIL)
        self.set_curriculum_admins([self.SUBMITTER_USERNAME])

    def test_query_status_check_handler_with_invalid_query_id_raises_400(
            self):
        self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
        response = self.get_json(
            '/querystatuscheck', params={'query_id': 'invalid_query_id'},
            expected_status_int=400)
        self.assertEqual(response['error'], 'Invalid query id.')
        self.logout()

    def test_query_status_check_handler(self):
        self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
        user_query_id = user_query_services.save_new_user_query(
            self.submitter_id, self.SAMPLE_QUERY_PARAM)
        query_data = self.get_json(
            '/querystatuscheck', params={'query_id': user_query_id})['query']
        self.assertEqual(query_data['id'], user_query_id)
        self.assertEqual(
            query_data['status'], feconf.USER_QUERY_STATUS_PROCESSING)
        self.assertEqual(
            query_data['submitter_username'], self.SUBMITTER_USERNAME)
        # The response must not leak the internal submitter id.
        self.assertNotIn('submitter_id', query_data)
        self.logout()

    def test_that_page_is_accessible_to_authorised_users_only(self):
        # Make sure that only authorised users can access query pages.
        self.login(self.USER_A_EMAIL)
        with self.assertRaisesRegex(Exception, '401 Unauthorized'):
            self.get_html_response('/emaildashboard')
        with self.assertRaisesRegex(Exception, '401 Unauthorized'):
            self.get_html_response(
                '/querystatuscheck?query_id=%s' % 'valid_query_id')
        self.logout()

    def test_that_exception_is_raised_for_invalid_input(self):
        self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
        csrf_token = self.get_new_csrf_token()
        # 'fake_key' is not an accepted predicate, and
        # 'created_fewer_than_n_exps' has a non-integer value, so the
        # handler should reject the payload with a 400.
        self.post_json(
            '/emaildashboarddatahandler', {
                'data': {
                    'has_not_logged_in_for_n_days': 2,
                    'inactive_in_last_n_days': 5,
                    'created_at_least_n_exps': 1,
                    'created_fewer_than_n_exps': 'None',
                    'edited_at_least_n_exps': None,
                    'created_collection': True,
                    'fake_key': 2
                }}, csrf_token=csrf_token, expected_status_int=400)
        self.logout()

    def test_starting_job(self):
        self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
        csrf_token = self.get_new_csrf_token()
        self.post_json(
            '/emaildashboarddatahandler', {
                'data': {
                    'has_not_logged_in_for_n_days': 2,
                    'inactive_in_last_n_days': 5,
                    'created_at_least_n_exps': 1,
                    'created_fewer_than_n_exps': None,
                    'edited_at_least_n_exps': None,
                    'created_collection': True,
                }}, csrf_token=csrf_token)
        self.logout()

    def test_email_dashboard_page(self):
        self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
        response = self.get_html_response('/emaildashboard')
        self.assertIn(b'{"title": "Email Dashboard - Oppia"})', response.body)
        self.logout()
class EmailDashboardResultTests(test_utils.EmailTestBase):
    """Tests for email dashboard result handler."""

    USER_A_EMAIL = '<EMAIL>'
    USER_A_USERNAME = 'a'
    USER_B_EMAIL = '<EMAIL>'
    USER_B_USERNAME = 'b'
    SUBMITTER_EMAIL = '<EMAIL>'
    SUBMITTER_USERNAME = 'submit'
    NEW_SUBMITTER_EMAIL = '<EMAIL>'
    NEW_SUBMITTER_USERNAME = 'submit2'
    EXP_ID_1 = 'exp_1'
    EXP_ID_2 = 'exp_2'
    # Payload used when saving a sample user query.
    SAMPLE_QUERY_PARAM = {
        'inactive_in_last_n_days': 10,
        'created_at_least_n_exps': 5,
        'has_not_logged_in_for_n_days': 30
    }

    def setUp(self):
        """Signs up two exploration creators (potential query recipients)
        and two query submitters, both made curriculum admins.
        """
        super(EmailDashboardResultTests, self).setUp()
        # User A has one created exploration.
        self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME)
        self.signup(feconf.SYSTEM_EMAIL_ADDRESS, 'systemUser')
        self.user_a_id = self.get_user_id_from_email(
            self.USER_A_EMAIL)
        user_services.update_email_preferences(
            self.user_a_id, True, True, True, True)
        self.save_new_valid_exploration(
            self.EXP_ID_1, self.user_a_id, end_state_name='End')
        # User B has one created exploration.
        self.signup(self.USER_B_EMAIL, self.USER_B_USERNAME)
        self.user_b_id = self.get_user_id_from_email(
            self.USER_B_EMAIL)
        user_services.update_email_preferences(
            self.user_b_id, True, True, True, True)
        self.save_new_valid_exploration(
            self.EXP_ID_2, self.user_b_id, end_state_name='End')
        # Submitter and new_submitter are submitter of query.
        self.signup(self.SUBMITTER_EMAIL, self.SUBMITTER_USERNAME)
        self.submitter_id = self.get_user_id_from_email(
            self.SUBMITTER_EMAIL)
        self.signup(self.NEW_SUBMITTER_EMAIL, self.NEW_SUBMITTER_USERNAME)
        self.new_submitter_id = self.get_user_id_from_email(
            self.NEW_SUBMITTER_EMAIL)
        self.set_curriculum_admins(
            [self.SUBMITTER_USERNAME, self.NEW_SUBMITTER_USERNAME])

    def test_email_dashboard_result_page(self):
        self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
        query_id = user_models.UserQueryModel.get_new_id('')
        user_models.UserQueryModel(
            id=query_id, inactive_in_last_n_days=10,
            has_not_logged_in_for_n_days=30,
            created_at_least_n_exps=5,
            created_fewer_than_n_exps=None,
            edited_at_least_n_exps=None,
            edited_fewer_than_n_exps=None,
            submitter_id=self.submitter_id,
            query_status=feconf.USER_QUERY_STATUS_COMPLETED,
            user_ids=[]).put()
        response = self.get_html_response('/emaildashboardresult/%s' % query_id)
        self.assertIn(
            b'{"title": "Email Dashboard Result - Oppia"})', response.body)
        self.logout()

    def test_email_dashboard_result_page_with_invalid_query_id_raises_400(self):
        self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
        response = self.get_html_response(
            '/emaildashboardresult/aaa', expected_status_int=400)
        self.assertIn(
            b'<oppia-error-page-root></oppia-error-page-root>', response.body)
        self.logout()

    def test_email_dashboard_result_page_with_invalid_user_raises_401(self):
        # new_submitter is not the submitter of this query, so the result
        # page must be denied to them even though they are an admin.
        self.login(self.NEW_SUBMITTER_EMAIL, is_super_admin=True)
        query_id = user_models.UserQueryModel.get_new_id('')
        user_models.UserQueryModel(
            id=query_id, inactive_in_last_n_days=10,
            has_not_logged_in_for_n_days=30,
            created_at_least_n_exps=5,
            created_fewer_than_n_exps=None,
            edited_at_least_n_exps=None,
            edited_fewer_than_n_exps=None,
            submitter_id=self.submitter_id,
            query_status=feconf.USER_QUERY_STATUS_COMPLETED,
            user_ids=[]
        ).put()
        response = self.get_html_response(
            '/emaildashboardresult/%s' % query_id, expected_status_int=401)
        self.assertIn(
            b'<oppia-error-page-root></oppia-error-page-root>', response.body)
        self.logout()

    def test_email_dashboard_result_post_passes(self):
        self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
        query_id = user_models.UserQueryModel.get_new_id('')
        query_model = user_models.UserQueryModel(
            id=query_id, inactive_in_last_n_days=10,
            has_not_logged_in_for_n_days=30,
            created_at_least_n_exps=5,
            created_fewer_than_n_exps=None,
            edited_at_least_n_exps=None,
            edited_fewer_than_n_exps=None,
            submitter_id=self.submitter_id,
            query_status=feconf.USER_QUERY_STATUS_COMPLETED,
            user_ids=[self.user_a_id, self.user_b_id]
        )
        query_model.put()
        with self.swap(feconf, 'CAN_SEND_EMAILS', True):
            # Send email from email dashboard result page.
            self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
            csrf_token = self.get_new_csrf_token()
            self.post_json(
                '/emaildashboardresult/%s' % query_id, {
                    'email_subject': 'subject',
                    'email_body': 'body',
                    'max_recipients': None,
                    'email_intent': 'bulk_email_create_exploration'
                }, csrf_token=csrf_token)
            self.logout()

            # Check that emails are sent to qualified users.
            messages_a = self._get_sent_email_messages(
                self.USER_A_EMAIL)
            self.assertEqual(len(messages_a), 1)
            self.assertEqual(messages_a[0].html, 'body')
            self.assertEqual(messages_a[0].body, 'body')
            messages_b = self._get_sent_email_messages(
                self.USER_B_EMAIL)
            self.assertEqual(len(messages_b), 1)
            self.assertEqual(messages_b[0].html, 'body')
            self.assertEqual(messages_b[0].body, 'body')

            # Check that correct email model is stored in backend.
            query_model = user_models.UserQueryModel.get_by_id(query_id)
            sent_email_model = email_models.BulkEmailModel.get(
                query_model.sent_email_model_id)
            self.assertEqual(
                sent_email_model.subject, 'subject')
            self.assertEqual(
                sent_email_model.html_body, 'body')
            self.assertEqual(
                sent_email_model.sender_id, self.submitter_id)
            self.assertEqual(
                sent_email_model.sender_email,
                '%s <%s>' % (self.SUBMITTER_USERNAME, self.SUBMITTER_EMAIL))
            self.assertEqual(
                sent_email_model.intent,
                feconf.BULK_EMAIL_INTENT_CREATE_EXPLORATION)
            # Check that BulkEmailModel id is stored in UserBulkEmailsModel
            # of recipients.
            recipient_a = user_models.UserBulkEmailsModel.get(self.user_a_id)
            self.assertEqual(
                recipient_a.sent_email_model_ids,
                [query_model.sent_email_model_id])
            recipient_b = user_models.UserBulkEmailsModel.get(self.user_b_id)
            self.assertEqual(
                recipient_b.sent_email_model_ids,
                [query_model.sent_email_model_id])

    def test_email_dashboard_result_post_with_invalid_query_id_raises_400(self):
        self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
        csrf_token = self.get_new_csrf_token()
        params = {
            'email_body': 'valid_email_body',
            'email_subject': 'valid_email_subject',
            'email_intent': 'bulk_email_create_exploration',
            'max_recipients': None
        }
        response = self.post_json(
            '/emaildashboardresult/%s' % 'invalid_query_id', params,
            csrf_token=csrf_token, expected_status_int=400)
        self.assertEqual(response['error'], '400 Invalid query id.')
        self.logout()

    def test_email_dashboard_result_post_with_invalid_user_raises_401(self):
        self.login(self.NEW_SUBMITTER_EMAIL, is_super_admin=True)
        query_id = user_models.UserQueryModel.get_new_id('')
        user_models.UserQueryModel(
            id=query_id, inactive_in_last_n_days=10,
            has_not_logged_in_for_n_days=30,
            created_at_least_n_exps=5,
            created_fewer_than_n_exps=None,
            edited_at_least_n_exps=None,
            edited_fewer_than_n_exps=None,
            submitter_id=self.submitter_id,
            query_status=feconf.USER_QUERY_STATUS_COMPLETED,
            user_ids=[]
        ).put()
        csrf_token = self.get_new_csrf_token()
        params = {
            'email_body': 'valid_email_body',
            'email_subject': 'valid_email_subject',
            'email_intent': 'bulk_email_create_exploration',
            'max_recipients': None
        }
        response = self.post_json(
            '/emaildashboardresult/%s' % query_id, params,
            csrf_token=csrf_token, expected_status_int=401)
        self.assertEqual(
            response['error'],
            '%s is not an authorized user for this query.' % (
                self.NEW_SUBMITTER_USERNAME))
        self.logout()

    def test_that_no_emails_are_sent_if_query_is_canceled(self):
        self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
        query_id = user_models.UserQueryModel.get_new_id('')
        user_models.UserQueryModel(
            id=query_id, inactive_in_last_n_days=10,
            has_not_logged_in_for_n_days=30,
            created_at_least_n_exps=5,
            created_fewer_than_n_exps=None,
            edited_at_least_n_exps=None,
            edited_fewer_than_n_exps=None,
            submitter_id=self.submitter_id,
            query_status=feconf.USER_QUERY_STATUS_COMPLETED,
            user_ids=[self.user_a_id, self.user_b_id]
        ).put()
        with self.swap(feconf, 'CAN_SEND_EMAILS', True):
            csrf_token = self.get_new_csrf_token()
            self.post_json(
                '/emaildashboardcancelresult/%s' % query_id, {},
                csrf_token=csrf_token)
            self.logout()

            # Cancelling archives the query and marks it deleted.
            query_model = user_models.UserQueryModel.get_by_id(query_id)
            self.assertEqual(
                query_model.query_status, feconf.USER_QUERY_STATUS_ARCHIVED)
            self.assertTrue(query_model.deleted)

            # Check that no email is sent to qualified users.
            messages_a = self._get_sent_email_messages(
                self.USER_A_EMAIL)
            self.assertEqual(len(messages_a), 0)
            messages_b = self._get_sent_email_messages(
                self.USER_B_EMAIL)
            self.assertEqual(len(messages_b), 0)

    def test_cancel_email_handler_with_invalid_query_id_raises_400(self):
        self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
        csrf_token = self.get_new_csrf_token()
        response = self.post_json(
            '/emaildashboardcancelresult/%s' % 'invalid_query_id', {},
            csrf_token=csrf_token, expected_status_int=400)
        self.assertEqual(response['error'], '400 Invalid query id.')
        self.logout()

    def test_cancel_email_handler_with_invalid_user_raises_401(self):
        self.login(self.NEW_SUBMITTER_EMAIL, is_super_admin=True)
        query_id = user_models.UserQueryModel.get_new_id('')
        user_models.UserQueryModel(
            id=query_id, inactive_in_last_n_days=10,
            has_not_logged_in_for_n_days=30,
            created_at_least_n_exps=5,
            created_fewer_than_n_exps=None,
            edited_at_least_n_exps=None,
            edited_fewer_than_n_exps=None,
            submitter_id=self.submitter_id,
            query_status=feconf.USER_QUERY_STATUS_COMPLETED,
            user_ids=[]
        ).put()
        csrf_token = self.get_new_csrf_token()
        response = self.post_json(
            '/emaildashboardcancelresult/%s' % query_id, {},
            csrf_token=csrf_token, expected_status_int=401)
        self.assertEqual(
            response['error'],
            '%s is not an authorized user for this query.' % (
                self.NEW_SUBMITTER_USERNAME))
        self.logout()

    def test_that_test_email_for_bulk_emails_is_sent(self):
        self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
        query_id = user_models.UserQueryModel.get_new_id('')
        user_models.UserQueryModel(
            id=query_id, inactive_in_last_n_days=10,
            has_not_logged_in_for_n_days=30,
            created_at_least_n_exps=5,
            created_fewer_than_n_exps=None,
            edited_at_least_n_exps=None,
            edited_fewer_than_n_exps=None,
            submitter_id=self.submitter_id,
            query_status=feconf.USER_QUERY_STATUS_COMPLETED,
            user_ids=[self.user_a_id, self.user_b_id]
        ).put()
        with self.swap(feconf, 'CAN_SEND_EMAILS', True):
            email_subject = 'email_subject'
            email_body = 'email_body'
            # Check that correct test email is sent.
            csrf_token = self.get_new_csrf_token()
            self.post_json(
                '/emaildashboardtestbulkemailhandler/%s' % query_id, {
                    'email_body': email_body,
                    'email_subject': email_subject
                }, csrf_token=csrf_token)
            self.logout()

            query_model = user_models.UserQueryModel.get(query_id)

            # Check that correct test email is sent to submitter of query.
            # One email is sent when query is completed and other is test email.
            test_email_html_body = (
                '[This is a test email.]<br><br> %s' % email_body)
            test_email_text_body = '[This is a test email.]\n\n %s' % email_body

            messages = self._get_sent_email_messages(self.SUBMITTER_EMAIL)
            self.assertEqual(len(messages), 1)
            self.assertEqual(messages[0].html, test_email_html_body)
            self.assertEqual(messages[0].body, test_email_text_body)

            all_model = email_models.SentEmailModel.query().fetch()
            self.assertEqual(len(all_model), 1)
            sent_email_model = all_model[0]
            self.assertEqual(
                sent_email_model.subject, email_subject)
            self.assertEqual(
                sent_email_model.html_body, test_email_html_body)
            self.assertEqual(
                sent_email_model.recipient_id, query_model.submitter_id)
            self.assertEqual(
                sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
            self.assertEqual(
                sent_email_model.intent, feconf.BULK_EMAIL_INTENT_TEST)

    def test_bulk_email_handler_with_invalid_query_id_raises_400(self):
        self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
        csrf_token = self.get_new_csrf_token()
        response = self.post_json(
            '/emaildashboardtestbulkemailhandler/%s' % 'invalid_query_id', {
                'email_subject': 'valid_email_subject',
                'email_body': 'valid_email_body'
            }, csrf_token=csrf_token, expected_status_int=400)
        self.assertEqual(response['error'], '400 Invalid query id.')
        self.logout()

    def test_bulk_email_handler_with_invalid_user_raises_401(self):
        self.login(self.NEW_SUBMITTER_EMAIL, is_super_admin=True)
        query_id = user_models.UserQueryModel.get_new_id('')
        user_models.UserQueryModel(
            id=query_id, inactive_in_last_n_days=10,
            has_not_logged_in_for_n_days=30,
            created_at_least_n_exps=5,
            created_fewer_than_n_exps=None,
            edited_at_least_n_exps=None,
            edited_fewer_than_n_exps=None,
            submitter_id=self.submitter_id,
            query_status=feconf.USER_QUERY_STATUS_COMPLETED,
            user_ids=[]
        ).put()
        csrf_token = self.get_new_csrf_token()
        response = self.post_json(
            '/emaildashboardtestbulkemailhandler/%s' % query_id, {
                'email_subject': 'valid_email_subject',
                'email_body': 'valid_email_body'
            }, csrf_token=csrf_token, expected_status_int=401)
        self.assertEqual(
            response['error'],
            '%s is not an authorized user for this query.' % (
                self.NEW_SUBMITTER_USERNAME))
        self.logout()

    def test_handler_with_invalid_num_queries_to_fetch_raises_error_400(self):
        self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
        response = self.get_json(
            '/emaildashboarddatahandler',
            params={'invalid_param_key': '2'},
            expected_status_int=400)
        error_msg = (
            'Missing key in handler args: num_queries_to_fetch.\n'
            'Found extra args: [\'invalid_param_key\'].')
        self.assertEqual(
            response['error'], error_msg)
        self.logout()

    def test_email_dashboard_data_handler(self):
        self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
        # No queries exist yet, so the recent-queries list is empty.
        response = self.get_json(
            '/emaildashboarddatahandler',
            params={'num_queries_to_fetch': 1})
        self.assertEqual(response['recent_queries'], [])
        user_query_id = user_query_services.save_new_user_query(
            self.submitter_id, self.SAMPLE_QUERY_PARAM)
        response = self.get_json(
            '/emaildashboarddatahandler',
            params={'num_queries_to_fetch': 1})
        self.assertEqual(len(response['recent_queries']), 1)
        recent_query = response['recent_queries'][0]
        self.assertEqual(recent_query['id'], user_query_id)
        self.assertEqual(
            recent_query['status'], feconf.USER_QUERY_STATUS_PROCESSING)
        # The response must not leak the internal submitter id.
        self.assertNotIn('submitter_id', recent_query)
        self.logout()
|
<filename>core/controllers/email_dashboard_test.py
# Copyright 2016 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for email dashboard handler."""
from __future__ import annotations
from core import feconf
from core.domain import user_query_services
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
(user_models, email_models) = models.Registry.import_models(
[models.NAMES.user, models.NAMES.email])
class EmailDashboardDataHandlerTests(test_utils.GenericTestBase):
    """Tests for the email dashboard page, the query-status-check handler
    and the email dashboard data handler.
    """

    SUBMITTER_EMAIL = '<EMAIL>'
    SUBMITTER_USERNAME = 'submit'
    USER_A_EMAIL = '<EMAIL>'
    USER_A_USERNAME = 'a'
    # Payload used when saving a sample user query via
    # user_query_services.save_new_user_query.
    SAMPLE_QUERY_PARAM = {
        'inactive_in_last_n_days': 10,
        'created_at_least_n_exps': 5,
        'has_not_logged_in_for_n_days': 30
    }

    def setUp(self):
        """Signs up a query submitter (made curriculum admin) and one
        regular user.
        """
        super(EmailDashboardDataHandlerTests, self).setUp()
        self.signup(self.SUBMITTER_EMAIL, self.SUBMITTER_USERNAME)
        self.submitter_id = self.get_user_id_from_email(
            self.SUBMITTER_EMAIL)
        self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME)
        self.user_a_id = self.get_user_id_from_email(
            self.USER_A_EMAIL)
        self.set_curriculum_admins([self.SUBMITTER_USERNAME])

    def test_query_status_check_handler_with_invalid_query_id_raises_400(
            self):
        self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
        response = self.get_json(
            '/querystatuscheck', params={'query_id': 'invalid_query_id'},
            expected_status_int=400)
        self.assertEqual(response['error'], 'Invalid query id.')
        self.logout()

    def test_query_status_check_handler(self):
        self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
        user_query_id = user_query_services.save_new_user_query(
            self.submitter_id, self.SAMPLE_QUERY_PARAM)
        query_data = self.get_json(
            '/querystatuscheck', params={'query_id': user_query_id})['query']
        self.assertEqual(query_data['id'], user_query_id)
        self.assertEqual(
            query_data['status'], feconf.USER_QUERY_STATUS_PROCESSING)
        self.assertEqual(
            query_data['submitter_username'], self.SUBMITTER_USERNAME)
        # The response must not leak the internal submitter id.
        self.assertNotIn('submitter_id', query_data)
        self.logout()

    def test_that_page_is_accessible_to_authorised_users_only(self):
        # Make sure that only authorised users can access query pages.
        self.login(self.USER_A_EMAIL)
        with self.assertRaisesRegex(Exception, '401 Unauthorized'):
            self.get_html_response('/emaildashboard')
        with self.assertRaisesRegex(Exception, '401 Unauthorized'):
            self.get_html_response(
                '/querystatuscheck?query_id=%s' % 'valid_query_id')
        self.logout()

    def test_that_exception_is_raised_for_invalid_input(self):
        self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
        csrf_token = self.get_new_csrf_token()
        # 'fake_key' is not an accepted predicate, and
        # 'created_fewer_than_n_exps' has a non-integer value, so the
        # handler should reject the payload with a 400.
        self.post_json(
            '/emaildashboarddatahandler', {
                'data': {
                    'has_not_logged_in_for_n_days': 2,
                    'inactive_in_last_n_days': 5,
                    'created_at_least_n_exps': 1,
                    'created_fewer_than_n_exps': 'None',
                    'edited_at_least_n_exps': None,
                    'created_collection': True,
                    'fake_key': 2
                }}, csrf_token=csrf_token, expected_status_int=400)
        self.logout()

    def test_starting_job(self):
        self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
        csrf_token = self.get_new_csrf_token()
        self.post_json(
            '/emaildashboarddatahandler', {
                'data': {
                    'has_not_logged_in_for_n_days': 2,
                    'inactive_in_last_n_days': 5,
                    'created_at_least_n_exps': 1,
                    'created_fewer_than_n_exps': None,
                    'edited_at_least_n_exps': None,
                    'created_collection': True,
                }}, csrf_token=csrf_token)
        self.logout()

    def test_email_dashboard_page(self):
        self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
        response = self.get_html_response('/emaildashboard')
        self.assertIn(b'{"title": "Email Dashboard - Oppia"})', response.body)
        self.logout()
class EmailDashboardResultTests(test_utils.EmailTestBase):
"""Tests for email dashboard result handler."""
USER_A_EMAIL = '<EMAIL>'
USER_A_USERNAME = 'a'
USER_B_EMAIL = '<EMAIL>'
USER_B_USERNAME = 'b'
SUBMITTER_EMAIL = '<EMAIL>'
SUBMITTER_USERNAME = 'submit'
NEW_SUBMITTER_EMAIL = '<EMAIL>'
NEW_SUBMITTER_USERNAME = 'submit2'
EXP_ID_1 = 'exp_1'
EXP_ID_2 = 'exp_2'
SAMPLE_QUERY_PARAM = {
'inactive_in_last_n_days': 10,
'created_at_least_n_exps': 5,
'has_not_logged_in_for_n_days': 30
}
def setUp(self):
super(EmailDashboardResultTests, self).setUp()
# User A has one created exploration.
self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME)
self.signup(feconf.SYSTEM_EMAIL_ADDRESS, 'systemUser')
self.user_a_id = self.get_user_id_from_email(
self.USER_A_EMAIL)
user_services.update_email_preferences(
self.user_a_id, True, True, True, True)
self.save_new_valid_exploration(
self.EXP_ID_1, self.user_a_id, end_state_name='End')
# User B has one created exploration.
self.signup(self.USER_B_EMAIL, self.USER_B_USERNAME)
self.user_b_id = self.get_user_id_from_email(
self.USER_B_EMAIL)
user_services.update_email_preferences(
self.user_b_id, True, True, True, True)
self.save_new_valid_exploration(
self.EXP_ID_2, self.user_b_id, end_state_name='End')
# Submitter and new_submitter are submitter of query.
self.signup(self.SUBMITTER_EMAIL, self.SUBMITTER_USERNAME)
self.submitter_id = self.get_user_id_from_email(
self.SUBMITTER_EMAIL)
self.signup(self.NEW_SUBMITTER_EMAIL, self.NEW_SUBMITTER_USERNAME)
self.new_submitter_id = self.get_user_id_from_email(
self.NEW_SUBMITTER_EMAIL)
self.set_curriculum_admins(
[self.SUBMITTER_USERNAME, self.NEW_SUBMITTER_USERNAME])
def test_email_dashboard_result_page(self):
self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
query_id = user_models.UserQueryModel.get_new_id('')
user_models.UserQueryModel(
id=query_id, inactive_in_last_n_days=10,
has_not_logged_in_for_n_days=30,
created_at_least_n_exps=5,
created_fewer_than_n_exps=None,
edited_at_least_n_exps=None,
edited_fewer_than_n_exps=None,
submitter_id=self.submitter_id,
query_status=feconf.USER_QUERY_STATUS_COMPLETED,
user_ids=[]).put()
response = self.get_html_response('/emaildashboardresult/%s' % query_id)
self.assertIn(
b'{"title": "Email Dashboard Result - Oppia"})', response.body)
self.logout()
def test_email_dashboard_result_page_with_invalid_query_id_raises_400(self):
self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
response = self.get_html_response(
'/emaildashboardresult/aaa', expected_status_int=400)
self.assertIn(
b'<oppia-error-page-root></oppia-error-page-root>', response.body)
self.logout()
def test_email_dashboard_result_page_with_invalid_user_raises_401(self):
self.login(self.NEW_SUBMITTER_EMAIL, is_super_admin=True)
query_id = user_models.UserQueryModel.get_new_id('')
user_models.UserQueryModel(
id=query_id, inactive_in_last_n_days=10,
has_not_logged_in_for_n_days=30,
created_at_least_n_exps=5,
created_fewer_than_n_exps=None,
edited_at_least_n_exps=None,
edited_fewer_than_n_exps=None,
submitter_id=self.submitter_id,
query_status=feconf.USER_QUERY_STATUS_COMPLETED,
user_ids=[]
).put()
response = self.get_html_response(
'/emaildashboardresult/%s' % query_id, expected_status_int=401)
self.assertIn(
b'<oppia-error-page-root></oppia-error-page-root>', response.body)
self.logout()
def test_email_dashboard_result_post_passes(self):
self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
query_id = user_models.UserQueryModel.get_new_id('')
query_model = user_models.UserQueryModel(
id=query_id, inactive_in_last_n_days=10,
has_not_logged_in_for_n_days=30,
created_at_least_n_exps=5,
created_fewer_than_n_exps=None,
edited_at_least_n_exps=None,
edited_fewer_than_n_exps=None,
submitter_id=self.submitter_id,
query_status=feconf.USER_QUERY_STATUS_COMPLETED,
user_ids=[self.user_a_id, self.user_b_id]
)
query_model.put()
with self.swap(feconf, 'CAN_SEND_EMAILS', True):
# Send email from email dashboard result page.
self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
self.post_json(
'/emaildashboardresult/%s' % query_id, {
'email_subject': 'subject',
'email_body': 'body',
'max_recipients': None,
'email_intent': 'bulk_email_create_exploration'
}, csrf_token=csrf_token)
self.logout()
# Check that emails are sent to qualified users.
messages_a = self._get_sent_email_messages(
self.USER_A_EMAIL)
self.assertEqual(len(messages_a), 1)
self.assertEqual(messages_a[0].html, 'body')
self.assertEqual(messages_a[0].body, 'body')
messages_b = self._get_sent_email_messages(
self.USER_B_EMAIL)
self.assertEqual(len(messages_b), 1)
self.assertEqual(messages_b[0].html, 'body')
self.assertEqual(messages_b[0].body, 'body')
# Check that correct email model is stored in backend.
query_model = user_models.UserQueryModel.get_by_id(query_id)
sent_email_model = email_models.BulkEmailModel.get(
query_model.sent_email_model_id)
self.assertEqual(
sent_email_model.subject, 'subject')
self.assertEqual(
sent_email_model.html_body, 'body')
self.assertEqual(
sent_email_model.sender_id, self.submitter_id)
self.assertEqual(
sent_email_model.sender_email,
'%s <%s>' % (self.SUBMITTER_USERNAME, self.SUBMITTER_EMAIL))
self.assertEqual(
sent_email_model.intent,
feconf.BULK_EMAIL_INTENT_CREATE_EXPLORATION)
# Check that BulkEmailModel id is stored in UsetBulkEmailModel of
# recipients.
recipient_a = user_models.UserBulkEmailsModel.get(self.user_a_id)
self.assertEqual(
recipient_a.sent_email_model_ids,
[query_model.sent_email_model_id])
recipient_b = user_models.UserBulkEmailsModel.get(self.user_b_id)
self.assertEqual(
recipient_b.sent_email_model_ids,
[query_model.sent_email_model_id])
def test_email_dashboard_result_post_with_invalid_query_id_raises_400(self):
self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
params = {
'email_body': 'valid_email_body',
'email_subject': 'valid_email_subject',
'email_intent': 'bulk_email_create_exploration',
'max_recipients': None
}
response = self.post_json(
'/emaildashboardresult/%s' % 'invalid_query_id', params,
csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(response['error'], '400 Invalid query id.')
self.logout()
def test_email_dashboard_result_post_with_invalid_user_raises_401(self):
self.login(self.NEW_SUBMITTER_EMAIL, is_super_admin=True)
query_id = user_models.UserQueryModel.get_new_id('')
user_models.UserQueryModel(
id=query_id, inactive_in_last_n_days=10,
has_not_logged_in_for_n_days=30,
created_at_least_n_exps=5,
created_fewer_than_n_exps=None,
edited_at_least_n_exps=None,
edited_fewer_than_n_exps=None,
submitter_id=self.submitter_id,
query_status=feconf.USER_QUERY_STATUS_COMPLETED,
user_ids=[]
).put()
csrf_token = self.get_new_csrf_token()
params = {
'email_body': 'valid_email_body',
'email_subject': 'valid_email_subject',
'email_intent': 'bulk_email_create_exploration',
'max_recipients': None
}
response = self.post_json(
'/emaildashboardresult/%s' % query_id, params,
csrf_token=csrf_token, expected_status_int=401)
self.assertEqual(
response['error'],
'%s is not an authorized user for this query.' % (
self.NEW_SUBMITTER_USERNAME))
self.logout()
def test_that_no_emails_are_sent_if_query_is_canceled(self):
self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
query_id = user_models.UserQueryModel.get_new_id('')
user_models.UserQueryModel(
id=query_id, inactive_in_last_n_days=10,
has_not_logged_in_for_n_days=30,
created_at_least_n_exps=5,
created_fewer_than_n_exps=None,
edited_at_least_n_exps=None,
edited_fewer_than_n_exps=None,
submitter_id=self.submitter_id,
query_status=feconf.USER_QUERY_STATUS_COMPLETED,
user_ids=[self.user_a_id, self.user_b_id]
).put()
with self.swap(feconf, 'CAN_SEND_EMAILS', True):
csrf_token = self.get_new_csrf_token()
self.post_json(
'/emaildashboardcancelresult/%s' % query_id, {},
csrf_token=csrf_token)
self.logout()
query_model = user_models.UserQueryModel.get_by_id(query_id)
self.assertEqual(
query_model.query_status, feconf.USER_QUERY_STATUS_ARCHIVED)
self.assertTrue(query_model.deleted)
# Check that no email is sent to qualified users.
messages_a = self._get_sent_email_messages(
self.USER_A_EMAIL)
self.assertEqual(len(messages_a), 0)
messages_b = self._get_sent_email_messages(
self.USER_B_EMAIL)
self.assertEqual(len(messages_b), 0)
def test_cancel_email_handler_with_invalid_query_id_raises_400(self):
self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
response = self.post_json(
'/emaildashboardcancelresult/%s' % 'invalid_query_id', {},
csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(response['error'], '400 Invalid query id.')
self.logout()
def test_cancel_email_handler_with_invalid_user_raises_401(self):
self.login(self.NEW_SUBMITTER_EMAIL, is_super_admin=True)
query_id = user_models.UserQueryModel.get_new_id('')
user_models.UserQueryModel(
id=query_id, inactive_in_last_n_days=10,
has_not_logged_in_for_n_days=30,
created_at_least_n_exps=5,
created_fewer_than_n_exps=None,
edited_at_least_n_exps=None,
edited_fewer_than_n_exps=None,
submitter_id=self.submitter_id,
query_status=feconf.USER_QUERY_STATUS_COMPLETED,
user_ids=[]
).put()
csrf_token = self.get_new_csrf_token()
response = self.post_json(
'/emaildashboardcancelresult/%s' % query_id, {},
csrf_token=csrf_token, expected_status_int=401)
self.assertEqual(
response['error'],
'%s is not an authorized user for this query.' % (
self.NEW_SUBMITTER_USERNAME))
self.logout()
def test_that_test_email_for_bulk_emails_is_sent(self):
self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
query_id = user_models.UserQueryModel.get_new_id('')
user_models.UserQueryModel(
id=query_id, inactive_in_last_n_days=10,
has_not_logged_in_for_n_days=30,
created_at_least_n_exps=5,
created_fewer_than_n_exps=None,
edited_at_least_n_exps=None,
edited_fewer_than_n_exps=None,
submitter_id=self.submitter_id,
query_status=feconf.USER_QUERY_STATUS_COMPLETED,
user_ids=[self.user_a_id, self.user_b_id]
).put()
with self.swap(feconf, 'CAN_SEND_EMAILS', True):
email_subject = 'email_subject'
email_body = 'email_body'
# Check that correct test email is sent.
csrf_token = self.get_new_csrf_token()
self.post_json(
'/emaildashboardtestbulkemailhandler/%s' % query_id, {
'email_body': email_body,
'email_subject': email_subject
}, csrf_token=csrf_token)
self.logout()
query_model = user_models.UserQueryModel.get(query_id)
# Check that correct test email is sent to submitter of query.
# One email is sent when query is completed and other is test email.
test_email_html_body = (
'[This is a test email.]<br><br> %s' % email_body)
test_email_text_body = '[This is a test email.]\n\n %s' % email_body
messages = self._get_sent_email_messages(self.SUBMITTER_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, test_email_html_body)
self.assertEqual(messages[0].body, test_email_text_body)
all_model = email_models.SentEmailModel.query().fetch()
self.assertEqual(len(all_model), 1)
sent_email_model = all_model[0]
self.assertEqual(
sent_email_model.subject, email_subject)
self.assertEqual(
sent_email_model.html_body, test_email_html_body)
self.assertEqual(
sent_email_model.recipient_id, query_model.submitter_id)
self.assertEqual(
sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
self.assertEqual(
sent_email_model.intent, feconf.BULK_EMAIL_INTENT_TEST)
def test_bulk_email_handler_with_invalid_query_id_raises_400(self):
self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
csrf_token = self.get_new_csrf_token()
response = self.post_json(
'/emaildashboardtestbulkemailhandler/%s' % 'invalid_query_id', {
'email_subject': 'valid_email_subject',
'email_body': 'valid_email_body'
}, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(response['error'], '400 Invalid query id.')
self.logout()
def test_bulk_email_handler_with_invalid_user_raises_401(self):
self.login(self.NEW_SUBMITTER_EMAIL, is_super_admin=True)
query_id = user_models.UserQueryModel.get_new_id('')
user_models.UserQueryModel(
id=query_id, inactive_in_last_n_days=10,
has_not_logged_in_for_n_days=30,
created_at_least_n_exps=5,
created_fewer_than_n_exps=None,
edited_at_least_n_exps=None,
edited_fewer_than_n_exps=None,
submitter_id=self.submitter_id,
query_status=feconf.USER_QUERY_STATUS_COMPLETED,
user_ids=[]
).put()
csrf_token = self.get_new_csrf_token()
response = self.post_json(
'/emaildashboardtestbulkemailhandler/%s' % query_id, {
'email_subject': 'valid_email_subject',
'email_body': 'valid_email_body'
}, csrf_token=csrf_token, expected_status_int=401)
self.assertEqual(
response['error'],
'%s is not an authorized user for this query.' % (
self.NEW_SUBMITTER_USERNAME))
self.logout()
def test_handler_with_invalid_num_queries_to_fetch_raises_error_400(self):
        """Omitting the required 'num_queries_to_fetch' param (and sending an
        unexpected one instead) must produce an HTTP 400 schema error."""
        self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
        response = self.get_json(
            '/emaildashboarddatahandler',
            params={'invalid_param_key': '2'},
            expected_status_int=400)
        # The handler reports both the missing key and the extra key.
        error_msg = (
            'Missing key in handler args: num_queries_to_fetch.\n'
            'Found extra args: [\'invalid_param_key\'].')
        self.assertEqual(
            response['error'], error_msg)
        self.logout()
def test_email_dashboard_data_handler(self):
        """The data handler returns recent queries: empty before any query is
        saved, then one PROCESSING entry after a query is created."""
        self.login(self.SUBMITTER_EMAIL, is_super_admin=True)
        # No queries saved yet -> empty list.
        response = self.get_json(
            '/emaildashboarddatahandler',
            params={'num_queries_to_fetch': 1})
        self.assertEqual(response['recent_queries'], [])
        # Save one query and fetch again.
        user_query_id = user_query_services.save_new_user_query(
            self.submitter_id, self.SAMPLE_QUERY_PARAM)
        response = self.get_json(
            '/emaildashboarddatahandler',
            params={'num_queries_to_fetch': 1})
        self.assertEqual(len(response['recent_queries']), 1)
        recent_query = response['recent_queries'][0]
        self.assertEqual(recent_query['id'], user_query_id)
        self.assertEqual(
            recent_query['status'], feconf.USER_QUERY_STATUS_PROCESSING)
        # The submitter id must not be leaked in the response payload.
        self.assertNotIn('submitter_id', recent_query)
        self.logout()
|
en
| 0.904554
|
# Copyright 2016 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Tests for email dashboard handler. # Make sure that only authorised users can access query pages. Tests for email dashboard result handler. # User A has one created exploration. # User B has one created exploration. # Submitter and new_submitter are submitter of query. # Send email from email dashboard result page. # Check that emails are sent to qualified users. # Check that correct email model is stored in backend. # Check that BulkEmailModel id is stored in UsetBulkEmailModel of # recipients. # Check that no email is sent to qualified users. # Check that correct test email is sent. # Check that correct test email is sent to submitter of query. # One email is sent when query is completed and other is test email.
| 2.031169
| 2
|
regression/test.py
|
abhishekiitm/age-estimator-bot
| 0
|
6629731
|
"""
This code is adapted from the Github repository shared by user Nicholasli1995 (<NAME>) at https://github.com/Nicholasli1995/VisualizingNDF
Age estimation below uses the pretrained model shared in the Github repo mentioned above. All copyrights belong to them.
Copyright (c) 2019 <NAME>
Please check out their awesome repository as well as their papers. Links are in the Github repository.
Lastly, big thanks to them for open sourcing their code under the MIT License!
"""
import torch
import argparse
import vis_utils
from data_prepare import prepare_db
from pathlib import Path
import torch.utils.data
import torchvision.transforms.functional as transform_f
import imageio as io
import numpy as np
import logging
import PIL
import cv2
import urllib
def pred_age_from_image_url(url):
    """Estimate the age of the first face found in the image at *url*.

    Args:
        url: HTTP(S) URL of an image containing at least one face.

    Returns:
        Predicted age in years (float), or the string "Error" when no
        face is detected in the image.
    """
    # BUG FIX: the file only does `import urllib`, which does NOT make
    # `urllib.request` available in Python 3 — import the submodule here.
    import urllib.request

    # Load the pretrained NDF age-estimation model onto the GPU.
    model_path = "../pre-trained/CACD_MAE_4.59.pth"
    model = torch.load(model_path)
    model.cuda()
    # Normalization statistics — presumably those used at training time
    # for this checkpoint (TODO confirm against the upstream repo).
    mean = [0.432, 0.359, 0.320]
    std = [0.30, 0.264, 0.252]

    # Fetch the image bytes and decode them unchanged (cv2 flag -1).
    req = urllib.request.urlopen(url)
    arr = np.asarray(bytearray(req.read()), dtype=np.uint8)
    image = cv2.imdecode(arr, -1)  # 'Load it as it is'

    # Detect faces with a Haar cascade on the grayscale image.
    # Note: CascadeClassifier(path) already loads the XML file; the extra
    # .load() call in the original was redundant and has been removed.
    face_cascade = cv2.CascadeClassifier("../haarcascade_frontalface_default.xml")
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)

    # Return error if no faces are detected.
    if len(faces) == 0:
        return "Error"

    # Use only the first detected face (matches the original break-on-first).
    x, y, w, h = faces[0]
    crop = image[y:y + h, x:x + w]

    # Resize, convert BGR (OpenCV) -> RGB (PIL/torchvision), and normalize.
    resized_crop = cv2.resize(crop, (256, 256))
    image = cv2.cvtColor(resized_crop, cv2.COLOR_BGR2RGB)
    image = PIL.Image.fromarray(image)
    image = transform_f.to_tensor(image)
    image = transform_f.normalize(image, mean=mean, std=std)
    sample = image.unsqueeze(0)

    # The model outputs a normalized age; scale by 90 to get years
    # (scaling factor taken from the original code — TODO confirm upstream).
    pred, _, cache = model(sample.cuda(), save_flag=True)
    return pred.data.item() * 90
|
"""
This code is adapted from the Github repository shared by user Nicholasli1995 (<NAME>) at https://github.com/Nicholasli1995/VisualizingNDF
Age estimation below uses the pretrained model shared in the Github repo mentioned above. All copyrights belong to them.
Copyright (c) 2019 <NAME>
Please check out their awesome repository as well as their papers. Links are in the Github repository.
Lastly, big thanks to them for open sourcing their code under the MIT License!
"""
import torch
import argparse
import vis_utils
from data_prepare import prepare_db
from pathlib import Path
import torch.utils.data
import torchvision.transforms.functional as transform_f
import imageio as io
import numpy as np
import logging
import PIL
import cv2
import urllib
def pred_age_from_image_url(url):
# load model
model_path = "../pre-trained/CACD_MAE_4.59.pth"
model = torch.load(model_path)
model.cuda()
mean = [0.432, 0.359, 0.320]
std = [0.30, 0.264, 0.252]
req = urllib.request.urlopen(url)
arr = np.asarray(bytearray(req.read()), dtype=np.uint8)
image = cv2.imdecode(arr, -1) # 'Load it as it is'
# image_path = '../test/t4.jpg'
# image = cv2.imread(image_path)
face_cascade = cv2.CascadeClassifier("../haarcascade_frontalface_default.xml")
face_cascade.load('../haarcascade_frontalface_default.xml')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
# return error if no faces are detected
if len(faces) == 0:
return "Error"
for (x, y, w, h) in faces:
crop = image[y:y + h, x:x + w]
break
resized_crop = cv2.resize(crop, (256, 256))
# cv2.imshow('crop', resized_crop)
image = cv2.cvtColor(resized_crop, cv2.COLOR_BGR2RGB)
image = PIL.Image.fromarray(image)
# image = PIL.Image.open(image_path)
image = transform_f.to_tensor(image)
image = transform_f.normalize(image, mean=mean, std=std)
sample = image.unsqueeze(0)
# predict
pred, _, cache = model(sample.cuda(), save_flag = True)
return (pred.data.item()*90)
|
en
| 0.901047
|
This code is adapted from the Github repository shared by user Nicholasli1995 (<NAME>) at https://github.com/Nicholasli1995/VisualizingNDF Age estimation below uses the pretrained model shared in the Github repo mentioned above. All copyrights belong to them. Copyright (c) 2019 <NAME> Please check out their awesome repository as well as their papers. Links are in the Github repository. Lastly, big thanks to them for open sourcing their code under the MIT License! # load model # 'Load it as it is' # image_path = '../test/t4.jpg' # image = cv2.imread(image_path) # return error if no faces are detected # cv2.imshow('crop', resized_crop) # image = PIL.Image.open(image_path) # predict
| 2.462223
| 2
|
tests/test_model_spec.py
|
lorentzenchr/formulaic
| 95
|
6629732
|
<filename>tests/test_model_spec.py
from collections import OrderedDict
import pytest
import pandas
from formulaic import Formula
class TestModelSpec:
    """Tests for the ModelSpec produced by materializing a Formula."""
    @pytest.fixture
    def data(self):
        # Fixture used to BUILD the model spec: categorical A has 3 levels.
        return pandas.DataFrame(
            {
                "A": ["a", "b", "c"],
                "a": [0, 0, 1],
            }
        )
    @pytest.fixture
    def data2(self):
        # Fixture used to APPLY the spec to new data where A has only one
        # observed level — the spec must still emit all trained columns.
        return pandas.DataFrame(
            {
                "A": ["a", "a", "a"],
                "a": [0, 0, 1],
            }
        )
    @pytest.fixture
    def formula(self):
        return Formula("a + A + a:A")
    @pytest.fixture
    def model_spec(self, formula, data):
        # The model spec is a by-product of materializing the formula.
        return formula.get_model_matrix(data).model_spec
    def test_attributes(self, model_spec):
        """Spec records formula, rank handling, materializer, and the
        feature/term bookkeeping in training order."""
        assert model_spec.formula == Formula("a + A + a:A")
        assert model_spec.ensure_full_rank is True
        assert model_spec.materializer == "pandas"
        assert model_spec.feature_names == [
            "Intercept",
            "A[T.b]",
            "A[T.c]",
            "a",
            "A[T.b]:a",
            "A[T.c]:a",
        ]
        assert model_spec.feature_indices == OrderedDict(
            [
                ("Intercept", 0),
                ("A[T.b]", 1),
                ("A[T.c]", 2),
                ("a", 3),
                ("A[T.b]:a", 4),
                ("A[T.c]:a", 5),
            ]
        )
        assert model_spec.term_slices == OrderedDict(
            [
                ("1", slice(0, 1)),
                ("A", slice(1, 3)),
                ("a", slice(3, 4)),
                ("A:a", slice(4, 6)),
            ]
        )
    def test_get_model_matrix(self, model_spec, data2):
        """Applying the spec to new data keeps the trained columns, even
        when the spec's materializer attribute is cleared."""
        m = model_spec.get_model_matrix(data2)
        assert isinstance(m, pandas.DataFrame)
        assert list(m.columns) == model_spec.feature_names
        model_spec.materializer = None
        m2 = model_spec.get_model_matrix(data2)
        assert isinstance(m2, pandas.DataFrame)
        assert list(m2.columns) == model_spec.feature_names
    def test_differentiate(self, model_spec, formula):
        """Differentiating the spec matches differentiating the formula."""
        assert model_spec.differentiate("a").formula == formula.differentiate("a")
|
<filename>tests/test_model_spec.py
from collections import OrderedDict
import pytest
import pandas
from formulaic import Formula
class TestModelSpec:
@pytest.fixture
def data(self):
return pandas.DataFrame(
{
"A": ["a", "b", "c"],
"a": [0, 0, 1],
}
)
@pytest.fixture
def data2(self):
return pandas.DataFrame(
{
"A": ["a", "a", "a"],
"a": [0, 0, 1],
}
)
@pytest.fixture
def formula(self):
return Formula("a + A + a:A")
@pytest.fixture
def model_spec(self, formula, data):
return formula.get_model_matrix(data).model_spec
def test_attributes(self, model_spec):
assert model_spec.formula == Formula("a + A + a:A")
assert model_spec.ensure_full_rank is True
assert model_spec.materializer == "pandas"
assert model_spec.feature_names == [
"Intercept",
"A[T.b]",
"A[T.c]",
"a",
"A[T.b]:a",
"A[T.c]:a",
]
assert model_spec.feature_indices == OrderedDict(
[
("Intercept", 0),
("A[T.b]", 1),
("A[T.c]", 2),
("a", 3),
("A[T.b]:a", 4),
("A[T.c]:a", 5),
]
)
assert model_spec.term_slices == OrderedDict(
[
("1", slice(0, 1)),
("A", slice(1, 3)),
("a", slice(3, 4)),
("A:a", slice(4, 6)),
]
)
def test_get_model_matrix(self, model_spec, data2):
m = model_spec.get_model_matrix(data2)
assert isinstance(m, pandas.DataFrame)
assert list(m.columns) == model_spec.feature_names
model_spec.materializer = None
m2 = model_spec.get_model_matrix(data2)
assert isinstance(m2, pandas.DataFrame)
assert list(m2.columns) == model_spec.feature_names
def test_differentiate(self, model_spec, formula):
assert model_spec.differentiate("a").formula == formula.differentiate("a")
|
none
| 1
| 2.302783
| 2
|
|
tests/test.ui.013.graph.py
|
ceccopierangiolieugenio/py-ttk
| 0
|
6629733
|
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2021 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, os, argparse, math, random
sys.path.append(os.path.join(sys.path[0],'..'))
import TermTk as ttk
def demoGraph(root= None):
    """Build a 2x2 grid of TTkGraph widgets, each fed by a periodic timer
    with a different waveform, and return the containing frame."""
    frame = ttk.TTkFrame(parent=root, border=False, layout=ttk.TTkGridLayout())
    graphWidget1 = ttk.TTkGraph(color=ttk.TTkColor.fg('#00dddd', modifier=ttk.TTkColorGradient(increment=-20)))
    graphWidget2 = ttk.TTkGraph(direction=ttk.TTkK.LEFT, color=ttk.TTkColor.fg('#ffdd00', modifier=ttk.TTkColorGradient(increment= 10)))
    graphWidget3 = ttk.TTkGraph(color=ttk.TTkColor.fg('#dd00dd', modifier=ttk.TTkColorGradient(increment=-10)))
    graphWidget4 = ttk.TTkGraph(color=ttk.TTkColor.fg('#00dd44', modifier=ttk.TTkColorGradient(increment=-15)))
    frame.layout().addWidget(graphWidget1, 0,0)
    frame.layout().addWidget(graphWidget2, 0,1)
    frame.layout().addWidget(graphWidget3, 1,0)
    frame.layout().addWidget(graphWidget4, 1,1)
    class timerEvent():
        # Drives one graph widget: pushes a new value every 0.1s.
        # `type` selects the waveform (1..4, see timerEvent below).
        def __init__(self, w, type):
            self.timer = ttk.TTkTimer()
            self.val = 10
            self.switch = False
            self.w = w
            self.type = type
            self.timer.timeout.connect(self.timerEvent)
            self.timer.start(1)
        @ttk.pyTTkSlot()
        def timerEvent(self):
            # `switch` alternates each tick to interleave two waveforms.
            self.switch = not self.switch
            if self.type == 1: # Simple sin
                val = math.sin(self.val*math.pi/40)*4*10
            if self.type == 2: # Double sin
                offset = 15
                if self.switch: val = math.sin(self.val*math.pi/40)*4*10
                else: val = math.sin((self.val+offset)*math.pi/40)*4*7
            if self.type == 3: # random
                val = random.uniform(-40,+40)
            if self.type == 4: # mix rand and sin
                if self.switch: val = math.sin(self.val*math.pi/40)*4*10
                else: val = random.uniform(-40,+40)
            self.val+=1
            self.w.addValue(val)
            # Re-arm the timer: one new sample every 0.1 seconds.
            self.timer.start(0.1)
    timerEvent(graphWidget1, 1)
    timerEvent(graphWidget2, 2)
    timerEvent(graphWidget3, 3)
    timerEvent(graphWidget4, 4)
    return frame
def main():
    """Parse CLI options, build the graph demo UI and run the main loop."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-f', help='Full Screen', action='store_true')
    options = arg_parser.parse_args()
    ttk.TTkLog.use_default_file_logging()
    root = ttk.TTk()
    if options.f:
        # Full screen: lay the demo out directly on the root widget.
        root.setLayout(ttk.TTkGridLayout())
        container = root
    else:
        # Windowed: host the demo inside a bordered TermTk window.
        container = ttk.TTkWindow(
            parent=root, pos=(1, 1), size=(100, 40), title="Test Graph",
            border=True, layout=ttk.TTkGridLayout())
    demoGraph(container)
    root.mainloop()
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2021 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, os, argparse, math, random
sys.path.append(os.path.join(sys.path[0],'..'))
import TermTk as ttk
def demoGraph(root= None):
frame = ttk.TTkFrame(parent=root, border=False, layout=ttk.TTkGridLayout())
graphWidget1 = ttk.TTkGraph(color=ttk.TTkColor.fg('#00dddd', modifier=ttk.TTkColorGradient(increment=-20)))
graphWidget2 = ttk.TTkGraph(direction=ttk.TTkK.LEFT, color=ttk.TTkColor.fg('#ffdd00', modifier=ttk.TTkColorGradient(increment= 10)))
graphWidget3 = ttk.TTkGraph(color=ttk.TTkColor.fg('#dd00dd', modifier=ttk.TTkColorGradient(increment=-10)))
graphWidget4 = ttk.TTkGraph(color=ttk.TTkColor.fg('#00dd44', modifier=ttk.TTkColorGradient(increment=-15)))
frame.layout().addWidget(graphWidget1, 0,0)
frame.layout().addWidget(graphWidget2, 0,1)
frame.layout().addWidget(graphWidget3, 1,0)
frame.layout().addWidget(graphWidget4, 1,1)
class timerEvent():
def __init__(self, w, type):
self.timer = ttk.TTkTimer()
self.val = 10
self.switch = False
self.w = w
self.type = type
self.timer.timeout.connect(self.timerEvent)
self.timer.start(1)
@ttk.pyTTkSlot()
def timerEvent(self):
self.switch = not self.switch
if self.type == 1: # Simple sin
val = math.sin(self.val*math.pi/40)*4*10
if self.type == 2: # Double sin
offset = 15
if self.switch: val = math.sin(self.val*math.pi/40)*4*10
else: val = math.sin((self.val+offset)*math.pi/40)*4*7
if self.type == 3: # random
val = random.uniform(-40,+40)
if self.type == 4: # mix rand and sin
if self.switch: val = math.sin(self.val*math.pi/40)*4*10
else: val = random.uniform(-40,+40)
self.val+=1
self.w.addValue(val)
self.timer.start(0.1)
timerEvent(graphWidget1, 1)
timerEvent(graphWidget2, 2)
timerEvent(graphWidget3, 3)
timerEvent(graphWidget4, 4)
return frame
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-f', help='Full Screen', action='store_true')
args = parser.parse_args()
ttk.TTkLog.use_default_file_logging()
root = ttk.TTk()
if args.f:
rootGraph = root
root.setLayout(ttk.TTkGridLayout())
else:
rootGraph = ttk.TTkWindow(parent=root,pos=(1,1), size=(100,40), title="Test Graph", border=True, layout=ttk.TTkGridLayout())
demoGraph(rootGraph)
root.mainloop()
if __name__ == "__main__":
main()
|
en
| 0.723879
|
#!/usr/bin/env python3 # MIT License # # Copyright (c) 2021 <NAME> <<EMAIL>> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # Simple sin # Double sin # random # mix rand and sin
| 2.075064
| 2
|
main.py
|
rasfmar/fashion-detector
| 0
|
6629734
|
<gh_stars>0
#!/usr/bin/python3
import numpy as np;
import tensorflow as tf;
from tensorflow import keras;
import matplotlib.pyplot as plt;
def main():
    """Train and evaluate a simple dense classifier on Fashion-MNIST.

    Loads the dataset, scales pixels to [0, 1], trains a one-hidden-layer
    network for 20 epochs and prints the test accuracy.
    """
    # Load data set (60k train / 10k test grayscale 28x28 images).
    dataset = keras.datasets.fashion_mnist
    (train_images, train_labels), (test_images, test_labels) = dataset.load_data()

    # Classification names for the 10 label indices (kept for reference;
    # not used below).
    class_names = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat",
                   "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"]

    # Preprocess dataset such that all image values are in the range 0 to 1.
    train_images = train_images / 255.0
    test_images = test_images / 255.0

    # Build model: flatten each image, one hidden dense layer, 10 logits.
    model = keras.Sequential([
        keras.layers.Flatten(input_shape=(28, 28)),
        keras.layers.Dense(128, activation="relu"),
        keras.layers.Dense(10)
    ])

    # Compile model; from_logits=True because the last layer has no softmax.
    model.compile(optimizer="adam",
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=["accuracy"])

    # Train model.
    model.fit(train_images, train_labels, epochs=20)

    # Test model.
    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
    print(f"Model accuracy: {test_acc}")


if __name__ == "__main__":
    main()
|
#!/usr/bin/python3
import numpy as np;
import tensorflow as tf;
from tensorflow import keras;
import matplotlib.pyplot as plt;
def main():
# load data set
dataset = keras.datasets.fashion_mnist;
(train_images, train_labels), (test_images, test_labels) = dataset.load_data();
# classification names
class_names = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat", \
"Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"];
# preprocess dataset such that all image values are in the range of 0 to 1
train_images = train_images / 255.0;
test_images = test_images / 255.0;
# build model
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation="relu"),
keras.layers.Dense(10)
]);
# compile model
model.compile(optimizer="adam",
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=["accuracy"])
# train model
model.fit(train_images, train_labels, epochs=20);
# test model
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2);
print(f"Model accuracy: {test_acc}");
if __name__ == "__main__":
main();
|
en
| 0.757769
|
#!/usr/bin/python3 # load data set # classification names # preprocess dataset such that all image values are in the range of 0 to 1 # build model # compile model # train model # test model
| 3.303725
| 3
|
time_entries_api.py
|
anshajk/clockify-bot-template
| 0
|
6629735
|
<reponame>anshajk/clockify-bot-template
import datetime as dt
import json
import pandas as pd
import pytz
import requests
from config import Config
BASE_API = "https://api.clockify.me/api/v1"
TIME_ENTRIES_ENDPOINT = "/workspaces/{workspaceId}/user/{userId}/time-entries"
PROJECTS_ENDPOINT = "/workspaces/{workspaceId}/projects"
class TimeEntriesApi(object):
    """Thin wrapper around the Clockify time-entries / projects REST API."""

    def __init__(self) -> None:
        # API key is sent on every request via this header.
        self.headers = {"X-Api-Key": Config.api_key}
        self.time_api = BASE_API + TIME_ENTRIES_ENDPOINT.format(
            workspaceId=Config.workspace_id, userId=Config.user_id
        )

    def get_recent_entries(self) -> list:
        """Fetch the configured user's most recent time entries."""
        response = requests.get(self.time_api, headers=self.headers)
        return json.loads(response.text)

    def get_projects(self) -> list:
        """Fetch all projects in the configured workspace."""
        url = BASE_API + PROJECTS_ENDPOINT.format(workspaceId=Config.workspace_id)
        response = requests.get(url, headers=self.headers)
        return json.loads(response.text)

    def get_todays_entries(self, entries: list) -> list:
        """Filter *entries* down to finished entries within today's window.

        The window runs from today's UTC midnight minus 5h30m up to
        18:30 UTC — presumably aligning with an IST working day
        (TODO confirm with the author).
        """
        midnight_utc = dt.datetime.now(pytz.utc).replace(
            hour=0, minute=0, second=0, microsecond=0
        )
        window_start = midnight_utc - dt.timedelta(hours=5, minutes=30)
        window_end = midnight_utc.replace(hour=18, minute=30)
        selected = []
        for entry in entries:
            interval = entry["timeInterval"]
            # Entries still running have no end timestamp — skip them.
            if not interval["end"]:
                continue
            started = pd.to_datetime(interval["start"])
            ended = pd.to_datetime(interval["end"])
            if started > window_start and ended < window_end:
                selected.append(
                    dict(project_id=entry["projectId"], start=started, end=ended)
                )
        return selected

    def get_projects_df(self, projects: list) -> pd.DataFrame:
        """Convert raw project dicts into a (project_id, name) DataFrame."""
        records = [
            dict(project_id=project["id"], name=project["name"])
            for project in projects
        ]
        return pd.DataFrame(records)

    def get_entries_df(self, entries: list) -> pd.DataFrame:
        """Return *entries* as a DataFrame, or None when the list is empty."""
        if entries:
            return pd.DataFrame(entries)
        return None
|
import datetime as dt
import json
import pandas as pd
import pytz
import requests
from config import Config
BASE_API = "https://api.clockify.me/api/v1"
TIME_ENTRIES_ENDPOINT = "/workspaces/{workspaceId}/user/{userId}/time-entries"
PROJECTS_ENDPOINT = "/workspaces/{workspaceId}/projects"
class TimeEntriesApi(object):
def __init__(self) -> None:
self.headers = {"X-Api-Key": Config.api_key}
self.time_api = BASE_API + TIME_ENTRIES_ENDPOINT.format(
workspaceId=Config.workspace_id, userId=Config.user_id
)
def get_recent_entries(self) -> list:
resp = requests.get(self.time_api, headers=self.headers)
entries = json.loads(resp.text)
return entries
def get_projects(self) -> list:
project_api = BASE_API + PROJECTS_ENDPOINT.format(
workspaceId=Config.workspace_id
)
resp = requests.get(project_api, headers=self.headers)
projects = json.loads(resp.text)
return projects
def get_todays_entries(self, entries: list) -> list:
entries_today = []
today = dt.datetime.now(pytz.utc).replace(
hour=0, minute=0, second=0, microsecond=0
)
window_start = today - dt.timedelta(hours=5, minutes=30)
window_end = today.replace(hour=18, minute=30)
for entry in entries:
time_interval = entry["timeInterval"]
if not time_interval["end"]:
continue
start = pd.to_datetime(time_interval["start"])
end = pd.to_datetime(time_interval["end"])
if start > window_start and end < window_end:
entries_today.append(
dict(project_id=entry["projectId"], start=start, end=end)
)
return entries_today
def get_projects_df(self, projects: list) -> pd.DataFrame:
projects_list = []
for project in projects:
projects_list.append(dict(project_id=project["id"], name=project["name"]))
projects_df = pd.DataFrame(projects_list)
return projects_df
def get_entries_df(self, entries: list) -> pd.DataFrame:
entry_df = None
if entries:
entry_df = pd.DataFrame(entries)
return entry_df
|
none
| 1
| 2.70852
| 3
|
|
viper/modules/yarascan.py
|
CLEAR-seclab/viper
| 0
|
6629736
|
# -*- coding: utf-8 -*-
# This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.
import os
import string
import subprocess
from os.path import expanduser
try:
from scandir import walk
except ImportError:
from os import walk
from viper.common.abstracts import Module
from viper.core.database import Database
from viper.core.session import __sessions__
from viper.core.storage import get_sample_path
from viper.core.config import __config__
try:
import yara
HAVE_YARA = True
except ImportError:
HAVE_YARA = False
cfg = __config__
def string_printable(line):
    """Return *line* as a string with non-printable characters escaped.

    Each character not in ``string.printable`` is replaced by a ``\\xHH``
    escape of its code point. The input is first converted with ``str``,
    so any object is accepted.

    Bug fix: the original used ``c.encode('hex')``, which only works on
    Python 2 byte strings and raises on Python 3 — replaced with an
    ``ord``-based hex format that behaves identically for byte values.
    """
    line = str(line)
    new_line = ''
    for c in line:
        if c in string.printable:
            new_line += c
        else:
            new_line += '\\x{:02x}'.format(ord(c))
    return new_line
class YaraScan(Module):
    """Viper module: scan stored samples with Yara rules and manage rules.

    Subcommands:
      - ``scan``:  match Yara rules against the open session file or the
        whole repository, optionally tagging matches.
      - ``rules``: list, edit, or git-update the locally stored rulesets.
    """
    cmd = 'yara'
    description = 'Scan stored files with Yara rules'
    authors = ['nex']
    def __init__(self):
        # Register the 'scan' and 'rules' subcommand argument parsers.
        super(YaraScan, self).__init__()
        subparsers = self.parser.add_subparsers(dest='subname')
        parser_scan = subparsers.add_parser('scan', help='Scan files with Yara signatures')
        parser_scan.add_argument('-r', '--rule', help='Specify a ruleset file path (if none is specified, the rules in local storage are used)')
        parser_scan.add_argument('-a', '--all', action='store_true', help='Scan all stored files (default if no session is open)')
        parser_scan.add_argument('-t', '--tag', action='store_true', help='Tag Files with Rule Name (default is not to)')
        parser_scan.add_argument('-v', '--verbose', action='store_true', help='Output a detailed overview of the matches and found offsets')
        parser_rules = subparsers.add_parser('rules', help='Operate on Yara rules')
        parser_rules.add_argument('-e', '--edit', help='Open an editor to edit the specified rule')
        parser_rules.add_argument('-u', '--update', action='store_true', help='Download latest rules from selected repositories')
        # Rules are searched in the shared system path and in ~/.viper/yara.
        self.local_rules = os.path.join(expanduser('~'), '.viper', 'yara')
        self.rules_paths = [
            '/usr/share/viper/yara',
            self.local_rules
        ]
    def _get_rules(self):
        """Return [[index, path], ...] for every .yar/.yara file found
        under the configured rules paths; indices start at 1."""
        # Retrieve the list of rules and populate a list.
        rules = []
        count = 1
        # We loop through all rules paths (both in share as well as locally)
        # and we populate the list of rules.
        for root in self.rules_paths:
            for folder, folders, files in walk(root):
                for file_name in files:
                    # Skip if the extension is not right, could cause problems.
                    if not file_name.endswith('.yar') and not file_name.endswith('.yara'):
                        continue
                    rules.append([count, os.path.join(folder, file_name)])
                    count += 1
        return rules
    def scan(self):
        """Run the Yara scan over the session file or the whole repository,
        logging matches and optionally tagging matched samples."""
        arg_rule = self.args.rule
        arg_scan_all = self.args.all
        arg_tag = self.args.tag
        arg_verbose = self.args.verbose
        # External variables referenced by some rules; filled per file below.
        externals = {'filename': '', 'filepath': '', 'extension': '', 'filetype': ''}
        # If a rule file is specified we compile that, otherwise all
        # the rules we have stored locally.
        if arg_rule:
            # Check if the selected ruleset actually exists.
            if not os.path.exists(arg_rule):
                self.log('error', "The specified file does not exist at path {0}".format(arg_rule))
                return
            rules = yara.compile(arg_rule, externals=externals)
        # Otherwise, we get all the rules that are stored locally and we
        # load them in different namespaces.
        else:
            filepaths = dict()
            for rule in self._get_rules():
                # TODO: We pre-compile all rules individually to check whether they are
                # loadable or not. This is pretty hacky, there must be a better way.
                try:
                    yara.compile(rule[1], externals=externals)
                except yara.SyntaxError as e:
                    self.log('warning', "Unable to compile rule {0}: {1}".format(rule[1], e))
                    continue
                filepaths['namespace' + str(rule[0])] = rule[1]
            rules = yara.compile(filepaths=filepaths, externals=externals, includes=False)
        # Files to scan.
        files = []
        # If there is a session open and the user didn't specifically
        # request to scan the full repository, we just add the currently
        # opened file's path.
        if __sessions__.is_set() and not arg_scan_all:
            files.append(__sessions__.current.file)
        # Otherwise we loop through all files in the repository and queue
        # them up for scan.
        else:
            self.log('info', "Scanning all stored files (in the current project)...")
            db = Database()
            samples = db.find(key='all')
            for sample in samples:
                files.append(sample)
        # Loop through all files to be scanned.
        for entry in files:
            if entry.size == 0:
                continue
            self.log('info', "Scanning {0} ({1})".format(entry.name, entry.sha256))
            # Check if the entry has a path attribute. This happens when
            # there is a session open. We need to distinguish this just for
            # the cases where we're scanning an opened file which has not been
            # stored yet.
            if hasattr(entry, 'path'):
                entry_path = entry.path
            # This should be triggered only when scanning the full repository.
            else:
                entry_path = get_sample_path(entry.sha256)
            # Check if the file exists before running the yara scan.
            if not os.path.exists(entry_path):
                self.log('error', "The file does not exist at path {0}".format(entry_path))
                continue
            rows = []
            tag_list = []
            found = False
            # We need this just for some Yara rules.
            try:
                ext = os.path.splitext(entry.name)[1]
            except Exception:
                ext = ''
            try:
                matches = rules.match(entry_path, externals={'filename': entry.name, 'filepath': entry_path, 'extension': ext, 'filetype': entry.type})
            except yara.Error as e:
                self.log('error', "Yara scan for file {} ({}) failed: {}".format(entry.name, entry.sha256, e))
                continue
            for match in matches:
                found = True
                # Add a row for each string matched by the rule.
                if arg_verbose:
                    for match_string in match.strings:
                        rows.append([
                            match.rule,
                            string_printable(match_string[1]),
                            string_printable(match_string[0]),
                            string_printable(match_string[2])]
                        )
                else:
                    self.log('item', match.rule)
                # Add matching rules to our list of tags.
                # First it checks if there are tags specified in the metadata
                # of the Yara rule.
                match_tags = match.meta.get('tags')
                # If not, use the rule name.
                # TODO: as we add more and more yara rules, we might remove
                # this option and only tag the file with rules that had
                # tags specified in them.
                if not match_tags:
                    match_tags = match.rule
                # Add the tags to the list.
                tag_list.append([entry.sha256, match_tags])
            if arg_verbose and rows:
                header = [
                    'Rule',
                    'String',
                    'Offset',
                    'Content'
                ]
                self.log('table', dict(header=header, rows=rows))
            # If we selected to add tags do that now.
            if found and arg_tag:
                db = Database()
                for tag in tag_list:
                    db.add_tags(tag[0], tag[1])
                # If in a session reset the session to see tags.
                if __sessions__.is_set() and not arg_scan_all:
                    self.log('info', "Refreshing session to update attributes...")
                    __sessions__.new(__sessions__.current.file.path)
    def rules(self):
        """Handle the 'rules' subcommand: edit one rule, git-update the
        configured rule repositories, or list all known rules."""
        arg_edit = self.args.edit
        arg_update = self.args.update
        rules = self._get_rules()
        # If the user wants to edit a specific rule, loop through all of them
        # identify which one to open, and launch the default editor.
        if arg_edit:
            for rule in rules:
                if int(arg_edit) == rule[0]:
                    os.system('"${EDITOR:-nano}" ' + rule[1])
                    break
        # Otherwise, just print the list.
        # Check if the user wants to update rules.
        elif arg_update:
            # FIrst we create the local rules folder in case it doesn't exist.
            if not os.path.exists(self.local_rules):
                os.makedirs(self.local_rules)
            # TODO: we definitely need a way for Config to parse lists appropriately.
            urls = cfg.yara.repositories.split('\n')
            for url in urls:
                url = url.strip()
                self.log('info', "Updating Yara rules from repository {}".format(url))
                # NOTE(review): rstrip('.git') strips any trailing '.', 'g',
                # 'i', 't' characters, not the literal '.git' suffix — repo
                # names ending in those letters get mangled. Consider
                # removesuffix/endswith instead.
                repo_name = url.rsplit('/', 1)[-1].rstrip('.git')
                repo_path = os.path.join(self.local_rules, repo_name)
                # If the repository has been cloned before, we gonna update it.
                if os.path.exists(repo_path):
                    proc = subprocess.Popen(['git', 'pull'], cwd=repo_path)
                # Otherwise, do first clone.
                else:
                    proc = subprocess.Popen(['git', 'clone', url], cwd=self.local_rules)
                proc.wait()
        else:
            self.log('table', dict(header=['#', 'Path'], rows=rules))
            self.log('', "")
            self.log('', "You can edit these rules by specifying --edit and the #")
    def run(self):
        """Module entry point: dispatch to scan() or rules() based on the
        parsed subcommand, after checking the yara dependency."""
        super(YaraScan, self).run()
        if self.args is None:
            return
        if not HAVE_YARA:
            self.log('error', "Missing dependency, install yara")
            return
        if self.args.subname == 'scan':
            self.scan()
        elif self.args.subname == 'rules':
            self.rules()
        else:
            self.log('error', 'At least one of the parameters is required')
            self.usage()
|
# -*- coding: utf-8 -*-
# This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.
import os
import string
import subprocess
from os.path import expanduser
try:
from scandir import walk
except ImportError:
from os import walk
from viper.common.abstracts import Module
from viper.core.database import Database
from viper.core.session import __sessions__
from viper.core.storage import get_sample_path
from viper.core.config import __config__
try:
import yara
HAVE_YARA = True
except ImportError:
HAVE_YARA = False
cfg = __config__
def string_printable(line):
    """Return *line* with every non-printable character replaced by a
    ``\\xNN`` hex escape; printable characters pass through unchanged.

    NOTE(review): the input is coerced with ``str()``; if callers pass
    ``bytes`` on Python 3 the result will include the ``b'...'`` repr —
    confirm against call sites.
    """
    line = str(line)
    # Build a list and join once instead of quadratic string +=.
    escaped = []
    for c in line:
        if c in string.printable:
            escaped.append(c)
        else:
            # '{:02x}'.format(ord(c)) works on Python 2 and 3, unlike the
            # Python-2-only str.encode('hex') codec used previously.
            escaped.append('\\x{0:02x}'.format(ord(c)))
    return ''.join(escaped)
class YaraScan(Module):
    """Viper module that scans stored files with Yara rules and manages
    the locally stored rule repositories."""
    cmd = 'yara'
    description = 'Scan stored files with Yara rules'
    authors = ['nex']

    def __init__(self):
        super(YaraScan, self).__init__()
        subparsers = self.parser.add_subparsers(dest='subname')
        parser_scan = subparsers.add_parser('scan', help='Scan files with Yara signatures')
        parser_scan.add_argument('-r', '--rule', help='Specify a ruleset file path (if none is specified, the rules in local storage are used)')
        parser_scan.add_argument('-a', '--all', action='store_true', help='Scan all stored files (default if no session is open)')
        parser_scan.add_argument('-t', '--tag', action='store_true', help='Tag Files with Rule Name (default is not to)')
        parser_scan.add_argument('-v', '--verbose', action='store_true', help='Output a detailed overview of the matches and found offsets')
        parser_rules = subparsers.add_parser('rules', help='Operate on Yara rules')
        parser_rules.add_argument('-e', '--edit', help='Open an editor to edit the specified rule')
        parser_rules.add_argument('-u', '--update', action='store_true', help='Download latest rules from selected repositories')
        self.local_rules = os.path.join(expanduser('~'), '.viper', 'yara')
        # Rules are looked up both in the system-wide share directory and
        # in the per-user local directory.
        self.rules_paths = [
            '/usr/share/viper/yara',
            self.local_rules
        ]

    def _get_rules(self):
        """Return a list of ``[index, path]`` entries for every .yar/.yara
        file found under the configured rules directories."""
        rules = []
        count = 1
        # We loop through all rules paths (both in share as well as locally)
        # and we populate the list of rules.
        for root in self.rules_paths:
            for folder, folders, files in walk(root):
                for file_name in files:
                    # Skip if the extension is not right, could cause problems.
                    if not file_name.endswith('.yar') and not file_name.endswith('.yara'):
                        continue
                    rules.append([count, os.path.join(folder, file_name)])
                    count += 1
        return rules

    def scan(self):
        """Scan the opened file (or all stored files with ``--all``) with
        the compiled Yara rules, optionally tagging matched samples
        (``--tag``) and printing matched strings/offsets (``--verbose``)."""
        arg_rule = self.args.rule
        arg_scan_all = self.args.all
        arg_tag = self.args.tag
        arg_verbose = self.args.verbose
        # External variables referenced by some Yara rules; the real values
        # are supplied per file in the match() call below.
        externals = {'filename': '', 'filepath': '', 'extension': '', 'filetype': ''}
        # If a rule file is specified we compile that, otherwise all
        # the rules we have stored locally.
        if arg_rule:
            # Check if the selected ruleset actually exists.
            if not os.path.exists(arg_rule):
                self.log('error', "The specified file does not exist at path {0}".format(arg_rule))
                return
            rules = yara.compile(arg_rule, externals=externals)
        # Otherwise, we get all the rules that are stored locally and we
        # load them in different namespaces.
        else:
            filepaths = dict()
            for rule in self._get_rules():
                # TODO: We pre-compile all rules individually to check whether they are
                # loadable or not. This is pretty hacky, there must be a better way.
                try:
                    yara.compile(rule[1], externals=externals)
                except yara.SyntaxError as e:
                    self.log('warning', "Unable to compile rule {0}: {1}".format(rule[1], e))
                    continue
                filepaths['namespace' + str(rule[0])] = rule[1]
            rules = yara.compile(filepaths=filepaths, externals=externals, includes=False)
        # Files to scan.
        files = []
        # If there is a session open and the user didn't specifically
        # request to scan the full repository, we just add the currently
        # opened file's path.
        if __sessions__.is_set() and not arg_scan_all:
            files.append(__sessions__.current.file)
        # Otherwise we loop through all files in the repository and queue
        # them up for scan.
        else:
            self.log('info', "Scanning all stored files (in the current project)...")
            db = Database()
            samples = db.find(key='all')
            for sample in samples:
                files.append(sample)
        # Loop through all files to be scanned.
        for entry in files:
            if entry.size == 0:
                continue
            self.log('info', "Scanning {0} ({1})".format(entry.name, entry.sha256))
            # Check if the entry has a path attribute. This happens when
            # there is a session open. We need to distinguish this just for
            # the cases where we're scanning an opened file which has not been
            # stored yet.
            if hasattr(entry, 'path'):
                entry_path = entry.path
            # This should be triggered only when scanning the full repository.
            else:
                entry_path = get_sample_path(entry.sha256)
            # Check if the file exists before running the yara scan.
            if not os.path.exists(entry_path):
                self.log('error', "The file does not exist at path {0}".format(entry_path))
                continue
            rows = []
            tag_list = []
            found = False
            # We need this just for some Yara rules.
            try:
                ext = os.path.splitext(entry.name)[1]
            except Exception:
                ext = ''
            try:
                matches = rules.match(entry_path, externals={'filename': entry.name, 'filepath': entry_path, 'extension': ext, 'filetype': entry.type})
            except yara.Error as e:
                self.log('error', "Yara scan for file {} ({}) failed: {}".format(entry.name, entry.sha256, e))
                continue
            for match in matches:
                found = True
                # Add a row for each string matched by the rule.
                if arg_verbose:
                    for match_string in match.strings:
                        rows.append([
                            match.rule,
                            string_printable(match_string[1]),
                            string_printable(match_string[0]),
                            string_printable(match_string[2])]
                        )
                else:
                    self.log('item', match.rule)
                # Add matching rules to our list of tags.
                # First it checks if there are tags specified in the metadata
                # of the Yara rule.
                match_tags = match.meta.get('tags')
                # If not, use the rule name.
                # TODO: as we add more and more yara rules, we might remove
                # this option and only tag the file with rules that had
                # tags specified in them.
                if not match_tags:
                    match_tags = match.rule
                # Add the tags to the list.
                tag_list.append([entry.sha256, match_tags])
            if arg_verbose and rows:
                header = [
                    'Rule',
                    'String',
                    'Offset',
                    'Content'
                ]
                self.log('table', dict(header=header, rows=rows))
            # If we selected to add tags do that now.
            if found and arg_tag:
                db = Database()
                for tag in tag_list:
                    db.add_tags(tag[0], tag[1])
                # If in a session reset the session to see tags.
                if __sessions__.is_set() and not arg_scan_all:
                    self.log('info', "Refreshing session to update attributes...")
                    __sessions__.new(__sessions__.current.file.path)

    def rules(self):
        """List, edit, or update the locally stored Yara rules."""
        arg_edit = self.args.edit
        arg_update = self.args.update
        rules = self._get_rules()
        # If the user wants to edit a specific rule, loop through all of them
        # identify which one to open, and launch the default editor.
        if arg_edit:
            for rule in rules:
                if int(arg_edit) == rule[0]:
                    # NOTE(review): the rule path is interpolated into a
                    # shell command; a path containing quotes would break it.
                    os.system('"${EDITOR:-nano}" ' + rule[1])
                    break
        # Check if the user wants to update rules.
        elif arg_update:
            # First we create the local rules folder in case it doesn't exist.
            if not os.path.exists(self.local_rules):
                os.makedirs(self.local_rules)
            # TODO: we definitely need a way for Config to parse lists appropriately.
            urls = cfg.yara.repositories.split('\n')
            for url in urls:
                url = url.strip()
                self.log('info', "Updating Yara rules from repository {}".format(url))
                repo_name = url.rsplit('/', 1)[-1]
                # str.rstrip('.git') would strip any trailing '.', 'g', 'i'
                # or 't' characters (e.g. 'agit.git' -> 'a'), so remove the
                # '.git' suffix exactly instead.
                if repo_name.endswith('.git'):
                    repo_name = repo_name[:-len('.git')]
                repo_path = os.path.join(self.local_rules, repo_name)
                # If the repository has been cloned before, update it.
                if os.path.exists(repo_path):
                    proc = subprocess.Popen(['git', 'pull'], cwd=repo_path)
                # Otherwise, do a first clone.
                else:
                    proc = subprocess.Popen(['git', 'clone', url], cwd=self.local_rules)
                proc.wait()
        # Otherwise, just print the list.
        else:
            self.log('table', dict(header=['#', 'Path'], rows=rules))
            self.log('', "")
            self.log('', "You can edit these rules by specifying --edit and the #")

    def run(self):
        """Validate arguments and dependencies, then dispatch to the
        requested subcommand."""
        super(YaraScan, self).run()
        if self.args is None:
            return
        if not HAVE_YARA:
            self.log('error', "Missing dependency, install yara")
            return
        if self.args.subname == 'scan':
            self.scan()
        elif self.args.subname == 'rules':
            self.rules()
        else:
            self.log('error', 'At least one of the parameters is required')
            self.usage()
|
en
| 0.910558
|
# -*- coding: utf-8 -*- # This file is part of Viper - https://github.com/viper-framework/viper # See the file 'LICENSE' for copying permission. # Retrieve the list of rules and populate a list. # We loop through all rules paths (both in share as well as locally) # and we populate the list of rules. # Skip if the extension is not right, could cause problems. # If a rule file is specified we compile that, otherwise all # the rules we have stored locally. # Check if the selected ruleset actually exists. # Otherwise, we get all the rules that are stored locally and we # load them in different namespaces. # TODO: We pre-compile all rules individually to check whether they are # loadable or not. This is pretty hacky, there must be a better way. # Files to scan. # If there is a session open and the user didn't specifically # request to scan the full repository, we just add the currently # opened file's path. # Otherwise we loop through all files in the repository and queue # them up for scan. # Loop through all files to be scanned. # Check if the entry has a path attribute. This happens when # there is a session open. We need to distinguish this just for # the cases where we're scanning an opened file which has not been # stored yet. # This should be triggered only when scanning the full repository. # Check if the file exists before running the yara scan. # We need this just for some Yara rules. # Add a row for each string matched by the rule. # Add matching rules to our list of tags. # First it checks if there are tags specified in the metadata # of the Yara rule. # If not, use the rule name. # TODO: as we add more and more yara rules, we might remove # this option and only tag the file with rules that had # tags specified in them. # Add the tags to the list. # If we selected to add tags do that now. # If in a session reset the session to see tags. 
# If the user wants to edit a specific rule, loop through all of them # identify which one to open, and launch the default editor. # Otherwise, just print the list. # Check if the user wants to update rules. # FIrst we create the local rules folder in case it doesn't exist. # TODO: we definitely need a way for Config to parse lists appropriately. # If the repository has been cloned before, we gonna update it. # Otherwise, do first clone. #")
| 2.158696
| 2
|
gwells/test_forms.py
|
fieranmason/gwells
| 1
|
6629737
|
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .forms import *
from django.test import TestCase
#TODO split tests into one file per class
class FormsTestCase(TestCase):
    """Smoke tests: each form class can be instantiated without errors."""

    def test_activity_submission_comment_form_instantiation(self):
        self.assertIsInstance(ActivitySubmissionCommentForm(), ActivitySubmissionCommentForm)

    def test_activity_submission_development_form_instantiation(self):
        self.assertIsInstance(ActivitySubmissionDevelopmentForm(), ActivitySubmissionDevelopmentForm)

    def test_activity_submission_filter_pack_form_instantiation(self):
        self.assertIsInstance(ActivitySubmissionFilterPackForm(), ActivitySubmissionFilterPackForm)

    def test_activity_submission_gps_form_instantiation(self):
        self.assertIsInstance(ActivitySubmissionGpsForm(), ActivitySubmissionGpsForm)

    def test_activity_submission_location_form_instantiation(self):
        self.assertIsInstance(ActivitySubmissionLocationForm(), ActivitySubmissionLocationForm)

    def test_activity_submission_screen_intake_form_instantiation(self):
        self.assertIsInstance(ActivitySubmissionScreenIntakeForm(), ActivitySubmissionScreenIntakeForm)

    def test_activity_submission_surface_seal_form_instantiation(self):
        self.assertIsInstance(ActivitySubmissionSurfaceSealForm(), ActivitySubmissionSurfaceSealForm)

    def test_activity_submission_type_and_class_form_instantiation(self):
        self.assertIsInstance(ActivitySubmissionTypeAndClassForm(), ActivitySubmissionTypeAndClassForm)

    def test_activity_submission_water_quality_form_instantiation(self):
        self.assertIsInstance(ActivitySubmissionWaterQualityForm(), ActivitySubmissionWaterQualityForm)

    def test_casing_form_instantiation(self):
        self.assertIsInstance(CasingForm(), CasingForm)

    def test_liner_perforation_form_instantiation(self):
        self.assertIsInstance(LinerPerforationForm(), LinerPerforationForm)

    def test_lithology_form_instantiation(self):
        self.assertIsInstance(LithologyForm(), LithologyForm)

    def test_production_data_form_instantiation(self):
        self.assertIsInstance(ProductionDataForm(), ProductionDataForm)

    def test_screen_form_instantiation(self):
        self.assertIsInstance(ScreenForm(), ScreenForm)

    def test_search_form_instantiation(self):
        self.assertIsInstance(SearchForm(), SearchForm)

    def test_well_completion_form_instantiation(self):
        # This form requires an initial well_class_code value (may be None).
        self.assertIsInstance(WellCompletionForm(initial={'well_class_code': None}), WellCompletionForm)

    def test_well_owner_form_instantiation(self):
        self.assertIsInstance(WellOwnerForm(), WellOwnerForm)
|
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .forms import *
from django.test import TestCase
#TODO split tests into one file per class
class FormsTestCase(TestCase):
def test_activity_submission_comment_form_instantiation(self):
activity_submission_comment_form = ActivitySubmissionCommentForm()
self.assertIsInstance(activity_submission_comment_form, ActivitySubmissionCommentForm)
def test_activity_submission_development_form_instantiation(self):
test_activity_submission_development_form = ActivitySubmissionDevelopmentForm()
self.assertIsInstance(test_activity_submission_development_form, ActivitySubmissionDevelopmentForm)
def test_activity_submission_filter_pack_form_instantiation(self):
activity_submission_filter_pack_form = ActivitySubmissionFilterPackForm()
self.assertIsInstance(activity_submission_filter_pack_form, ActivitySubmissionFilterPackForm)
def test_activity_submission_gps_form_instantiation(self):
activity_submission_gps_form = ActivitySubmissionGpsForm()
self.assertIsInstance(activity_submission_gps_form, ActivitySubmissionGpsForm)
def test_activity_submission_location_form_instantiation(self):
activity_submission_location_form = ActivitySubmissionLocationForm()
self.assertIsInstance(activity_submission_location_form, ActivitySubmissionLocationForm)
def test_activity_submission_screen_intake_form_instantiation(self):
activity_submission_screen_intake_form = ActivitySubmissionScreenIntakeForm()
self.assertIsInstance(activity_submission_screen_intake_form,ActivitySubmissionScreenIntakeForm)
def test_activity_submission_surface_seal_form_instantiation(self):
activity_submission_surface_seal_form = ActivitySubmissionSurfaceSealForm()
self.assertIsInstance(activity_submission_surface_seal_form, ActivitySubmissionSurfaceSealForm)
def test_activity_submission_type_and_class_form_instantiation(self):
activity_submission_type_and_class_form = ActivitySubmissionTypeAndClassForm()
self.assertIsInstance(activity_submission_type_and_class_form, ActivitySubmissionTypeAndClassForm)
def test_activity_submission_water_quality_form_instantiation(self):
test_activity_submission_water_quality_form = ActivitySubmissionWaterQualityForm()
self.assertIsInstance(test_activity_submission_water_quality_form, ActivitySubmissionWaterQualityForm)
def test_casing_form_instantiation(self):
casing_form = CasingForm()
self.assertIsInstance(casing_form, CasingForm)
def test_liner_perforation_form_instantiation(self):
liner_perforation_form = LinerPerforationForm()
self.assertIsInstance(liner_perforation_form, LinerPerforationForm)
def test_lithology_form_instantiation(self):
lithology_form = LithologyForm()
self.assertIsInstance(lithology_form, LithologyForm)
def test_production_data_form_instantiation(self):
production_data_form = ProductionDataForm()
self.assertIsInstance(production_data_form, ProductionDataForm)
def test_screen_form_instantiation(self):
screen_form = ScreenForm()
self.assertIsInstance(screen_form, ScreenForm)
def test_search_form_instantiation(self):
search_form = SearchForm()
self.assertIsInstance(search_form, SearchForm)
def test_well_completion_form_instantiation(self):
well_completion_form = WellCompletionForm(initial={'well_class_code':None})
self.assertIsInstance(well_completion_form, WellCompletionForm)
def test_well_owner_form_instantiation(self):
well_owner_form = WellOwnerForm()
self.assertIsInstance(well_owner_form, WellOwnerForm)
|
en
| 0.86381
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. #TODO split tests into one file per class
| 1.844789
| 2
|
cauldron/ui/routes/notebooks/__init__.py
|
JohnnyPeng18/cauldron
| 90
|
6629738
|
<gh_stars>10-100
import mimetypes
import os
import cauldron
import flask
import requests
from cauldron import environ
from cauldron.ui import configs as ui_configs
blueprint = flask.Blueprint(
name='notebooks',
import_name=__name__,
url_prefix='{}/notebook'.format(ui_configs.ROOT_PREFIX)
)
def _get_remote_view(route: str) -> flask.Response:
    """Proxy the current Flask request to the remote kernel's /view
    endpoint and relay its response back to the client."""
    req = flask.request
    target = '{}/view/{}'.format(environ.remote_connection.url, route.lstrip('/'))
    upstream = requests.request(
        method=req.method,
        url=target,
        headers={key: value for key, value in req.headers if key != 'Host'},
        data=req.get_data(),
        cookies=req.cookies,
        allow_redirects=False
    )
    # Hop-by-hop / transport headers must not be forwarded back verbatim.
    skip = {
        'connection',
        'content-encoding',
        'content-length',
        'transfer-encoding',
    }
    forwarded = [
        (key, value)
        for key, value in upstream.raw.headers.items()
        if key.lower() not in skip
    ]
    return flask.Response(upstream.content, upstream.status_code, forwarded)
@blueprint.route('/<path:route>', methods=['GET', 'POST'])
def notebook(route: str):
    """
    Retrieves the contents of the file specified by the view route if it
    exists.
    """
    is_remote = environ.remote_connection.active
    serve_from_resources = route.startswith('assets') or (
        is_remote and route in ('project.css', 'project.js')
    )
    if serve_from_resources:
        # Prefer a locally bundled copy of the asset from the resources
        # directory over the one in the results directory when it exists.
        asset = environ.paths.resources('web', route)
        if os.path.exists(asset):
            return flask.send_file(
                asset,
                mimetype=mimetypes.guess_type(asset)[0],
                cache_timeout=-1
            )
    if is_remote:
        return _get_remote_view(route)
    project = cauldron.project.get_internal_project()
    results_path = project.results_path if project else None
    # No project (or no results directory) means there is nothing to serve.
    if not project or not results_path:
        return '', 204
    target = os.path.join(results_path, route)
    if not os.path.exists(target):
        return '', 204
    return flask.send_file(
        target,
        mimetype=mimetypes.guess_type(target)[0],
        cache_timeout=-1
    )
|
import mimetypes
import os
import cauldron
import flask
import requests
from cauldron import environ
from cauldron.ui import configs as ui_configs
blueprint = flask.Blueprint(
name='notebooks',
import_name=__name__,
url_prefix='{}/notebook'.format(ui_configs.ROOT_PREFIX)
)
def _get_remote_view(route: str) -> flask.Response:
endpoint = route.lstrip('/')
request = flask.request
response = requests.request(
method=request.method,
url='{}/view/{}'.format(environ.remote_connection.url, endpoint),
headers={k: v for k, v in request.headers if k != 'Host'},
data=request.get_data(),
cookies=request.cookies,
allow_redirects=False
)
excluded_headers = [
'connection',
'content-encoding',
'content-length',
'transfer-encoding',
]
headers = [
(name, value)
for name, value in response.raw.headers.items()
if name.lower() not in excluded_headers
]
return flask.Response(response.content, response.status_code, headers)
@blueprint.route('/<path:route>', methods=['GET', 'POST'])
def notebook(route: str):
"""
Retrieves the contents of the file specified by the view route if it
exists.
"""
is_remote = environ.remote_connection.active
load_from_resources = (
route.startswith('assets')
or (
is_remote
and route in ['project.css', 'project.js']
)
)
if load_from_resources:
# If a local version of the asset exists, send that from the
# resources directory instead of the results directory.
local_asset_path = environ.paths.resources('web', route)
if os.path.exists(local_asset_path):
return flask.send_file(
local_asset_path,
mimetype=mimetypes.guess_type(local_asset_path)[0],
cache_timeout=-1
)
if is_remote:
return _get_remote_view(route)
project = cauldron.project.get_internal_project()
results_path = project.results_path if project else None
if not project or not results_path:
return '', 204
path = os.path.join(results_path, route)
if not os.path.exists(path):
return '', 204
return flask.send_file(
path,
mimetype=mimetypes.guess_type(path)[0],
cache_timeout=-1
)
|
en
| 0.824977
|
Retrieves the contents of the file specified by the view route if it exists. # If a local version of the asset exists, send that from the # resources directory instead of the results directory.
| 2.381697
| 2
|
tests/test_sample.py
|
sempr/grpc-interceptor-headers
| 0
|
6629739
|
import grpc
import pytest
import random
from samples.greeter_client import do_call
from samples.greeter_server import get_serve
def test_pass():
    """End-to-end check: a normal call succeeds and a call carrying the
    'error' header is rejected with UNAUTHENTICATED."""
    bind = "0.0.0.0:{}".format(random.randint(60000, 61000))
    serv = get_serve(bind=bind)
    serv.start()
    try:
        resp = do_call(bind=bind, name="abc")
        assert resp == 'Hello, abc!', "Normal Call Failed"
        try:
            do_call(bind=bind, header_name="error")
            assert False, "Should Be Failed"
        except grpc.RpcError as e:
            # grpc.RpcError is the public base class for failed RPCs; the
            # previously-used grpc._channel._Rendezvous is a private name
            # that newer grpcio releases no longer expose.
            assert e.code(
            ) == grpc.StatusCode.UNAUTHENTICATED, "Should be UNAUTHENTICATED code"
    finally:
        serv.stop(0)
|
import grpc
import pytest
import random
from samples.greeter_client import do_call
from samples.greeter_server import get_serve
def test_pass():
bind = "0.0.0.0:{}".format(random.randint(60000, 61000))
serv = get_serve(bind=bind)
serv.start()
try:
resp = do_call(bind=bind, name="abc")
assert resp == 'Hello, abc!', "Normal Call Failed"
try:
resp = do_call(bind=bind, header_name="error")
assert False, "Should Be Failed"
except grpc._channel._Rendezvous as e:
assert e.code(
) == grpc.StatusCode.UNAUTHENTICATED, "Should be UNAUTHENTICATED code"
finally:
serv.stop(0)
|
none
| 1
| 2.007513
| 2
|
|
kindle/weather-stand/local/lib/python2.7/ctypes/_endian.py
|
wgxdz/kindle-weather-stand-alone
| 0
|
6629740
|
<gh_stars>0
######################################################################
# This file should be kept compatible with Python 2.3, see PEP 291. #
######################################################################
import sys
from ctypes import *
_array_type = type(c_int * 3)
def _other_endian(typ):
    """Return the type with the 'other' byte order. Simple types like
    c_int and so on already have __ctype_be__ and __ctype_le__
    attributes which contain the types, for more complicated types
    arrays and structures are supported.
    """
    # check _OTHER_ENDIAN attribute (present if typ is primitive type)
    if hasattr(typ, _OTHER_ENDIAN):
        return getattr(typ, _OTHER_ENDIAN)
    # if typ is array: swap the element type, keep the array length
    if isinstance(typ, _array_type):
        return _other_endian(typ._type_) * typ._length_
    # if typ is structure: returned unchanged; the swapped metaclass below
    # rewrites its _fields_ entries when they are assigned
    if issubclass(typ, Structure):
        return typ
    raise TypeError("This type does not support other endian: %s" % typ)
class _swapped_meta(type(Structure)):
    # Metaclass that intercepts assignment to _fields_ and replaces each
    # field's type with its other-endian counterpart.
    def __setattr__(self, attrname, value):
        if attrname == "_fields_":
            fields = []
            for desc in value:
                name = desc[0]
                typ = desc[1]
                # Any extra elements (e.g. a bit-field width) are kept as-is.
                rest = desc[2:]
                fields.append((name, _other_endian(typ)) + rest)
            value = fields
        super(_swapped_meta, self).__setattr__(attrname, value)
################################################################
# Note: The Structure metaclass checks for the *presence* (not the
# value!) of a _swapped_bytes_ attribute to determine the bit order in
# structures containing bit fields.
if sys.byteorder == "little":
    _OTHER_ENDIAN = "__ctype_be__"
    # The native Structure is already little endian on this host; only the
    # big endian variant needs the field-swapping metaclass.
    LittleEndianStructure = Structure
    class BigEndianStructure(Structure):
        """Structure with big endian byte order"""
        __metaclass__ = _swapped_meta
        _swappedbytes_ = None
elif sys.byteorder == "big":
    _OTHER_ENDIAN = "__ctype_le__"
    # The native Structure is already big endian on this host; only the
    # little endian variant needs the field-swapping metaclass.
    BigEndianStructure = Structure
    class LittleEndianStructure(Structure):
        """Structure with little endian byte order"""
        __metaclass__ = _swapped_meta
        _swappedbytes_ = None
else:
    raise RuntimeError("Invalid byteorder")
|
######################################################################
# This file should be kept compatible with Python 2.3, see PEP 291. #
######################################################################
import sys
from ctypes import *
_array_type = type(c_int * 3)
def _other_endian(typ):
"""Return the type with the 'other' byte order. Simple types like
c_int and so on already have __ctype_be__ and __ctype_le__
attributes which contain the types, for more complicated types
arrays and structures are supported.
"""
# check _OTHER_ENDIAN attribute (present if typ is primitive type)
if hasattr(typ, _OTHER_ENDIAN):
return getattr(typ, _OTHER_ENDIAN)
# if typ is array
if isinstance(typ, _array_type):
return _other_endian(typ._type_) * typ._length_
# if typ is structure
if issubclass(typ, Structure):
return typ
raise TypeError("This type does not support other endian: %s" % typ)
class _swapped_meta(type(Structure)):
def __setattr__(self, attrname, value):
if attrname == "_fields_":
fields = []
for desc in value:
name = desc[0]
typ = desc[1]
rest = desc[2:]
fields.append((name, _other_endian(typ)) + rest)
value = fields
super(_swapped_meta, self).__setattr__(attrname, value)
################################################################
# Note: The Structure metaclass checks for the *presence* (not the
# value!) of a _swapped_bytes_ attribute to determine the bit order in
# structures containing bit fields.
if sys.byteorder == "little":
_OTHER_ENDIAN = "__ctype_be__"
LittleEndianStructure = Structure
class BigEndianStructure(Structure):
"""Structure with big endian byte order"""
__metaclass__ = _swapped_meta
_swappedbytes_ = None
elif sys.byteorder == "big":
_OTHER_ENDIAN = "__ctype_le__"
BigEndianStructure = Structure
class LittleEndianStructure(Structure):
"""Structure with little endian byte order"""
__metaclass__ = _swapped_meta
_swappedbytes_ = None
else:
raise RuntimeError("Invalid byteorder")
|
en
| 0.468748
|
###################################################################### # This file should be kept compatible with Python 2.3, see PEP 291. # ###################################################################### Return the type with the 'other' byte order. Simple types like c_int and so on already have __ctype_be__ and __ctype_le__ attributes which contain the types, for more complicated types arrays and structures are supported. # check _OTHER_ENDIAN attribute (present if typ is primitive type) # if typ is array # if typ is structure ################################################################ # Note: The Structure metaclass checks for the *presence* (not the # value!) of a _swapped_bytes_ attribute to determine the bit order in # structures containing bit fields. Structure with big endian byte order Structure with little endian byte order
| 2.950446
| 3
|
iterators_examples/_5_tic_tac_toe_generator.py
|
rmotr-source/free-class-iterators-generators
| 0
|
6629741
|
def tic_tac_toe():
    """Yield the words 'tic', 'tac' and 'toe', in that order."""
    for word in ('tic', 'tac', 'toe'):
        yield word
for elem in tic_tac_toe():
print(elem)
|
def tic_tac_toe():
words = ['tic', 'tac', 'toe']
for word in words:
yield word
for elem in tic_tac_toe():
print(elem)
|
none
| 1
| 3.612141
| 4
|
|
the_basics/basics3.py
|
XinyanLi23/python_basics
| 0
|
6629742
|
# $ content split
# * title
# tm terminal
# td treads
# n note
# ! important
# alternative
#$ section 7 Loops
#* for loops
monday_temperatures = [9.1, 8.8, 7.6]
#print(round(monday_temperatures[0]))
#print(round(monday_temperatures[1]))
#print(round(monday_temperatures[2]))
for temperature in monday_temperatures: #n for loop creates a variable(temperature) and it goes all items one by one
print(round(temperature))
print("Done")
for letter in 'hello':
print(letter.title()) #n title() makes a string with each word is titlecased
#ex.
colors = [11, 34.1, 98.2, 43, 45.1, 54, 54]
for item in colors:
if isinstance(item, int):
print(item)
#* loop through a dictionary
student_grades = {"Marry": 9.1, "Sim": 8.8, "John": 7.5}
for grades in student_grades.items():
print(grades)
for grades in student_grades.keys():
print(grades)
for grades in student_grades.values():
print(grades)
#ex1.
phone_numbers = {"<NAME>": "+37682929928", "<NAME>": "+423998200919"}
for key, value in phone_numbers.items():
print("{}: {}".format(key, value))
#ex2.
phone_numbers = {"<NAME>": "+37682929928", "<NAME>": "+423998200919"}
for numbers in phone_numbers.values():
print(numbers.replace("+", "00"))
#* while loops
a = 3
#while a > 0: #n as long as the line is true, print 1
# print(1) #n ctrl c to exit the execute
#while True:
# print(1)
#ex.
username = ''
while username != "pypy":
username = input("Enter username: ") #n the input continues to ask until inputs the true username
#* while loops with break and continue
while True:
username = input("Enter username: ")
if username == 'pypy':
break
else:
continue
#$ Application
#try by myself
conclusion = ""
while True:
say_something = input("Say something: ")
if say_something == '\end':
break
else:
conclusion = conclusion + say_something
continue
print(conclusion)
#$ build the maker func
#def sentence_maker(phrase):
#capitalized = phrase.capitalize()
#if phrase.startswith(("how","what","why"))
# better
def sentence_maker(phrase):
interrogatives = ("how","what","why") #n 疑问词
capitalized = phrase.capitalize()
if phrase.startswith((interrogatives)):
return "{}?".format(capitalized) #n return the string "{}"
else:
return "{}.".format(capitalized)
print(sentence_maker("how are you"))
#$ constructing the loop
#! ex
def sentence_maker(phrase):
interrogatives = ("how","what","why")
capitalized = phrase.capitalize()
if phrase.startswith((interrogatives)):
return "{}?".format(capitalized)
else:
return "{}.".format(capitalized)
results = [] # td3 store phrases that the user enters in a list, so we start with an empty list outside the loop and the python will execute the func definition
while True: # td1 conditional loop - while
user_input = input("Say something: ")
if user_input == "\end":
break
else: # td2 sentences look like many items - lists
#results.append(user_input) # td4 need to convert the string to the func, so add the def in
results.append(sentence_maker(user_input))
print(" ".join(results)) # td5 use ".join" to join the sentences togethere
# steps:
# 1. def a function
# 2. then iterate, make a loop, ask user for input in each iteration, immediately check each input first!
# 3. store the output of the census maker in to the results list
# 4. concatenate the items of the list and print!
|
# $ content split
# * title
# tm terminal
# td treads
# n note
# ! important
# alternative
#$ section 7 Loops
#* for loops
monday_temperatures = [9.1, 8.8, 7.6]
#print(round(monday_temperatures[0]))
#print(round(monday_temperatures[1]))
#print(round(monday_temperatures[2]))
for temperature in monday_temperatures: #n for loop creates a variable(temperature) and it goes all items one by one
print(round(temperature))
print("Done")
for letter in 'hello':
print(letter.title()) #n title() makes a string with each word is titlecased
#ex.
colors = [11, 34.1, 98.2, 43, 45.1, 54, 54]
for item in colors:
if isinstance(item, int):
print(item)
#* loop through a dictionary
student_grades = {"Marry": 9.1, "Sim": 8.8, "John": 7.5}
for grades in student_grades.items():
print(grades)
for grades in student_grades.keys():
print(grades)
for grades in student_grades.values():
print(grades)
#ex1.
phone_numbers = {"<NAME>": "+37682929928", "<NAME>": "+423998200919"}
for key, value in phone_numbers.items():
print("{}: {}".format(key, value))
#ex2.
phone_numbers = {"<NAME>": "+37682929928", "<NAME>": "+423998200919"}
for numbers in phone_numbers.values():
print(numbers.replace("+", "00"))
#* while loops
a = 3
#while a > 0: #n as long as the line is true, print 1
# print(1) #n ctrl c to exit the execute
#while True:
# print(1)
#ex.
username = ''
while username != "pypy":
username = input("Enter username: ") #n the input continues to ask until inputs the true username
#* while loops with break and continue
while True:
username = input("Enter username: ")
if username == 'pypy':
break
else:
continue
#$ Application
#try by myself
conclusion = ""
while True:
say_something = input("Say something: ")
if say_something == '\end':
break
else:
conclusion = conclusion + say_something
continue
print(conclusion)
#$ build the maker func
#def sentence_maker(phrase):
#capitalized = phrase.capitalize()
#if phrase.startswith(("how","what","why"))
# better
def sentence_maker(phrase):
interrogatives = ("how","what","why") #n 疑问词
capitalized = phrase.capitalize()
if phrase.startswith((interrogatives)):
return "{}?".format(capitalized) #n return the string "{}"
else:
return "{}.".format(capitalized)
print(sentence_maker("how are you"))
#$ constructing the loop
#! ex
def sentence_maker(phrase):
interrogatives = ("how","what","why")
capitalized = phrase.capitalize()
if phrase.startswith((interrogatives)):
return "{}?".format(capitalized)
else:
return "{}.".format(capitalized)
results = [] # td3 store phrases that the user enters in a list, so we start with an empty list outside the loop and the python will execute the func definition
while True: # td1 conditional loop - while
user_input = input("Say something: ")
if user_input == "\end":
break
else: # td2 sentences look like many items - lists
#results.append(user_input) # td4 need to convert the string to the func, so add the def in
results.append(sentence_maker(user_input))
print(" ".join(results)) # td5 use ".join" to join the sentences togethere
# steps:
# 1. def a function
# 2. then iterate, make a loop, ask user for input in each iteration, immediately check each input first!
# 3. store the output of the census maker in to the results list
# 4. concatenate the items of the list and print!
|
en
| 0.708141
|
# $ content split # * title # tm terminal # td treads # n note # ! important # alternative #$ section 7 Loops #* for loops #print(round(monday_temperatures[0])) #print(round(monday_temperatures[1])) #print(round(monday_temperatures[2])) #n for loop creates a variable(temperature) and it goes all items one by one #n title() makes a string with each word is titlecased #ex. #* loop through a dictionary #ex1. #ex2. #* while loops #while a > 0: #n as long as the line is true, print 1 # print(1) #n ctrl c to exit the execute #while True: # print(1) #ex. #n the input continues to ask until inputs the true username #* while loops with break and continue #$ Application #try by myself #$ build the maker func #def sentence_maker(phrase): #capitalized = phrase.capitalize() #if phrase.startswith(("how","what","why")) # better #n 疑问词 #n return the string "{}" #$ constructing the loop #! ex # td3 store phrases that the user enters in a list, so we start with an empty list outside the loop and the python will execute the func definition # td1 conditional loop - while # td2 sentences look like many items - lists #results.append(user_input) # td4 need to convert the string to the func, so add the def in # td5 use ".join" to join the sentences togethere # steps: # 1. def a function # 2. then iterate, make a loop, ask user for input in each iteration, immediately check each input first! # 3. store the output of the census maker in to the results list # 4. concatenate the items of the list and print!
| 4.058735
| 4
|
sdk/python/pulumi_aiven/vpc_peering_connection.py
|
pulumi/pulumi-aiven
| 7
|
6629743
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['VpcPeeringConnectionArgs', 'VpcPeeringConnection']
@pulumi.input_type
class VpcPeeringConnectionArgs:
def __init__(__self__, *,
peer_cloud_account: pulumi.Input[str],
peer_vpc: pulumi.Input[str],
vpc_id: pulumi.Input[str],
peer_azure_app_id: Optional[pulumi.Input[str]] = None,
peer_azure_tenant_id: Optional[pulumi.Input[str]] = None,
peer_region: Optional[pulumi.Input[str]] = None,
peer_resource_group: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a VpcPeeringConnection resource.
:param pulumi.Input[str] peer_cloud_account: defines the identifier of the cloud account the VPC is being
peered with.
:param pulumi.Input[str] peer_vpc: defines the identifier or name of the remote VPC.
:param pulumi.Input[str] vpc_id: is the Aiven VPC the peering connection is associated with.
:param pulumi.Input[str] peer_azure_app_id: an Azure app registration id in UUID4 form that is allowed to create a peering to the peer vnet.
:param pulumi.Input[str] peer_azure_tenant_id: an Azure tenant id in UUID4 form.
:param pulumi.Input[str] peer_region: defines the region of the remote VPC if it is not in the same region as Aiven VPC.
:param pulumi.Input[str] peer_resource_group: an Azure resource group name of the peered VPC.
"""
pulumi.set(__self__, "peer_cloud_account", peer_cloud_account)
pulumi.set(__self__, "peer_vpc", peer_vpc)
pulumi.set(__self__, "vpc_id", vpc_id)
if peer_azure_app_id is not None:
pulumi.set(__self__, "peer_azure_app_id", peer_azure_app_id)
if peer_azure_tenant_id is not None:
pulumi.set(__self__, "peer_azure_tenant_id", peer_azure_tenant_id)
if peer_region is not None:
pulumi.set(__self__, "peer_region", peer_region)
if peer_resource_group is not None:
pulumi.set(__self__, "peer_resource_group", peer_resource_group)
@property
@pulumi.getter(name="peerCloudAccount")
def peer_cloud_account(self) -> pulumi.Input[str]:
"""
defines the identifier of the cloud account the VPC is being
peered with.
"""
return pulumi.get(self, "peer_cloud_account")
@peer_cloud_account.setter
def peer_cloud_account(self, value: pulumi.Input[str]):
pulumi.set(self, "peer_cloud_account", value)
@property
@pulumi.getter(name="peerVpc")
def peer_vpc(self) -> pulumi.Input[str]:
"""
defines the identifier or name of the remote VPC.
"""
return pulumi.get(self, "peer_vpc")
@peer_vpc.setter
def peer_vpc(self, value: pulumi.Input[str]):
pulumi.set(self, "peer_vpc", value)
@property
@pulumi.getter(name="vpcId")
def vpc_id(self) -> pulumi.Input[str]:
"""
is the Aiven VPC the peering connection is associated with.
"""
return pulumi.get(self, "vpc_id")
@vpc_id.setter
def vpc_id(self, value: pulumi.Input[str]):
pulumi.set(self, "vpc_id", value)
@property
@pulumi.getter(name="peerAzureAppId")
def peer_azure_app_id(self) -> Optional[pulumi.Input[str]]:
"""
an Azure app registration id in UUID4 form that is allowed to create a peering to the peer vnet.
"""
return pulumi.get(self, "peer_azure_app_id")
@peer_azure_app_id.setter
def peer_azure_app_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "peer_azure_app_id", value)
@property
@pulumi.getter(name="peerAzureTenantId")
def peer_azure_tenant_id(self) -> Optional[pulumi.Input[str]]:
"""
an Azure tenant id in UUID4 form.
"""
return pulumi.get(self, "peer_azure_tenant_id")
@peer_azure_tenant_id.setter
def peer_azure_tenant_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "peer_azure_tenant_id", value)
@property
@pulumi.getter(name="peerRegion")
def peer_region(self) -> Optional[pulumi.Input[str]]:
"""
defines the region of the remote VPC if it is not in the same region as Aiven VPC.
"""
return pulumi.get(self, "peer_region")
@peer_region.setter
def peer_region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "peer_region", value)
@property
@pulumi.getter(name="peerResourceGroup")
def peer_resource_group(self) -> Optional[pulumi.Input[str]]:
"""
an Azure resource group name of the peered VPC.
"""
return pulumi.get(self, "peer_resource_group")
@peer_resource_group.setter
def peer_resource_group(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "peer_resource_group", value)
@pulumi.input_type
class _VpcPeeringConnectionState:
def __init__(__self__, *,
peer_azure_app_id: Optional[pulumi.Input[str]] = None,
peer_azure_tenant_id: Optional[pulumi.Input[str]] = None,
peer_cloud_account: Optional[pulumi.Input[str]] = None,
peer_region: Optional[pulumi.Input[str]] = None,
peer_resource_group: Optional[pulumi.Input[str]] = None,
peer_vpc: Optional[pulumi.Input[str]] = None,
peering_connection_id: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
state_info: Optional[pulumi.Input[Mapping[str, Any]]] = None,
vpc_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering VpcPeeringConnection resources.
:param pulumi.Input[str] peer_azure_app_id: an Azure app registration id in UUID4 form that is allowed to create a peering to the peer vnet.
:param pulumi.Input[str] peer_azure_tenant_id: an Azure tenant id in UUID4 form.
:param pulumi.Input[str] peer_cloud_account: defines the identifier of the cloud account the VPC is being
peered with.
:param pulumi.Input[str] peer_region: defines the region of the remote VPC if it is not in the same region as Aiven VPC.
:param pulumi.Input[str] peer_resource_group: an Azure resource group name of the peered VPC.
:param pulumi.Input[str] peer_vpc: defines the identifier or name of the remote VPC.
:param pulumi.Input[str] peering_connection_id: a cloud provider identifier for the peering connection if available.
:param pulumi.Input[str] state: is the state of the peering connection. This property is computed by Aiven
therefore cannot be set, only read. Where state can be one of: `APPROVED`,
`PENDING_PEER`, `ACTIVE`, `DELETED`, `DELETED_BY_PEER`, `REJECTED_BY_PEER` and
`INVALID_SPECIFICATION`.
:param pulumi.Input[Mapping[str, Any]] state_info: state-specific help or error information.
:param pulumi.Input[str] vpc_id: is the Aiven VPC the peering connection is associated with.
"""
if peer_azure_app_id is not None:
pulumi.set(__self__, "peer_azure_app_id", peer_azure_app_id)
if peer_azure_tenant_id is not None:
pulumi.set(__self__, "peer_azure_tenant_id", peer_azure_tenant_id)
if peer_cloud_account is not None:
pulumi.set(__self__, "peer_cloud_account", peer_cloud_account)
if peer_region is not None:
pulumi.set(__self__, "peer_region", peer_region)
if peer_resource_group is not None:
pulumi.set(__self__, "peer_resource_group", peer_resource_group)
if peer_vpc is not None:
pulumi.set(__self__, "peer_vpc", peer_vpc)
if peering_connection_id is not None:
pulumi.set(__self__, "peering_connection_id", peering_connection_id)
if state is not None:
pulumi.set(__self__, "state", state)
if state_info is not None:
pulumi.set(__self__, "state_info", state_info)
if vpc_id is not None:
pulumi.set(__self__, "vpc_id", vpc_id)
@property
@pulumi.getter(name="peerAzureAppId")
def peer_azure_app_id(self) -> Optional[pulumi.Input[str]]:
"""
an Azure app registration id in UUID4 form that is allowed to create a peering to the peer vnet.
"""
return pulumi.get(self, "peer_azure_app_id")
@peer_azure_app_id.setter
def peer_azure_app_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "peer_azure_app_id", value)
@property
@pulumi.getter(name="peerAzureTenantId")
def peer_azure_tenant_id(self) -> Optional[pulumi.Input[str]]:
"""
an Azure tenant id in UUID4 form.
"""
return pulumi.get(self, "peer_azure_tenant_id")
@peer_azure_tenant_id.setter
def peer_azure_tenant_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "peer_azure_tenant_id", value)
@property
@pulumi.getter(name="peerCloudAccount")
def peer_cloud_account(self) -> Optional[pulumi.Input[str]]:
"""
defines the identifier of the cloud account the VPC is being
peered with.
"""
return pulumi.get(self, "peer_cloud_account")
@peer_cloud_account.setter
def peer_cloud_account(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "peer_cloud_account", value)
@property
@pulumi.getter(name="peerRegion")
def peer_region(self) -> Optional[pulumi.Input[str]]:
"""
defines the region of the remote VPC if it is not in the same region as Aiven VPC.
"""
return pulumi.get(self, "peer_region")
@peer_region.setter
def peer_region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "peer_region", value)
@property
@pulumi.getter(name="peerResourceGroup")
def peer_resource_group(self) -> Optional[pulumi.Input[str]]:
"""
an Azure resource group name of the peered VPC.
"""
return pulumi.get(self, "peer_resource_group")
@peer_resource_group.setter
def peer_resource_group(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "peer_resource_group", value)
@property
@pulumi.getter(name="peerVpc")
def peer_vpc(self) -> Optional[pulumi.Input[str]]:
"""
defines the identifier or name of the remote VPC.
"""
return pulumi.get(self, "peer_vpc")
@peer_vpc.setter
def peer_vpc(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "peer_vpc", value)
@property
@pulumi.getter(name="peeringConnectionId")
def peering_connection_id(self) -> Optional[pulumi.Input[str]]:
"""
a cloud provider identifier for the peering connection if available.
"""
return pulumi.get(self, "peering_connection_id")
@peering_connection_id.setter
def peering_connection_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "peering_connection_id", value)
@property
@pulumi.getter
def state(self) -> Optional[pulumi.Input[str]]:
"""
is the state of the peering connection. This property is computed by Aiven
therefore cannot be set, only read. Where state can be one of: `APPROVED`,
`PENDING_PEER`, `ACTIVE`, `DELETED`, `DELETED_BY_PEER`, `REJECTED_BY_PEER` and
`INVALID_SPECIFICATION`.
"""
return pulumi.get(self, "state")
@state.setter
def state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "state", value)
@property
@pulumi.getter(name="stateInfo")
def state_info(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
state-specific help or error information.
"""
return pulumi.get(self, "state_info")
@state_info.setter
def state_info(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "state_info", value)
@property
@pulumi.getter(name="vpcId")
def vpc_id(self) -> Optional[pulumi.Input[str]]:
"""
is the Aiven VPC the peering connection is associated with.
"""
return pulumi.get(self, "vpc_id")
@vpc_id.setter
def vpc_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vpc_id", value)
class VpcPeeringConnection(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
peer_azure_app_id: Optional[pulumi.Input[str]] = None,
peer_azure_tenant_id: Optional[pulumi.Input[str]] = None,
peer_cloud_account: Optional[pulumi.Input[str]] = None,
peer_region: Optional[pulumi.Input[str]] = None,
peer_resource_group: Optional[pulumi.Input[str]] = None,
peer_vpc: Optional[pulumi.Input[str]] = None,
vpc_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
## # VPC Peering Connection Resource
The VPC Peering Connection resource allows the creation and management of Aiven VPC Peering Connections.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] peer_azure_app_id: an Azure app registration id in UUID4 form that is allowed to create a peering to the peer vnet.
:param pulumi.Input[str] peer_azure_tenant_id: an Azure tenant id in UUID4 form.
:param pulumi.Input[str] peer_cloud_account: defines the identifier of the cloud account the VPC is being
peered with.
:param pulumi.Input[str] peer_region: defines the region of the remote VPC if it is not in the same region as Aiven VPC.
:param pulumi.Input[str] peer_resource_group: an Azure resource group name of the peered VPC.
:param pulumi.Input[str] peer_vpc: defines the identifier or name of the remote VPC.
:param pulumi.Input[str] vpc_id: is the Aiven VPC the peering connection is associated with.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: VpcPeeringConnectionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## # VPC Peering Connection Resource
The VPC Peering Connection resource allows the creation and management of Aiven VPC Peering Connections.
:param str resource_name: The name of the resource.
:param VpcPeeringConnectionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(VpcPeeringConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
peer_azure_app_id: Optional[pulumi.Input[str]] = None,
peer_azure_tenant_id: Optional[pulumi.Input[str]] = None,
peer_cloud_account: Optional[pulumi.Input[str]] = None,
peer_region: Optional[pulumi.Input[str]] = None,
peer_resource_group: Optional[pulumi.Input[str]] = None,
peer_vpc: Optional[pulumi.Input[str]] = None,
vpc_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = VpcPeeringConnectionArgs.__new__(VpcPeeringConnectionArgs)
__props__.__dict__["peer_azure_app_id"] = peer_azure_app_id
__props__.__dict__["peer_azure_tenant_id"] = peer_azure_tenant_id
if peer_cloud_account is None and not opts.urn:
raise TypeError("Missing required property 'peer_cloud_account'")
__props__.__dict__["peer_cloud_account"] = peer_cloud_account
__props__.__dict__["peer_region"] = peer_region
__props__.__dict__["peer_resource_group"] = peer_resource_group
if peer_vpc is None and not opts.urn:
raise TypeError("Missing required property 'peer_vpc'")
__props__.__dict__["peer_vpc"] = peer_vpc
if vpc_id is None and not opts.urn:
raise TypeError("Missing required property 'vpc_id'")
__props__.__dict__["vpc_id"] = vpc_id
__props__.__dict__["peering_connection_id"] = None
__props__.__dict__["state"] = None
__props__.__dict__["state_info"] = None
super(VpcPeeringConnection, __self__).__init__(
'aiven:index/vpcPeeringConnection:VpcPeeringConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
peer_azure_app_id: Optional[pulumi.Input[str]] = None,
peer_azure_tenant_id: Optional[pulumi.Input[str]] = None,
peer_cloud_account: Optional[pulumi.Input[str]] = None,
peer_region: Optional[pulumi.Input[str]] = None,
peer_resource_group: Optional[pulumi.Input[str]] = None,
peer_vpc: Optional[pulumi.Input[str]] = None,
peering_connection_id: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
state_info: Optional[pulumi.Input[Mapping[str, Any]]] = None,
vpc_id: Optional[pulumi.Input[str]] = None) -> 'VpcPeeringConnection':
"""
Get an existing VpcPeeringConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] peer_azure_app_id: an Azure app registration id in UUID4 form that is allowed to create a peering to the peer vnet.
:param pulumi.Input[str] peer_azure_tenant_id: an Azure tenant id in UUID4 form.
:param pulumi.Input[str] peer_cloud_account: defines the identifier of the cloud account the VPC is being
peered with.
:param pulumi.Input[str] peer_region: defines the region of the remote VPC if it is not in the same region as Aiven VPC.
:param pulumi.Input[str] peer_resource_group: an Azure resource group name of the peered VPC.
:param pulumi.Input[str] peer_vpc: defines the identifier or name of the remote VPC.
:param pulumi.Input[str] peering_connection_id: a cloud provider identifier for the peering connection if available.
:param pulumi.Input[str] state: is the state of the peering connection. This property is computed by Aiven
therefore cannot be set, only read. Where state can be one of: `APPROVED`,
`PENDING_PEER`, `ACTIVE`, `DELETED`, `DELETED_BY_PEER`, `REJECTED_BY_PEER` and
`INVALID_SPECIFICATION`.
:param pulumi.Input[Mapping[str, Any]] state_info: state-specific help or error information.
:param pulumi.Input[str] vpc_id: is the Aiven VPC the peering connection is associated with.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _VpcPeeringConnectionState.__new__(_VpcPeeringConnectionState)
__props__.__dict__["peer_azure_app_id"] = peer_azure_app_id
__props__.__dict__["peer_azure_tenant_id"] = peer_azure_tenant_id
__props__.__dict__["peer_cloud_account"] = peer_cloud_account
__props__.__dict__["peer_region"] = peer_region
__props__.__dict__["peer_resource_group"] = peer_resource_group
__props__.__dict__["peer_vpc"] = peer_vpc
__props__.__dict__["peering_connection_id"] = peering_connection_id
__props__.__dict__["state"] = state
__props__.__dict__["state_info"] = state_info
__props__.__dict__["vpc_id"] = vpc_id
return VpcPeeringConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="peerAzureAppId")
def peer_azure_app_id(self) -> pulumi.Output[Optional[str]]:
"""
an Azure app registration id in UUID4 form that is allowed to create a peering to the peer vnet.
"""
return pulumi.get(self, "peer_azure_app_id")
@property
@pulumi.getter(name="peerAzureTenantId")
def peer_azure_tenant_id(self) -> pulumi.Output[Optional[str]]:
"""
an Azure tenant id in UUID4 form.
"""
return pulumi.get(self, "peer_azure_tenant_id")
@property
@pulumi.getter(name="peerCloudAccount")
def peer_cloud_account(self) -> pulumi.Output[str]:
"""
defines the identifier of the cloud account the VPC is being
peered with.
"""
return pulumi.get(self, "peer_cloud_account")
@property
@pulumi.getter(name="peerRegion")
def peer_region(self) -> pulumi.Output[Optional[str]]:
"""
defines the region of the remote VPC if it is not in the same region as Aiven VPC.
"""
return pulumi.get(self, "peer_region")
@property
@pulumi.getter(name="peerResourceGroup")
def peer_resource_group(self) -> pulumi.Output[Optional[str]]:
"""
an Azure resource group name of the peered VPC.
"""
return pulumi.get(self, "peer_resource_group")
@property
@pulumi.getter(name="peerVpc")
def peer_vpc(self) -> pulumi.Output[str]:
"""
defines the identifier or name of the remote VPC.
"""
return pulumi.get(self, "peer_vpc")
@property
@pulumi.getter(name="peeringConnectionId")
def peering_connection_id(self) -> pulumi.Output[str]:
"""
a cloud provider identifier for the peering connection if available.
"""
return pulumi.get(self, "peering_connection_id")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
is the state of the peering connection. This property is computed by Aiven
therefore cannot be set, only read. Where state can be one of: `APPROVED`,
`PENDING_PEER`, `ACTIVE`, `DELETED`, `DELETED_BY_PEER`, `REJECTED_BY_PEER` and
`INVALID_SPECIFICATION`.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="stateInfo")
def state_info(self) -> pulumi.Output[Mapping[str, Any]]:
"""
state-specific help or error information.
"""
return pulumi.get(self, "state_info")
@property
@pulumi.getter(name="vpcId")
def vpc_id(self) -> pulumi.Output[str]:
"""
is the Aiven VPC the peering connection is associated with.
"""
return pulumi.get(self, "vpc_id")
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['VpcPeeringConnectionArgs', 'VpcPeeringConnection']
@pulumi.input_type
class VpcPeeringConnectionArgs:
def __init__(__self__, *,
peer_cloud_account: pulumi.Input[str],
peer_vpc: pulumi.Input[str],
vpc_id: pulumi.Input[str],
peer_azure_app_id: Optional[pulumi.Input[str]] = None,
peer_azure_tenant_id: Optional[pulumi.Input[str]] = None,
peer_region: Optional[pulumi.Input[str]] = None,
peer_resource_group: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a VpcPeeringConnection resource.
:param pulumi.Input[str] peer_cloud_account: defines the identifier of the cloud account the VPC is being
peered with.
:param pulumi.Input[str] peer_vpc: defines the identifier or name of the remote VPC.
:param pulumi.Input[str] vpc_id: is the Aiven VPC the peering connection is associated with.
:param pulumi.Input[str] peer_azure_app_id: an Azure app registration id in UUID4 form that is allowed to create a peering to the peer vnet.
:param pulumi.Input[str] peer_azure_tenant_id: an Azure tenant id in UUID4 form.
:param pulumi.Input[str] peer_region: defines the region of the remote VPC if it is not in the same region as Aiven VPC.
:param pulumi.Input[str] peer_resource_group: an Azure resource group name of the peered VPC.
"""
pulumi.set(__self__, "peer_cloud_account", peer_cloud_account)
pulumi.set(__self__, "peer_vpc", peer_vpc)
pulumi.set(__self__, "vpc_id", vpc_id)
if peer_azure_app_id is not None:
pulumi.set(__self__, "peer_azure_app_id", peer_azure_app_id)
if peer_azure_tenant_id is not None:
pulumi.set(__self__, "peer_azure_tenant_id", peer_azure_tenant_id)
if peer_region is not None:
pulumi.set(__self__, "peer_region", peer_region)
if peer_resource_group is not None:
pulumi.set(__self__, "peer_resource_group", peer_resource_group)
@property
@pulumi.getter(name="peerCloudAccount")
def peer_cloud_account(self) -> pulumi.Input[str]:
"""
defines the identifier of the cloud account the VPC is being
peered with.
"""
return pulumi.get(self, "peer_cloud_account")
@peer_cloud_account.setter
def peer_cloud_account(self, value: pulumi.Input[str]):
pulumi.set(self, "peer_cloud_account", value)
@property
@pulumi.getter(name="peerVpc")
def peer_vpc(self) -> pulumi.Input[str]:
"""
defines the identifier or name of the remote VPC.
"""
return pulumi.get(self, "peer_vpc")
@peer_vpc.setter
def peer_vpc(self, value: pulumi.Input[str]):
pulumi.set(self, "peer_vpc", value)
@property
@pulumi.getter(name="vpcId")
def vpc_id(self) -> pulumi.Input[str]:
"""
is the Aiven VPC the peering connection is associated with.
"""
return pulumi.get(self, "vpc_id")
@vpc_id.setter
def vpc_id(self, value: pulumi.Input[str]):
pulumi.set(self, "vpc_id", value)
@property
@pulumi.getter(name="peerAzureAppId")
def peer_azure_app_id(self) -> Optional[pulumi.Input[str]]:
"""
an Azure app registration id in UUID4 form that is allowed to create a peering to the peer vnet.
"""
return pulumi.get(self, "peer_azure_app_id")
@peer_azure_app_id.setter
def peer_azure_app_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "peer_azure_app_id", value)
@property
@pulumi.getter(name="peerAzureTenantId")
def peer_azure_tenant_id(self) -> Optional[pulumi.Input[str]]:
"""
an Azure tenant id in UUID4 form.
"""
return pulumi.get(self, "peer_azure_tenant_id")
@peer_azure_tenant_id.setter
def peer_azure_tenant_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "peer_azure_tenant_id", value)
@property
@pulumi.getter(name="peerRegion")
def peer_region(self) -> Optional[pulumi.Input[str]]:
"""
defines the region of the remote VPC if it is not in the same region as Aiven VPC.
"""
return pulumi.get(self, "peer_region")
@peer_region.setter
def peer_region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "peer_region", value)
@property
@pulumi.getter(name="peerResourceGroup")
def peer_resource_group(self) -> Optional[pulumi.Input[str]]:
"""
an Azure resource group name of the peered VPC.
"""
return pulumi.get(self, "peer_resource_group")
@peer_resource_group.setter
def peer_resource_group(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "peer_resource_group", value)
# NOTE: this class was generated by the Pulumi Terraform Bridge (tfgen);
# do not edit by hand -- manual changes will be lost on regeneration.
@pulumi.input_type
class _VpcPeeringConnectionState:
    # State container used when looking up or importing an existing
    # VpcPeeringConnection; every field is optional because a lookup may
    # filter on any subset of properties.
    def __init__(__self__, *,
                 peer_azure_app_id: Optional[pulumi.Input[str]] = None,
                 peer_azure_tenant_id: Optional[pulumi.Input[str]] = None,
                 peer_cloud_account: Optional[pulumi.Input[str]] = None,
                 peer_region: Optional[pulumi.Input[str]] = None,
                 peer_resource_group: Optional[pulumi.Input[str]] = None,
                 peer_vpc: Optional[pulumi.Input[str]] = None,
                 peering_connection_id: Optional[pulumi.Input[str]] = None,
                 state: Optional[pulumi.Input[str]] = None,
                 state_info: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 vpc_id: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering VpcPeeringConnection resources.
        :param pulumi.Input[str] peer_azure_app_id: an Azure app registration id in UUID4 form that is allowed to create a peering to the peer vnet.
        :param pulumi.Input[str] peer_azure_tenant_id: an Azure tenant id in UUID4 form.
        :param pulumi.Input[str] peer_cloud_account: defines the identifier of the cloud account the VPC is being
               peered with.
        :param pulumi.Input[str] peer_region: defines the region of the remote VPC if it is not in the same region as Aiven VPC.
        :param pulumi.Input[str] peer_resource_group: an Azure resource group name of the peered VPC.
        :param pulumi.Input[str] peer_vpc: defines the identifier or name of the remote VPC.
        :param pulumi.Input[str] peering_connection_id: a cloud provider identifier for the peering connection if available.
        :param pulumi.Input[str] state: is the state of the peering connection. This property is computed by Aiven
               therefore cannot be set, only read. Where state can be one of: `APPROVED`,
               `PENDING_PEER`, `ACTIVE`, `DELETED`, `DELETED_BY_PEER`, `REJECTED_BY_PEER` and
               `INVALID_SPECIFICATION`.
        :param pulumi.Input[Mapping[str, Any]] state_info: state-specific help or error information.
        :param pulumi.Input[str] vpc_id: is the Aiven VPC the peering connection is associated with.
        """
        # Only record fields the caller actually supplied, so unset state keys
        # remain absent rather than being stored as explicit Nones.
        if peer_azure_app_id is not None:
            pulumi.set(__self__, "peer_azure_app_id", peer_azure_app_id)
        if peer_azure_tenant_id is not None:
            pulumi.set(__self__, "peer_azure_tenant_id", peer_azure_tenant_id)
        if peer_cloud_account is not None:
            pulumi.set(__self__, "peer_cloud_account", peer_cloud_account)
        if peer_region is not None:
            pulumi.set(__self__, "peer_region", peer_region)
        if peer_resource_group is not None:
            pulumi.set(__self__, "peer_resource_group", peer_resource_group)
        if peer_vpc is not None:
            pulumi.set(__self__, "peer_vpc", peer_vpc)
        if peering_connection_id is not None:
            pulumi.set(__self__, "peering_connection_id", peering_connection_id)
        if state is not None:
            pulumi.set(__self__, "state", state)
        if state_info is not None:
            pulumi.set(__self__, "state_info", state_info)
        if vpc_id is not None:
            pulumi.set(__self__, "vpc_id", vpc_id)
    # Generated property accessors: each getter/setter pair simply proxies the
    # snake_case Python name onto the camelCase provider-schema name above it.
    @property
    @pulumi.getter(name="peerAzureAppId")
    def peer_azure_app_id(self) -> Optional[pulumi.Input[str]]:
        """
        an Azure app registration id in UUID4 form that is allowed to create a peering to the peer vnet.
        """
        return pulumi.get(self, "peer_azure_app_id")
    @peer_azure_app_id.setter
    def peer_azure_app_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "peer_azure_app_id", value)
    @property
    @pulumi.getter(name="peerAzureTenantId")
    def peer_azure_tenant_id(self) -> Optional[pulumi.Input[str]]:
        """
        an Azure tenant id in UUID4 form.
        """
        return pulumi.get(self, "peer_azure_tenant_id")
    @peer_azure_tenant_id.setter
    def peer_azure_tenant_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "peer_azure_tenant_id", value)
    @property
    @pulumi.getter(name="peerCloudAccount")
    def peer_cloud_account(self) -> Optional[pulumi.Input[str]]:
        """
        defines the identifier of the cloud account the VPC is being
        peered with.
        """
        return pulumi.get(self, "peer_cloud_account")
    @peer_cloud_account.setter
    def peer_cloud_account(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "peer_cloud_account", value)
    @property
    @pulumi.getter(name="peerRegion")
    def peer_region(self) -> Optional[pulumi.Input[str]]:
        """
        defines the region of the remote VPC if it is not in the same region as Aiven VPC.
        """
        return pulumi.get(self, "peer_region")
    @peer_region.setter
    def peer_region(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "peer_region", value)
    @property
    @pulumi.getter(name="peerResourceGroup")
    def peer_resource_group(self) -> Optional[pulumi.Input[str]]:
        """
        an Azure resource group name of the peered VPC.
        """
        return pulumi.get(self, "peer_resource_group")
    @peer_resource_group.setter
    def peer_resource_group(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "peer_resource_group", value)
    @property
    @pulumi.getter(name="peerVpc")
    def peer_vpc(self) -> Optional[pulumi.Input[str]]:
        """
        defines the identifier or name of the remote VPC.
        """
        return pulumi.get(self, "peer_vpc")
    @peer_vpc.setter
    def peer_vpc(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "peer_vpc", value)
    @property
    @pulumi.getter(name="peeringConnectionId")
    def peering_connection_id(self) -> Optional[pulumi.Input[str]]:
        """
        a cloud provider identifier for the peering connection if available.
        """
        return pulumi.get(self, "peering_connection_id")
    @peering_connection_id.setter
    def peering_connection_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "peering_connection_id", value)
    @property
    @pulumi.getter
    def state(self) -> Optional[pulumi.Input[str]]:
        """
        is the state of the peering connection. This property is computed by Aiven
        therefore cannot be set, only read. Where state can be one of: `APPROVED`,
        `PENDING_PEER`, `ACTIVE`, `DELETED`, `DELETED_BY_PEER`, `REJECTED_BY_PEER` and
        `INVALID_SPECIFICATION`.
        """
        return pulumi.get(self, "state")
    @state.setter
    def state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "state", value)
    @property
    @pulumi.getter(name="stateInfo")
    def state_info(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        state-specific help or error information.
        """
        return pulumi.get(self, "state_info")
    @state_info.setter
    def state_info(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "state_info", value)
    @property
    @pulumi.getter(name="vpcId")
    def vpc_id(self) -> Optional[pulumi.Input[str]]:
        """
        is the Aiven VPC the peering connection is associated with.
        """
        return pulumi.get(self, "vpc_id")
    @vpc_id.setter
    def vpc_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "vpc_id", value)
# NOTE: this class was generated by the Pulumi Terraform Bridge (tfgen);
# do not edit by hand -- manual changes will be lost on regeneration.
class VpcPeeringConnection(pulumi.CustomResource):
    # The two @overload stubs below exist only for type checkers; the real
    # runtime constructor is the un-decorated __init__ further down.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 peer_azure_app_id: Optional[pulumi.Input[str]] = None,
                 peer_azure_tenant_id: Optional[pulumi.Input[str]] = None,
                 peer_cloud_account: Optional[pulumi.Input[str]] = None,
                 peer_region: Optional[pulumi.Input[str]] = None,
                 peer_resource_group: Optional[pulumi.Input[str]] = None,
                 peer_vpc: Optional[pulumi.Input[str]] = None,
                 vpc_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        ## # VPC Peering Connection Resource
        The VPC Peering Connection resource allows the creation and management of Aiven VPC Peering Connections.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] peer_azure_app_id: an Azure app registration id in UUID4 form that is allowed to create a peering to the peer vnet.
        :param pulumi.Input[str] peer_azure_tenant_id: an Azure tenant id in UUID4 form.
        :param pulumi.Input[str] peer_cloud_account: defines the identifier of the cloud account the VPC is being
               peered with.
        :param pulumi.Input[str] peer_region: defines the region of the remote VPC if it is not in the same region as Aiven VPC.
        :param pulumi.Input[str] peer_resource_group: an Azure resource group name of the peered VPC.
        :param pulumi.Input[str] peer_vpc: defines the identifier or name of the remote VPC.
        :param pulumi.Input[str] vpc_id: is the Aiven VPC the peering connection is associated with.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: VpcPeeringConnectionArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        ## # VPC Peering Connection Resource
        The VPC Peering Connection resource allows the creation and management of Aiven VPC Peering Connections.
        :param str resource_name: The name of the resource.
        :param VpcPeeringConnectionArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime dispatcher for the overloads above: decide whether the caller
        # passed a VpcPeeringConnectionArgs object or plain keyword arguments,
        # then forward to _internal_init either way.
        resource_args, opts = _utilities.get_resource_args_opts(VpcPeeringConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 peer_azure_app_id: Optional[pulumi.Input[str]] = None,
                 peer_azure_tenant_id: Optional[pulumi.Input[str]] = None,
                 peer_cloud_account: Optional[pulumi.Input[str]] = None,
                 peer_region: Optional[pulumi.Input[str]] = None,
                 peer_resource_group: Optional[pulumi.Input[str]] = None,
                 peer_vpc: Optional[pulumi.Input[str]] = None,
                 vpc_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        # Normalize and validate resource options before registration.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means "adopt an existing resource": properties then come
        # from __props__ supplied by get(), not from the keyword arguments.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = VpcPeeringConnectionArgs.__new__(VpcPeeringConnectionArgs)
            __props__.__dict__["peer_azure_app_id"] = peer_azure_app_id
            __props__.__dict__["peer_azure_tenant_id"] = peer_azure_tenant_id
            # Required unless the resource is being rehydrated from a URN.
            if peer_cloud_account is None and not opts.urn:
                raise TypeError("Missing required property 'peer_cloud_account'")
            __props__.__dict__["peer_cloud_account"] = peer_cloud_account
            __props__.__dict__["peer_region"] = peer_region
            __props__.__dict__["peer_resource_group"] = peer_resource_group
            if peer_vpc is None and not opts.urn:
                raise TypeError("Missing required property 'peer_vpc'")
            __props__.__dict__["peer_vpc"] = peer_vpc
            if vpc_id is None and not opts.urn:
                raise TypeError("Missing required property 'vpc_id'")
            __props__.__dict__["vpc_id"] = vpc_id
            # Output-only properties are initialized to None; the provider
            # fills them in after the resource is created.
            __props__.__dict__["peering_connection_id"] = None
            __props__.__dict__["state"] = None
            __props__.__dict__["state_info"] = None
        super(VpcPeeringConnection, __self__).__init__(
            'aiven:index/vpcPeeringConnection:VpcPeeringConnection',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            peer_azure_app_id: Optional[pulumi.Input[str]] = None,
            peer_azure_tenant_id: Optional[pulumi.Input[str]] = None,
            peer_cloud_account: Optional[pulumi.Input[str]] = None,
            peer_region: Optional[pulumi.Input[str]] = None,
            peer_resource_group: Optional[pulumi.Input[str]] = None,
            peer_vpc: Optional[pulumi.Input[str]] = None,
            peering_connection_id: Optional[pulumi.Input[str]] = None,
            state: Optional[pulumi.Input[str]] = None,
            state_info: Optional[pulumi.Input[Mapping[str, Any]]] = None,
            vpc_id: Optional[pulumi.Input[str]] = None) -> 'VpcPeeringConnection':
        """
        Get an existing VpcPeeringConnection resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] peer_azure_app_id: an Azure app registration id in UUID4 form that is allowed to create a peering to the peer vnet.
        :param pulumi.Input[str] peer_azure_tenant_id: an Azure tenant id in UUID4 form.
        :param pulumi.Input[str] peer_cloud_account: defines the identifier of the cloud account the VPC is being
               peered with.
        :param pulumi.Input[str] peer_region: defines the region of the remote VPC if it is not in the same region as Aiven VPC.
        :param pulumi.Input[str] peer_resource_group: an Azure resource group name of the peered VPC.
        :param pulumi.Input[str] peer_vpc: defines the identifier or name of the remote VPC.
        :param pulumi.Input[str] peering_connection_id: a cloud provider identifier for the peering connection if available.
        :param pulumi.Input[str] state: is the state of the peering connection. This property is computed by Aiven
               therefore cannot be set, only read. Where state can be one of: `APPROVED`,
               `PENDING_PEER`, `ACTIVE`, `DELETED`, `DELETED_BY_PEER`, `REJECTED_BY_PEER` and
               `INVALID_SPECIFICATION`.
        :param pulumi.Input[Mapping[str, Any]] state_info: state-specific help or error information.
        :param pulumi.Input[str] vpc_id: is the Aiven VPC the peering connection is associated with.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _VpcPeeringConnectionState.__new__(_VpcPeeringConnectionState)
        __props__.__dict__["peer_azure_app_id"] = peer_azure_app_id
        __props__.__dict__["peer_azure_tenant_id"] = peer_azure_tenant_id
        __props__.__dict__["peer_cloud_account"] = peer_cloud_account
        __props__.__dict__["peer_region"] = peer_region
        __props__.__dict__["peer_resource_group"] = peer_resource_group
        __props__.__dict__["peer_vpc"] = peer_vpc
        __props__.__dict__["peering_connection_id"] = peering_connection_id
        __props__.__dict__["state"] = state
        __props__.__dict__["state_info"] = state_info
        __props__.__dict__["vpc_id"] = vpc_id
        return VpcPeeringConnection(resource_name, opts=opts, __props__=__props__)
    # Read-only output properties resolved by the Aiven provider at runtime.
    @property
    @pulumi.getter(name="peerAzureAppId")
    def peer_azure_app_id(self) -> pulumi.Output[Optional[str]]:
        """
        an Azure app registration id in UUID4 form that is allowed to create a peering to the peer vnet.
        """
        return pulumi.get(self, "peer_azure_app_id")
    @property
    @pulumi.getter(name="peerAzureTenantId")
    def peer_azure_tenant_id(self) -> pulumi.Output[Optional[str]]:
        """
        an Azure tenant id in UUID4 form.
        """
        return pulumi.get(self, "peer_azure_tenant_id")
    @property
    @pulumi.getter(name="peerCloudAccount")
    def peer_cloud_account(self) -> pulumi.Output[str]:
        """
        defines the identifier of the cloud account the VPC is being
        peered with.
        """
        return pulumi.get(self, "peer_cloud_account")
    @property
    @pulumi.getter(name="peerRegion")
    def peer_region(self) -> pulumi.Output[Optional[str]]:
        """
        defines the region of the remote VPC if it is not in the same region as Aiven VPC.
        """
        return pulumi.get(self, "peer_region")
    @property
    @pulumi.getter(name="peerResourceGroup")
    def peer_resource_group(self) -> pulumi.Output[Optional[str]]:
        """
        an Azure resource group name of the peered VPC.
        """
        return pulumi.get(self, "peer_resource_group")
    @property
    @pulumi.getter(name="peerVpc")
    def peer_vpc(self) -> pulumi.Output[str]:
        """
        defines the identifier or name of the remote VPC.
        """
        return pulumi.get(self, "peer_vpc")
    @property
    @pulumi.getter(name="peeringConnectionId")
    def peering_connection_id(self) -> pulumi.Output[str]:
        """
        a cloud provider identifier for the peering connection if available.
        """
        return pulumi.get(self, "peering_connection_id")
    @property
    @pulumi.getter
    def state(self) -> pulumi.Output[str]:
        """
        is the state of the peering connection. This property is computed by Aiven
        therefore cannot be set, only read. Where state can be one of: `APPROVED`,
        `PENDING_PEER`, `ACTIVE`, `DELETED`, `DELETED_BY_PEER`, `REJECTED_BY_PEER` and
        `INVALID_SPECIFICATION`.
        """
        return pulumi.get(self, "state")
    @property
    @pulumi.getter(name="stateInfo")
    def state_info(self) -> pulumi.Output[Mapping[str, Any]]:
        """
        state-specific help or error information.
        """
        return pulumi.get(self, "state_info")
    @property
    @pulumi.getter(name="vpcId")
    def vpc_id(self) -> pulumi.Output[str]:
        """
        is the Aiven VPC the peering connection is associated with.
        """
        return pulumi.get(self, "vpc_id")
|
en
| 0.725342
|
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** The set of arguments for constructing a VpcPeeringConnection resource. :param pulumi.Input[str] peer_cloud_account: defines the identifier of the cloud account the VPC is being peered with. :param pulumi.Input[str] peer_vpc: defines the identifier or name of the remote VPC. :param pulumi.Input[str] vpc_id: is the Aiven VPC the peering connection is associated with. :param pulumi.Input[str] peer_azure_app_id: an Azure app registration id in UUID4 form that is allowed to create a peering to the peer vnet. :param pulumi.Input[str] peer_azure_tenant_id: an Azure tenant id in UUID4 form. :param pulumi.Input[str] peer_region: defines the region of the remote VPC if it is not in the same region as Aiven VPC. :param pulumi.Input[str] peer_resource_group: an Azure resource group name of the peered VPC. defines the identifier of the cloud account the VPC is being peered with. defines the identifier or name of the remote VPC. is the Aiven VPC the peering connection is associated with. an Azure app registration id in UUID4 form that is allowed to create a peering to the peer vnet. an Azure tenant id in UUID4 form. defines the region of the remote VPC if it is not in the same region as Aiven VPC. an Azure resource group name of the peered VPC. Input properties used for looking up and filtering VpcPeeringConnection resources. :param pulumi.Input[str] peer_azure_app_id: an Azure app registration id in UUID4 form that is allowed to create a peering to the peer vnet. :param pulumi.Input[str] peer_azure_tenant_id: an Azure tenant id in UUID4 form. :param pulumi.Input[str] peer_cloud_account: defines the identifier of the cloud account the VPC is being peered with. :param pulumi.Input[str] peer_region: defines the region of the remote VPC if it is not in the same region as Aiven VPC. 
:param pulumi.Input[str] peer_resource_group: an Azure resource group name of the peered VPC. :param pulumi.Input[str] peer_vpc: defines the identifier or name of the remote VPC. :param pulumi.Input[str] peering_connection_id: a cloud provider identifier for the peering connection if available. :param pulumi.Input[str] state: is the state of the peering connection. This property is computed by Aiven therefore cannot be set, only read. Where state can be one of: `APPROVED`, `PENDING_PEER`, `ACTIVE`, `DELETED`, `DELETED_BY_PEER`, `REJECTED_BY_PEER` and `INVALID_SPECIFICATION`. :param pulumi.Input[Mapping[str, Any]] state_info: state-specific help or error information. :param pulumi.Input[str] vpc_id: is the Aiven VPC the peering connection is associated with. an Azure app registration id in UUID4 form that is allowed to create a peering to the peer vnet. an Azure tenant id in UUID4 form. defines the identifier of the cloud account the VPC is being peered with. defines the region of the remote VPC if it is not in the same region as Aiven VPC. an Azure resource group name of the peered VPC. defines the identifier or name of the remote VPC. a cloud provider identifier for the peering connection if available. is the state of the peering connection. This property is computed by Aiven therefore cannot be set, only read. Where state can be one of: `APPROVED`, `PENDING_PEER`, `ACTIVE`, `DELETED`, `DELETED_BY_PEER`, `REJECTED_BY_PEER` and `INVALID_SPECIFICATION`. state-specific help or error information. is the Aiven VPC the peering connection is associated with. ## # VPC Peering Connection Resource The VPC Peering Connection resource allows the creation and management of Aiven VPC Peering Connections. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] peer_azure_app_id: an Azure app registration id in UUID4 form that is allowed to create a peering to the peer vnet. 
:param pulumi.Input[str] peer_azure_tenant_id: an Azure tenant id in UUID4 form. :param pulumi.Input[str] peer_cloud_account: defines the identifier of the cloud account the VPC is being peered with. :param pulumi.Input[str] peer_region: defines the region of the remote VPC if it is not in the same region as Aiven VPC. :param pulumi.Input[str] peer_resource_group: an Azure resource group name of the peered VPC. :param pulumi.Input[str] peer_vpc: defines the identifier or name of the remote VPC. :param pulumi.Input[str] vpc_id: is the Aiven VPC the peering connection is associated with. ## # VPC Peering Connection Resource The VPC Peering Connection resource allows the creation and management of Aiven VPC Peering Connections. :param str resource_name: The name of the resource. :param VpcPeeringConnectionArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. Get an existing VpcPeeringConnection resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] peer_azure_app_id: an Azure app registration id in UUID4 form that is allowed to create a peering to the peer vnet. :param pulumi.Input[str] peer_azure_tenant_id: an Azure tenant id in UUID4 form. :param pulumi.Input[str] peer_cloud_account: defines the identifier of the cloud account the VPC is being peered with. :param pulumi.Input[str] peer_region: defines the region of the remote VPC if it is not in the same region as Aiven VPC. :param pulumi.Input[str] peer_resource_group: an Azure resource group name of the peered VPC. :param pulumi.Input[str] peer_vpc: defines the identifier or name of the remote VPC. 
:param pulumi.Input[str] peering_connection_id: a cloud provider identifier for the peering connection if available. :param pulumi.Input[str] state: is the state of the peering connection. This property is computed by Aiven therefore cannot be set, only read. Where state can be one of: `APPROVED`, `PENDING_PEER`, `ACTIVE`, `DELETED`, `DELETED_BY_PEER`, `REJECTED_BY_PEER` and `INVALID_SPECIFICATION`. :param pulumi.Input[Mapping[str, Any]] state_info: state-specific help or error information. :param pulumi.Input[str] vpc_id: is the Aiven VPC the peering connection is associated with. an Azure app registration id in UUID4 form that is allowed to create a peering to the peer vnet. an Azure tenant id in UUID4 form. defines the identifier of the cloud account the VPC is being peered with. defines the region of the remote VPC if it is not in the same region as Aiven VPC. an Azure resource group name of the peered VPC. defines the identifier or name of the remote VPC. a cloud provider identifier for the peering connection if available. is the state of the peering connection. This property is computed by Aiven therefore cannot be set, only read. Where state can be one of: `APPROVED`, `PENDING_PEER`, `ACTIVE`, `DELETED`, `DELETED_BY_PEER`, `REJECTED_BY_PEER` and `INVALID_SPECIFICATION`. state-specific help or error information. is the Aiven VPC the peering connection is associated with.
| 1.923163
| 2
|
TMC_IWAE_plots.py
|
LinuNils/TMC_reproduced
| 0
|
6629744
|
<filename>TMC_IWAE_plots.py
import numpy as np
import matplotlib.pyplot as plt
import csv
plt.rcParams.update({'font.size': 14})
from argparse import ArgumentParser
"""
For the Non-factorized small models
"""
iwae_nf_s_loss = np.load('/Test Results/IWAE_model_non_fac_small_k_20_iwae_obj/tot_obj_loss_k_20_non_fac_small.npy')
tmc_nf_s_loss = np.load('/Test Results/TMC_model_non_fac_small_k_20_objective_TMC/tot_obj_loss_k_20_non_fac_small.npy')
with open('nfs_iwa_std.csv', 'r') as f:
temp = csv.reader('nfs_iwa_std.csv', delimiter=',')
original_tmc_nf_s_loss = np.array(temp)
print(original_tmc_nf_s_loss)
mean_iwae_nf_s = np.mean(iwae_nf_s_loss[-50:])
mean_tmc_nf_s = np.mean(tmc_nf_s_loss[-50:])
std_iwae_nf_s = np.std(iwae_nf_s_loss[-50:])
std_tmc_nf_s = np.std(tmc_nf_s_loss[-50:])
print('---------------------------------------------------')
print('Mean loss for the last 50 epochs on IWAE small NF: ', -mean_iwae_nf_s)
print('StdDev for the last 50 epochs on IWAE small NF: ', std_iwae_nf_s)
print('Mean loss for the last 50 epochs on TMC small NF: ', -mean_tmc_nf_s)
print('StdDev for the last 50 epochs on TMC large NF: ', std_tmc_nf_s)
print('---------------------------------------------------')
x_ax_data = np.arange(iwae_nf_s_loss.shape[-1])
fig = plt.figure()
plt.plot(x_ax_data, -iwae_nf_s_loss, x_ax_data, -tmc_nf_s_loss)
plt.legend(['IWAE test loss on IWAE obj.', 'TMC test loss on TMC obj.'], loc='upper right')
plt.xlabel('Epochs')
plt.ylim(-100, -90)
plt.xlim(1, 1200)
plt.ylabel('Objective value')
plt.title('Non-factorized small models evaluated on IWAE/TMC objective')
plt.show()
"""
For the Non-factorized large models
"""
iwae_nf_l_loss = np.load('/Test Results/IWAE_model_non_fac_large_k_20_iwae_obj/tot_obj_loss_k_20_non_fac_large.npy')
tmc_nf_l_loss = np.load('/Test Results/TMC_model_non_fac_large_k_20_objective_TMC/tot_obj_loss_k_20_non_fac_large.npy')
mean_iwae_nf_l = np.mean(iwae_nf_l_loss[-50:])
mean_tmc_nf_l = np.mean(tmc_nf_l_loss[-50:])
std_iwae_nf_l = np.std(iwae_nf_l_loss[-50:])
std_tmc_nf_l = np.std(tmc_nf_l_loss[-50:])
print('---------------------------------------------------')
print('Mean loss for the last 50 epochs on IWAE large NF: ', -mean_iwae_nf_l)
print('StdDev for the last 50 epochs on IWAE large NF: ', std_iwae_nf_l)
print('Mean loss for the last 50 epochs on TMC large NF: ', -mean_tmc_nf_l)
print('StdDev for the last 50 epochs on TMC large NF: ', std_tmc_nf_l)
print('---------------------------------------------------')
fig_2 = plt.figure()
plt.plot(x_ax_data, -iwae_nf_l_loss, x_ax_data, -tmc_nf_l_loss)
plt.legend(['IWAE test loss on IWAE obj.', 'TMC test loss on TMC obj.'], loc='upper right')
plt.xlabel('Epochs')
plt.ylim(-100, -90)
plt.xlim(1, 1200)
plt.ylabel('Objective value')
plt.title('Non-factorized large models evaluated on IWAE/TMC objective')
plt.show()
"""
For the hyper parameter search
"""
# TODO
# TODO Will need several subplots here
# TODO we will have 4 different k's and 5 different learning rates
# TODO so what should we have as structure? 4 subplots with 5 curves in each?
x_hp_data = np.arange(500)
# # for k=1
tmc_1_lr1 = np.load('/Test Results/Hyper parameter search/k_1/TMC_model_hyp_param_srch_non_fac_small_k_1_learn_rate_0.001/TMC_hyp_param_srch_tot_obj_loss_k_1_non_fac_small_lr_0.001.npy') # lr1 = 1e-3
tmc_1_lr2 = np.load('/Test Results/Hyper parameter search/k_1/TMC_model_hyp_param_srch_non_fac_small_k_1_learn_rate_0.000101/TMC_hyp_param_srch_tot_obj_loss_k_1_non_fac_small_lr_0.000101.npy') # lr1 = 1e-4
tmc_1_lr3 = np.load('/Test Results/Hyper parameter search/k_1/TMC_model_hyp_param_srch_non_fac_small_k_1_learn_rate_1e-05/TMC_hyp_param_srch_tot_obj_loss_k_1_non_fac_small_lr_1e-05.npy') # lr1 = 1e-5
fig_3 = plt.figure()
plt.plot(x_hp_data, -tmc_1_lr1, x_hp_data, -tmc_1_lr2, x_hp_data, -tmc_1_lr3)
plt.legend(['lr=1e-3', 'lr=1e-4', 'lr=1e-5'], loc='lower right')
plt.xlabel('Epochs')
plt.ylim(-135, -91)
plt.xlim(1, 500)
plt.ylabel('Objective value')
plt.title('TMC hyper-param search with K=1')
plt.show()
# # for k=5
tmc_5_lr1 = np.load('/Test Results/Hyper parameter search/k_5/TMC_model_hyp_param_srch_non_fac_small_k_5_learn_rate_0.001/TMC_hyp_param_srch_tot_obj_loss_k_5_non_fac_small_lr_0.001.npy') # lr1 = 1e-3
tmc_5_lr2 = np.load('/Test Results/Hyper parameter search/k_5/TMC_model_hyp_param_srch_non_fac_small_k_5_learn_rate_0.0001/TMC_hyp_param_srch_tot_obj_loss_k_5_non_fac_small_lr_0.0001.npy') # lr1 = 1e-4
tmc_5_lr3 = np.load('/Test Results/Hyper parameter search/k_5/TMC_model_hyp_param_srch_non_fac_small_k_5_learn_rate_1e-05/TMC_hyp_param_srch_tot_obj_loss_k_5_non_fac_small_lr_1e-05.npy') # lr1 = 1e-5
fig_4 = plt.figure()
plt.plot(x_hp_data, -tmc_5_lr1, x_hp_data, -tmc_5_lr2, x_hp_data, -tmc_5_lr3)
plt.legend(['lr=1e-3', 'lr=1e-4', 'lr=1e-5'], loc='lower right')
plt.xlabel('Epochs')
plt.ylim(-135, -91)
plt.xlim(1, 500)
plt.ylabel('Objective value')
plt.title('TMC hyper-param search with K=5')
plt.show()
# # for k=20
tmc_20_lr1 = np.load('/Test Results/Hyper parameter search/k_20/TMC_model_hyp_param_srch_non_fac_small_k_20_learn_rate_0.001/TMC_hyp_param_srch_tot_obj_loss_k_20_non_fac_small_lr_0.001.npy') # lr1 = 1e-3
tmc_20_lr2 = np.load('/Test Results/Hyper parameter search/k_20/TMC_model_hyp_param_srch_non_fac_small_k_20_learn_rate_0.0001/TMC_hyp_param_srch_tot_obj_loss_k_20_non_fac_small_lr_0.0001.npy') # lr1 = 1e-4
tmc_20_lr3 = np.load('/Test Results/Hyper parameter search/k_20/TMC_model_hyp_param_srch_non_fac_small_k_20_learn_rate_1e-05/TMC_hyp_param_srch_tot_obj_loss_k_20_non_fac_small_lr_1e-05.npy') # lr1 = 1e-5
fig_5 = plt.figure()
plt.plot(x_hp_data, -tmc_20_lr1, x_hp_data, -tmc_20_lr2, x_hp_data, -tmc_20_lr3)
plt.legend(['lr=1e-3', 'lr=1e-4', 'lr=1e-5'], loc='lower right')
plt.xlabel('Epochs')
plt.ylim(-135, -91)
plt.xlim(1, 500)
plt.ylabel('Objective value')
plt.title('TMC hyper-param search with K=20')
plt.show()
# for k=50
tmc_50_lr1 = np.load('/Test Results/Hyper parameter search/k_50/TMC_model_hyp_param_srch_non_fac_small_k_50_learn_rate_0.001/TMC_hyp_param_srch_tot_obj_loss_k_50_non_fac_small_lr_0.001.npy') # lr1 = 1e-3
tmc_50_lr2 = np.load('/Test Results/Hyper parameter search/k_50/TMC_model_hyp_param_srch_non_fac_small_k_50_learn_rate_0.0001/TMC_hyp_param_srch_tot_obj_loss_k_50_non_fac_small_lr_0.0001.npy') # lr1 = 1e-4
tmc_50_lr3 = np.load('/Test Results/Hyper parameter search/k_50/TMC_model_hyp_param_srch_non_fac_small_k_50_learn_rate_1e-05/TMC_hyp_param_srch_tot_obj_loss_k_50_non_fac_small_lr_1e-05.npy') # lr1 = 1e-5
fig_6 = plt.figure()
plt.plot(x_hp_data, -tmc_50_lr1, x_hp_data, -tmc_50_lr2, x_hp_data, -tmc_50_lr3)
plt.legend(['lr=1e-3', 'lr=1e-4', 'lr=1e-5'], loc='lower right')
plt.xlabel('Epochs')
plt.ylim(-135, -91)
plt.xlim(1, 500)
plt.ylabel('Objective value')
plt.title('TMC hyper-param search with K=50')
plt.show()
# put all HP_searches within one plot
# figure_7 = plt.figure()
# plt.plot(x_hp_data, -tmc_1_lr1, color='red')
# plt.plot(x_hp_data, -tmc_1_lr2, marker='^', color='red')
# plt.plot(x_hp_data, -tmc_1_lr3, marker='s', color='red')
# plt.plot(x_hp_data, -tmc_5_lr1, color='blue')
# plt.plot(x_hp_data, -tmc_5_lr2, marker='^', color='blue')
# plt.plot(x_hp_data, -tmc_5_lr3, marker='s', color='blue')
# plt.plot(x_hp_data, -tmc_20_lr1, color='green')
# plt.plot(x_hp_data, -tmc_20_lr2, marker='^', color='green')
# plt.plot(x_hp_data, -tmc_20_lr3, marker='s', color='green')
# plt.plot(x_hp_data, -tmc_20_lr1, color='black')
# plt.plot(x_hp_data, -tmc_20_lr2, marker='^', color='black')
# plt.plot(x_hp_data, -tmc_20_lr3, marker='s', color='black')
#
# plt.ylabel('Objective value')
# plt.xlabel('Epochs')
# plt.ylim(-135, -91)
# plt.xlim(1, 500)
# print(tmc_20_lr1)
# plt.legend(['K=1 lr=1e-3', 'K=1 lr=1e-4', 'K=1 lr=1e-5',
# 'K=5 lr=1e-3', 'K=5 lr=1e-4', 'K=5 lr=1e-5',
# 'K=20 lr=1e-3', 'K=20 lr=1e-4', 'K=20 lr=1e-5',
# 'K=50 lr=1e-3', 'K=50 lr=1e-4', 'K=50 lr=1e-5'],
# bbox_to_anchor=(1, 1.05), loc="upper left")
# plt.show()
# Ablation study: both models use the architecture proposed by the IWAE
# authors -- vanilla IWAE vs. TMC trained on the same vanilla structure.
vanilla_iwae = np.load('/Test Results/Vanilla_IWAE_model_non_fac_small_k_5_iwae_obj/tot_obj_loss_k_5_non_fac_small_vanilla_IWAE.npy')
tmc_vanilla_iwae = np.load('/Test Results/TMC_model_with_Vanilla_IWAE_structure_non_fac_small_k_5_TMC_obj/tot_obj_loss_k_5_non_fac_small.npy')
# Epoch axis sized from the stored run length.
n_epochs = vanilla_iwae.shape[-1]
x_data = np.arange(n_epochs)
fig_8 = plt.figure()
plt.plot(x_data, -vanilla_iwae)
plt.plot(x_data, -tmc_vanilla_iwae)
plt.legend(['Vanilla IWAE', 'TMC with vanilla structure'], loc='lower right')
plt.xlabel('Epochs')
plt.ylim(-140, -115)
plt.xlim(1, 1200)
plt.ylabel('Objective value')
plt.title('Ablation study')
plt.show()
|
<filename>TMC_IWAE_plots.py
import numpy as np
import matplotlib.pyplot as plt
import csv
plt.rcParams.update({'font.size': 14})
from argparse import ArgumentParser
"""
For the Non-factorized small models
"""
iwae_nf_s_loss = np.load('/Test Results/IWAE_model_non_fac_small_k_20_iwae_obj/tot_obj_loss_k_20_non_fac_small.npy')
tmc_nf_s_loss = np.load('/Test Results/TMC_model_non_fac_small_k_20_objective_TMC/tot_obj_loss_k_20_non_fac_small.npy')
with open('nfs_iwa_std.csv', 'r') as f:
temp = csv.reader('nfs_iwa_std.csv', delimiter=',')
original_tmc_nf_s_loss = np.array(temp)
print(original_tmc_nf_s_loss)
mean_iwae_nf_s = np.mean(iwae_nf_s_loss[-50:])
mean_tmc_nf_s = np.mean(tmc_nf_s_loss[-50:])
std_iwae_nf_s = np.std(iwae_nf_s_loss[-50:])
std_tmc_nf_s = np.std(tmc_nf_s_loss[-50:])
print('---------------------------------------------------')
print('Mean loss for the last 50 epochs on IWAE small NF: ', -mean_iwae_nf_s)
print('StdDev for the last 50 epochs on IWAE small NF: ', std_iwae_nf_s)
print('Mean loss for the last 50 epochs on TMC small NF: ', -mean_tmc_nf_s)
print('StdDev for the last 50 epochs on TMC large NF: ', std_tmc_nf_s)
print('---------------------------------------------------')
x_ax_data = np.arange(iwae_nf_s_loss.shape[-1])
fig = plt.figure()
plt.plot(x_ax_data, -iwae_nf_s_loss, x_ax_data, -tmc_nf_s_loss)
plt.legend(['IWAE test loss on IWAE obj.', 'TMC test loss on TMC obj.'], loc='upper right')
plt.xlabel('Epochs')
plt.ylim(-100, -90)
plt.xlim(1, 1200)
plt.ylabel('Objective value')
plt.title('Non-factorized small models evaluated on IWAE/TMC objective')
plt.show()
"""
For the Non-factorized large models
"""
iwae_nf_l_loss = np.load('/Test Results/IWAE_model_non_fac_large_k_20_iwae_obj/tot_obj_loss_k_20_non_fac_large.npy')
tmc_nf_l_loss = np.load('/Test Results/TMC_model_non_fac_large_k_20_objective_TMC/tot_obj_loss_k_20_non_fac_large.npy')
mean_iwae_nf_l = np.mean(iwae_nf_l_loss[-50:])
mean_tmc_nf_l = np.mean(tmc_nf_l_loss[-50:])
std_iwae_nf_l = np.std(iwae_nf_l_loss[-50:])
std_tmc_nf_l = np.std(tmc_nf_l_loss[-50:])
print('---------------------------------------------------')
print('Mean loss for the last 50 epochs on IWAE large NF: ', -mean_iwae_nf_l)
print('StdDev for the last 50 epochs on IWAE large NF: ', std_iwae_nf_l)
print('Mean loss for the last 50 epochs on TMC large NF: ', -mean_tmc_nf_l)
print('StdDev for the last 50 epochs on TMC large NF: ', std_tmc_nf_l)
print('---------------------------------------------------')
fig_2 = plt.figure()
plt.plot(x_ax_data, -iwae_nf_l_loss, x_ax_data, -tmc_nf_l_loss)
plt.legend(['IWAE test loss on IWAE obj.', 'TMC test loss on TMC obj.'], loc='upper right')
plt.xlabel('Epochs')
plt.ylim(-100, -90)
plt.xlim(1, 1200)
plt.ylabel('Objective value')
plt.title('Non-factorized large models evaluated on IWAE/TMC objective')
plt.show()
"""
For the hyper parameter search
"""
# TODO
# TODO Will need several subplots here
# TODO we will have 4 different k's and 5 different learning rates
# TODO so what should we have as structure? 4 subplots with 5 curves in each?
x_hp_data = np.arange(500)
# # for k=1
tmc_1_lr1 = np.load('/Test Results/Hyper parameter search/k_1/TMC_model_hyp_param_srch_non_fac_small_k_1_learn_rate_0.001/TMC_hyp_param_srch_tot_obj_loss_k_1_non_fac_small_lr_0.001.npy') # lr1 = 1e-3
tmc_1_lr2 = np.load('/Test Results/Hyper parameter search/k_1/TMC_model_hyp_param_srch_non_fac_small_k_1_learn_rate_0.000101/TMC_hyp_param_srch_tot_obj_loss_k_1_non_fac_small_lr_0.000101.npy') # lr1 = 1e-4
tmc_1_lr3 = np.load('/Test Results/Hyper parameter search/k_1/TMC_model_hyp_param_srch_non_fac_small_k_1_learn_rate_1e-05/TMC_hyp_param_srch_tot_obj_loss_k_1_non_fac_small_lr_1e-05.npy') # lr1 = 1e-5
fig_3 = plt.figure()
plt.plot(x_hp_data, -tmc_1_lr1, x_hp_data, -tmc_1_lr2, x_hp_data, -tmc_1_lr3)
plt.legend(['lr=1e-3', 'lr=1e-4', 'lr=1e-5'], loc='lower right')
plt.xlabel('Epochs')
plt.ylim(-135, -91)
plt.xlim(1, 500)
plt.ylabel('Objective value')
plt.title('TMC hyper-param search with K=1')
plt.show()
# # for k=5
tmc_5_lr1 = np.load('/Test Results/Hyper parameter search/k_5/TMC_model_hyp_param_srch_non_fac_small_k_5_learn_rate_0.001/TMC_hyp_param_srch_tot_obj_loss_k_5_non_fac_small_lr_0.001.npy') # lr1 = 1e-3
tmc_5_lr2 = np.load('/Test Results/Hyper parameter search/k_5/TMC_model_hyp_param_srch_non_fac_small_k_5_learn_rate_0.0001/TMC_hyp_param_srch_tot_obj_loss_k_5_non_fac_small_lr_0.0001.npy') # lr1 = 1e-4
tmc_5_lr3 = np.load('/Test Results/Hyper parameter search/k_5/TMC_model_hyp_param_srch_non_fac_small_k_5_learn_rate_1e-05/TMC_hyp_param_srch_tot_obj_loss_k_5_non_fac_small_lr_1e-05.npy') # lr1 = 1e-5
fig_4 = plt.figure()
plt.plot(x_hp_data, -tmc_5_lr1, x_hp_data, -tmc_5_lr2, x_hp_data, -tmc_5_lr3)
plt.legend(['lr=1e-3', 'lr=1e-4', 'lr=1e-5'], loc='lower right')
plt.xlabel('Epochs')
plt.ylim(-135, -91)
plt.xlim(1, 500)
plt.ylabel('Objective value')
plt.title('TMC hyper-param search with K=5')
plt.show()
# # for k=20
tmc_20_lr1 = np.load('/Test Results/Hyper parameter search/k_20/TMC_model_hyp_param_srch_non_fac_small_k_20_learn_rate_0.001/TMC_hyp_param_srch_tot_obj_loss_k_20_non_fac_small_lr_0.001.npy') # lr1 = 1e-3
tmc_20_lr2 = np.load('/Test Results/Hyper parameter search/k_20/TMC_model_hyp_param_srch_non_fac_small_k_20_learn_rate_0.0001/TMC_hyp_param_srch_tot_obj_loss_k_20_non_fac_small_lr_0.0001.npy') # lr1 = 1e-4
tmc_20_lr3 = np.load('/Test Results/Hyper parameter search/k_20/TMC_model_hyp_param_srch_non_fac_small_k_20_learn_rate_1e-05/TMC_hyp_param_srch_tot_obj_loss_k_20_non_fac_small_lr_1e-05.npy') # lr1 = 1e-5
fig_5 = plt.figure()
plt.plot(x_hp_data, -tmc_20_lr1, x_hp_data, -tmc_20_lr2, x_hp_data, -tmc_20_lr3)
plt.legend(['lr=1e-3', 'lr=1e-4', 'lr=1e-5'], loc='lower right')
plt.xlabel('Epochs')
plt.ylim(-135, -91)
plt.xlim(1, 500)
plt.ylabel('Objective value')
plt.title('TMC hyper-param search with K=20')
plt.show()
# for k=50
tmc_50_lr1 = np.load('/Test Results/Hyper parameter search/k_50/TMC_model_hyp_param_srch_non_fac_small_k_50_learn_rate_0.001/TMC_hyp_param_srch_tot_obj_loss_k_50_non_fac_small_lr_0.001.npy') # lr1 = 1e-3
tmc_50_lr2 = np.load('/Test Results/Hyper parameter search/k_50/TMC_model_hyp_param_srch_non_fac_small_k_50_learn_rate_0.0001/TMC_hyp_param_srch_tot_obj_loss_k_50_non_fac_small_lr_0.0001.npy') # lr1 = 1e-4
tmc_50_lr3 = np.load('/Test Results/Hyper parameter search/k_50/TMC_model_hyp_param_srch_non_fac_small_k_50_learn_rate_1e-05/TMC_hyp_param_srch_tot_obj_loss_k_50_non_fac_small_lr_1e-05.npy') # lr1 = 1e-5
fig_6 = plt.figure()
plt.plot(x_hp_data, -tmc_50_lr1, x_hp_data, -tmc_50_lr2, x_hp_data, -tmc_50_lr3)
plt.legend(['lr=1e-3', 'lr=1e-4', 'lr=1e-5'], loc='lower right')
plt.xlabel('Epochs')
plt.ylim(-135, -91)
plt.xlim(1, 500)
plt.ylabel('Objective value')
plt.title('TMC hyper-param search with K=50')
plt.show()
# put all HP_searches within one plot
# figure_7 = plt.figure()
# plt.plot(x_hp_data, -tmc_1_lr1, color='red')
# plt.plot(x_hp_data, -tmc_1_lr2, marker='^', color='red')
# plt.plot(x_hp_data, -tmc_1_lr3, marker='s', color='red')
# plt.plot(x_hp_data, -tmc_5_lr1, color='blue')
# plt.plot(x_hp_data, -tmc_5_lr2, marker='^', color='blue')
# plt.plot(x_hp_data, -tmc_5_lr3, marker='s', color='blue')
# plt.plot(x_hp_data, -tmc_20_lr1, color='green')
# plt.plot(x_hp_data, -tmc_20_lr2, marker='^', color='green')
# plt.plot(x_hp_data, -tmc_20_lr3, marker='s', color='green')
# plt.plot(x_hp_data, -tmc_50_lr1, color='black')
# plt.plot(x_hp_data, -tmc_50_lr2, marker='^', color='black')
# plt.plot(x_hp_data, -tmc_50_lr3, marker='s', color='black')
#
# plt.ylabel('Objective value')
# plt.xlabel('Epochs')
# plt.ylim(-135, -91)
# plt.xlim(1, 500)
# print(tmc_20_lr1)
# plt.legend(['K=1 lr=1e-3', 'K=1 lr=1e-4', 'K=1 lr=1e-5',
# 'K=5 lr=1e-3', 'K=5 lr=1e-4', 'K=5 lr=1e-5',
# 'K=20 lr=1e-3', 'K=20 lr=1e-4', 'K=20 lr=1e-5',
# 'K=50 lr=1e-3', 'K=50 lr=1e-4', 'K=50 lr=1e-5'],
# bbox_to_anchor=(1, 1.05), loc="upper left")
# plt.show()
# for the case of using vanilla IWAE structure and TMC with vanilla IWAE structure
# Ablation: compare the two models when both use the original IWAE architecture.
vanilla_iwae = np.load('/Test Results/Vanilla_IWAE_model_non_fac_small_k_5_iwae_obj/tot_obj_loss_k_5_non_fac_small_vanilla_IWAE.npy') # IWAE with the structure proposed by the IWAE authors
tmc_vanilla_iwae = np.load('/Test Results/TMC_model_with_Vanilla_IWAE_structure_non_fac_small_k_5_TMC_obj/tot_obj_loss_k_5_non_fac_small.npy') # TMC with IWAE structure proposed by the IWAE authors
# One entry per epoch; negate the losses so the plot shows the objective.
x_data = np.arange(vanilla_iwae.shape[-1])
fig_8 = plt.figure()
plt.plot(x_data, -vanilla_iwae, x_data, -tmc_vanilla_iwae)
plt.legend(['Vanilla IWAE', 'TMC with vanilla structure'], loc='lower right')
plt.xlabel('Epochs')
plt.ylim(-140, -115)
plt.xlim(1, 1200)
plt.ylabel('Objective value')
plt.title('Ablation study')
plt.show()
|
en
| 0.442872
|
For the Non-factorized small models For the Non-factorized large models For the hyper parameter search # TODO # TODO Will need several subplots here # TODO we will have 4 different k's and 5 different learning rates # TODO so what should we have as structure? 4 subplots with 5 curves in each? # # for k=1 # lr1 = 1e-3 # lr1 = 1e-4 # lr1 = 1e-5 # # for k=5 # lr1 = 1e-3 # lr1 = 1e-4 # lr1 = 1e-5 # # for k=20 # lr1 = 1e-3 # lr1 = 1e-4 # lr1 = 1e-5 # for k=50 # lr1 = 1e-3 # lr1 = 1e-4 # lr1 = 1e-5 # put all HP_searches within one plot # figure_7 = plt.figure() # plt.plot(x_hp_data, -tmc_1_lr1, color='red') # plt.plot(x_hp_data, -tmc_1_lr2, marker='^', color='red') # plt.plot(x_hp_data, -tmc_1_lr3, marker='s', color='red') # plt.plot(x_hp_data, -tmc_5_lr1, color='blue') # plt.plot(x_hp_data, -tmc_5_lr2, marker='^', color='blue') # plt.plot(x_hp_data, -tmc_5_lr3, marker='s', color='blue') # plt.plot(x_hp_data, -tmc_20_lr1, color='green') # plt.plot(x_hp_data, -tmc_20_lr2, marker='^', color='green') # plt.plot(x_hp_data, -tmc_20_lr3, marker='s', color='green') # plt.plot(x_hp_data, -tmc_20_lr1, color='black') # plt.plot(x_hp_data, -tmc_20_lr2, marker='^', color='black') # plt.plot(x_hp_data, -tmc_20_lr3, marker='s', color='black') # # plt.ylabel('Objective value') # plt.xlabel('Epochs') # plt.ylim(-135, -91) # plt.xlim(1, 500) # print(tmc_20_lr1) # plt.legend(['K=1 lr=1e-3', 'K=1 lr=1e-4', 'K=1 lr=1e-5', # 'K=5 lr=1e-3', 'K=5 lr=1e-4', 'K=5 lr=1e-5', # 'K=20 lr=1e-3', 'K=20 lr=1e-4', 'K=20 lr=1e-5', # 'K=50 lr=1e-3', 'K=50 lr=1e-4', 'K=50 lr=1e-5'], # bbox_to_anchor=(1, 1.05), loc="upper left") # plt.show() # for te case of using vanilla IWAE structure and TMC with vanilla IWAE structure # IWAE with the structure proposed by the IWAE authors # TMC with IWAE structure proposed by the IWAE authors
| 2.633065
| 3
|
test/core/parser/conftest.py
|
m-kuhn/sqlfluff
| 0
|
6629745
|
<gh_stars>0
"""Test fixtures for parser tests."""
import pytest
from sqlfluff.core.dialects import ansi_dialect
@pytest.fixture(scope="function")
def fresh_ansi_dialect():
"""Expand the ansi dialect for use."""
dialect = ansi_dialect
dialect.expand()
return dialect
@pytest.fixture(scope="function")
def seg_list(generate_test_segments):
"""A preset list of segments for testing."""
return generate_test_segments(["bar", " \t ", "foo", "baar", " \t "])
@pytest.fixture(scope="function")
def bracket_seg_list(generate_test_segments):
"""Another preset list of segments for testing."""
return generate_test_segments(
["bar", " \t ", "(", "foo", " ", ")", "baar", " \t ", "foo"]
)
|
"""Test fixtures for parser tests."""
import pytest
from sqlfluff.core.dialects import ansi_dialect
@pytest.fixture(scope="function")
def fresh_ansi_dialect():
"""Expand the ansi dialect for use."""
dialect = ansi_dialect
dialect.expand()
return dialect
@pytest.fixture(scope="function")
def seg_list(generate_test_segments):
"""A preset list of segments for testing."""
return generate_test_segments(["bar", " \t ", "foo", "baar", " \t "])
@pytest.fixture(scope="function")
def bracket_seg_list(generate_test_segments):
"""Another preset list of segments for testing."""
return generate_test_segments(
["bar", " \t ", "(", "foo", " ", ")", "baar", " \t ", "foo"]
)
|
en
| 0.782747
|
Test fixtures for parser tests. Expand the ansi dialect for use. A preset list of segments for testing. Another preset list of segments for testing.
| 2.435356
| 2
|
server_config.py
|
jbalint/aurum-datadiscovery
| 60
|
6629746
|
# input parameters
# Exactly one (path_model, separator) pair should be active at a time; the
# commented pairs are alternative datasets (tpch uses '|', mitdwh uses ',').
#path_model = "/Users/ra-mit/development/discovery_proto/models/tpch/"
#separator = "|"
# Currently active dataset: massdata, comma-separated.
path_model = "/Users/ra-mit/development/discovery_proto/models/massdata/"
separator = ","
#path_model = "/Users/ra-mit/development/discovery_proto/models/mitdwh/"
#separator = ","
|
# input parameters
#path_model = "/Users/ra-mit/development/discovery_proto/models/tpch/"
#separator = "|"
path_model = "/Users/ra-mit/development/discovery_proto/models/massdata/"
separator = ","
#path_model = "/Users/ra-mit/development/discovery_proto/models/mitdwh/"
#separator = ","
|
en
| 0.612613
|
# input parameters #path_model = "/Users/ra-mit/development/discovery_proto/models/tpch/" #separator = "|" #path_model = "/Users/ra-mit/development/discovery_proto/models/mitdwh/" #separator = ","
| 1.146972
| 1
|
bluefloodclient/main.py
|
absalon-james/python-blueflood
| 0
|
6629747
|
from client import Blueflood, Datapoint
from utils import time_in_ms
import pprint
auth_url = 'https://identity.api.rackspacecloud.com/v2.0/'
apikey = '<KEY>'
username = 'privateclouddevs'
client = Blueflood(auth_url=auth_url, apikey=apikey, username="privateclouddevs")
point = {
'collectionTime': 1442262994835,
'metricName': 'james.test.number',
'metricValue': 55,
'ttlInSeconds': 3600
}
#point = Datapoint('intel.suda-devstack-dfw.Threads_created', 55, collection_time=1442262994835, ttl_seconds=3600)
print "Ingesting"
client.ingest(point)
print "Getting"
resp = client.get_metrics(0, time_in_ms(), ['james.test.number'], points=100)
pprint.pprint(resp)
|
from client import Blueflood, Datapoint
from utils import time_in_ms
import pprint
auth_url = 'https://identity.api.rackspacecloud.com/v2.0/'
apikey = '<KEY>'
username = 'privateclouddevs'
client = Blueflood(auth_url=auth_url, apikey=apikey, username="privateclouddevs")
point = {
'collectionTime': 1442262994835,
'metricName': 'james.test.number',
'metricValue': 55,
'ttlInSeconds': 3600
}
#point = Datapoint('intel.suda-devstack-dfw.Threads_created', 55, collection_time=1442262994835, ttl_seconds=3600)
print "Ingesting"
client.ingest(point)
print "Getting"
resp = client.get_metrics(0, time_in_ms(), ['james.test.number'], points=100)
pprint.pprint(resp)
|
en
| 0.360113
|
#point = Datapoint('intel.suda-devstack-dfw.Threads_created', 55, collection_time=1442262994835, ttl_seconds=3600)
| 2.409807
| 2
|
third_party/crashpad/crashpad/util/util_test.gyp
|
google-ar/chromium
| 777
|
6629748
|
<gh_stars>100-1000
# Copyright 2014 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{
'includes': [
'../build/crashpad.gypi',
],
'targets': [
{
'target_name': 'crashpad_util_test',
'type': 'executable',
'dependencies': [
'util.gyp:crashpad_util',
'../client/client.gyp:crashpad_client',
'../compat/compat.gyp:crashpad_compat',
'../test/test.gyp:crashpad_test',
'../third_party/gtest/gmock.gyp:gmock',
'../third_party/gtest/gmock.gyp:gmock_main',
'../third_party/gtest/gtest.gyp:gtest',
'../third_party/mini_chromium/mini_chromium.gyp:base',
],
'include_dirs': [
'..',
],
'sources': [
'file/file_io_test.cc',
'file/string_file_test.cc',
'mac/launchd_test.mm',
'mac/mac_util_test.mm',
'mac/service_management_test.mm',
'mac/xattr_test.cc',
'mach/child_port_handshake_test.cc',
'mach/child_port_server_test.cc',
'mach/composite_mach_message_server_test.cc',
'mach/exc_client_variants_test.cc',
'mach/exc_server_variants_test.cc',
'mach/exception_behaviors_test.cc',
'mach/exception_ports_test.cc',
'mach/exception_types_test.cc',
'mach/mach_extensions_test.cc',
'mach/mach_message_server_test.cc',
'mach/mach_message_test.cc',
'mach/notify_server_test.cc',
'mach/scoped_task_suspend_test.cc',
'mach/symbolic_constants_mach_test.cc',
'mach/task_memory_test.cc',
'misc/arraysize_unsafe_test.cc',
'misc/clock_test.cc',
'misc/initialization_state_dcheck_test.cc',
'misc/initialization_state_test.cc',
'misc/scoped_forbid_return_test.cc',
'misc/random_string_test.cc',
'misc/uuid_test.cc',
'net/http_body_test.cc',
'net/http_body_test_util.cc',
'net/http_body_test_util.h',
'net/http_multipart_builder_test.cc',
'net/http_transport_test.cc',
'numeric/checked_address_range_test.cc',
'numeric/checked_range_test.cc',
'numeric/in_range_cast_test.cc',
'numeric/int128_test.cc',
'posix/process_info_test.cc',
'posix/symbolic_constants_posix_test.cc',
'stdlib/aligned_allocator_test.cc',
'stdlib/map_insert_test.cc',
'stdlib/string_number_conversion_test.cc',
'stdlib/strlcpy_test.cc',
'stdlib/strnlen_test.cc',
'string/split_string_test.cc',
'synchronization/semaphore_test.cc',
'thread/thread_log_messages_test.cc',
'thread/thread_test.cc',
'thread/worker_thread_test.cc',
'win/capture_context_test.cc',
'win/command_line_test.cc',
'win/critical_section_with_debug_info_test.cc',
'win/exception_handler_server_test.cc',
'win/get_function_test.cc',
'win/handle_test.cc',
'win/initial_client_data_test.cc',
'win/process_info_test.cc',
'win/registration_protocol_win_test.cc',
'win/scoped_process_suspend_test.cc',
'win/time_test.cc',
],
'conditions': [
['OS=="mac"', {
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
],
},
}],
['OS=="win"', {
'dependencies': [
'crashpad_util_test_process_info_test_child',
],
'link_settings': {
'libraries': [
'-ladvapi32.lib',
'-limagehlp.lib',
'-lrpcrt4.lib',
],
},
}],
],
},
],
'conditions': [
['OS=="win"', {
'targets': [
{
'target_name': 'crashpad_util_test_process_info_test_child',
'type': 'executable',
'sources': [
'win/process_info_test_child.cc',
],
# Set an unusually high load address to make sure that the main
# executable still appears as the first element in
# ProcessInfo::Modules().
'msvs_settings': {
'VCLinkerTool': {
'AdditionalOptions': [
'/BASE:0x78000000',
],
'RandomizedBaseAddress': '1', # /DYNAMICBASE:NO.
'FixedBaseAddress': '2', # /FIXED.
},
},
},
]
}],
],
}
|
# Copyright 2014 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{
'includes': [
'../build/crashpad.gypi',
],
'targets': [
{
'target_name': 'crashpad_util_test',
'type': 'executable',
'dependencies': [
'util.gyp:crashpad_util',
'../client/client.gyp:crashpad_client',
'../compat/compat.gyp:crashpad_compat',
'../test/test.gyp:crashpad_test',
'../third_party/gtest/gmock.gyp:gmock',
'../third_party/gtest/gmock.gyp:gmock_main',
'../third_party/gtest/gtest.gyp:gtest',
'../third_party/mini_chromium/mini_chromium.gyp:base',
],
'include_dirs': [
'..',
],
'sources': [
'file/file_io_test.cc',
'file/string_file_test.cc',
'mac/launchd_test.mm',
'mac/mac_util_test.mm',
'mac/service_management_test.mm',
'mac/xattr_test.cc',
'mach/child_port_handshake_test.cc',
'mach/child_port_server_test.cc',
'mach/composite_mach_message_server_test.cc',
'mach/exc_client_variants_test.cc',
'mach/exc_server_variants_test.cc',
'mach/exception_behaviors_test.cc',
'mach/exception_ports_test.cc',
'mach/exception_types_test.cc',
'mach/mach_extensions_test.cc',
'mach/mach_message_server_test.cc',
'mach/mach_message_test.cc',
'mach/notify_server_test.cc',
'mach/scoped_task_suspend_test.cc',
'mach/symbolic_constants_mach_test.cc',
'mach/task_memory_test.cc',
'misc/arraysize_unsafe_test.cc',
'misc/clock_test.cc',
'misc/initialization_state_dcheck_test.cc',
'misc/initialization_state_test.cc',
'misc/scoped_forbid_return_test.cc',
'misc/random_string_test.cc',
'misc/uuid_test.cc',
'net/http_body_test.cc',
'net/http_body_test_util.cc',
'net/http_body_test_util.h',
'net/http_multipart_builder_test.cc',
'net/http_transport_test.cc',
'numeric/checked_address_range_test.cc',
'numeric/checked_range_test.cc',
'numeric/in_range_cast_test.cc',
'numeric/int128_test.cc',
'posix/process_info_test.cc',
'posix/symbolic_constants_posix_test.cc',
'stdlib/aligned_allocator_test.cc',
'stdlib/map_insert_test.cc',
'stdlib/string_number_conversion_test.cc',
'stdlib/strlcpy_test.cc',
'stdlib/strnlen_test.cc',
'string/split_string_test.cc',
'synchronization/semaphore_test.cc',
'thread/thread_log_messages_test.cc',
'thread/thread_test.cc',
'thread/worker_thread_test.cc',
'win/capture_context_test.cc',
'win/command_line_test.cc',
'win/critical_section_with_debug_info_test.cc',
'win/exception_handler_server_test.cc',
'win/get_function_test.cc',
'win/handle_test.cc',
'win/initial_client_data_test.cc',
'win/process_info_test.cc',
'win/registration_protocol_win_test.cc',
'win/scoped_process_suspend_test.cc',
'win/time_test.cc',
],
'conditions': [
['OS=="mac"', {
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
],
},
}],
['OS=="win"', {
'dependencies': [
'crashpad_util_test_process_info_test_child',
],
'link_settings': {
'libraries': [
'-ladvapi32.lib',
'-limagehlp.lib',
'-lrpcrt4.lib',
],
},
}],
],
},
],
'conditions': [
['OS=="win"', {
'targets': [
{
'target_name': 'crashpad_util_test_process_info_test_child',
'type': 'executable',
'sources': [
'win/process_info_test_child.cc',
],
# Set an unusually high load address to make sure that the main
# executable still appears as the first element in
# ProcessInfo::Modules().
'msvs_settings': {
'VCLinkerTool': {
'AdditionalOptions': [
'/BASE:0x78000000',
],
'RandomizedBaseAddress': '1', # /DYNAMICBASE:NO.
'FixedBaseAddress': '2', # /FIXED.
},
},
},
]
}],
],
}
|
en
| 0.872108
|
# Copyright 2014 The Crashpad Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Set an unusually high load address to make sure that the main # executable still appears as the first element in # ProcessInfo::Modules(). # /DYNAMICBASE:NO. # /FIXED.
| 1.044866
| 1
|
schemas/input_conf.py
|
philipsales/couchbase-python-elasticsearch
| 0
|
6629749
|
# Registry of input-source schemas used for validating incoming records.
from schemas.input.kobo import personal_info_5_2_2 as personal_info
from schemas.input.old_curis import old_curis as old_curis
# Keys identify the data source; values are the schema objects for that source.
input_schemas = {
    'kobo': personal_info.personal_info_schema,
    'old_curis': old_curis.old_curis_schema
}
|
from schemas.input.kobo import personal_info_5_2_2 as personal_info
from schemas.input.old_curis import old_curis as old_curis
input_schemas = {
'kobo': personal_info.personal_info_schema,
'old_curis': old_curis.old_curis_schema
}
|
none
| 1
| 1.197032
| 1
|
|
src/pretix/api/views/cart.py
|
rstrblstr/pretix
| 1,248
|
6629750
|
<gh_stars>1000+
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 <NAME> and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
from django.db import transaction
from rest_framework import status, viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import ValidationError
from rest_framework.filters import OrderingFilter
from rest_framework.mixins import CreateModelMixin, DestroyModelMixin
from rest_framework.response import Response
from rest_framework.settings import api_settings
from pretix.api.serializers.cart import (
CartPositionCreateSerializer, CartPositionSerializer,
)
from pretix.base.models import CartPosition
from pretix.base.services.locking import NoLockManager
class CartPositionViewSet(CreateModelMixin, DestroyModelMixin, viewsets.ReadOnlyModelViewSet):
    """REST endpoint for cart positions created through the API.

    Read-only base plus create, bulk_create and destroy. Only positions whose
    cart_id ends in "@api" are visible, so carts created through other
    channels are not exposed here.
    """
    serializer_class = CartPositionSerializer
    queryset = CartPosition.objects.none()  # real queryset built in get_queryset()
    filter_backends = (OrderingFilter,)
    ordering = ('datetime',)
    ordering_fields = ('datetime', 'cart_id')
    lookup_field = 'id'
    permission = 'can_view_orders'
    write_permission = 'can_change_orders'

    def get_queryset(self):
        # Restrict to API-created carts of the current event; select/prefetch
        # the relations the serializer renders to avoid N+1 queries.
        return CartPosition.objects.filter(
            event=self.request.event,
            cart_id__endswith="@api"
        ).select_related('seat').prefetch_related('answers')

    def get_serializer_context(self):
        ctx = super().get_serializer_context()
        ctx['event'] = self.request.event
        ctx['quota_cache'] = {}  # per-request cache shared across serializers
        return ctx

    def create(self, request, *args, **kwargs):
        # Validate first, then write inside a transaction while holding the
        # event lock so quota checks and the insert happen atomically.
        serializer = CartPositionCreateSerializer(data=request.data, context=self.get_serializer_context())
        serializer.is_valid(raise_exception=True)
        with transaction.atomic(), self.request.event.lock():
            self.perform_create(serializer)
        cp = serializer.instance
        # Re-serialize with the read serializer for the response payload.
        serializer = CartPositionSerializer(cp, context=serializer.context)
        headers = self.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)

    @action(detail=False, methods=['POST'])
    def bulk_create(self, request, *args, **kwargs):
        # Batch variant of create(): validates and saves each entry
        # independently and reports per-item success/errors.
        if not isinstance(request.data, list):  # noqa
            return Response({"error": "Please supply a list"}, status=status.HTTP_400_BAD_REQUEST)
        ctx = self.get_serializer_context()
        with transaction.atomic():
            serializers = [
                CartPositionCreateSerializer(data=d, context=ctx)
                for d in request.data
            ]
            lockfn = self.request.event.lock
            # If no entry validates, nothing will be written, so skip taking
            # the event lock entirely (NoLockManager is a no-op stand-in).
            if not any(s.is_valid(raise_exception=False) for s in serializers):
                lockfn = NoLockManager
            results = []
            with lockfn():
                for s in serializers:
                    if s.is_valid(raise_exception=False):
                        try:
                            cp = s.save()
                        except ValidationError as e:
                            # save() may still fail after is_valid(); report
                            # the failure per-item instead of aborting the batch.
                            results.append({
                                'success': False,
                                'data': None,
                                'errors': {api_settings.NON_FIELD_ERRORS_KEY: e.detail},
                            })
                        else:
                            results.append({
                                'success': True,
                                'data': CartPositionSerializer(cp, context=ctx).data,
                                'errors': None,
                            })
                    else:
                        results.append({
                            'success': False,
                            'data': None,
                            'errors': s.errors,
                        })
        return Response({'results': results}, status=status.HTTP_200_OK)

    def perform_create(self, serializer):
        serializer.save()
|
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 <NAME> and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
from django.db import transaction
from rest_framework import status, viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import ValidationError
from rest_framework.filters import OrderingFilter
from rest_framework.mixins import CreateModelMixin, DestroyModelMixin
from rest_framework.response import Response
from rest_framework.settings import api_settings
from pretix.api.serializers.cart import (
CartPositionCreateSerializer, CartPositionSerializer,
)
from pretix.base.models import CartPosition
from pretix.base.services.locking import NoLockManager
class CartPositionViewSet(CreateModelMixin, DestroyModelMixin, viewsets.ReadOnlyModelViewSet):
serializer_class = CartPositionSerializer
queryset = CartPosition.objects.none()
filter_backends = (OrderingFilter,)
ordering = ('datetime',)
ordering_fields = ('datetime', 'cart_id')
lookup_field = 'id'
permission = 'can_view_orders'
write_permission = 'can_change_orders'
def get_queryset(self):
return CartPosition.objects.filter(
event=self.request.event,
cart_id__endswith="@api"
).select_related('seat').prefetch_related('answers')
def get_serializer_context(self):
ctx = super().get_serializer_context()
ctx['event'] = self.request.event
ctx['quota_cache'] = {}
return ctx
def create(self, request, *args, **kwargs):
serializer = CartPositionCreateSerializer(data=request.data, context=self.get_serializer_context())
serializer.is_valid(raise_exception=True)
with transaction.atomic(), self.request.event.lock():
self.perform_create(serializer)
cp = serializer.instance
serializer = CartPositionSerializer(cp, context=serializer.context)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
@action(detail=False, methods=['POST'])
def bulk_create(self, request, *args, **kwargs):
if not isinstance(request.data, list): # noqa
return Response({"error": "Please supply a list"}, status=status.HTTP_400_BAD_REQUEST)
ctx = self.get_serializer_context()
with transaction.atomic():
serializers = [
CartPositionCreateSerializer(data=d, context=ctx)
for d in request.data
]
lockfn = self.request.event.lock
if not any(s.is_valid(raise_exception=False) for s in serializers):
lockfn = NoLockManager
results = []
with lockfn():
for s in serializers:
if s.is_valid(raise_exception=False):
try:
cp = s.save()
except ValidationError as e:
results.append({
'success': False,
'data': None,
'errors': {api_settings.NON_FIELD_ERRORS_KEY: e.detail},
})
else:
results.append({
'success': True,
'data': CartPositionSerializer(cp, context=ctx).data,
'errors': None,
})
else:
results.append({
'success': False,
'data': None,
'errors': s.errors,
})
return Response({'results': results}, status=status.HTTP_200_OK)
def perform_create(self, serializer):
serializer.save()
|
en
| 0.883374
|
# # This file is part of pretix (Community Edition). # # Copyright (C) 2014-2020 <NAME> and contributors # Copyright (C) 2020-2021 rami.io GmbH and contributors # # This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General # Public License as published by the Free Software Foundation in version 3 of the License. # # ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are # applicable granting you additional permissions and placing additional restrictions on your usage of this software. # Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive # this file, see <https://pretix.eu/about/en/license>. # # This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied # warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more # details. # # You should have received a copy of the GNU Affero General Public License along with this program. If not, see # <https://www.gnu.org/licenses/>. # # noqa
| 1.769724
| 2
|