hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2044e0f6bddde561c1111e6183bb6d9e3dd28483
| 125
|
py
|
Python
|
example/dj/apps/issue_tracker/forms.py
|
druids/django-fperms-iscore
|
8e919cdc70ed57e0eb6407469de9ef2441ae06ad
|
[
"MIT"
] | 1
|
2019-10-07T12:40:38.000Z
|
2019-10-07T12:40:38.000Z
|
example/dj/apps/issue_tracker/forms.py
|
druids/django-fperms-iscore
|
8e919cdc70ed57e0eb6407469de9ef2441ae06ad
|
[
"MIT"
] | 3
|
2019-08-09T14:10:21.000Z
|
2022-02-01T13:48:01.000Z
|
example/dj/apps/issue_tracker/forms.py
|
druids/django-fperms-iscore
|
8e919cdc70ed57e0eb6407469de9ef2441ae06ad
|
[
"MIT"
] | null | null | null |
from fperms_iscore.forms import PermsFormMixin, GroupsFormMixin
class UserForm(PermsFormMixin, GroupsFormMixin):
pass
| 17.857143
| 63
| 0.824
| 12
| 125
| 8.5
| 0.833333
| 0.568627
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128
| 125
| 6
| 64
| 20.833333
| 0.93578
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
2052a738dc0b09666691db5322b8884c713544af
| 116
|
py
|
Python
|
transcrypt/development/automated_tests/__future__/print_function.py
|
JMCanning78/Transcrypt
|
8a8dabe831240414fdf1d5027fa2b0d71ab45d05
|
[
"Apache-2.0"
] | 1
|
2017-08-11T01:51:51.000Z
|
2017-08-11T01:51:51.000Z
|
transcrypt/development/automated_tests/__future__/print_function.py
|
JMCanning78/Transcrypt
|
8a8dabe831240414fdf1d5027fa2b0d71ab45d05
|
[
"Apache-2.0"
] | 2
|
2021-03-11T02:24:48.000Z
|
2021-05-11T20:29:52.000Z
|
transcrypt/development/automated_tests/__future__/print_function.py
|
JMCanning78/Transcrypt
|
8a8dabe831240414fdf1d5027fa2b0d71ab45d05
|
[
"Apache-2.0"
] | 1
|
2021-02-07T00:22:12.000Z
|
2021-02-07T00:22:12.000Z
|
from __future__ import print_function
def run(test):
test.check("from __future__ import print_function works")
| 23.2
| 61
| 0.793103
| 16
| 116
| 5.125
| 0.625
| 0.243902
| 0.390244
| 0.512195
| 0.707317
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 116
| 4
| 62
| 29
| 0.82
| 0
| 0
| 0
| 0
| 0
| 0.37069
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.666667
| 0
| 1
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 9
|
644a955d1ad5b9dca1849ec71b49cb194f52e5a6
| 26,871
|
py
|
Python
|
tvaf/tests/btn_test.py
|
AllSeeingEyeTolledEweSew/tvaf-ftp
|
c53ed4b8b6715d10567fca85847000356a2c7793
|
[
"0BSD"
] | null | null | null |
tvaf/tests/btn_test.py
|
AllSeeingEyeTolledEweSew/tvaf-ftp
|
c53ed4b8b6715d10567fca85847000356a2c7793
|
[
"0BSD"
] | null | null | null |
tvaf/tests/btn_test.py
|
AllSeeingEyeTolledEweSew/tvaf-ftp
|
c53ed4b8b6715d10567fca85847000356a2c7793
|
[
"0BSD"
] | null | null | null |
# Copyright (c) 2020 AllSeeingEyeTolledEweSew
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
"""Tests for the tvaf.btn module."""
import hashlib
import stat as stat_lib
from typing import Any
import unittest
import apsw
import btn
import tvaf.btn as tvaf_btn
import tvaf.fs as fs
# flake8: noqa
def get_mock_db() -> apsw.Connection:
"""Returns a connection to an in-memory database with the btn schemas."""
# TODO(AllSeeingEyeTolledEweSew): Probably cleaner way to do this.
conn = apsw.Connection(":memory:")
btn.Series._create_schema(conn)
btn.Group._create_schema(conn)
btn.TorrentEntry._create_schema(conn)
return conn
def add_fixture_row(conn: apsw.Connection, table: str, **kwargs: Any) -> None:
"""Adds a row to a database.
Args:
conn: The database to modify.
table: The name of the table to update.
kwargs: A mapping from column names to binding values.
"""
keys = sorted(kwargs.keys())
columns = ",".join(keys)
params = ",".join(":" + k for k in keys)
conn.cursor().execute(
f"insert into {table} ({columns}) values ({params})", kwargs
)
def add_series(conn: apsw.Connection, **series: Any) -> None:
"""Adds a Series row to the database, with some default values.
Args:
conn: The database to modify.
series: A mapping from column names to binding values.
"""
data = dict(updated_at=0, deleted=0, id=100)
data.update(series)
add_fixture_row(conn, "series", **data)
def add_group(conn: apsw.Connection, **group: Any) -> None:
"""Adds a Group row to the database, with some default values.
Args:
conn: The database to modify.
group: A mapping from column names to binding values.
"""
data = dict(
category="Episode", updated_at=0, series_id=100, id=110, deleted=0
)
data.update(group)
add_fixture_row(conn, "torrent_entry_group", **data)
def add_entry(conn: apsw.Connection, **torrent_entry: Any) -> None:
"""Adds a TorrentEntry row to the database, with some default values.
Args:
conn: The database to modify.
torrent_entry: A mapping from column names to binding values.
"""
data = dict(
codec="H.264",
container="MKV",
origin="Scene",
release_name="Test",
resolution="1080p",
size=1048576,
source="Bluray",
snatched=0,
leechers=0,
seeders=0,
time=1234567,
updated_at=0,
id=111,
group_id=110,
deleted=0,
)
data.update(torrent_entry)
data["info_hash"] = hashlib.sha1(str(data["id"]).encode()).hexdigest()
add_fixture_row(conn, "torrent_entry", **data)
def add_file(conn: apsw.Connection, **file_info: Any) -> None:
"""Adds a FileInfo row to the database, with some default values.
Args:
conn: The database to modify.
file_info: A mapping from column names to binding values.
"""
data = dict(updated_at=0, file_index=0, id=111)
data.update(file_info)
assert isinstance(data["path"], bytes)
add_fixture_row(conn, "file_info", **data)
class TestBrowseFile(unittest.TestCase):
"""Tests for accessing a file under browse/..."""
def test_file_access(self):
conn = get_mock_db()
add_series(conn, name="S")
add_group(conn, name="G")
add_entry(conn, time=12345678)
add_file(conn, path=b"a.mkv", start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse/S/G/a.mkv")
stat = node.stat()
self.assertEqual(stat.mtime, 12345678)
self.assertEqual(stat.size, 100)
ref = node.get_torrent_ref()
self.assertEqual(ref.tracker, "btn")
self.assertEqual(ref.start, 0)
self.assertEqual(ref.stop, 100)
def test_file_in_dir(self):
conn = get_mock_db()
add_series(conn, name="S")
add_group(conn, name="G")
add_entry(conn)
add_file(conn, path=b"path/to/a/file.mkv", start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse/S/G/path/to/a/file.mkv")
ref = node.get_torrent_ref()
self.assertEqual(ref.tracker, "btn")
def test_multiple_files(self):
conn = get_mock_db()
add_series(conn, name="S")
add_group(conn, name="G")
add_entry(conn)
add_file(conn, path=b"dir/2.mkv", file_index=0, start=0, stop=100)
add_file(conn, path=b"dir/1.mkv", file_index=1, start=100, stop=200)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse/S/G/dir/1.mkv")
self.assertEqual(node.stat().size, 100)
self.assertEqual(node.get_torrent_ref().start, 100)
node = fs.lookup(root, "browse/S/G/dir/2.mkv")
self.assertEqual(node.stat().size, 100)
self.assertEqual(node.get_torrent_ref().start, 0)
class TestBrowseBase(unittest.TestCase):
"""Tests for the browse directory itself."""
def test_access(self):
conn = get_mock_db()
add_series(conn, name="S")
add_group(conn, name="G")
add_entry(conn)
add_file(conn, path=b"path/to/a/file.mkv", start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse")
stat = node.stat()
self.assertEqual(stat.filetype, stat_lib.S_IFDIR)
def test_readdir(self):
conn = get_mock_db()
add_series(conn, name="S 100", id=100)
add_series(conn, name="S 200", id=200)
add_series(conn, name="S 300", id=300)
add_group(conn, name="G")
add_entry(conn)
add_file(conn, path=b"path/to/a/file.mkv", start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse")
dirents = list(node.readdir())
self.assertEqual(len(dirents), 3)
self.assertEqual(dirents[0].stat.filetype, stat_lib.S_IFDIR)
self.assertEqual(dirents[1].stat.filetype, stat_lib.S_IFDIR)
self.assertEqual(dirents[2].stat.filetype, stat_lib.S_IFDIR)
self.assertEqual(
{d.name for d in dirents}, {"S 100", "S 200", "S 300"}
)
def test_readdir_empty_name(self):
conn = get_mock_db()
add_series(conn, name="S", id=100)
add_series(conn, name="", id=200)
add_group(conn, name="G")
add_entry(conn)
add_file(conn, path=b"path/to/a/file.mkv", start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse")
dirents = list(node.readdir())
self.assertEqual(len(dirents), 1)
self.assertEqual(dirents[0].stat.filetype, stat_lib.S_IFDIR)
self.assertEqual({d.name for d in dirents}, {"S"})
def test_readdir_deleted(self):
conn = get_mock_db()
add_series(conn, name="S 100", id=100)
add_series(conn, name="S 200", id=200, deleted=1)
add_series(conn, name="S 300", id=300)
add_group(conn, name="G")
add_entry(conn)
add_file(conn, path=b"path/to/a/file.mkv", start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse")
dirents = list(node.readdir())
self.assertEqual(len(dirents), 2)
self.assertEqual({d.name for d in dirents}, {"S 100", "S 300"})
def test_readdir_slash(self):
conn = get_mock_db()
add_series(conn, name="S/Slash")
add_group(conn, name="G")
add_entry(conn)
add_file(conn, path=b"path/to/a/file.mkv", start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse")
dirents = list(node.readdir())
self.assertEqual(len(dirents), 1)
self.assertEqual(dirents[0].name, "S_Slash")
def test_readdir_under(self):
conn = get_mock_db()
add_series(conn, name="S_Under")
add_group(conn, name="G")
add_entry(conn)
add_file(conn, path=b"path/to/a/file.mkv", start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse")
dirents = list(node.readdir())
self.assertEqual(len(dirents), 1)
self.assertEqual(dirents[0].name, "S_Under")
def test_readdir_offset(self):
conn = get_mock_db()
add_series(conn, name="S 100", id=100)
add_series(conn, name="S 200", id=200)
add_series(conn, name="S 300", id=300)
add_group(conn, name="G")
add_entry(conn)
add_file(conn, path=b"path/to/a/file.mkv", start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse")
dirents = list(node.readdir())
next_dirents = list(node.readdir(offset=dirents[0].next_offset))
self.assertEqual(len(next_dirents), 2)
self.assertEqual(next_dirents[0].name, dirents[1].name)
def test_lookup(self):
conn = get_mock_db()
add_series(conn, name="S 100", id=100)
add_series(conn, name="S 200", id=200)
add_series(conn, name="S 300", id=300)
add_group(conn, name="G")
add_entry(conn)
add_file(conn, path=b"path/to/a/file.mkv", start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse")
node = node.lookup("S 200")
self.assertNotEqual(node, None)
def test_lookup_deleted(self):
conn = get_mock_db()
add_series(conn, name="S 100", id=100)
add_series(conn, name="S 200", id=200, deleted=1)
add_series(conn, name="S 300", id=300)
add_group(conn, name="G")
add_entry(conn)
add_file(conn, path=b"path/to/a/file.mkv", start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse")
with self.assertRaises(FileNotFoundError):
node = node.lookup("S 200")
def test_lookup_slash(self):
conn = get_mock_db()
add_series(conn, name="S/Slash")
add_group(conn, name="G")
add_entry(conn)
add_file(conn, path=b"path/to/a/file.mkv", start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse")
node = node.lookup("S_Slash")
self.assertNotEqual(node, None)
def test_lookup_under(self):
conn = get_mock_db()
add_series(conn, name="S_Under")
add_group(conn, name="G")
add_entry(conn)
add_file(conn, path=b"path/to/a/file.mkv", start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse")
node = node.lookup("S_Under")
self.assertNotEqual(node, None)
def test_lookup_noent(self):
conn = get_mock_db()
add_series(conn, name="S")
add_group(conn, name="G")
add_entry(conn)
add_file(conn, path=b"path/to/a/file.mkv", start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse")
with self.assertRaises(FileNotFoundError):
node.lookup("does_not_exist")
class TestBrowseSeries(unittest.TestCase):
"""Tests for a browse/<series> directory."""
def test_access(self):
conn = get_mock_db()
add_series(conn, name="S")
add_group(conn, name="G")
add_entry(conn)
add_file(conn, path=b"path/to/a/file.mkv", start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse/S")
stat = node.stat()
self.assertEqual(stat.filetype, stat_lib.S_IFDIR)
def test_readdir(self):
conn = get_mock_db()
add_series(conn, name="S")
add_group(conn, name="G 110", id=110, series_id=100)
add_group(conn, name="G 120", id=120, series_id=100)
add_group(conn, name="G 130", id=130, series_id=100)
add_entry(conn)
add_file(conn, path=b"path/to/a/file.mkv", start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse/S")
dirents = list(node.readdir())
self.assertEqual(len(dirents), 3)
self.assertEqual(dirents[0].stat.filetype, stat_lib.S_IFDIR)
self.assertEqual(dirents[1].stat.filetype, stat_lib.S_IFDIR)
self.assertEqual(dirents[2].stat.filetype, stat_lib.S_IFDIR)
self.assertEqual(
{d.name for d in dirents}, {"G 110", "G 120", "G 130"}
)
def test_readdir_empty_name(self):
conn = get_mock_db()
add_series(conn, name="S")
add_group(conn, name="G", id=110, series_id=100)
add_group(conn, name="", id=120, series_id=100)
add_entry(conn)
add_file(conn, path=b"path/to/a/file.mkv", start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse/S")
dirents = list(node.readdir())
self.assertEqual(len(dirents), 1)
self.assertEqual(dirents[0].stat.filetype, stat_lib.S_IFDIR)
self.assertEqual({d.name for d in dirents}, {"G"})
def test_readdir_deleted(self):
conn = get_mock_db()
add_series(conn, name="S")
add_group(conn, name="G 110", id=110, series_id=100)
add_group(conn, name="G 120", id=120, series_id=100, deleted=1)
add_group(conn, name="G 130", id=130, series_id=100)
add_entry(conn)
add_file(conn, path=b"path/to/a/file.mkv", start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse/S")
dirents = list(node.readdir())
self.assertEqual(len(dirents), 2)
self.assertEqual({d.name for d in dirents}, {"G 110", "G 130"})
def test_readdir_slash(self):
conn = get_mock_db()
add_series(conn, name="S")
add_group(conn, name="G/Slash")
add_entry(conn)
add_file(conn, path=b"path/to/a/file.mkv", start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse/S")
dirents = list(node.readdir())
self.assertEqual(len(dirents), 1)
self.assertEqual(dirents[0].name, "G_Slash")
def test_readdir_under(self):
conn = get_mock_db()
add_series(conn, name="S")
add_group(conn, name="G_Under")
add_entry(conn)
add_file(conn, path=b"path/to/a/file.mkv", start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse/S")
dirents = list(node.readdir())
self.assertEqual(len(dirents), 1)
self.assertEqual(dirents[0].name, "G_Under")
def test_readdir_offset(self):
conn = get_mock_db()
add_series(conn, name="S")
add_group(conn, name="G 110", id=110, series_id=100)
add_group(conn, name="G 120", id=120, series_id=100)
add_group(conn, name="G 130", id=130, series_id=100)
add_entry(conn)
add_file(conn, path=b"path/to/a/file.mkv", start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse/S")
dirents = list(node.readdir())
next_dirents = list(node.readdir(offset=dirents[0].next_offset))
self.assertEqual(len(next_dirents), 2)
self.assertEqual(next_dirents[0].name, dirents[1].name)
def test_lookup(self):
conn = get_mock_db()
add_series(conn, name="S")
add_group(conn, name="G 110", id=110, series_id=100)
add_group(conn, name="G 120", id=120, series_id=100)
add_group(conn, name="G 130", id=130, series_id=100)
add_entry(conn)
add_file(conn, path=b"path/to/a/file.mkv", start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse/S")
node = node.lookup("G 110")
self.assertNotEqual(node, None)
def test_lookup_deleted(self):
conn = get_mock_db()
add_series(conn, name="S")
add_group(conn, name="G 110", id=110, series_id=100)
add_group(conn, name="G 120", id=120, series_id=100, deleted=1)
add_group(conn, name="G 130", id=130, series_id=100)
add_entry(conn)
add_file(conn, path=b"path/to/a/file.mkv", start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse/S")
with self.assertRaises(FileNotFoundError):
node.lookup("G 120")
def test_lookup_slash(self):
conn = get_mock_db()
add_series(conn, name="S")
add_group(conn, name="G/Slash")
add_entry(conn)
add_file(conn, path=b"path/to/a/file.mkv", start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse/S")
node = node.lookup("G_Slash")
self.assertNotEqual(node, None)
def test_lookup_under(self):
conn = get_mock_db()
add_series(conn, name="S")
add_group(conn, name="G_Under")
add_entry(conn)
add_file(conn, path=b"path/to/a/file.mkv", start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse/S")
node = node.lookup("G_Under")
self.assertNotEqual(node, None)
def test_lookup_noent(self):
conn = get_mock_db()
add_series(conn, name="S")
add_group(conn, name="G")
add_entry(conn)
add_file(conn, path=b"path/to/a/file.mkv", start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse/S")
with self.assertRaises(FileNotFoundError):
node.lookup("does_not_exist")
class TestGroupSubdirBase(unittest.TestCase):
"""Tests for a browse/<series>/<group> directory."""
def test_access(self):
conn = get_mock_db()
# Readability
def file_(**kwargs):
add_file(conn, **kwargs)
add_series(conn, name="S")
add_group(conn, name="G")
add_entry(conn, id=111)
add_entry(conn, id=112)
file_(id=111, path=b"b1/1.mkv", file_index=0, start=0, stop=100)
file_(id=111, path=b"b1/2.mkv", file_index=1, start=100, stop=200)
file_(id=111, path=b"a1/1.mkv", file_index=2, start=200, stop=300)
file_(id=111, path=b"a1/2.mkv", file_index=3, start=300, stop=400)
file_(id=112, path=b"a2.mkv", file_index=0, start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse/S/G")
stat = node.stat()
self.assertEqual(stat.filetype, stat_lib.S_IFDIR)
def test_readdir(self):
conn = get_mock_db()
# Readability
def file_(**kwargs):
add_file(conn, **kwargs)
add_series(conn, name="S")
add_group(conn, name="G")
add_entry(conn, id=111)
add_entry(conn, id=112)
file_(id=111, path=b"b1/1.mkv", file_index=0, start=0, stop=100)
file_(id=111, path=b"b1/2.mkv", file_index=1, start=100, stop=200)
file_(id=111, path=b"a1/1.mkv", file_index=2, start=200, stop=300)
file_(id=111, path=b"a1/2.mkv", file_index=3, start=300, stop=400)
file_(id=112, path=b"a2.mkv", file_index=0, start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse/S/G")
dirents = node.readdir()
dirents = sorted(dirents, key=lambda d: d.name)
self.assertEqual([d.name for d in dirents], ["a1", "a2.mkv", "b1"])
self.assertEqual(dirents[0].stat.filetype, stat_lib.S_IFDIR)
self.assertEqual(dirents[1].stat.filetype, stat_lib.S_IFREG)
self.assertEqual(dirents[2].stat.filetype, stat_lib.S_IFDIR)
def test_readdir_offset(self):
conn = get_mock_db()
# Readability
def file_(**kwargs):
add_file(conn, **kwargs)
add_series(conn, name="S")
add_group(conn, name="G")
add_entry(conn, id=111)
add_entry(conn, id=112)
file_(id=111, path=b"b1/1.mkv", file_index=0, start=0, stop=100)
file_(id=111, path=b"b1/2.mkv", file_index=1, start=100, stop=200)
file_(id=111, path=b"a1/1.mkv", file_index=2, start=200, stop=300)
file_(id=111, path=b"a1/2.mkv", file_index=3, start=300, stop=400)
file_(id=112, path=b"a2.mkv", file_index=0, start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse/S/G")
dirents = list(node.readdir())
next_dirents = list(node.readdir(offset=dirents[0].next_offset))
self.assertEqual(len(next_dirents), len(dirents) - 1)
self.assertEqual(dirents[1:], next_dirents)
def test_lookup(self):
conn = get_mock_db()
# Readability
def file_(**kwargs):
add_file(conn, **kwargs)
add_series(conn, name="S")
add_group(conn, name="G")
add_entry(conn, id=111)
add_entry(conn, id=112)
file_(id=111, path=b"b1/1.mkv", file_index=0, start=0, stop=100)
file_(id=111, path=b"b1/2.mkv", file_index=1, start=100, stop=200)
file_(id=111, path=b"a1/1.mkv", file_index=2, start=200, stop=300)
file_(id=111, path=b"a1/2.mkv", file_index=3, start=300, stop=400)
file_(id=112, path=b"a2.mkv", file_index=0, start=0, stop=100)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse/S/G")
sub = node.lookup("a1")
self.assertEqual(sub.stat().filetype, stat_lib.S_IFDIR)
sub = node.lookup("b1")
self.assertEqual(sub.stat().filetype, stat_lib.S_IFDIR)
sub = node.lookup("a2.mkv")
self.assertEqual(sub.stat().filetype, stat_lib.S_IFREG)
def test_lookup_noent(self):
conn = get_mock_db()
add_series(conn, name="S")
add_group(conn, name="G")
add_entry(conn)
add_file(
conn, id=111, path=b"a1/1.mkv", file_index=0, start=0, stop=100
)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse/S/G")
with self.assertRaises(FileNotFoundError):
node.lookup("does_not_exist")
class TestGroupSubdir(unittest.TestCase):
"""Tests for a nontrivial subdirectory of browse/<series>/<group>."""
def test_access(self):
conn = get_mock_db()
# Readability
def file_(**kwargs):
add_file(conn, **kwargs)
add_series(conn, name="S")
add_group(conn, name="G")
add_entry(conn, id=111)
add_entry(conn, id=112)
file_(id=111, path=b"b/1.mkv", file_index=0, start=0, stop=100)
file_(id=111, path=b"a/1.mkv", file_index=1, start=100, stop=200)
file_(id=112, path=b"b/c/2.mkv", file_index=0, start=0, stop=100)
file_(id=112, path=b"a/c/2.mkv", file_index=1, start=100, stop=200)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse/S/G/a")
stat = node.stat()
self.assertEqual(stat.filetype, stat_lib.S_IFDIR)
def test_readdir(self):
conn = get_mock_db()
# Readability
def file_(**kwargs):
add_file(conn, **kwargs)
add_series(conn, name="S")
add_group(conn, name="G")
add_entry(conn, id=111)
add_entry(conn, id=112)
file_(id=111, path=b"b/1.mkv", file_index=0, start=0, stop=100)
file_(id=111, path=b"a/1.mkv", file_index=1, start=100, stop=200)
file_(id=112, path=b"b/c/2.mkv", file_index=0, start=0, stop=100)
file_(id=112, path=b"a/c/2.mkv", file_index=1, start=100, stop=200)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse/S/G/a")
dirents = list(node.readdir())
dirents = sorted(dirents, key=lambda d: d.name)
self.assertEqual([d.name for d in dirents], ["1.mkv", "c"])
self.assertEqual(dirents[0].stat.filetype, stat_lib.S_IFREG)
self.assertEqual(dirents[1].stat.filetype, stat_lib.S_IFDIR)
def test_readdir_offset(self):
conn = get_mock_db()
# Readability
def file_(**kwargs):
add_file(conn, **kwargs)
add_series(conn, name="S")
add_group(conn, name="G")
add_entry(conn, id=111)
add_entry(conn, id=112)
file_(id=111, path=b"b/1.mkv", file_index=0, start=0, stop=100)
file_(id=111, path=b"a/1.mkv", file_index=1, start=100, stop=200)
file_(id=112, path=b"b/c/2.mkv", file_index=0, start=0, stop=100)
file_(id=112, path=b"a/c/2.mkv", file_index=1, start=100, stop=200)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse/S/G/a")
dirents = list(node.readdir())
next_dirents = list(node.readdir(offset=dirents[0].next_offset))
self.assertEqual(len(next_dirents), len(dirents) - 1)
self.assertEqual(dirents[1:], next_dirents)
def test_lookup(self):
conn = get_mock_db()
# Readability
def file_(**kwargs):
add_file(conn, **kwargs)
add_series(conn, name="S")
add_group(conn, name="G")
add_entry(conn, id=111)
add_entry(conn, id=112)
file_(id=111, path=b"b/1.mkv", file_index=0, start=0, stop=100)
file_(id=111, path=b"a/1.mkv", file_index=1, start=100, stop=200)
file_(id=112, path=b"b/c/2.mkv", file_index=0, start=0, stop=100)
file_(id=112, path=b"a/c/2.mkv", file_index=1, start=100, stop=200)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse/S/G/a")
sub = node.lookup("1.mkv")
self.assertEqual(sub.stat().filetype, stat_lib.S_IFREG)
sub = node.lookup("c")
self.assertEqual(sub.stat().filetype, stat_lib.S_IFDIR)
def test_lookup_noent(self):
conn = get_mock_db()
# Readability
def file_(**kwargs):
add_file(conn, **kwargs)
add_series(conn, name="S")
add_group(conn, name="G")
add_entry(conn, id=111)
add_entry(conn, id=112)
file_(id=111, path=b"b/1.mkv", file_index=0, start=0, stop=100)
file_(id=111, path=b"a/1.mkv", file_index=1, start=100, stop=200)
file_(id=112, path=b"b/c/2.mkv", file_index=0, start=0, stop=100)
file_(id=112, path=b"a/c/2.mkv", file_index=1, start=100, stop=200)
root = tvaf_btn.RootDir(conn)
node = fs.lookup(root, "browse/S/G/a")
with self.assertRaises(FileNotFoundError):
node.lookup("does_not_exist")
| 34.53856
| 79
| 0.608202
| 4,005
| 26,871
| 3.931835
| 0.060175
| 0.048771
| 0.040452
| 0.051819
| 0.854829
| 0.849368
| 0.835969
| 0.831269
| 0.826951
| 0.810186
| 0
| 0.050823
| 0.24506
| 26,871
| 777
| 80
| 34.583012
| 0.725426
| 0.074355
| 0
| 0.80212
| 0
| 0
| 0.07275
| 0.001174
| 0
| 0
| 0
| 0.001287
| 0.132509
| 1
| 0.091873
| false
| 0
| 0.014134
| 0
| 0.116608
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b3830d0b2b0abda80881bf81dafec3bf6ca201b9
| 164
|
py
|
Python
|
httpbin/routers/inspection/__init__.py
|
imleowoo/fastapi-httpbin
|
3f563b941df59dd1889434c4416d80979c591832
|
[
"MIT"
] | null | null | null |
httpbin/routers/inspection/__init__.py
|
imleowoo/fastapi-httpbin
|
3f563b941df59dd1889434c4416d80979c591832
|
[
"MIT"
] | null | null | null |
httpbin/routers/inspection/__init__.py
|
imleowoo/fastapi-httpbin
|
3f563b941df59dd1889434c4416d80979c591832
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from httpbin.routers.inspection import request as request_inspection
from httpbin.routers.inspection import response as response_inspection
| 41
| 70
| 0.817073
| 21
| 164
| 6.285714
| 0.52381
| 0.166667
| 0.272727
| 0.424242
| 0.515152
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006803
| 0.103659
| 164
| 3
| 71
| 54.666667
| 0.891156
| 0.128049
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b3e385f5c65dd6d3fb5eb3ab2c408c90dbccca0c
| 106
|
py
|
Python
|
pyregex/mod_four/__init__.py
|
JASTYN/pythonmaster
|
46638ab09d28b65ce5431cd0759fe6df272fb85d
|
[
"Apache-2.0",
"MIT"
] | 3
|
2017-05-02T10:28:13.000Z
|
2019-02-06T09:10:11.000Z
|
pyregex/mod_four/__init__.py
|
JASTYN/pythonmaster
|
46638ab09d28b65ce5431cd0759fe6df272fb85d
|
[
"Apache-2.0",
"MIT"
] | 2
|
2017-06-21T20:39:14.000Z
|
2020-02-25T10:28:57.000Z
|
pyregex/mod_four/__init__.py
|
JASTYN/pythonmaster
|
46638ab09d28b65ce5431cd0759fe6df272fb85d
|
[
"Apache-2.0",
"MIT"
] | 2
|
2016-07-29T04:35:22.000Z
|
2017-01-18T17:05:36.000Z
|
import re
# Pre-compiled once at import time. Matches a bracketed non-negative integer
# divisible by 4, e.g. "[0]", "[48]", "[16]" (an optional non-word char may
# follow the opening bracket). Raw string avoids the invalid escape
# sequences (`\[`, `\W`) that a plain string literal would warn about.
_MOD_FOUR_RE = re.compile(r'^\[\W?(?:[048]|[0-9]*(?:[02468][048]|[13579][26]))\]$')


def mod(test):
    """Return the compiled 'divisible by four' regex.

    Note: *test* is accepted for backward compatibility but is not used;
    callers apply the returned pattern themselves.
    """
    return _MOD_FOUR_RE
| 17.666667
| 78
| 0.509434
| 16
| 106
| 3.375
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.212766
| 0.113208
| 106
| 5
| 79
| 21.2
| 0.361702
| 0
| 0
| 0
| 0
| 0
| 0.5
| 0.5
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 8
|
b3f9fd803c6fab9357495f4d21cf3bb7870c4b50
| 3,171
|
py
|
Python
|
hyper_resource/serializers.py
|
RogerioBorba/kanban
|
862b32e415dd93833f26bc651b951085eca969f0
|
[
"Apache-2.0"
] | null | null | null |
hyper_resource/serializers.py
|
RogerioBorba/kanban
|
862b32e415dd93833f26bc651b951085eca969f0
|
[
"Apache-2.0"
] | 9
|
2020-02-11T21:48:08.000Z
|
2022-01-13T00:33:58.000Z
|
hyper_resource/serializers.py
|
RogerioBorba/kanban
|
862b32e415dd93833f26bc651b951085eca969f0
|
[
"Apache-2.0"
] | 1
|
2017-11-06T20:27:33.000Z
|
2017-11-06T20:27:33.000Z
|
from rest_framework.serializers import ModelSerializer
from rest_framework_gis.serializers import GeoFeatureModelSerializer
class BusinessSerializer(ModelSerializer):
    """ModelSerializer that accepts related objects referenced by IRI.

    Subclasses override `field_relationship_to_validate_dict` to map a
    validated-data key to the request field holding the related IRI
    (e.g. "/api/things/42/"); create/update then replace each IRI with
    its trailing id before delegating to the base serializer.
    """

    def get_id_relationship_from_request(self, field_name_relationship):
        """Return the id at the end of the IRI sent for *field_name_relationship*,
        or None when the field is absent, None or empty."""
        if field_name_relationship not in self.initial_data:
            return None
        field_iri = self.initial_data[field_name_relationship]
        # Fix: use `is not None` (PEP 8 E711) instead of `!= None`.
        if field_iri is not None and field_iri != '':
            arr = field_iri.split('/')
            # An IRI with a trailing slash splits into [..., id, ''] -> arr[-2].
            return arr[-1] if arr[-1] != '' else arr[-2]
        return None

    def field_relationship_to_validate_dict(self):
        """Mapping of model attribute -> request field name; override in subclasses."""
        return {}

    def transform_relationship_from_request(self, validated_data):
        """Replace each relationship entry in *validated_data* with the id
        parsed from the corresponding IRI in the raw request payload."""
        for key, value in self.field_relationship_to_validate_dict().items():
            validated_data[key] = self.get_id_relationship_from_request(value)

    def create_or_update(self, instance, validated_data):
        """Create (*instance* is None) or update, then re-apply relationship ids
        onto the resulting instance."""
        an_instance = instance
        self.transform_relationship_from_request(validated_data)
        if an_instance is None:
            an_instance = super(BusinessSerializer, self).create(validated_data)
        else:
            an_instance = super(BusinessSerializer, self).update(instance, validated_data)
        for key, value in self.field_relationship_to_validate_dict().items():
            setattr(an_instance, key, validated_data[key])
        return an_instance

    def create(self, validated_data):
        return self.create_or_update(None, validated_data)

    def update(self, instance, validated_data):
        return self.create_or_update(instance, validated_data)
class GeoBusinessSerializer(GeoFeatureModelSerializer):
    """GeoFeatureModelSerializer that accepts related objects referenced by IRI.

    NOTE(review): duplicates BusinessSerializer's logic for the GeoJSON base
    class; a shared mixin would remove the duplication.
    """

    def get_id_relationship_from_request(self, field_name_relationship):
        """Return the id at the end of the IRI sent for *field_name_relationship*,
        or None when the field is absent, None or empty."""
        if field_name_relationship not in self.initial_data:
            return None
        field_iri = self.initial_data[field_name_relationship]
        # Fix: use `is not None` (PEP 8 E711) instead of `!= None`.
        if field_iri is not None and field_iri != '':
            arr = field_iri.split('/')
            # An IRI with a trailing slash splits into [..., id, ''] -> arr[-2].
            return arr[-1] if arr[-1] != '' else arr[-2]
        return None

    def field_relationship_to_validate_dict(self):
        """Mapping of model attribute -> request field name; override in subclasses."""
        return {}

    def transform_relationship_from_request(self, validated_data):
        """Replace each relationship entry in *validated_data* with the id
        parsed from the corresponding IRI in the raw request payload."""
        for key, value in self.field_relationship_to_validate_dict().items():
            validated_data[key] = self.get_id_relationship_from_request(value)

    def create_or_update(self, instance, validated_data):
        """Create (*instance* is None) or update, then re-apply relationship ids
        onto the resulting instance."""
        an_instance = instance
        self.transform_relationship_from_request(validated_data)
        if an_instance is None:
            an_instance = super(GeoBusinessSerializer, self).create(validated_data)
        else:
            an_instance = super(GeoBusinessSerializer, self).update(instance, validated_data)
        for key, value in self.field_relationship_to_validate_dict().items():
            setattr(an_instance, key, validated_data[key])
        return an_instance

    def create(self, validated_data):
        return self.create_or_update(None, validated_data)

    def update(self, instance, validated_data):
        return self.create_or_update(instance, validated_data)
| 42.28
| 94
| 0.692211
| 373
| 3,171
| 5.549598
| 0.134048
| 0.138164
| 0.088889
| 0.078261
| 0.903382
| 0.8657
| 0.8657
| 0.8657
| 0.825121
| 0.825121
| 0
| 0.002462
| 0.231473
| 3,171
| 75
| 95
| 42.28
| 0.846943
| 0
| 0
| 0.862069
| 0
| 0
| 0.000646
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.206897
| false
| 0
| 0.034483
| 0.103448
| 0.517241
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 10
|
b3fbc6ba26b89612406d3701f1bb28f0a1c2da98
| 88
|
py
|
Python
|
nougat/context/__init__.py
|
Riparo/nougat
|
8453bc37e0b782f296952f0a418532ebbbcd74f3
|
[
"MIT"
] | 12
|
2018-07-14T06:32:34.000Z
|
2020-12-23T07:47:33.000Z
|
nougat/context/__init__.py
|
Riparo/nougat
|
8453bc37e0b782f296952f0a418532ebbbcd74f3
|
[
"MIT"
] | 4
|
2018-06-15T13:55:34.000Z
|
2021-06-01T22:21:13.000Z
|
nougat/context/__init__.py
|
NougatWeb/nougat
|
8453bc37e0b782f296952f0a418532ebbbcd74f3
|
[
"MIT"
] | 1
|
2020-02-16T17:25:49.000Z
|
2020-02-16T17:25:49.000Z
|
from nougat.context.request import Request
from nougat.context.response import Response
| 29.333333
| 44
| 0.863636
| 12
| 88
| 6.333333
| 0.5
| 0.263158
| 0.447368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 88
| 2
| 45
| 44
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
3752feb82eb1732ed0f929df62da380ef2e12f4a
| 7,831
|
py
|
Python
|
Sistema de notas/Main project.py
|
Vinleit/sys-notas
|
a0c285cd4b6371157bbb940285537537a5db7f49
|
[
"MIT"
] | null | null | null |
Sistema de notas/Main project.py
|
Vinleit/sys-notas
|
a0c285cd4b6371157bbb940285537537a5db7f49
|
[
"MIT"
] | null | null | null |
Sistema de notas/Main project.py
|
Vinleit/sys-notas
|
a0c285cd4b6371157bbb940285537537a5db7f49
|
[
"MIT"
] | null | null | null |
from PySimpleGUI import PySimpleGUI as sg
import pandas as pd
import matplotlib.pyplot as plt
import win32com.client as win32
import os.path
# --- Initial window: ask for the class name and how many grades to record ---
sg.theme("SystemDefault")
layout_inicio = [
    [sg.Text("Turma:", font="Helvetica"), sg.Input(key="Turma", size=(35, 0))],
    [sg.Text("Quantas notas?", font="Helvetica")],
    [sg.Radio("2 Notas", font="Helvetica", key="2_notas", group_id="Notas"),
     sg.Radio("3 notas", font="Helvetica", key="3_notas", group_id="Notas")],
    [sg.Button("Continuar", font="Helvetica", key="Continuar")]
]
janela_inicio = sg.Window("Cadastro de Notas", layout=layout_inicio,
                          element_justification="center")


def _layout_notas(chaves_notas):
    """Build the grade-entry layout: one Name row, one row per grade key."""
    linhas = [[sg.Text("Nome:", font="Helvetica"), sg.Input(key="Nome", size=(48, 0))]]
    for chave in chaves_notas:
        linhas.append([sg.Text(chave + ":", font="Helvetica"),
                       sg.Input(key=chave, size=(50, 0))])
    linhas.append([sg.Button("Adicionar", size=(35, 0), font="Helvetica"),
                   sg.Button("Salvar e encerrar", size=(15, 0), font="Helvetica")])
    return linhas


def _cadastrar_e_exportar(turma, chaves_notas):
    """Collect students' grades, then export an Excel sheet and a bar chart.

    Args:
        turma: class label, used only in the output file names/titles.
        chaves_notas: grade field keys, e.g. ["P1", "TB"] or ["P1", "P2", "TB"].
    """
    janela_main = sg.Window("Cadastro de Notas", layout=_layout_notas(chaves_notas))
    # First row is the header; each data row is [name, *grades, average].
    nomes_e_notas = [["NOME"] + chaves_notas + ["MÉDIA"]]
    while True:
        eventos, valores = janela_main.read()
        if eventos == sg.WIN_CLOSED:
            break
        if eventos == "Adicionar":
            notas = [float(valores[chave]) for chave in chaves_notas]
            # BUG FIX: the original 3-grade branch computed
            # p1 + p2 + tb / 3 (only TB was divided); the mean must
            # divide the *sum* of all grades.
            media = sum(notas) / len(notas)
            nomes_e_notas.append([valores["Nome"]] + notas + [media])
            # Clear every input for the next student (the original cleared
            # "P1" twice and never parameterized the field list).
            for campo in ["Nome"] + chaves_notas:
                janela_main.FindElement(campo).Update('')
        if eventos == "Salvar e encerrar":
            janela_main.close()
            # Excel export: no index/header so the first data row is the title row.
            dados = pd.DataFrame(nomes_e_notas)
            dados.to_excel(excel_writer=f"Planilha {turma}.xlsx", sheet_name="pg.1",
                           index=False, header=False)
            # Bar chart of each student's average grade (skip the header row).
            x_nomes = [linha[0] for linha in nomes_e_notas[1:]]
            y_media = [linha[-1] for linha in nomes_e_notas[1:]]
            plt.bar(x_nomes, y_media, color="cyan", edgecolor="darkblue", linewidth=2)
            plt.title(f"Gráfico dos alunos do {turma}")
            plt.xlabel("NOMES")
            plt.ylabel("NOTAS")
            plt.savefig(f"Grafico {turma}.png")
            # NOTE: a commented-out Outlook e-mail draft (win32com) that sent
            # the .xlsx and .png as attachments was removed here as dead code;
            # the win32/os imports above remain for when it is implemented.


# --- Event loop for the initial window --------------------------------------
turma = str()
while True:
    eventos, valores = janela_inicio.read()
    if eventos == sg.WIN_CLOSED:
        break
    if eventos == "Continuar":
        turma = valores["Turma"]
        if valores["2_notas"]:
            janela_inicio.close()
            _cadastrar_e_exportar(turma, ["P1", "TB"])
        elif valores["3_notas"]:
            janela_inicio.close()
            _cadastrar_e_exportar(turma, ["P1", "P2", "TB"])
| 42.102151
| 155
| 0.478611
| 830
| 7,831
| 4.387952
| 0.16747
| 0.037891
| 0.069467
| 0.042834
| 0.869577
| 0.83855
| 0.83855
| 0.82894
| 0.82894
| 0.817957
| 0
| 0.018332
| 0.380028
| 7,831
| 186
| 156
| 42.102151
| 0.731823
| 0.138041
| 0
| 0.712963
| 0
| 0
| 0.113697
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.046296
| 0
| 0.046296
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
807c0b3db5f142b91d54609fbee4af46e42f1680
| 14,509
|
py
|
Python
|
examples/librispeech/metrics/ctc.py
|
sundogrd/tensorflow_end2end_speech_recognition
|
61e4a65fb5c9f3d9f690d713dcd77a48b1de0a14
|
[
"MIT"
] | 351
|
2017-05-27T08:31:27.000Z
|
2022-03-03T16:47:27.000Z
|
examples/librispeech/metrics/ctc.py
|
eLavin11/tensorflow_end2end_speech_recognition
|
65b9728089d5e92b25b92384a67419d970399a64
|
[
"MIT"
] | 19
|
2017-07-19T13:12:18.000Z
|
2019-06-12T06:07:13.000Z
|
examples/librispeech/metrics/ctc.py
|
eLavin11/tensorflow_end2end_speech_recognition
|
65b9728089d5e92b25b92384a67419d970399a64
|
[
"MIT"
] | 127
|
2017-06-12T16:27:21.000Z
|
2021-12-29T02:22:34.000Z
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Define evaluation method for CTC model (Librispeech corpus)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tqdm import tqdm
from utils.io.labels.character import Idx2char, Char2idx
from utils.io.labels.word import Idx2word
from utils.io.labels.sparsetensor import sparsetensor2list
from utils.evaluation.edit_distance import compute_cer, compute_wer, wer_align
from models.ctc.decoders.beam_search_decoder import BeamSearchDecoder
def do_eval_cer(session, decode_ops, model, dataset, label_type,
                is_test=False, eval_batch_size=None, progressbar=False,
                is_multitask=False):
    """Evaluate trained model by Character Error Rate.
    Args:
        session: session of training model
        decode_ops: list of operations for decoding (one per device)
        model: the model to evaluate
        dataset: An instance of a `Dataset` class
        label_type (string): character or character_capital_divide
        is_test (bool, optional): set to True when evaluating by the test set
        eval_batch_size (int, optional): the batch size when evaluating the model
        progressbar (bool, optional): if True, visualize the progressbar
        is_multitask (bool, optional): if True, evaluate the multitask model
    Return:
        cer_mean (float): An average of CER
        wer_mean (float): An average of WER
    """
    assert isinstance(decode_ops, list), "decode_ops must be a list."
    # dataset.batch_size is mutated in place below; remember the original
    # value so it can be restored before returning.
    batch_size_original = dataset.batch_size

    # Reset data counter
    dataset.reset()

    # Set batch size in the evaluation
    if eval_batch_size is not None:
        dataset.batch_size = eval_batch_size

    # Index -> character mapping; which map file is used depends on label_type.
    if label_type == 'character':
        idx2char = Idx2char(
            map_file_path='../metrics/mapping_files/character.txt')
    elif label_type == 'character_capital_divide':
        idx2char = Idx2char(
            map_file_path='../metrics/mapping_files/character_capital_divide.txt',
            capital_divide=True,
            space_mark='_')
    else:
        raise TypeError

    cer_mean, wer_mean = 0, 0
    skip_data_num = 0  # utterances dropped because decoding output was unusable
    if progressbar:
        pbar = tqdm(total=len(dataset))
    for data, is_new_epoch in dataset:
        # Create feed dictionary for next mini batch
        if is_multitask:
            inputs, _, labels_true, inputs_seq_len, _ = data
        else:
            inputs, labels_true, inputs_seq_len, _ = data

        feed_dict = {}
        # One decode op per device: feed each device its own slice of the batch.
        for i_device in range(len(decode_ops)):
            feed_dict[model.inputs_pl_list[i_device]] = inputs[i_device]
            feed_dict[model.inputs_seq_len_pl_list[i_device]] = inputs_seq_len[i_device]
            feed_dict[model.keep_prob_pl_list[i_device]] = 1.0  # no dropout at eval time

        labels_pred_st_list = session.run(decode_ops, feed_dict=feed_dict)
        for i_device, labels_pred_st in enumerate(labels_pred_st_list):
            batch_size_device = len(inputs[i_device])
            try:
                # Convert the decoded SparseTensor into per-utterance label lists;
                # this is what the IndexError guard below protects against.
                labels_pred = sparsetensor2list(labels_pred_st,
                                                batch_size_device)
                for i_batch in range(batch_size_device):

                    # Convert from list of index to string
                    if is_test:
                        str_true = labels_true[i_device][i_batch][0]
                        # NOTE: transcript is separated by space('_')
                    else:
                        str_true = idx2char(labels_true[i_device][i_batch],
                                            padded_value=dataset.padded_value)
                    str_pred = idx2char(labels_pred[i_batch])

                    # Remove consecutive spaces
                    str_pred = re.sub(r'[_]+', '_', str_pred)

                    # Remove garbage labels (apostrophes)
                    str_true = re.sub(r'[\']+', '', str_true)
                    str_pred = re.sub(r'[\']+', '', str_pred)

                    # Compute WER (word boundaries are '_')
                    wer_mean += compute_wer(ref=str_true.split('_'),
                                            hyp=str_pred.split('_'),
                                            normalize=True)
                    # substitute, insert, delete = wer_align(
                    #     ref=str_pred.split('_'),
                    #     hyp=str_true.split('_'))
                    # print('SUB: %d' % substitute)
                    # print('INS: %d' % insert)
                    # print('DEL: %d' % delete)

                    # Remove spaces
                    str_true = re.sub(r'[_]+', '', str_true)
                    str_pred = re.sub(r'[_]+', '', str_pred)

                    # Compute CER
                    cer_mean += compute_cer(str_pred=str_pred,
                                            str_true=str_true,
                                            normalize=True)
                    if progressbar:
                        pbar.update(1)
            except IndexError:
                # Whole device batch is skipped when the sparse->list conversion fails.
                print('skipped')
                skip_data_num += batch_size_device
                # TODO: Conduct decoding again with batch size 1
                if progressbar:
                    pbar.update(batch_size_device)
        if is_new_epoch:
            break

    # Average over the utterances that were actually scored.
    cer_mean /= (len(dataset) - skip_data_num)
    wer_mean /= (len(dataset) - skip_data_num)
    # TODO: Fix this

    # Register original batch size
    if eval_batch_size is not None:
        dataset.batch_size = batch_size_original

    return cer_mean, wer_mean
def do_eval_cer2(session, posteriors_ops, beam_width, model, dataset,
                 label_type, is_test=False, eval_batch_size=None,
                 progressbar=False, is_multitask=False):
    """Evaluate trained model by Character Error Rate (beam-search decoding).
    Args:
        session: session of training model
        posteriors_ops: list of operations for computing posteriors
        beam_width (int): beam width used by the beam-search decoder
        model: the model to evaluate
        dataset: An instance of a `Dataset` class
        label_type (string): character or character_capital_divide
        is_test (bool, optional): set to True when evaluating by the test set
        eval_batch_size (int, optional): the batch size when evaluating the model
        progressbar (bool, optional): if True, visualize the progressbar
        is_multitask (bool, optional): if True, evaluate the multitask model
    Return:
        cer_mean (float): An average of CER
        wer_mean (float): An average of WER
    """
    assert isinstance(posteriors_ops, list), "posteriors_ops must be a list."
    # dataset.batch_size is mutated in place below and restored before returning.
    batch_size_original = dataset.batch_size

    # Reset data counter
    dataset.reset()

    # Set batch size in the evaluation
    if eval_batch_size is not None:
        dataset.batch_size = eval_batch_size

    if label_type == 'character':
        idx2char = Idx2char(
            map_file_path='../metrics/mapping_files/character.txt')
        char2idx = Char2idx(
            map_file_path='../metrics/mapping_files/character.txt')
    elif label_type == 'character_capital_divide':
        raise NotImplementedError
    else:
        raise TypeError

    # Define decoder (space index from the map file; blank is the last class)
    decoder = BeamSearchDecoder(space_index=char2idx('_')[0],
                                blank_index=model.num_classes - 1)

    cer_mean, wer_mean = 0, 0
    if progressbar:
        pbar = tqdm(total=len(dataset))
    for data, is_new_epoch in dataset:
        # Create feed dictionary for next mini batch
        if is_multitask:
            inputs, _, labels_true, inputs_seq_len, _ = data
        else:
            inputs, labels_true, inputs_seq_len, _ = data

        feed_dict = {}
        for i_device in range(len(posteriors_ops)):
            feed_dict[model.inputs_pl_list[i_device]] = inputs[i_device]
            feed_dict[model.inputs_seq_len_pl_list[i_device]] = inputs_seq_len[i_device]
            feed_dict[model.keep_prob_pl_list[i_device]] = 1.0  # no dropout at eval time

        posteriors_list = session.run(posteriors_ops, feed_dict=feed_dict)
        # NOTE(review): the loop variable is named labels_pred_st but holds a
        # posteriors array here; only the index i_device is actually used.
        for i_device, labels_pred_st in enumerate(posteriors_list):
            batch_size_device, max_time = inputs[i_device].shape[:2]
            # Flattened posteriors -> (batch, time, classes)
            posteriors = posteriors_list[i_device].reshape(
                batch_size_device, max_time, model.num_classes)
            for i_batch in range(batch_size_device):
                # Decode per utterance
                labels_pred, scores = decoder(
                    probs=posteriors[i_batch:i_batch + 1],
                    seq_len=inputs_seq_len[i_device][i_batch: i_batch + 1],
                    beam_width=beam_width)

                # Convert from list of index to string
                if is_test:
                    str_true = labels_true[i_device][i_batch][0]
                    # NOTE: transcript is separated by space('_')
                else:
                    str_true = idx2char(labels_true[i_device][i_batch],
                                        padded_value=dataset.padded_value)
                str_pred = idx2char(labels_pred[0])

                # Remove consecutive spaces
                str_pred = re.sub(r'[_]+', '_', str_pred)

                # Remove garbage labels (apostrophes)
                str_true = re.sub(r'[\']+', '', str_true)
                str_pred = re.sub(r'[\']+', '', str_pred)

                # Compute WER (word boundaries are '_')
                wer_mean += compute_wer(ref=str_true.split('_'),
                                        hyp=str_pred.split('_'),
                                        normalize=True)
                # substitute, insert, delete = wer_align(
                #     ref=str_pred.split('_'),
                #     hyp=str_true.split('_'))
                # print('SUB: %d' % substitute)
                # print('INS: %d' % insert)
                # print('DEL: %d' % delete)

                # Remove spaces
                str_true = re.sub(r'[_]+', '', str_true)
                str_pred = re.sub(r'[_]+', '', str_pred)

                # Compute CER
                cer_mean += compute_cer(str_pred=str_pred,
                                        str_true=str_true,
                                        normalize=True)
                if progressbar:
                    pbar.update(1)
        if is_new_epoch:
            break

    # Unlike do_eval_cer, no utterances are skipped here, so the average is
    # taken over the full dataset length.
    cer_mean /= (len(dataset))
    wer_mean /= (len(dataset))

    # Register original batch size
    if eval_batch_size is not None:
        dataset.batch_size = batch_size_original

    return cer_mean, wer_mean
def do_eval_wer(session, decode_ops, model, dataset, train_data_size,
                is_test=False, eval_batch_size=None, progressbar=False,
                is_multitask=False):
    """Evaluate trained model by Word Error Rate.
    Args:
        session: session of training model
        decode_ops: list of operations for decoding (one per device)
        model: the model to evaluate
        dataset: An instance of `Dataset` class
        train_data_size (string): train100h or train460h or train960h
        is_test (bool, optional): set to True when evaluating by the test set
        eval_batch_size (int, optional): the batch size when evaluating the model
        progressbar (bool, optional): if True, visualize progressbar
        is_multitask (bool, optional): if True, evaluate the multitask model
    Return:
        wer_mean (float): An average of WER
    """
    assert isinstance(decode_ops, list), "decode_ops must be a list."
    # dataset.batch_size is mutated in place below and restored before returning.
    batch_size_original = dataset.batch_size

    # Reset data counter
    dataset.reset()

    # Set batch size in the evaluation
    if eval_batch_size is not None:
        dataset.batch_size = eval_batch_size

    # Index -> word mapping; the vocabulary file depends on the training set size.
    idx2word = Idx2word(
        map_file_path='../metrics/mapping_files/word_' + train_data_size + '.txt')

    wer_mean = 0
    skip_data_num = 0  # utterances dropped because decoding output was unusable
    if progressbar:
        pbar = tqdm(total=len(dataset))
    for data, is_new_epoch in dataset:
        # Create feed dictionary for next mini batch.
        # NOTE(review): in the multitask case the *word*-level labels come
        # second here, whereas do_eval_cer takes the third element of data.
        if is_multitask:
            inputs, labels_true, _, inputs_seq_len, _ = data
        else:
            inputs, labels_true, inputs_seq_len, _ = data

        feed_dict = {}
        # One decode op per device: feed each device its own slice of the batch.
        for i_device in range(len(decode_ops)):
            feed_dict[model.inputs_pl_list[i_device]] = inputs[i_device]
            feed_dict[model.inputs_seq_len_pl_list[i_device]] = inputs_seq_len[i_device]
            feed_dict[model.keep_prob_pl_list[i_device]] = 1.0  # no dropout at eval time

        labels_pred_st_list = session.run(decode_ops, feed_dict=feed_dict)
        for i_device, labels_pred_st in enumerate(labels_pred_st_list):
            batch_size_device = len(inputs[i_device])
            try:
                # Convert the decoded SparseTensor into per-utterance label lists;
                # this is what the IndexError guard below protects against.
                labels_pred = sparsetensor2list(labels_pred_st,
                                                batch_size_device)
                for i_batch in range(batch_size_device):
                    if is_test:
                        str_true = labels_true[i_device][i_batch][0]
                        # NOTE: transcript is separated by space('_')
                    else:
                        str_true = '_'.join(
                            idx2word(labels_true[i_device][i_batch]))
                    str_pred = '_'.join(idx2word(labels_pred[i_batch]))

                    # if len(str_true.split('_')) == 0:
                    #     print(str_true)
                    #     print(str_pred)

                    # Compute WER (word boundaries are '_')
                    wer_mean += compute_wer(ref=str_true.split('_'),
                                            hyp=str_pred.split('_'),
                                            normalize=True)
                    # substitute, insert, delete = wer_align(
                    #     ref=str_true.split(' '),
                    #     hyp=str_pred.split(' '))
                    # print('SUB: %d' % substitute)
                    # print('INS: %d' % insert)
                    # print('DEL: %d' % delete)

                    if progressbar:
                        pbar.update(1)
            except IndexError:
                # Whole device batch is skipped when the sparse->list conversion fails.
                print('skipped')
                skip_data_num += batch_size_device
                # TODO: Conduct decoding again with batch size 1
                if progressbar:
                    pbar.update(batch_size_device)
        if is_new_epoch:
            break

    # Average over the utterances that were actually scored.
    wer_mean /= (len(dataset) - skip_data_num)

    # Register original batch size
    if eval_batch_size is not None:
        dataset.batch_size = batch_size_original

    return wer_mean
| 38.181579
| 82
| 0.567579
| 1,685
| 14,509
| 4.592285
| 0.121662
| 0.062807
| 0.0252
| 0.011631
| 0.841303
| 0.821272
| 0.809124
| 0.801887
| 0.795425
| 0.781727
| 0
| 0.006574
| 0.34999
| 14,509
| 379
| 83
| 38.282322
| 0.813912
| 0.248742
| 0
| 0.77451
| 0
| 0
| 0.038956
| 0.022998
| 0
| 0
| 0
| 0.005277
| 0.014706
| 1
| 0.014706
| false
| 0
| 0.04902
| 0
| 0.078431
| 0.014706
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
80a0ff11317a9dcb6d3dc3128dee6e27a45457c7
| 1,547
|
py
|
Python
|
code/y_delirium/000_mkdirs.py
|
data-intelligence-for-health-lab/delirium_prediction
|
a0a25819ef6c98e32563b4e3b986c1a26fc30ed7
|
[
"MIT"
] | null | null | null |
code/y_delirium/000_mkdirs.py
|
data-intelligence-for-health-lab/delirium_prediction
|
a0a25819ef6c98e32563b4e3b986c1a26fc30ed7
|
[
"MIT"
] | null | null | null |
code/y_delirium/000_mkdirs.py
|
data-intelligence-for-health-lab/delirium_prediction
|
a0a25819ef6c98e32563b4e3b986c1a26fc30ed7
|
[
"MIT"
] | null | null | null |
# --- loading libraries -------------------------------------------------------
import os
# ------------------------------------------------------ loading libraries ----
# Ensure the delirium aux directory tree exists. `os.makedirs(...,
# exist_ok=True)` replaces the original seven check-then-create pairs,
# which compared `== False` (anti-idiom) and were race-prone between the
# exists() check and the mkdir() call.
_BASE = '/project/M-ABeICU176709/delirium/data/aux/y_delirium/temp'
for _sub in ('',
             'horizon',
             'horizon/processed',
             'horizon/processed_backup',
             'time_frames',
             'time_frames/processed',
             'time_frames/processed_backup'):
    # makedirs also creates missing parents, so ordering no longer matters.
    os.makedirs(os.path.join(_BASE, _sub), exist_ok=True)
| 59.5
| 117
| 0.721396
| 211
| 1,547
| 5.175355
| 0.104265
| 0.102564
| 0.25641
| 0.358974
| 0.96337
| 0.96337
| 0.96337
| 0.96337
| 0.96337
| 0.96337
| 0
| 0.057377
| 0.053652
| 1,547
| 25
| 118
| 61.88
| 0.688525
| 0.100194
| 0
| 0
| 0
| 0
| 0.739193
| 0.739193
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.066667
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
80a90175f3313d5d3ace47f6cf65988396f06277
| 4,394
|
py
|
Python
|
alaska2/models/srm_filter_kernel.py
|
simphide/Kaggle-2020-Alaska2
|
3c1f5e8e564c9f04423beef69244fc74168f88ca
|
[
"MIT"
] | 21
|
2020-08-09T11:30:16.000Z
|
2021-06-28T14:15:08.000Z
|
alaska2/models/srm_filter_kernel.py
|
simphide/Kaggle-2020-Alaska2
|
3c1f5e8e564c9f04423beef69244fc74168f88ca
|
[
"MIT"
] | 11
|
2020-08-09T15:30:54.000Z
|
2022-02-10T07:34:39.000Z
|
alaska2/models/srm_filter_kernel.py
|
simphide/Kaggle-2020-Alaska2
|
3c1f5e8e564c9f04423beef69244fc74168f88ca
|
[
"MIT"
] | 3
|
2020-08-09T14:29:03.000Z
|
2021-05-27T13:07:12.000Z
|
import numpy as np
# High-pass filter (HPF) kernels exposed as module-level constants.
# NOTE(review): these look like the SRM residual filters used in
# steganalysis (the module is named srm_filter_kernel) — confirm against
# the reference implementation if exact provenance matters.

# Class 1: 3x3 first-order residuals — -1 at the center, +1 at each of the
# eight neighbours (row-major order over neighbour positions).
filter_class_1 = [
    np.array([[1, 0, 0], [0, -1, 0], [0, 0, 0]], dtype=np.float32),
    np.array([[0, 1, 0], [0, -1, 0], [0, 0, 0]], dtype=np.float32),
    np.array([[0, 0, 1], [0, -1, 0], [0, 0, 0]], dtype=np.float32),
    np.array([[0, 0, 0], [1, -1, 0], [0, 0, 0]], dtype=np.float32),
    np.array([[0, 0, 0], [0, -1, 1], [0, 0, 0]], dtype=np.float32),
    np.array([[0, 0, 0], [0, -1, 0], [1, 0, 0]], dtype=np.float32),
    np.array([[0, 0, 0], [0, -1, 0], [0, 1, 0]], dtype=np.float32),
    np.array([[0, 0, 0], [0, -1, 0], [0, 0, 1]], dtype=np.float32),
]

# Class 2: 3x3 second-order residuals (center -2, +1 on opposite neighbours)
# over the four principal directions.
filter_class_2 = [
    np.array([[1, 0, 0], [0, -2, 0], [0, 0, 1]], dtype=np.float32),
    np.array([[0, 1, 0], [0, -2, 0], [0, 1, 0]], dtype=np.float32),
    np.array([[0, 0, 1], [0, -2, 0], [1, 0, 0]], dtype=np.float32),
    np.array([[0, 0, 0], [1, -2, 1], [0, 0, 0]], dtype=np.float32),
]

# Class 3: 5x5 third-order residuals ([-1, 3, -3, 1] along the eight
# directions, anchored on the center cell).
filter_class_3 = [
    np.array(
        [[-1, 0, 0, 0, 0], [0, 3, 0, 0, 0], [0, 0, -3, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 0]], dtype=np.float32
    ),
    np.array(
        [[0, 0, -1, 0, 0], [0, 0, 3, 0, 0], [0, 0, -3, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0]], dtype=np.float32
    ),
    np.array(
        [[0, 0, 0, 0, -1], [0, 0, 0, 3, 0], [0, 0, -3, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 0]], dtype=np.float32
    ),
    np.array(
        [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 1, -3, 3, -1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]], dtype=np.float32
    ),
    np.array(
        [[0, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, -3, 0, 0], [0, 0, 0, 3, 0], [0, 0, 0, 0, -1]], dtype=np.float32
    ),
    np.array(
        [[0, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, -3, 0, 0], [0, 0, 3, 0, 0], [0, 0, -1, 0, 0]], dtype=np.float32
    ),
    np.array(
        [[0, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, -3, 0, 0], [0, 3, 0, 0, 0], [-1, 0, 0, 0, 0]], dtype=np.float32
    ),
    np.array(
        [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [-1, 3, -3, 1, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]], dtype=np.float32
    ),
]

# 3x3 edge kernels: half of the 3x3 "square" kernel, one per edge direction.
filter_edge_3x3 = [
    np.array([[-1, 2, -1], [2, -4, 2], [0, 0, 0]], dtype=np.float32),
    np.array([[0, 2, -1], [0, -4, 2], [0, 2, -1]], dtype=np.float32),
    np.array([[0, 0, 0], [2, -4, 2], [-1, 2, -1]], dtype=np.float32),
    np.array([[-1, 2, 0], [2, -4, 0], [-1, 2, 0]], dtype=np.float32),
]

# 5x5 edge kernels: half of the 5x5 "square" kernel, one per edge direction.
filter_edge_5x5 = [
    np.array(
        [[-1, 2, -2, 2, -1], [2, -6, 8, -6, 2], [-2, 8, -12, 8, -2], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
        dtype=np.float32,
    ),
    np.array(
        [[0, 0, -2, 2, -1], [0, 0, 8, -6, 2], [0, 0, -12, 8, -2], [0, 0, 8, -6, 2], [0, 0, -2, 2, -1]],
        dtype=np.float32,
    ),
    np.array(
        [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [-2, 8, -12, 8, -2], [2, -6, 8, -6, 2], [-1, 2, -2, 2, -1]],
        dtype=np.float32,
    ),
    np.array(
        [[-1, 2, -2, 0, 0], [2, -6, 8, 0, 0], [-2, 8, -12, 0, 0], [2, -6, 8, 0, 0], [-1, 2, -2, 0, 0]],
        dtype=np.float32,
    ),
]

# Fully symmetric "square" kernels (3x3 and 5x5 Laplacian-like residuals).
square_3x3 = np.array([[-1, 2, -1], [2, -4, 2], [-1, 2, -1]], dtype=np.float32)
square_5x5 = np.array(
    [[-1, 2, -2, 2, -1], [2, -6, 8, -6, 2], [-2, 8, -12, 8, -2], [2, -6, 8, -6, 2], [-1, 2, -2, 2, -1]],
    dtype=np.float32,
)

# Aggregate lists grouped by kernel size (30 kernels total).
all_hpf_list = (
    filter_class_1 + filter_class_2 + filter_class_3 + filter_edge_3x3 + filter_edge_5x5 + [square_3x3, square_5x5]
)
hpf_3x3_list = filter_class_1 + filter_class_2 + filter_edge_3x3 + [square_3x3]
hpf_5x5_list = filter_class_3 + filter_edge_5x5 + [square_5x5]

# Normalized variants: each family is divided by its largest absolute
# coefficient (2, 3, 4 or 12 — visible in the matrices above) so all
# kernels share a comparable response scale. filter_class_1 already has
# max |coef| == 1 and is reused unnormalized.
normalized_filter_class_2 = [hpf / 2 for hpf in filter_class_2]
normalized_filter_class_3 = [hpf / 3 for hpf in filter_class_3]
normalized_filter_edge_3x3 = [hpf / 4 for hpf in filter_edge_3x3]
normalized_square_3x3 = square_3x3 / 4
normalized_filter_edge_5x5 = [hpf / 12 for hpf in filter_edge_5x5]
normalized_square_5x5 = square_5x5 / 12

all_normalized_hpf_list = (
    filter_class_1
    + normalized_filter_class_2
    + normalized_filter_class_3
    + normalized_filter_edge_3x3
    + normalized_filter_edge_5x5
    + [normalized_square_3x3, normalized_square_5x5]
)
normalized_hpf_3x3_list = (
    filter_class_1 + normalized_filter_class_2 + normalized_filter_edge_3x3 + [normalized_square_3x3]
)
normalized_hpf_5x5_list = normalized_filter_class_3 + normalized_filter_edge_5x5 + [normalized_square_5x5]
normalized_3x3_list = normalized_filter_edge_3x3 + [normalized_square_3x3]
normalized_5x5_list = normalized_filter_edge_5x5 + [normalized_square_5x5]
| 38.208696
| 115
| 0.507738
| 840
| 4,394
| 2.504762
| 0.032143
| 0.205323
| 0.208175
| 0.18251
| 0.861217
| 0.801806
| 0.721008
| 0.589354
| 0.514259
| 0.459125
| 0
| 0.191976
| 0.234183
| 4,394
| 114
| 116
| 38.54386
| 0.433284
| 0
| 0
| 0.298969
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.010309
| 0
| 0.010309
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0390d074ab92039e7c94aa4f970f5ee7af468623
| 2,714
|
py
|
Python
|
craynn/viz/img_watcher.py
|
maxim-borisyak/craynn
|
fceabd33f5969033fb3605f894778c42c42f3e08
|
[
"MIT"
] | null | null | null |
craynn/viz/img_watcher.py
|
maxim-borisyak/craynn
|
fceabd33f5969033fb3605f894778c42c42f3e08
|
[
"MIT"
] | null | null | null |
craynn/viz/img_watcher.py
|
maxim-borisyak/craynn
|
fceabd33f5969033fb3605f894778c42c42f3e08
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
__all__ = [
'ImgWatcher', 'SImgWatcher'
]
try:
from IPython import display
except ImportError:
display = None
class ImgWatcher(object):
    """Live-updating figure with two columns of images, `n_rows` per column.

    Build once, then call `draw(imgs1, imgs2)` repeatedly to refresh the
    panels (e.g. inputs vs. reconstructions during training).
    """

    def __init__(self,
                 n_rows=3, img_size=(128, 128), cmap1=plt.cm.gray_r, cmap2=plt.cm.gray_r, fig_size=3,
                 vmin=None, vmax=None):
        """
        :param n_rows: number of image rows (one image per column per row).
        :param img_size: shape of the random placeholder shown before the first draw().
        :param cmap1: colormap for the first column.
        :param cmap2: colormap for the second column.
        :param fig_size: panel size in inches; the figure scales with n_rows.
        :param vmin: fixed lower color limit, or None to follow each image's min.
        :param vmax: fixed upper color limit, or None to follow each image's max.
        """
        self.fig = plt.figure(figsize=(fig_size * 2 + 1, fig_size * n_rows + n_rows - 1))
        self.vmin = vmin
        self.vmax = vmax

        def add_image(j, cmap):
            # Create subplot j with a random placeholder image plus a colorbar.
            ax = self.fig.add_subplot(n_rows, 2, j)
            # FIX: the original passed the string 'off'; string bool support
            # was deprecated/removed in modern matplotlib — pass False.
            ax.grid(False)
            im = ax.imshow(
                np.random.uniform(size=img_size), interpolation='None', cmap=cmap,
                vmin=vmin, vmax=vmax
            )
            cb = self.fig.colorbar(im)
            return im, cb

        # Subplots are numbered row-major: odd indices form the first column,
        # even indices the second.
        self.first_column = [
            add_image(i * 2 + 1, cmap1)
            for i in range(n_rows)
        ]
        self.second_column = [
            add_image(i * 2 + 2, cmap2)
            for i in range(n_rows)
        ]

    def draw(self, imgs1, imgs2):
        """Show imgs1/imgs2 (one image per row, per column) and redraw the canvas."""
        for col, imgs in zip([self.first_column, self.second_column], [imgs1, imgs2]):
            for i, (im, cb) in enumerate(col):
                img = imgs[i]
                im.set_data(img)
                # Recompute color limits per image unless fixed at construction.
                vmin = self.vmin if self.vmin is not None else np.min(img)
                vmax = self.vmax if self.vmax is not None else np.max(img)
                im.set_clim(vmin, vmax)
                # FIX: Colorbar.set_clim was removed in matplotlib 3.3;
                # update_normal(im) keeps the colorbar in sync instead.
                cb.update_normal(im)
        self.fig.canvas.draw()
class SImgWatcher(object):
    """IPython/Jupyter variant of ``ImgWatcher``.

    Identical two-column layout, but :meth:`draw` clears and re-renders the
    current notebook cell output instead of redrawing the figure canvas.

    Parameters are the same as for ``ImgWatcher``: ``n_rows`` image pairs of
    shape ``img_size``, colormaps ``cmap1``/``cmap2`` per column, panel size
    ``fig_size`` and optional fixed color limits ``vmin``/``vmax``.
    """

    def __init__(self,
                 n_rows=3, img_size=(128, 128), cmap1=plt.cm.gray_r, cmap2=plt.cm.gray_r, fig_size=3,
                 vmin=None, vmax=None):
        self.fig = plt.figure(figsize=(fig_size * 2 + 1, fig_size * n_rows + n_rows - 1))
        self.vmin = vmin
        self.vmax = vmax

        def add_image(j, cmap):
            # Create the j-th subplot and return its (image, colorbar) pair.
            ax = self.fig.add_subplot(n_rows, 2, j)
            # BUGFIX: ax.grid('off') passed a truthy string (grid stayed ON;
            # errors on matplotlib >= 3.5); False is the correct argument.
            ax.grid(False)
            im = ax.imshow(
                # BUGFIX: 'none' is the valid "no interpolation" string.
                np.random.uniform(size=img_size), interpolation='none', cmap=cmap,
                vmin=vmin, vmax=vmax
            )
            cb = self.fig.colorbar(im)
            return im, cb

        self.first_column = [
            add_image(i * 2 + 1, cmap1)
            for i in range(n_rows)
        ]
        self.second_column = [
            add_image(i * 2 + 2, cmap2)
            for i in range(n_rows)
        ]

    def draw(self, imgs1, imgs2):
        """Replace the displayed images inside the current notebook cell.

        Raises
        ------
        RuntimeError
            If IPython could not be imported at module load time (``display``
            is ``None``); previously this surfaced as an opaque
            ``AttributeError`` on ``None``.
        """
        if display is None:
            raise RuntimeError(
                'SImgWatcher.draw requires IPython; use ImgWatcher instead.')
        display.clear_output(wait=True)
        for col, imgs in zip([self.first_column, self.second_column], [imgs1, imgs2]):
            for i, (im, cb) in enumerate(col):
                img = imgs[i]
                im.set_data(img)
                vmin = self.vmin if self.vmin is not None else np.min(img)
                vmax = self.vmax if self.vmax is not None else np.max(img)
                im.set_clim(vmin, vmax)
                # BUGFIX: Colorbar.set_clim was removed in matplotlib 3.3;
                # update_normal(im) performs the rescale instead.
                cb.update_normal(im)
        display.display(self.fig)
| 27.693878
| 99
| 0.597273
| 426
| 2,714
| 3.657277
| 0.201878
| 0.038511
| 0.023107
| 0.025674
| 0.861361
| 0.861361
| 0.861361
| 0.861361
| 0.861361
| 0.861361
| 0
| 0.024365
| 0.274134
| 2,714
| 98
| 100
| 27.693878
| 0.766497
| 0
| 0
| 0.75
| 0
| 0
| 0.012891
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075
| false
| 0
| 0.05
| 0
| 0.175
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
03ca009e1be1480a1631f2ba6f2265b9cc5638d2
| 7,097
|
py
|
Python
|
loldib/getratings/models/NA/na_monkeyking/na_monkeyking_sup.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_monkeyking/na_monkeyking_sup.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_monkeyking/na_monkeyking_sup.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
from getratings.models.ratings import Ratings
# The original file spelled out ~138 byte-identical empty subclasses of
# Ratings (one per champion). Generate them instead: the exact same module
# attributes (``NA_MonkeyKing_Sup_<Champion>``, each a direct subclass of
# ``Ratings``) are created, so lookups, ``from ... import *`` and
# ``issubclass`` checks behave as before, while the champion roster lives in
# one editable tuple.
_CHAMPIONS = (
    "Aatrox", "Ahri", "Akali", "Alistar", "Amumu", "Anivia", "Annie", "Ashe",
    "AurelionSol", "Azir", "Bard", "Blitzcrank", "Brand", "Braum", "Caitlyn",
    "Camille", "Cassiopeia", "Chogath", "Corki", "Darius", "Diana", "Draven",
    "DrMundo", "Ekko", "Elise", "Evelynn", "Ezreal", "Fiddlesticks", "Fiora",
    "Fizz", "Galio", "Gangplank", "Garen", "Gnar", "Gragas", "Graves",
    "Hecarim", "Heimerdinger", "Illaoi", "Irelia", "Ivern", "Janna",
    "JarvanIV", "Jax", "Jayce", "Jhin", "Jinx", "Kalista", "Karma",
    "Karthus", "Kassadin", "Katarina", "Kayle", "Kayn", "Kennen", "Khazix",
    "Kindred", "Kled", "KogMaw", "Leblanc", "LeeSin", "Leona", "Lissandra",
    "Lucian", "Lulu", "Lux", "Malphite", "Malzahar", "Maokai", "MasterYi",
    "MissFortune", "MonkeyKing", "Mordekaiser", "Morgana", "Nami", "Nasus",
    "Nautilus", "Nidalee", "Nocturne", "Nunu", "Olaf", "Orianna", "Ornn",
    "Pantheon", "Poppy", "Quinn", "Rakan", "Rammus", "RekSai", "Renekton",
    "Rengar", "Riven", "Rumble", "Ryze", "Sejuani", "Shaco", "Shen",
    "Shyvana", "Singed", "Sion", "Sivir", "Skarner", "Sona", "Soraka",
    "Swain", "Syndra", "TahmKench", "Taliyah", "Talon", "Taric", "Teemo",
    "Thresh", "Tristana", "Trundle", "Tryndamere", "TwistedFate", "Twitch",
    "Udyr", "Urgot", "Varus", "Vayne", "Veigar", "Velkoz", "Vi", "Viktor",
    "Vladimir", "Volibear", "Warwick", "Xayah", "Xerath", "XinZhao",
    "Yasuo", "Yorick", "Zac", "Zed", "Ziggs", "Zilean", "Zyra",
)

for _champion in _CHAMPIONS:
    _cls_name = "NA_MonkeyKing_Sup_" + _champion
    # __module__ is set explicitly so the generated classes introspect and
    # pickle exactly like the hand-written ones did.
    globals()[_cls_name] = type(_cls_name, (Ratings,), {"__module__": __name__})
# Keep the module namespace clean of loop temporaries.
del _champion, _cls_name
| 17.019185
| 47
| 0.784839
| 972
| 7,097
| 5.304527
| 0.151235
| 0.187355
| 0.455004
| 0.535299
| 0.823701
| 0.823701
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156545
| 7,097
| 416
| 48
| 17.060096
| 0.861343
| 0
| 0
| 0.498195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.498195
| 0.00361
| 0
| 0.501805
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
205c3e709e6d280888ef09aae69a10afc3cd433e
| 117,032
|
py
|
Python
|
tests/test_dtscalibration.py
|
fprice111/python-dts-calibration
|
bc972070ab1c9fe43e9ecc85ace30e2877b8cd00
|
[
"BSD-3-Clause"
] | 20
|
2019-10-07T15:54:07.000Z
|
2022-03-18T07:18:22.000Z
|
tests/test_dtscalibration.py
|
fprice111/python-dts-calibration
|
bc972070ab1c9fe43e9ecc85ace30e2877b8cd00
|
[
"BSD-3-Clause"
] | 90
|
2019-01-25T09:41:37.000Z
|
2022-03-21T12:45:30.000Z
|
tests/test_dtscalibration.py
|
fprice111/python-dts-calibration
|
bc972070ab1c9fe43e9ecc85ace30e2877b8cd00
|
[
"BSD-3-Clause"
] | 9
|
2019-10-16T12:37:59.000Z
|
2022-02-18T21:24:29.000Z
|
# coding=utf-8
import os
import numpy as np
import pytest
import scipy.sparse as sp
from scipy import stats
from dtscalibration import DataStore
from dtscalibration import read_silixa_files
from dtscalibration.calibrate_utils import wls_sparse
from dtscalibration.calibrate_utils import wls_stats
from dtscalibration.cli import main
np.random.seed(0)
# Double-ended measurement files (channel 1) used by the file-reading tests.
# NOTE(review): the second and third entries are identical — looks like a
# copy-paste slip; confirm whether a third distinct file was intended.
fn = [
    "channel 1_20170921112245510.xml", "channel 1_20170921112746818.xml",
    "channel 1_20170921112746818.xml"]
# Single-ended measurement files (channel 2).
fn_single = [
    "channel 2_20180504132202074.xml", "channel 2_20180504132232903.xml",
    "channel 2_20180504132303723.xml"]
# Manual toggle between two checkouts/layouts: `if 1:` resolves the data
# directories relative to this test file; the dead `else:` branch kept the
# old relative paths for when the working directory was `src`.
if 1:
    # working dir is tests
    wd = os.path.dirname(os.path.abspath(__file__))
    data_dir_single_ended = os.path.join(wd, 'data', 'single_ended')
    data_dir_double_ended = os.path.join(wd, 'data', 'double_ended')
    data_dir_double_ended2 = os.path.join(wd, 'data', 'double_ended2')
else:
    # working dir is src
    data_dir_single_ended = os.path.join(
        '..', '..', 'tests', 'data', 'single_ended')
    data_dir_double_ended = os.path.join(
        '..', '..', 'tests', 'data', 'double_ended')
    data_dir_double_ended2 = os.path.join(
        '..', '..', 'tests', 'data', 'double_ended2')
def assert_almost_equal_verbose(actual, desired, verbose=False, **kwargs):
    """Assert near-equality while reporting the achieved precision.

    Thin wrapper around :func:`numpy.testing.assert_almost_equal` that
    broadcasts *desired* to ``actual``'s shape and embeds the measured
    precision (in decimal digits) in the failure message.

    Parameters
    ----------
    actual : array_like
        Values produced by the code under test.
    desired : array_like
        Expected values; broadcast to ``actual.shape``.
    verbose : bool
        When True, also print the measured precision.
    **kwargs
        Forwarded to ``assert_almost_equal`` (e.g. ``decimal=...``).
    """
    err = np.abs(actual - desired).max()
    # An exact match gives err == 0 and log10 would emit a RuntimeWarning;
    # silence it — the non-finite result is capped at 18 decimals below
    # (same fallback the original used).
    with np.errstate(divide='ignore'):
        dec = -np.ceil(np.log10(err))
    if not np.isfinite(dec):
        dec = 18.
    m = "\n>>>>>The actual precision is: " + str(float(dec))
    if verbose:
        print(m)
    desired2 = np.broadcast_to(desired, actual.shape)
    np.testing.assert_almost_equal(actual, desired2, err_msg=m, **kwargs)
def test_main():
    """Smoke test: the CLI entry point exits cleanly when given no arguments."""
    assert main([]) == 0
def test_variance_input_types_single():
    """Single-ended calibration accepts ``st_var`` as a float, a callable,
    or arrays of shape (nx, nt), (nx, 1) and (nt,).

    Builds a synthetic single-ended measurement with known noise, then
    checks the Monte-Carlo temperature variance for every input form.
    """
    import dask.array as da
    # BUGFIX: was `from src.dtscalibration import DataStore`, which only
    # resolves from the repository root; use the installed package, matching
    # the module-level import and the other tests in this file.
    from dtscalibration import DataStore
    state = da.random.RandomState(0)
    stokes_m_var = 40.
    cable_len = 100.
    nt = 500
    time = np.arange(nt)
    x = np.linspace(0., cable_len, 100)
    ts_cold = np.ones(nt) * 4.
    ts_warm = np.ones(nt) * 20.
    C_p = 15246
    C_m = 2400.
    dalpha_r = 0.005284
    dalpha_m = 0.004961
    dalpha_p = 0.005607
    gamma = 482.6
    cold_mask = x < 0.5 * cable_len
    warm_mask = np.invert(cold_mask)  # == False
    temp_real = np.ones((len(x), nt))
    temp_real[cold_mask] *= ts_cold + 273.15
    temp_real[warm_mask] *= ts_warm + 273.15
    # Forward Stokes / anti-Stokes from the single-ended model equations.
    st = C_p * np.exp(
        -dalpha_r * x[:, None]) * np.exp(-dalpha_p * x[:, None]) * np.exp(
            -gamma / temp_real) / (1 - np.exp(-gamma / temp_real))
    ast = C_m * np.exp(-dalpha_r * x[:, None]) * np.exp(
        -dalpha_m * x[:, None]) / (1 - np.exp(-gamma / temp_real))
    # Add measurement noise with known variance.
    st_m = st + stats.norm.rvs(size=st.shape, scale=stokes_m_var**0.5)
    ast_m = ast + stats.norm.rvs(size=ast.shape, scale=1.1 * stokes_m_var**0.5)
    print('alphaint', cable_len * (dalpha_p - dalpha_m))
    print('alpha', dalpha_p - dalpha_m)
    print('C', np.log(C_p / C_m))
    print('x0', x.max())
    ds = DataStore(
        {
            'st': (['x', 'time'], st_m),
            'ast': (['x', 'time'], ast_m),
            'userAcquisitionTimeFW': (['time'], np.ones(nt)),
            'cold': (['time'], ts_cold),
            'warm': (['time'], ts_warm)},
        coords={
            'x': x,
            'time': time},
        attrs={'isDoubleEnded': '0'})
    sections = {
        'cold': [slice(0., 0.4 * cable_len)],
        'warm': [slice(0.6 * cable_len, cable_len)]}
    # Test float input
    st_var = 5.
    ds.calibration_single_ended(
        sections=sections,
        st_var=st_var,
        ast_var=st_var,
        method='wls',
        solver='sparse')
    ds.conf_int_single_ended(
        st_var=st_var,
        ast_var=st_var,
        mc_sample_size=100,
        da_random_state=state)
    assert_almost_equal_verbose(
        ds.tmpf_mc_var.sel(x=slice(0, 10)).mean(), 0.044361, decimal=2)
    assert_almost_equal_verbose(
        ds.tmpf_mc_var.sel(x=slice(90, 100)).mean(), 0.242028, decimal=2)

    # Test callable input
    def callable_st_var(stokes):
        slope = 0.01
        offset = 0
        return slope * stokes + offset

    ds.calibration_single_ended(
        sections=sections,
        st_var=callable_st_var,
        ast_var=callable_st_var,
        method='wls',
        solver='sparse')
    ds.conf_int_single_ended(
        st_var=callable_st_var,
        ast_var=callable_st_var,
        mc_sample_size=100,
        da_random_state=state)
    assert_almost_equal_verbose(
        ds.tmpf_mc_var.sel(x=slice(0, 10)).mean(), 0.184753, decimal=2)
    assert_almost_equal_verbose(
        ds.tmpf_mc_var.sel(x=slice(90, 100)).mean(), 0.545186, decimal=2)
    # Test input with shape of (ntime, nx)
    st_var = ds.st.values * 0 + 20.
    ds.calibration_single_ended(
        sections=sections,
        st_var=st_var,
        ast_var=st_var,
        method='wls',
        solver='sparse')
    ds.conf_int_single_ended(
        st_var=st_var,
        ast_var=st_var,
        mc_sample_size=100,
        da_random_state=state)
    assert_almost_equal_verbose(ds.tmpf_mc_var.mean(), 0.418098, decimal=2)
    # Test input with shape (nx, 1)
    st_var = np.vstack(
        ds.st.mean(dim='time').values * 0
        + np.linspace(10, 50, num=ds.st.x.size))
    ds.calibration_single_ended(
        sections=sections,
        st_var=st_var,
        ast_var=st_var,
        method='wls',
        solver='sparse')
    ds.conf_int_single_ended(
        st_var=st_var,
        ast_var=st_var,
        mc_sample_size=100,
        da_random_state=state)
    assert_almost_equal_verbose(
        ds.tmpf_mc_var.sel(x=slice(0, 50)).mean().values, 0.2377, decimal=2)
    assert_almost_equal_verbose(
        ds.tmpf_mc_var.sel(x=slice(50, 100)).mean().values, 1.3203, decimal=2)
    # Test input with shape (ntime)
    st_var = ds.st.mean(dim='x').values * 0 + np.linspace(5, 200, num=nt)
    ds.calibration_single_ended(
        sections=sections,
        st_var=st_var,
        ast_var=st_var,
        method='wls',
        solver='sparse')
    ds.conf_int_single_ended(
        st_var=st_var,
        ast_var=st_var,
        mc_sample_size=100,
        da_random_state=state)
    assert_almost_equal_verbose(
        ds.tmpf_mc_var.sel(time=slice(0, nt // 2)).mean().values,
        1.0908,
        decimal=2)
    assert_almost_equal_verbose(
        ds.tmpf_mc_var.sel(time=slice(nt // 2, None)).mean().values,
        3.0759,
        decimal=2)
def test_variance_input_types_double():
    """Double-ended calibration accepts ``st_var`` as a float, a callable,
    or arrays of shape (nx, nt), (nx, 1) and (nt,).

    Mirrors ``test_variance_input_types_single`` with forward and backward
    (rst/rast) channels on a synthetic double-ended measurement.
    """
    import dask.array as da
    # BUGFIX: was `from src.dtscalibration import DataStore`; use the
    # installed package, consistent with the module-level import.
    from dtscalibration import DataStore
    state = da.random.RandomState(0)
    stokes_m_var = 40.
    cable_len = 100.
    nt = 500
    time = np.arange(nt)
    x = np.linspace(0., cable_len, 100)
    ts_cold = np.ones(nt) * 4.
    ts_warm = np.ones(nt) * 20.
    C_p = 15246
    C_m = 2400.
    dalpha_r = 0.005284
    dalpha_m = 0.004961
    dalpha_p = 0.005607
    gamma = 482.6
    cold_mask = x < 0.5 * cable_len
    warm_mask = np.invert(cold_mask)  # == False
    temp_real = np.ones((len(x), nt))
    temp_real[cold_mask] *= ts_cold + 273.15
    temp_real[warm_mask] *= ts_warm + 273.15
    # Forward and backward Stokes / anti-Stokes from the model equations.
    st = C_p * np.exp(
        -dalpha_r * x[:, None]) * np.exp(-dalpha_p * x[:, None]) * np.exp(
            -gamma / temp_real) / (1 - np.exp(-gamma / temp_real))
    ast = C_m * np.exp(-dalpha_r * x[:, None]) * np.exp(
        -dalpha_m * x[:, None]) / (1 - np.exp(-gamma / temp_real))
    rst = C_p * np.exp(-dalpha_r * (-x[:, None] + 100)) * np.exp(
        -dalpha_p * (-x[:, None] + 100)) * np.exp(
            -gamma / temp_real) / (1 - np.exp(-gamma / temp_real))
    rast = C_m * np.exp(-dalpha_r * (-x[:, None] + 100)) * np.exp(
        -dalpha_m * (-x[:, None] + 100)) / (1 - np.exp(-gamma / temp_real))
    # Add measurement noise with known (channel-dependent) variance.
    st_m = st + stats.norm.rvs(size=st.shape, scale=stokes_m_var**0.5)
    ast_m = ast + stats.norm.rvs(size=ast.shape, scale=1.1 * stokes_m_var**0.5)
    rst_m = rst + stats.norm.rvs(size=rst.shape, scale=0.9 * stokes_m_var**0.5)
    rast_m = rast + stats.norm.rvs(
        size=rast.shape, scale=0.8 * stokes_m_var**0.5)
    print('alphaint', cable_len * (dalpha_p - dalpha_m))
    print('alpha', dalpha_p - dalpha_m)
    print('C', np.log(C_p / C_m))
    print('x0', x.max())
    ds = DataStore(
        {
            'st': (['x', 'time'], st_m),
            'ast': (['x', 'time'], ast_m),
            'rst': (['x', 'time'], rst_m),
            'rast': (['x', 'time'], rast_m),
            'userAcquisitionTimeFW': (['time'], np.ones(nt)),
            'userAcquisitionTimeBW': (['time'], np.ones(nt)),
            'cold': (['time'], ts_cold),
            'warm': (['time'], ts_warm)},
        coords={
            'x': x,
            'time': time},
        attrs={'isDoubleEnded': '1'})
    sections = {
        'cold': [slice(0., 0.4 * cable_len)],
        'warm': [slice(0.6 * cable_len, cable_len)]}
    # Test float input
    st_var = 5.
    ds.calibration_double_ended(
        sections=sections,
        st_var=st_var,
        ast_var=st_var,
        rst_var=st_var,
        rast_var=st_var,
        method='wls',
        solver='sparse')
    ds.conf_int_double_ended(
        st_var=st_var,
        ast_var=st_var,
        rst_var=st_var,
        rast_var=st_var,
        mc_sample_size=100,
        da_random_state=state)
    assert_almost_equal_verbose(
        ds.tmpf_mc_var.sel(x=slice(0, 10)).mean(), 0.03584935, decimal=2)
    assert_almost_equal_verbose(
        ds.tmpf_mc_var.sel(x=slice(90, 100)).mean(), 0.22982146, decimal=2)

    # Test callable input
    def st_var_callable(stokes):
        slope = 0.01
        offset = 0
        return slope * stokes + offset

    ds.calibration_double_ended(
        sections=sections,
        st_var=st_var_callable,
        ast_var=st_var_callable,
        rst_var=st_var_callable,
        rast_var=st_var_callable,
        method='wls',
        solver='sparse')
    ds.conf_int_double_ended(
        st_var=st_var_callable,
        ast_var=st_var_callable,
        rst_var=st_var_callable,
        rast_var=st_var_callable,
        mc_sample_size=100,
        da_random_state=state)
    assert_almost_equal_verbose(
        ds.tmpf_mc_var.sel(x=slice(0, 10)).mean(), 0.18058514, decimal=2)
    assert_almost_equal_verbose(
        ds.tmpf_mc_var.sel(x=slice(90, 100)).mean(), 0.53862813, decimal=2)
    # Test input with shape of (ntime, nx)
    st_var = ds.st.values * 0 + 20.
    ds.calibration_double_ended(
        sections=sections,
        st_var=st_var,
        ast_var=st_var,
        rst_var=st_var,
        rast_var=st_var,
        method='wls',
        solver='sparse')
    ds.conf_int_double_ended(
        st_var=st_var,
        ast_var=st_var,
        rst_var=st_var,
        rast_var=st_var,
        mc_sample_size=100,
        da_random_state=state)
    assert_almost_equal_verbose(ds.tmpf_mc_var.mean(), 0.40725674, decimal=2)
    # Test input with shape (nx, 1)
    st_var = np.vstack(
        ds.st.mean(dim='time').values * 0
        + np.linspace(10, 50, num=ds.st.x.size))
    ds.calibration_double_ended(
        sections=sections,
        st_var=st_var,
        ast_var=st_var,
        rst_var=st_var,
        rast_var=st_var,
        method='wls',
        solver='sparse')
    ds.conf_int_double_ended(
        st_var=st_var,
        ast_var=st_var,
        rst_var=st_var,
        rast_var=st_var,
        mc_sample_size=100,
        da_random_state=state)
    assert_almost_equal_verbose(
        ds.tmpf_mc_var.sel(x=slice(0, 50)).mean().values,
        0.21163704,
        decimal=2)
    assert_almost_equal_verbose(
        ds.tmpf_mc_var.sel(x=slice(50, 100)).mean().values,
        1.28247762,
        decimal=2)
    # Test input with shape (ntime)
    st_var = ds.st.mean(dim='x').values * 0 + np.linspace(5, 200, num=nt)
    ds.calibration_double_ended(
        sections=sections,
        st_var=st_var,
        ast_var=st_var,
        rst_var=st_var,
        rast_var=st_var,
        method='wls',
        solver='sparse')
    ds.conf_int_double_ended(
        st_var=st_var,
        ast_var=st_var,
        rst_var=st_var,
        rast_var=st_var,
        mc_sample_size=100,
        da_random_state=state)
    assert_almost_equal_verbose(
        ds.tmpf_mc_var.sel(time=slice(0, nt // 2)).mean().values,
        1.090,
        decimal=2)
    assert_almost_equal_verbose(
        ds.tmpf_mc_var.sel(time=slice(nt // 2, None)).mean().values,
        3.06,
        decimal=2)
def test_double_ended_variance_estimate_synthetic():
    """Estimated Monte-Carlo temperature variance should match the real
    per-section variance on a synthetic double-ended measurement.

    Noise of known variance is injected into all four channels, the Stokes
    variances are re-estimated from the data, and the calibrated confidence
    intervals are compared section-by-section against the actual spread.
    """
    import dask.array as da
    # BUGFIX: was `from src.dtscalibration import DataStore`; use the
    # installed package, consistent with the module-level import.
    from dtscalibration import DataStore
    state = da.random.RandomState(0)
    stokes_m_var = 40.
    cable_len = 100.
    nt = 500
    time = np.arange(nt)
    x = np.linspace(0., cable_len, 100)
    ts_cold = np.ones(nt) * 4.
    ts_warm = np.ones(nt) * 20.
    C_p = 15246
    C_m = 2400.
    dalpha_r = 0.0005284
    dalpha_m = 0.0004961
    dalpha_p = 0.0005607
    gamma = 482.6
    cold_mask = x < 0.5 * cable_len
    warm_mask = np.invert(cold_mask)  # == False
    temp_real = np.ones((len(x), nt))
    temp_real[cold_mask] *= ts_cold + 273.15
    temp_real[warm_mask] *= ts_warm + 273.15
    # Forward and backward Stokes / anti-Stokes from the model equations.
    st = C_p * np.exp(
        -dalpha_r * x[:, None]) * np.exp(-dalpha_p * x[:, None]) * np.exp(
            -gamma / temp_real) / (1 - np.exp(-gamma / temp_real))
    ast = C_m * np.exp(-dalpha_r * x[:, None]) * np.exp(
        -dalpha_m * x[:, None]) / (1 - np.exp(-gamma / temp_real))
    rst = C_p * np.exp(-dalpha_r * (-x[:, None] + 100)) * np.exp(
        -dalpha_p * (-x[:, None] + 100)) * np.exp(
            -gamma / temp_real) / (1 - np.exp(-gamma / temp_real))
    rast = C_m * np.exp(-dalpha_r * (-x[:, None] + 100)) * np.exp(
        -dalpha_m * (-x[:, None] + 100)) / (1 - np.exp(-gamma / temp_real))
    # Inject noise of known, channel-dependent variance.
    st_m = st + stats.norm.rvs(size=st.shape, scale=stokes_m_var**0.5)
    ast_m = ast + stats.norm.rvs(size=ast.shape, scale=1.1 * stokes_m_var**0.5)
    rst_m = rst + stats.norm.rvs(size=rst.shape, scale=0.9 * stokes_m_var**0.5)
    rast_m = rast + stats.norm.rvs(
        size=rast.shape, scale=0.8 * stokes_m_var**0.5)
    print('alphaint', cable_len * (dalpha_p - dalpha_m))
    print('alpha', dalpha_p - dalpha_m)
    print('C', np.log(C_p / C_m))
    print('x0', x.max())
    ds = DataStore(
        {
            'st': (['x', 'time'], st_m),
            'ast': (['x', 'time'], ast_m),
            'rst': (['x', 'time'], rst_m),
            'rast': (['x', 'time'], rast_m),
            'userAcquisitionTimeFW': (['time'], np.ones(nt)),
            'userAcquisitionTimeBW': (['time'], np.ones(nt)),
            'cold': (['time'], ts_cold),
            'warm': (['time'], ts_warm)},
        coords={
            'x': x,
            'time': time},
        attrs={'isDoubleEnded': '1'})
    sections = {
        'cold': [slice(0., 0.5 * cable_len)],
        'warm': [slice(0.5 * cable_len, cable_len)]}
    # Re-estimate the Stokes variances from the noisy data.
    mst_var, _ = ds.variance_stokes(st_label='st', sections=sections)
    mast_var, _ = ds.variance_stokes(st_label='ast', sections=sections)
    mrst_var, _ = ds.variance_stokes(st_label='rst', sections=sections)
    mrast_var, _ = ds.variance_stokes(st_label='rast', sections=sections)
    mst_var = float(mst_var)
    mast_var = float(mast_var)
    mrst_var = float(mrst_var)
    mrast_var = float(mrast_var)
    # MC variance
    ds.calibration_double_ended(
        sections=sections,
        st_var=mst_var,
        ast_var=mast_var,
        rst_var=mrst_var,
        rast_var=mrast_var,
        method='wls',
        solver='sparse')
    assert_almost_equal_verbose(ds.tmpf.mean(), 12., decimal=2)
    assert_almost_equal_verbose(ds.tmpb.mean(), 12., decimal=3)
    ds.conf_int_double_ended(
        p_val='p_val',
        p_cov='p_cov',
        st_var=mst_var,
        ast_var=mast_var,
        rst_var=mrst_var,
        rast_var=mrast_var,
        store_tmpf='tmpf',
        store_tmpb='tmpb',
        store_tmpw='tmpw',
        store_tempvar='_var',
        conf_ints=[2.5, 50., 97.5],
        mc_sample_size=100,
        da_random_state=state)
    # Calibrated variance
    stdsf1 = ds.ufunc_per_section(
        label='tmpf', func=np.std, temp_err=True, calc_per='stretch')
    stdsb1 = ds.ufunc_per_section(
        label='tmpb', func=np.std, temp_err=True, calc_per='stretch')
    # Use a single timestep to better check if the parameter uncertainties propagate
    ds1 = ds.isel(time=1)
    # Estimated VAR
    stdsf2 = ds1.ufunc_per_section(
        label='tmpf_mc_var', func=np.mean, temp_err=False, calc_per='stretch')
    stdsb2 = ds1.ufunc_per_section(
        label='tmpb_mc_var', func=np.mean, temp_err=False, calc_per='stretch')
    for (_, v1), (_, v2) in zip(stdsf1.items(), stdsf2.items()):
        for v1i, v2i in zip(v1, v2):
            print('Real VAR: ', v1i**2, 'Estimated VAR: ', float(v2i))
            assert_almost_equal_verbose(v1i**2, v2i, decimal=2)
    for (_, v1), (_, v2) in zip(stdsb1.items(), stdsb2.items()):
        for v1i, v2i in zip(v1, v2):
            print('Real VAR: ', v1i**2, 'Estimated VAR: ', float(v2i))
            assert_almost_equal_verbose(v1i**2, v2i, decimal=2)
def test_single_ended_variance_estimate_synthetic():
    """Estimated Monte-Carlo temperature variance should match the real
    per-section variance on a synthetic single-ended measurement.

    Single-ended counterpart of
    ``test_double_ended_variance_estimate_synthetic``.
    """
    import dask.array as da
    # BUGFIX: was `from src.dtscalibration import DataStore`; use the
    # installed package, consistent with the module-level import.
    from dtscalibration import DataStore
    state = da.random.RandomState(0)
    stokes_m_var = 40.
    astokes_m_var = 60.
    cable_len = 100.
    nt = 50
    time = np.arange(nt)
    x = np.linspace(0., cable_len, 500)
    ts_cold = np.ones(nt) * 4.
    ts_warm = np.ones(nt) * 20.
    C_p = 15246
    C_m = 2400.
    dalpha_r = 0.0005284
    dalpha_m = 0.0004961
    dalpha_p = 0.0005607
    gamma = 482.6
    cold_mask = x < 0.5 * cable_len
    warm_mask = np.invert(cold_mask)  # == False
    temp_real = np.ones((len(x), nt))
    temp_real[cold_mask] *= ts_cold + 273.15
    temp_real[warm_mask] *= ts_warm + 273.15
    # Forward Stokes / anti-Stokes from the single-ended model equations.
    st = C_p * np.exp(
        -dalpha_r * x[:, None]) * np.exp(-dalpha_p * x[:, None]) * np.exp(
            -gamma / temp_real) / (1 - np.exp(-gamma / temp_real))
    ast = C_m * np.exp(-dalpha_r * x[:, None]) * np.exp(
        -dalpha_m * x[:, None]) / (1 - np.exp(-gamma / temp_real))
    # Inject noise of known variance into both channels.
    st_m = st + stats.norm.rvs(size=st.shape, scale=stokes_m_var**0.5)
    ast_m = ast + stats.norm.rvs(size=ast.shape, scale=astokes_m_var**0.5)
    print('alphaint', cable_len * (dalpha_p - dalpha_m))
    print('alpha', dalpha_p - dalpha_m)
    print('C', np.log(C_p / C_m))
    print('x0', x.max())
    ds = DataStore(
        {
            'st': (['x', 'time'], st_m),
            'ast': (['x', 'time'], ast_m),
            'userAcquisitionTimeFW': (['time'], np.ones(nt)),
            'cold': (['time'], ts_cold),
            'warm': (['time'], ts_warm)},
        coords={
            'x': x,
            'time': time},
        attrs={'isDoubleEnded': '0'})
    sections = {
        'cold': [slice(0., 0.5 * cable_len)],
        'warm': [slice(0.5 * cable_len, cable_len)]}
    st_label = 'st'
    ast_label = 'ast'
    # Re-estimate the Stokes variances from the noisy data.
    mst_var, _ = ds.variance_stokes(st_label=st_label, sections=sections)
    mast_var, _ = ds.variance_stokes(st_label=ast_label, sections=sections)
    mst_var = float(mst_var)
    mast_var = float(mast_var)
    # MC variance
    ds.calibration_single_ended(
        sections=sections,
        st_var=mst_var,
        ast_var=mast_var,
        method='wls',
        solver='sparse')
    ds.conf_int_single_ended(
        p_val='p_val',
        p_cov='p_cov',
        st_var=mst_var,
        ast_var=mast_var,
        store_tmpf='tmpf',
        store_tempvar='_var',
        conf_ints=[2.5, 50., 97.5],
        mc_sample_size=50,
        da_random_state=state)
    # Calibrated variance
    stdsf1 = ds.ufunc_per_section(
        label='tmpf', func=np.std, temp_err=True, calc_per='stretch', ddof=1)
    # Use a single timestep to better check if the parameter uncertainties propagate
    ds1 = ds.isel(time=1)
    # Estimated VAR
    stdsf2 = ds1.ufunc_per_section(
        label='tmpf_mc_var', func=np.mean, temp_err=False, calc_per='stretch')
    for (_, v1), (_, v2) in zip(stdsf1.items(), stdsf2.items()):
        for v1i, v2i in zip(v1, v2):
            print('Real VAR: ', v1i**2, 'Estimated VAR: ', float(v2i))
            assert_almost_equal_verbose(v1i**2, v2i, decimal=2)
@pytest.mark.skip(
    reason="Not enough measurements in time. Use exponential "
    "instead.")
def test_variance_of_stokes():
    """Stokes variance estimated from real Silixa files matches a reference
    value, both for the in-memory and the dask-chunked DataStore.

    Currently skipped: the dataset has too few time steps for this
    estimator (use the exponential variant instead).
    """
    correct_var = 9.045
    filepath = data_dir_double_ended2
    ds = read_silixa_files(
        directory=filepath, timezone_netcdf='UTC', file_ext='*.xml')
    sections = {
        'probe1Temperature': [slice(7.5, 17.),
                              slice(70., 80.)],  # cold bath
        'probe2Temperature': [slice(24., 34.),
                              slice(85., 95.)],  # warm bath
    }
    I_var, _ = ds.variance_stokes(st_label='st', sections=sections)
    assert_almost_equal_verbose(I_var, correct_var, decimal=1)
    # Repeat on a dask-backed (chunked) store: result must be identical.
    ds_dask = ds.chunk(chunks={})
    I_var, _ = ds_dask.variance_stokes(st_label='st', sections=sections)
    assert_almost_equal_verbose(I_var, correct_var, decimal=1)
    pass
def test_variance_of_stokes_synthetic():
    """
    Produces a synthetic Stokes measurement with a known noise distribution. Check if same
    variance is obtained.
    Returns
    -------
    """
    yvar = 5.
    nx = 500
    x = np.linspace(0., 20., nx)
    nt = 200
    # Exponentially decaying signal with time-varying amplitude G.
    G = np.linspace(3000, 4000, nt)[None]
    y = G * np.exp(-0.001 * x[:, None])
    # Add white noise with variance `yvar`.
    y += stats.norm.rvs(size=y.size, scale=yvar**0.5).reshape(y.shape)
    ds = DataStore(
        {
            'st': (['x', 'time'], y),
            'probe1Temperature': (['time'], range(nt)),
            'userAcquisitionTimeFW': (['time'], np.ones(nt))},
        coords={
            'x': x,
            'time': range(nt)},
        attrs={'isDoubleEnded': '0'})
    sections = {'probe1Temperature': [slice(0., 20.)]}
    test_st_var, _ = ds.variance_stokes(st_label='st', sections=sections)
    assert_almost_equal_verbose(test_st_var, yvar, decimal=1)
    # NOTE(review): the two lines below repeat the identical call and assert
    # verbatim. Compare with test_variance_of_stokes, which re-runs on a
    # chunked (dask) store here — this may have been the intent; confirm.
    test_st_var, _ = ds.variance_stokes(st_label='st', sections=sections)
    assert_almost_equal_verbose(test_st_var, yvar, decimal=1)
    pass
def test_variance_of_stokes_linear_synthetic():
    """
    Produces a synthetic Stokes measurement with a known noise distribution.
    Check if same variance is obtained.
    Returns
    -------
    """
    var_slope = 0.01
    nx = 500
    x = np.linspace(0., 20., nx)
    nt = 200
    G = np.linspace(500, 4000, nt)[None]
    # Noise-free signal, plus a copy whose noise variance grows linearly
    # with intensity (Poisson-like), passing through zero.
    c_no_noise = G * np.exp(-0.001 * x[:, None])
    c_lin_var_through_zero = stats.norm.rvs(
        loc=c_no_noise,
        # size=y.size,
        scale=(var_slope * c_no_noise)**0.5)
    ds = DataStore(
        {
            'st': (['x', 'time'], c_no_noise),
            'c_lin_var_through_zero': (['x', 'time'], c_lin_var_through_zero),
            'probe1Temperature': (['time'], range(nt)),
            'userAcquisitionTimeFW': (['time'], np.ones(nt))},
        coords={
            'x': x,
            'time': range(nt)},
        attrs={'isDoubleEnded': '0'})
    sections = {'probe1Temperature': [slice(0., 20.)]}
    test_st_var, _ = ds.variance_stokes(st_label='st', sections=sections)
    # If fit is forced through zero. Only Poisson distributed noise
    slope, offset, st_sort_mean, st_sort_var, resid, var_fun = \
        ds.variance_stokes_linear(
            'c_lin_var_through_zero', nbin=10, through_zero=True,
            plot_fit=False)
    assert_almost_equal_verbose(slope, var_slope, decimal=3)
    # Fit accounts for Poisson noise plus white noise
    slope, offset, st_sort_mean, st_sort_var, resid, var_fun = \
        ds.variance_stokes_linear(
            'c_lin_var_through_zero', nbin=100, through_zero=False)
    assert_almost_equal_verbose(slope, var_slope, decimal=3)
    # With a free intercept the recovered offset should be ~0.
    assert_almost_equal_verbose(offset, 0., decimal=0)
    pass
def test_exponential_variance_of_stokes():
    """Exponential Stokes variance estimated from real Silixa files matches
    a reference value, both in-memory and on a dask-chunked DataStore."""
    correct_var = 11.86535
    filepath = data_dir_double_ended2
    ds = read_silixa_files(
        directory=filepath, timezone_netcdf='UTC', file_ext='*.xml')
    sections = {
        'probe1Temperature': [slice(7.5, 17.),
                              slice(70., 80.)],  # cold bath
        'probe2Temperature': [slice(24., 34.),
                              slice(85., 95.)],  # warm bath
    }
    I_var, _ = ds.variance_stokes_exponential(st_label='st', sections=sections)
    assert_almost_equal_verbose(I_var, correct_var, decimal=5)
    # Repeat on a dask-backed (chunked) store: result must be identical.
    ds_dask = ds.chunk(chunks={})
    I_var, _ = ds_dask.variance_stokes_exponential(
        st_label='st', sections=sections)
    assert_almost_equal_verbose(I_var, correct_var, decimal=5)
    pass
def test_exponential_variance_of_stokes_synthetic():
    """
    Produces a synthetic Stokes measurement with a known noise distribution. Check if same
    variance is obtained.
    Returns
    -------
    """
    yvar = 5.
    nx = 500
    x = np.linspace(0., 20., nx)
    nt = 200
    # Exponentially decaying signal with time-varying amplitude `beta`.
    beta = np.linspace(3000, 4000, nt)[None]
    y = beta * np.exp(-0.001 * x[:, None])
    # Add white noise with variance `yvar`.
    y += stats.norm.rvs(size=y.size, scale=yvar**0.5).reshape(y.shape)
    ds = DataStore(
        {
            'st': (['x', 'time'], y),
            'probe1Temperature': (['time'], range(nt)),
            'userAcquisitionTimeFW': (['time'], np.ones(nt))},
        coords={
            'x': x,
            'time': range(nt)},
        attrs={'isDoubleEnded': '0'})
    sections = {'probe1Temperature': [slice(0., 20.)]}
    test_st_var, _ = ds.variance_stokes_exponential(
        st_label='st', sections=sections)
    assert_almost_equal_verbose(test_st_var, yvar, decimal=1)
    pass
def test_double_ended_ols_wls_estimate_synthetic():
    """Checks whether the coefficients are correctly defined by creating a
    synthetic measurement set, and derive the parameters from this set.
    Without variance.
    They should be the same as the parameters used to create the synthetic
    measurement set"""
    from dtscalibration import DataStore

    # Constants of the synthetic fiber / instrument.
    cable_len = 100.
    nt = 50
    time = np.arange(nt)
    x = np.linspace(0., cable_len, 100)
    ts_cold = np.ones(nt) * 4.
    ts_warm = np.ones(nt) * 20.

    C_p = 15246
    C_m = 2400.
    dalpha_r = 0.0005284
    dalpha_m = 0.0004961
    dalpha_p = 0.0005607
    gamma = 482.6

    # First half of the cable in the cold bath, second half in the warm bath.
    cold_mask = x < 0.5 * cable_len
    warm_mask = np.invert(cold_mask)  # == False
    temp_real = np.ones((len(x), nt))
    temp_real[cold_mask] *= ts_cold + 273.15
    temp_real[warm_mask] *= ts_warm + 273.15

    # Noise-free (anti-)Stokes signals for the forward (st, ast) and
    # backward (rst, rast) channels.
    st = C_p * np.exp(-(dalpha_r + dalpha_p) * x[:, None]) * \
        np.exp(gamma / temp_real) / (np.exp(gamma / temp_real) - 1)
    ast = C_m * np.exp(-(dalpha_r + dalpha_m) * x[:, None]) / \
        (np.exp(gamma / temp_real) - 1)
    rst = C_p * np.exp(-(dalpha_r + dalpha_p) * (-x[:, None] + cable_len)) * \
        np.exp(gamma / temp_real) / (np.exp(gamma / temp_real) - 1)
    rast = C_m * np.exp(-(dalpha_r + dalpha_m) * (-x[:, None] + cable_len)) / \
        (np.exp(gamma / temp_real) - 1)

    # Expected integrated differential attenuation, referenced to x[0].
    alpha = np.mean(np.log(rst / rast) - np.log(st / ast), axis=1) / 2
    alpha -= alpha[0]  # the first x-index is where to start counting

    ds = DataStore(
        {
            'st': (['x', 'time'], st),
            'ast': (['x', 'time'], ast),
            'rst': (['x', 'time'], rst),
            'rast': (['x', 'time'], rast),
            'userAcquisitionTimeFW': (['time'], np.ones(nt)),
            'userAcquisitionTimeBW': (['time'], np.ones(nt)),
            'cold': (['time'], ts_cold),
            'warm': (['time'], ts_warm)},
        coords={
            'x': x,
            'time': time},
        attrs={'isDoubleEnded': '1'})

    sections = {
        'cold': [slice(0., 0.4 * cable_len)],
        'warm': [slice(0.65 * cable_len, cable_len)]}

    # OLS
    ds.calibration_double_ended(
        sections=sections, method='ols', solver='sparse')

    assert_almost_equal_verbose(ds.gamma.values, gamma, decimal=11)
    assert_almost_equal_verbose(
        ds.alpha.values, alpha, decimal=12)  # 13 in 64-bit
    assert_almost_equal_verbose(ds.tmpf.values, temp_real - 273.15, decimal=10)
    assert_almost_equal_verbose(ds.tmpb.values, temp_real - 273.15, decimal=10)
    assert_almost_equal_verbose(ds.tmpw.values, temp_real - 273.15, decimal=11)

    # WLS
    ds.calibration_double_ended(
        sections=sections,
        st_var=1e-7,
        ast_var=1e-7,
        rst_var=1e-7,
        rast_var=1e-7,
        method='wls',
        solver='sparse',
        tmpw_mc_size=5)

    assert_almost_equal_verbose(ds.gamma.values, gamma, decimal=10)
    assert_almost_equal_verbose(ds.alpha.values, alpha, decimal=8)
    assert_almost_equal_verbose(ds.tmpf.values, temp_real - 273.15, decimal=6)
    assert_almost_equal_verbose(ds.tmpb.values, temp_real - 273.15, decimal=6)
    assert_almost_equal_verbose(ds.tmpw.values, temp_real - 273.15, decimal=6)
def test_double_ended_ols_wls_estimate_synthetic_df_and_db_are_different():
    """Checks whether the coefficients are correctly defined by creating a
    synthetic measurement set, and derive the parameters from this set.
    Without variance.
    They should be the same as the parameters used to create the synthetic
    measurement set. This one has a different D for the forward channel than
    for the backward channel."""
    from dtscalibration import DataStore

    cable_len = 100.
    nt = 3
    time = np.arange(nt)
    x = np.linspace(0., cable_len, 8)
    # Time-varying reference bath temperatures.
    ts_cold = np.ones(nt) * 4. + np.cos(time) * 4
    ts_warm = np.ones(nt) * 20. + -np.sin(time) * 4

    C_p = 1324  # 1/2 * E0 * v * K_+/lam_+^4
    eta_pf = np.cos(time) / 10 + 1  # eta_+ (gain factor forward channel)
    eta_pb = np.sin(time) / 10 + 1  # eta_- (gain factor backward channel)
    C_m = 5000.
    eta_mf = np.cos(time + np.pi / 8) / 10 + 1  # anti-Stokes gain, forward
    eta_mb = np.sin(time + np.pi / 8) / 10 + 1  # anti-Stokes gain, backward
    dalpha_r = 0.005284
    dalpha_m = 0.004961
    dalpha_p = 0.005607
    gamma = 482.6

    temp_real_kelvin = np.zeros((len(x), nt)) + 273.15
    temp_real_kelvin[x < 0.2 * cable_len] += ts_cold[None]
    temp_real_kelvin[x > 0.85 * cable_len] += ts_warm[None]
    temp_real_celsius = temp_real_kelvin - 273.15

    # Synthetic (anti-)Stokes signals with channel-dependent gain factors,
    # which make D differ between forward and backward channels.
    st = eta_pf[None] * C_p * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_p * x[:, None]) * np.exp(gamma / temp_real_kelvin) / \
        (np.exp(gamma / temp_real_kelvin) - 1)
    ast = eta_mf[None] * C_m * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_m * x[:, None]) / (np.exp(gamma / temp_real_kelvin) - 1)
    rst = eta_pb[None] * C_p * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * \
        np.exp(-dalpha_p * (-x[:, None] + cable_len)) * \
        np.exp(gamma / temp_real_kelvin) / (
            np.exp(gamma / temp_real_kelvin) - 1)
    rast = eta_mb[None] * C_m * np.exp(
        -dalpha_r * (-x[:, None] + cable_len)) * np.exp(
            -dalpha_m * (-x[:, None] + cable_len)) / \
        (np.exp(gamma / temp_real_kelvin) - 1)

    # Expected calibration parameters derived from the model definition.
    c_f = np.log(eta_mf * C_m / (eta_pf * C_p))
    c_b = np.log(eta_mb * C_m / (eta_pb * C_p))

    dalpha = dalpha_p - dalpha_m  # \Delta\alpha
    alpha_int = cable_len * dalpha

    df = c_f  # reference section starts at first x-index
    db = c_b + alpha_int
    i_fw = np.log(st / ast)
    i_bw = np.log(rst / rast)
    E_real = (i_bw - i_fw) / 2 + (db - df) / 2

    ds = DataStore(
        {
            'st': (['x', 'time'], st),
            'ast': (['x', 'time'], ast),
            'rst': (['x', 'time'], rst),
            'rast': (['x', 'time'], rast),
            'userAcquisitionTimeFW': (['time'], np.ones(nt)),
            'userAcquisitionTimeBW': (['time'], np.ones(nt)),
            'cold': (['time'], ts_cold),
            'warm': (['time'], ts_warm)},
        coords={
            'x': x,
            'time': time},
        attrs={'isDoubleEnded': '1'})

    ds.sections = {
        'cold': [slice(0., 0.09 * cable_len)],
        'warm': [slice(0.9 * cable_len, cable_len)]}

    # Full expected parameter vector: [gamma, df per time, db per time, E].
    real_ans2 = np.concatenate(([gamma], df, db, E_real[:, 0]))

    ds.calibration_double_ended(
        st_var=1.5,
        ast_var=1.5,
        rst_var=1.,
        rast_var=1.,
        method='wls',
        solver='sparse',
        tmpw_mc_size=1000,
        fix_gamma=(gamma, 0.),
        remove_mc_set_flag=True)

    assert_almost_equal_verbose(df, ds.df.values, decimal=14)
    assert_almost_equal_verbose(db, ds.db.values, decimal=13)
    assert_almost_equal_verbose(
        x * (dalpha_p - dalpha_m),
        ds.alpha.values - ds.alpha.values[0],
        decimal=13)
    assert np.all(np.abs(real_ans2 - ds.p_val.values) < 1e-10)
    assert_almost_equal_verbose(temp_real_celsius, ds.tmpf.values, decimal=10)
    assert_almost_equal_verbose(temp_real_celsius, ds.tmpb.values, decimal=10)
    assert_almost_equal_verbose(temp_real_celsius, ds.tmpw.values, decimal=10)
    pass
# NOTE(review): "reneaming" is a typo for "renaming"; kept because renaming
# the function would change the collected pytest test id.
def test_reneaming_old_default_labels_to_new_fixed_labels():
    """Same as
    `test_double_ended_ols_wls_estimate_synthetic_df_and_db_are_different`
    Which runs fast, but using the renaming function."""
    from dtscalibration import DataStore

    cable_len = 100.
    nt = 3
    time = np.arange(nt)
    x = np.linspace(0., cable_len, 8)
    ts_cold = np.ones(nt) * 4. + np.cos(time) * 4
    ts_warm = np.ones(nt) * 20. + -np.sin(time) * 4

    C_p = 1324  # 1/2 * E0 * v * K_+/lam_+^4
    eta_pf = np.cos(time) / 10 + 1  # eta_+ (gain factor forward channel)
    eta_pb = np.sin(time) / 10 + 1  # eta_- (gain factor backward channel)
    C_m = 5000.
    eta_mf = np.cos(time + np.pi / 8) / 10 + 1
    eta_mb = np.sin(time + np.pi / 8) / 10 + 1
    dalpha_r = 0.005284
    dalpha_m = 0.004961
    dalpha_p = 0.005607
    gamma = 482.6

    temp_real_kelvin = np.zeros((len(x), nt)) + 273.15
    temp_real_kelvin[x < 0.2 * cable_len] += ts_cold[None]
    temp_real_kelvin[x > 0.85 * cable_len] += ts_warm[None]
    temp_real_celsius = temp_real_kelvin - 273.15

    # Same synthetic signal model as in
    # test_double_ended_ols_wls_estimate_synthetic_df_and_db_are_different.
    st = eta_pf[None] * C_p * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_p * x[:, None]) * np.exp(gamma / temp_real_kelvin) / \
        (np.exp(gamma / temp_real_kelvin) - 1)
    ast = eta_mf[None] * C_m * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_m * x[:, None]) / (np.exp(gamma / temp_real_kelvin) - 1)
    rst = eta_pb[None] * C_p * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * \
        np.exp(-dalpha_p * (-x[:, None] + cable_len)) * \
        np.exp(gamma / temp_real_kelvin) / (
            np.exp(gamma / temp_real_kelvin) - 1)
    rast = eta_mb[None] * C_m * np.exp(
        -dalpha_r * (-x[:, None] + cable_len)) * np.exp(
            -dalpha_m * (-x[:, None] + cable_len)) / \
        (np.exp(gamma / temp_real_kelvin) - 1)

    c_f = np.log(eta_mf * C_m / (eta_pf * C_p))
    c_b = np.log(eta_mb * C_m / (eta_pb * C_p))

    dalpha = dalpha_p - dalpha_m  # \Delta\alpha
    alpha_int = cable_len * dalpha

    df = c_f  # reference section starts at first x-index
    db = c_b + alpha_int
    i_fw = np.log(st / ast)
    i_bw = np.log(rst / rast)
    E_real = (i_bw - i_fw) / 2 + (db - df) / 2

    # Build the store with the OLD default labels; rename_labels() should
    # map them to the new fixed labels (st/ast/rst/rast).
    ds = DataStore(
        {
            'ST': (['x', 'time'], st),
            'AST': (['x', 'time'], ast),
            'REV-ST': (['x', 'time'], rst),
            'REV-AST': (['x', 'time'], rast),
            'userAcquisitionTimeFW': (['time'], np.ones(nt)),
            'userAcquisitionTimeBW': (['time'], np.ones(nt)),
            'cold': (['time'], ts_cold),
            'warm': (['time'], ts_warm)},
        coords={
            'x': x,
            'time': time},
        attrs={'isDoubleEnded': '1'})
    ds = ds.rename_labels()

    ds.sections = {
        'cold': [slice(0., 0.09 * cable_len)],
        'warm': [slice(0.9 * cable_len, cable_len)]}

    real_ans2 = np.concatenate(([gamma], df, db, E_real[:, 0]))

    ds.calibration_double_ended(
        st_var=1.5,
        ast_var=1.5,
        rst_var=1.,
        rast_var=1.,
        method='wls',
        solver='sparse',
        tmpw_mc_size=1000,
        fix_gamma=(gamma, 0.),
        remove_mc_set_flag=True)

    assert_almost_equal_verbose(df, ds.df.values, decimal=14)
    assert_almost_equal_verbose(db, ds.db.values, decimal=13)
    assert_almost_equal_verbose(
        x * (dalpha_p - dalpha_m),
        ds.alpha.values - ds.alpha.values[0],
        decimal=13)
    assert np.all(np.abs(real_ans2 - ds.p_val.values) < 1e-10)
    assert_almost_equal_verbose(temp_real_celsius, ds.tmpf.values, decimal=10)
    assert_almost_equal_verbose(temp_real_celsius, ds.tmpb.values, decimal=10)
    assert_almost_equal_verbose(temp_real_celsius, ds.tmpw.values, decimal=10)
    pass
@pytest.mark.xfail
def test_fail_if_st_labels_are_passed_to_calibration_function():
    """Same as
    `test_double_ended_ols_wls_estimate_synthetic_df_and_db_are_different`
    Which runs fast.

    Expected to fail: passing the old ``st_label``-style keyword arguments
    to ``calibration_double_ended`` is no longer supported."""
    from dtscalibration import DataStore

    cable_len = 100.
    nt = 3
    time = np.arange(nt)
    x = np.linspace(0., cable_len, 8)
    ts_cold = np.ones(nt) * 4. + np.cos(time) * 4
    ts_warm = np.ones(nt) * 20. + -np.sin(time) * 4

    C_p = 1324  # 1/2 * E0 * v * K_+/lam_+^4
    eta_pf = np.cos(time) / 10 + 1  # eta_+ (gain factor forward channel)
    eta_pb = np.sin(time) / 10 + 1  # eta_- (gain factor backward channel)
    C_m = 5000.
    eta_mf = np.cos(time + np.pi / 8) / 10 + 1
    eta_mb = np.sin(time + np.pi / 8) / 10 + 1
    dalpha_r = 0.005284
    dalpha_m = 0.004961
    dalpha_p = 0.005607
    gamma = 482.6

    temp_real_kelvin = np.zeros((len(x), nt)) + 273.15
    temp_real_kelvin[x < 0.2 * cable_len] += ts_cold[None]
    temp_real_kelvin[x > 0.85 * cable_len] += ts_warm[None]

    # Synthetic (anti-)Stokes signals; same model as the sibling tests.
    st = eta_pf[None] * C_p * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_p * x[:, None]) * np.exp(gamma / temp_real_kelvin) / \
        (np.exp(gamma / temp_real_kelvin) - 1)
    ast = eta_mf[None] * C_m * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_m * x[:, None]) / (np.exp(gamma / temp_real_kelvin) - 1)
    rst = eta_pb[None] * C_p * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * \
        np.exp(-dalpha_p * (-x[:, None] + cable_len)) * \
        np.exp(gamma / temp_real_kelvin) / (
            np.exp(gamma / temp_real_kelvin) - 1)
    rast = eta_mb[None] * C_m * np.exp(
        -dalpha_r * (-x[:, None] + cable_len)) * np.exp(
            -dalpha_m * (-x[:, None] + cable_len)) / \
        (np.exp(gamma / temp_real_kelvin) - 1)

    ds = DataStore(
        {
            'ST': (['x', 'time'], st),
            'AST': (['x', 'time'], ast),
            'REV-ST': (['x', 'time'], rst),
            'REV-AST': (['x', 'time'], rast),
            'userAcquisitionTimeFW': (['time'], np.ones(nt)),
            'userAcquisitionTimeBW': (['time'], np.ones(nt)),
            'cold': (['time'], ts_cold),
            'warm': (['time'], ts_warm)},
        coords={
            'x': x,
            'time': time},
        attrs={'isDoubleEnded': '1'})
    ds = ds.rename_labels()

    ds.sections = {
        'cold': [slice(0., 0.09 * cable_len)],
        'warm': [slice(0.9 * cable_len, cable_len)]}

    # The *_label keyword arguments below are the removed API; this call is
    # expected to raise, which satisfies the xfail marker.
    ds.calibration_double_ended(
        st_label='ST',
        ast_label='AST',
        rst_label='REV-ST',
        rast_label='REV-AST',
        st_var=1.5,
        ast_var=1.5,
        rst_var=1.,
        rast_var=1.,
        method='wls',
        solver='sparse',
        tmpw_mc_size=1000,
        fix_gamma=(gamma, 0.),
        remove_mc_set_flag=True)
    pass
def test_double_ended_asymmetrical_attenuation():
    """Calibration should recover the true temperature when a connector at
    x=50 introduces asymmetrical attenuation (different loss in the forward
    and backward channels), modeled via the ``trans_att`` argument."""
    from dtscalibration import DataStore

    cable_len = 100.
    nt = 3
    time = np.arange(nt)
    nx_per_sec = 1
    nx = nx_per_sec * 8
    x = np.linspace(0., cable_len, nx)
    ts_cold = 4. + np.cos(time) * 4
    ts_warm = 20. + -np.sin(time) * 4
    ts_ground = 6.

    C_p = 1324  # 1/2 * E0 * v * K_+/lam_+^4
    eta_pf = np.cos(time) / 10 + 1  # eta_+ (gain factor forward channel)
    eta_pb = np.sin(time) / 10 + 1  # eta_- (gain factor backward channel)
    C_m = 5000.
    eta_mf = np.cos(time + np.pi / 8) / 10 + 1
    eta_mb = np.sin(time + np.pi / 8) / 10 + 1
    dalpha_r = 0.005284
    dalpha_m = 0.004961
    dalpha_p = 0.005607
    gamma = 482.6
    # Transmission factors of the connector: forward and backward losses differ.
    talph_fw = 0.9
    talph_bw = 0.8

    # Reference sections at both cable ends, ground temperature in between.
    temp_real_kelvin = np.zeros((len(x), nt)) + 273.15
    temp_real_kelvin[:nx_per_sec] += ts_cold[None]
    temp_real_kelvin[nx_per_sec:2 * nx_per_sec] += ts_warm[None]
    temp_real_kelvin[-nx_per_sec:] += ts_cold[None]
    temp_real_kelvin[-2 * nx_per_sec:-nx_per_sec] += ts_warm[None]
    temp_real_kelvin[2 * nx_per_sec:-2 * nx_per_sec] += ts_ground
    temp_real_celsius = temp_real_kelvin - 273.15

    st = eta_pf[None] * C_p * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_p * x[:, None]) * np.exp(gamma / temp_real_kelvin) / \
        (np.exp(gamma / temp_real_kelvin) - 1)
    # Forward channel loses intensity after the connector...
    st[4 * nx_per_sec:] *= talph_fw
    ast = eta_mf[None] * C_m * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_m * x[:, None]) / (
            np.exp(gamma / temp_real_kelvin) - 1)
    rst = eta_pb[None] * C_p * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * \
        np.exp(-dalpha_p * (-x[:, None] + cable_len)) * \
        np.exp(gamma / temp_real_kelvin) / (
            np.exp(gamma / temp_real_kelvin) - 1)
    # ...while the backward channel loses intensity before it.
    rst[:4 * nx_per_sec] *= talph_bw
    rast = eta_mb[None] * C_m * np.exp(
        -dalpha_r * (-x[:, None] + cable_len)) * np.exp(
            -dalpha_m * (-x[:, None] + cable_len)) / \
        (np.exp(gamma / temp_real_kelvin) - 1)

    ds = DataStore(
        {
            'TMPR': (['x', 'time'], temp_real_celsius),
            'st': (['x', 'time'], st),
            'ast': (['x', 'time'], ast),
            'rst': (['x', 'time'], rst),
            'rast': (['x', 'time'], rast),
            'userAcquisitionTimeFW': (['time'], np.ones(nt)),
            'userAcquisitionTimeBW': (['time'], np.ones(nt)),
            'cold': (['time'], ts_cold),
            'warm': (['time'], ts_warm)},
        coords={
            'x': x,
            'time': time},
        attrs={'isDoubleEnded': '1'})

    ds.sections = {
        'cold': [slice(0., x[nx_per_sec - 1]),
                 slice(x[-nx_per_sec], x[-1])],
        'warm':
            [
                slice(x[nx_per_sec], x[2 * nx_per_sec - 1]),
                slice(x[-2 * nx_per_sec], x[-1 * nx_per_sec - 1])]}

    ds.calibration_double_ended(
        st_var=1.5,
        ast_var=1.5,
        rst_var=1.,
        rast_var=1.,
        method='wls',
        solver='sparse',
        tmpw_mc_size=1000,
        remove_mc_set_flag=True,
        trans_att=[50.])

    assert_almost_equal_verbose(temp_real_celsius, ds.tmpf.values, decimal=7)
    assert_almost_equal_verbose(temp_real_celsius, ds.tmpb.values, decimal=7)
    assert_almost_equal_verbose(temp_real_celsius, ds.tmpw.values, decimal=7)

    # test `trans_att` related functions

    # Clear out old results
    ds.set_trans_att([])

    assert ds.trans_att.size == 0, 'clear out trans_att config'

    del_keys = []
    for k, v in ds.data_vars.items():
        if 'trans_att' in v.dims:
            del_keys.append(k)

    assert len(del_keys) == 0, 'clear out trans_att config'

    # About to be deprecated: `transient_asym_att_x` is the old spelling of
    # the `trans_att` argument; it must still produce the same result.
    ds.calibration_double_ended(
        st_var=1.5,
        ast_var=1.5,
        rst_var=1.,
        rast_var=1.,
        method='wls',
        solver='sparse',
        tmpw_mc_size=1000,
        remove_mc_set_flag=True,
        transient_asym_att_x=[50.])

    assert_almost_equal_verbose(temp_real_celsius, ds.tmpf.values, decimal=7)
    assert_almost_equal_verbose(temp_real_celsius, ds.tmpb.values, decimal=7)
    assert_almost_equal_verbose(temp_real_celsius, ds.tmpw.values, decimal=7)
    pass
def test_double_ended_one_matching_section_and_one_asym_att():
    """Calibration with a single matching section plus one asymmetrical
    attenuation point (``trans_att``) should still recover the true
    temperature, even though the reference sections only cover one end."""
    from dtscalibration import DataStore

    cable_len = 100.
    nt = 3
    time = np.arange(nt)
    nx_per_sec = 2
    nx = nx_per_sec * 8
    x = np.linspace(0., cable_len, nx)
    ts_cold = 4. + np.cos(time) * 4
    ts_warm = 20. + -np.sin(time) * 4
    ts_ground = 6.

    C_p = 1324  # 1/2 * E0 * v * K_+/lam_+^4
    eta_pf = np.cos(time) / 10 + 1  # eta_+ (gain factor forward channel)
    eta_pb = np.sin(time) / 10 + 1  # eta_- (gain factor backward channel)
    C_m = 5000.
    eta_mf = np.cos(time + np.pi / 8) / 10 + 1
    eta_mb = np.sin(time + np.pi / 8) / 10 + 1
    dalpha_r = 0.005284
    dalpha_m = 0.004961
    dalpha_p = 0.005607
    gamma = 482.6
    # Connector transmission: forward and backward losses differ.
    talph_fw = 0.9
    talph_bw = 0.8

    temp_real_kelvin = np.zeros((len(x), nt)) + 273.15
    temp_real_kelvin[:nx_per_sec] += ts_cold[None]
    temp_real_kelvin[nx_per_sec:2 * nx_per_sec] += ts_warm[None]
    temp_real_kelvin[-nx_per_sec:] += ts_cold[None]
    temp_real_kelvin[-2 * nx_per_sec:-nx_per_sec] += ts_warm[None]
    temp_real_kelvin[2 * nx_per_sec:-2 * nx_per_sec] += ts_ground
    temp_real_celsius = temp_real_kelvin - 273.15

    st = eta_pf[None] * C_p * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_p * x[:, None]) * np.exp(gamma / temp_real_kelvin) / \
        (np.exp(gamma / temp_real_kelvin) - 1)
    st[4 * nx_per_sec:] *= talph_fw  # loss after the connector (forward)
    ast = eta_mf[None] * C_m * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_m * x[:, None]) / (
            np.exp(gamma / temp_real_kelvin) - 1)
    rst = eta_pb[None] * C_p * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * \
        np.exp(-dalpha_p * (-x[:, None] + cable_len)) * \
        np.exp(gamma / temp_real_kelvin) / (
            np.exp(gamma / temp_real_kelvin) - 1)
    rst[:4 * nx_per_sec] *= talph_bw  # loss before the connector (backward)
    rast = eta_mb[None] * C_m * np.exp(
        -dalpha_r * (-x[:, None] + cable_len)) * np.exp(
            -dalpha_m * (-x[:, None] + cable_len)) / \
        (np.exp(gamma / temp_real_kelvin) - 1)

    ds = DataStore(
        {
            'TMPR': (['x', 'time'], temp_real_celsius),
            'st': (['x', 'time'], st),
            'ast': (['x', 'time'], ast),
            'rst': (['x', 'time'], rst),
            'rast': (['x', 'time'], rast),
            'userAcquisitionTimeFW': (['time'], np.ones(nt)),
            'userAcquisitionTimeBW': (['time'], np.ones(nt)),
            'cold': (['time'], ts_cold),
            'warm': (['time'], ts_warm)},
        coords={
            'x': x,
            'time': time},
        attrs={'isDoubleEnded': '1'})

    # Reference sections only at the start of the cable.
    ds.sections = {
        'cold': [slice(0., x[nx_per_sec - 1])],
        'warm': [slice(x[nx_per_sec], x[2 * nx_per_sec - 1])]}

    ds.calibration_double_ended(
        st_var=1.5,
        ast_var=1.5,
        rst_var=1.,
        rast_var=1.,
        method='wls',
        solver='sparse',
        tmpw_mc_size=3,
        remove_mc_set_flag=True,
        trans_att=[50.],
        matching_sections=[
            (
                slice(x[3 * nx_per_sec], x[4 * nx_per_sec - 1]),
                slice(x[4 * nx_per_sec], x[5 * nx_per_sec - 1]), True)])

    assert_almost_equal_verbose(temp_real_celsius, ds.tmpf.values, decimal=7)
    assert_almost_equal_verbose(temp_real_celsius, ds.tmpb.values, decimal=7)
    assert_almost_equal_verbose(temp_real_celsius, ds.tmpw.values, decimal=7)
def test_double_ended_two_matching_sections_and_two_asym_atts():
    """Setup contains two matching sections and two connectors that introduce
    asymmetrical attenuation. Solves beautifully."""
    from dtscalibration import DataStore

    cable_len = 100.
    nt = 5
    time = np.arange(nt)
    nx_per_sec = 4
    nx = nx_per_sec * 9
    x = np.linspace(0., cable_len, nx)
    ts_cold = 4. + np.cos(time) * 4
    ts_warm = 20. + -np.sin(time) * 4
    # Spatially varying ground temperature, used for the matching sections.
    ts_ground = np.linspace(1, 9, num=nx_per_sec)

    C_p = 1324  # 1/2 * E0 * v * K_+/lam_+^4
    eta_pf = np.cos(time) / 10 + 1  # eta_+ (gain factor forward channel)
    eta_pb = np.sin(time) / 10 + 1  # eta_- (gain factor backward channel)
    C_m = 5000.
    eta_mf = np.cos(time + np.pi / 8) / 10 + 1
    eta_mb = np.sin(time + np.pi / 8) / 10 + 1
    dalpha_r = 0.005284
    dalpha_m = 0.004961
    dalpha_p = 0.005607
    gamma = 482.6
    talph_fw = 0.95
    talph_bw = 0.85

    # Section 3 is section 2 reversed (matching pair with reverse=True);
    # sections 5 and 6 are identical (matching pair with reverse=False).
    temp_real_kelvin = np.zeros((len(x), nt)) + 273.15
    temp_real_kelvin[:nx_per_sec] += ts_cold[None]
    temp_real_kelvin[nx_per_sec:2 * nx_per_sec] += ts_warm[None]
    temp_real_kelvin[2 * nx_per_sec:3 * nx_per_sec] += ts_ground[:, None]
    temp_real_kelvin[3 * nx_per_sec:4 * nx_per_sec] += ts_ground[::-1, None]
    temp_real_kelvin[5 * nx_per_sec:6 * nx_per_sec] += ts_ground[:, None] + 5
    temp_real_kelvin[6 * nx_per_sec:7 * nx_per_sec] += ts_ground[:, None] + 5
    temp_real_kelvin[7 * nx_per_sec:8 * nx_per_sec] += ts_warm[None]
    temp_real_kelvin[8 * nx_per_sec:9 * nx_per_sec] += ts_cold[None]
    temp_real_celsius = temp_real_kelvin - 273.15

    st = eta_pf[None] * C_p * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_p * x[:, None]) * np.exp(gamma / temp_real_kelvin) / \
        (np.exp(gamma / temp_real_kelvin) - 1)
    # Two connectors: losses accumulate, so the overlapping slices apply
    # talph twice beyond the second connector (and mirrored for rst).
    st[3 * nx_per_sec:] *= talph_fw
    st[6 * nx_per_sec:] *= talph_fw
    ast = eta_mf[None] * C_m * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_m * x[:, None]) / (
            np.exp(gamma / temp_real_kelvin) - 1)
    rst = eta_pb[None] * C_p * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * \
        np.exp(-dalpha_p * (-x[:, None] + cable_len)) * \
        np.exp(gamma / temp_real_kelvin) / (
            np.exp(gamma / temp_real_kelvin) - 1)
    rst[:3 * nx_per_sec] *= talph_bw
    rst[:6 * nx_per_sec] *= talph_bw
    rast = eta_mb[None] * C_m * np.exp(
        -dalpha_r * (-x[:, None] + cable_len)) * np.exp(
            -dalpha_m * (-x[:, None] + cable_len)) / \
        (np.exp(gamma / temp_real_kelvin) - 1)

    ds = DataStore(
        {
            'TMPR': (['x', 'time'], temp_real_celsius),
            'st': (['x', 'time'], st),
            'ast': (['x', 'time'], ast),
            'rst': (['x', 'time'], rst),
            'rast': (['x', 'time'], rast),
            'userAcquisitionTimeFW': (['time'], np.ones(nt)),
            'userAcquisitionTimeBW': (['time'], np.ones(nt)),
            'cold': (['time'], ts_cold),
            'warm': (['time'], ts_warm)},
        coords={
            'x': x,
            'time': time},
        attrs={'isDoubleEnded': '1'})

    ds.sections = {
        'cold': [slice(0., x[nx_per_sec - 1])],
        'warm': [slice(x[nx_per_sec], x[2 * nx_per_sec - 1])]}

    # Matching sections: (section, matching section, reversed?).
    ms = [
        (
            slice(x[2 * nx_per_sec], x[3 * nx_per_sec - 1]),
            slice(x[3 * nx_per_sec], x[4 * nx_per_sec - 1]), True),
        (
            slice(x[5 * nx_per_sec], x[6 * nx_per_sec - 1]),
            slice(x[6 * nx_per_sec], x[7 * nx_per_sec - 1]), False)]

    ds.calibration_double_ended(
        st_var=.5,
        ast_var=.5,
        rst_var=0.1,
        rast_var=0.1,
        method='wls',
        solver='sparse',
        tmpw_mc_size=3,
        remove_mc_set_flag=True,
        trans_att=[x[3 * nx_per_sec], x[6 * nx_per_sec]],
        matching_sections=ms)

    assert_almost_equal_verbose(temp_real_celsius, ds.tmpf.values, decimal=7)
    assert_almost_equal_verbose(temp_real_celsius, ds.tmpb.values, decimal=7)
    assert_almost_equal_verbose(temp_real_celsius, ds.tmpw.values, decimal=7)
    pass
def test_double_ended_ols_wls_fix_gamma_estimate_synthetic():
    """Checks whether the coefficients are correctly defined by creating a
    synthetic measurement set, and derive the parameters from this set.
    Without variance.
    They should be the same as the parameters used to create the synthetic
    measurement set

    Here gamma is fixed to its true value, so it must be returned exactly."""
    from dtscalibration import DataStore

    cable_len = 100.
    nt = 500
    time = np.arange(nt)
    x = np.linspace(0., cable_len, 100)
    ts_cold = np.ones(nt) * 4.
    ts_warm = np.ones(nt) * 20.

    C_p = 15246
    C_m = 2400.
    dalpha_r = 0.0005284
    dalpha_m = 0.0004961
    dalpha_p = 0.0005607
    gamma = 482.6
    cold_mask = x < 0.5 * cable_len
    warm_mask = np.invert(cold_mask)  # == False
    temp_real = np.ones((len(x), nt))
    temp_real[cold_mask] *= ts_cold + 273.15
    temp_real[warm_mask] *= ts_warm + 273.15

    st = C_p * np.exp(-(dalpha_r + dalpha_p) * x[:, None]) * \
        np.exp(gamma / temp_real) / (np.exp(gamma / temp_real) - 1)
    ast = C_m * np.exp(-(dalpha_r + dalpha_m) * x[:, None]) / \
        (np.exp(gamma / temp_real) - 1)
    rst = C_p * np.exp(-(dalpha_r + dalpha_p) * (-x[:, None] + cable_len)) * \
        np.exp(gamma / temp_real) / (np.exp(gamma / temp_real) - 1)
    rast = C_m * np.exp(-(dalpha_r + dalpha_m) * (-x[:, None] + cable_len)) / \
        (np.exp(gamma / temp_real) - 1)

    alpha = np.mean(np.log(rst / rast) - np.log(st / ast), axis=1) / 2
    alpha -= alpha[0]  # the first x-index is where to start counting
    dalpha = dalpha_p - dalpha_m
    alpha2 = x * dalpha

    # to ensure the st, rst, ast, rast were correctly defined.
    np.testing.assert_allclose(alpha2, alpha, atol=1e-15, rtol=0)

    ds = DataStore(
        {
            'st': (['x', 'time'], st),
            'ast': (['x', 'time'], ast),
            'rst': (['x', 'time'], rst),
            'rast': (['x', 'time'], rast),
            'userAcquisitionTimeFW': (['time'], np.ones(nt)),
            'userAcquisitionTimeBW': (['time'], np.ones(nt)),
            'cold': (['time'], ts_cold),
            'warm': (['time'], ts_warm)},
        coords={
            'x': x,
            'time': time},
        attrs={'isDoubleEnded': '1'})

    sections = {
        'cold': [slice(0., 0.35 * cable_len)],
        'warm': [slice(0.67 * cable_len, cable_len)]}

    # OLS
    ds.calibration_double_ended(
        sections=sections,
        method='ols',
        solver='sparse',
        fix_gamma=(gamma, 0.))

    # gamma is fixed, so it is recovered to machine precision.
    assert_almost_equal_verbose(ds.gamma.values, gamma, decimal=18)
    assert_almost_equal_verbose(
        ds.alpha.values, alpha, decimal=10)  # 11 in 64-bit
    assert_almost_equal_verbose(ds.tmpf.values, temp_real - 273.15, decimal=8)
    assert_almost_equal_verbose(ds.tmpb.values, temp_real - 273.15, decimal=8)
    assert_almost_equal_verbose(ds.tmpw.values, temp_real - 273.15, decimal=8)

    # WLS
    ds.calibration_double_ended(
        sections=sections,
        st_var=1e-12,
        ast_var=1e-12,
        rst_var=1e-12,
        rast_var=1e-12,
        method='wls',
        solver='sparse',
        tmpw_mc_size=5,
        fix_gamma=(gamma, 0.))

    assert_almost_equal_verbose(ds.gamma.values, gamma, decimal=18)
    assert_almost_equal_verbose(ds.alpha.values, alpha, decimal=9)
    assert_almost_equal_verbose(ds.tmpf.values, temp_real - 273.15, decimal=6)
    assert_almost_equal_verbose(ds.tmpb.values, temp_real - 273.15, decimal=6)
    assert_almost_equal_verbose(ds.tmpw.values, temp_real - 273.15, decimal=6)
    pass
def test_double_ended_ols_wls_fix_alpha_estimate_synthetic():
    """Checks whether the coefficients are correctly defined by creating a
    synthetic measurement set, and derive the parameters from this set.
    Without variance.
    They should be the same as the parameters used to create the synthetic
    measurement set

    Here alpha is fixed to its true value, so it must be returned exactly."""
    from dtscalibration import DataStore

    cable_len = 100.
    nt = 500
    time = np.arange(nt)
    x = np.linspace(0., cable_len, 100)
    ts_cold = np.ones(nt) * 4.
    ts_warm = np.ones(nt) * 20.

    C_p = 15246
    C_m = 2400.
    dalpha_r = 0.0005284
    dalpha_m = 0.0004961
    dalpha_p = 0.0005607
    gamma = 482.6
    cold_mask = x < 0.5 * cable_len
    warm_mask = np.invert(cold_mask)  # == False
    temp_real = np.ones((len(x), nt))
    temp_real[cold_mask] *= ts_cold + 273.15
    temp_real[warm_mask] *= ts_warm + 273.15

    st = C_p * np.exp(-(dalpha_r + dalpha_p) * x[:, None]) * \
        np.exp(gamma / temp_real) / (np.exp(gamma / temp_real) - 1)
    ast = C_m * np.exp(-(dalpha_r + dalpha_m) * x[:, None]) / \
        (np.exp(gamma / temp_real) - 1)
    rst = C_p * np.exp(-(dalpha_r + dalpha_p) * (-x[:, None] + cable_len)) * \
        np.exp(gamma / temp_real) / (np.exp(gamma / temp_real) - 1)
    rast = C_m * np.exp(-(dalpha_r + dalpha_m) * (-x[:, None] + cable_len)) / \
        (np.exp(gamma / temp_real) - 1)

    # True integrated differential attenuation, referenced to x[0]; this is
    # the value passed to fix_alpha below.
    alpha = np.mean(np.log(rst / rast) - np.log(st / ast), axis=1) / 2
    alpha -= alpha[0]  # the first x-index is where to start counting

    ds = DataStore(
        {
            'st': (['x', 'time'], st),
            'ast': (['x', 'time'], ast),
            'rst': (['x', 'time'], rst),
            'rast': (['x', 'time'], rast),
            'userAcquisitionTimeFW': (['time'], np.ones(nt)),
            'userAcquisitionTimeBW': (['time'], np.ones(nt)),
            'cold': (['time'], ts_cold),
            'warm': (['time'], ts_warm)},
        coords={
            'x': x,
            'time': time},
        attrs={'isDoubleEnded': '1'})

    sections = {
        'cold': [slice(0., 0.4 * cable_len)],
        'warm': [slice(0.78 * cable_len, cable_len)]}

    # OLS
    ds.calibration_double_ended(
        sections=sections,
        method='ols',
        solver='sparse',
        fix_alpha=(alpha, np.zeros_like(alpha)))

    assert_almost_equal_verbose(ds.gamma.values, gamma, decimal=9)
    # alpha is fixed, so it is recovered to machine precision.
    assert_almost_equal_verbose(ds.alpha.values, alpha, decimal=18)
    assert_almost_equal_verbose(
        ds.tmpf.values, temp_real - 273.15, decimal=8)  # 9 on 64-bit
    assert_almost_equal_verbose(
        ds.tmpb.values, temp_real - 273.15, decimal=8)  # 9 on 64-bit
    assert_almost_equal_verbose(
        ds.tmpw.values, temp_real - 273.15, decimal=7)  # 11 on 64-bit

    # WLS
    ds.calibration_double_ended(
        sections=sections,
        st_var=1e-7,
        ast_var=1e-7,
        rst_var=1e-7,
        rast_var=1e-7,
        method='wls',
        solver='sparse',
        tmpw_mc_size=5,
        fix_alpha=(alpha, np.zeros_like(alpha)))

    assert_almost_equal_verbose(ds.gamma.values, gamma, decimal=8)
    assert_almost_equal_verbose(ds.alpha.values, alpha, decimal=18)
    assert_almost_equal_verbose(ds.tmpf.values, temp_real - 273.15, decimal=7)
    assert_almost_equal_verbose(ds.tmpb.values, temp_real - 273.15, decimal=7)
    assert_almost_equal_verbose(ds.tmpw.values, temp_real - 273.15, decimal=7)
    pass
def test_double_ended_ols_wls_fix_alpha_fix_gamma_estimate_synthetic():
    """Checks whether the coefficients are correctly defined by creating a
    synthetic measurement set, and derive the parameters from this set.
    Without variance.
    They should be the same as the parameters used to create the synthetic
    measurement set

    Here both gamma and alpha are fixed to their true values, so both must
    be returned exactly."""
    from dtscalibration import DataStore

    cable_len = 100.
    nt = 500
    time = np.arange(nt)
    x = np.linspace(0., cable_len, 100)
    ts_cold = np.ones(nt) * 4.
    ts_warm = np.ones(nt) * 20.

    C_p = 15246
    C_m = 2400.
    dalpha_r = 0.0005284
    dalpha_m = 0.0004961
    dalpha_p = 0.0005607
    gamma = 482.6
    cold_mask = x < 0.5 * cable_len
    warm_mask = np.invert(cold_mask)  # == False
    temp_real = np.ones((len(x), nt))
    temp_real[cold_mask] *= ts_cold + 273.15
    temp_real[warm_mask] *= ts_warm + 273.15

    st = C_p * np.exp(-(dalpha_r + dalpha_p) * x[:, None]) * \
        np.exp(gamma / temp_real) / (np.exp(gamma / temp_real) - 1)
    ast = C_m * np.exp(-(dalpha_r + dalpha_m) * x[:, None]) / \
        (np.exp(gamma / temp_real) - 1)
    rst = C_p * np.exp(-(dalpha_r + dalpha_p) * (-x[:, None] + cable_len)) * \
        np.exp(gamma / temp_real) / (np.exp(gamma / temp_real) - 1)
    rast = C_m * np.exp(-(dalpha_r + dalpha_m) * (-x[:, None] + cable_len)) / \
        (np.exp(gamma / temp_real) - 1)

    alpha = np.mean(np.log(rst / rast) - np.log(st / ast), axis=1) / 2
    alpha -= alpha[0]  # the first x-index is where to start counting

    ds = DataStore(
        {
            'st': (['x', 'time'], st),
            'ast': (['x', 'time'], ast),
            'rst': (['x', 'time'], rst),
            'rast': (['x', 'time'], rast),
            'userAcquisitionTimeFW': (['time'], np.ones(nt)),
            'userAcquisitionTimeBW': (['time'], np.ones(nt)),
            'cold': (['time'], ts_cold),
            'warm': (['time'], ts_warm)},
        coords={
            'x': x,
            'time': time},
        attrs={'isDoubleEnded': '1'})

    sections = {
        'cold': [slice(0., 0.5 * cable_len)],
        'warm': [slice(0.5 * cable_len, cable_len)]}

    # OLS
    ds.calibration_double_ended(
        sections=sections,
        method='ols',
        solver='sparse',
        fix_gamma=(gamma, 0.),
        fix_alpha=(alpha, np.zeros_like(alpha)))

    # Both parameters are fixed, so both are recovered to machine precision.
    assert_almost_equal_verbose(ds.gamma.values, gamma, decimal=18)
    assert_almost_equal_verbose(ds.alpha.values, alpha, decimal=18)
    assert_almost_equal_verbose(ds.tmpf.values, temp_real - 273.15, decimal=9)
    assert_almost_equal_verbose(ds.tmpb.values, temp_real - 273.15, decimal=9)
    assert_almost_equal_verbose(ds.tmpw.values, temp_real - 273.15, decimal=9)

    # WLS
    ds.calibration_double_ended(
        sections=sections,
        st_var=1e-7,
        ast_var=1e-7,
        rst_var=1e-7,
        rast_var=1e-7,
        method='wls',
        solver='sparse',
        tmpw_mc_size=5,
        fix_gamma=(gamma, 0.),
        fix_alpha=(alpha, np.zeros_like(alpha)))

    assert_almost_equal_verbose(ds.gamma.values, gamma, decimal=18)
    assert_almost_equal_verbose(ds.alpha.values, alpha, decimal=18)
    assert_almost_equal_verbose(ds.tmpf.values, temp_real - 273.15, decimal=11)
    assert_almost_equal_verbose(ds.tmpb.values, temp_real - 273.15, decimal=11)
    assert_almost_equal_verbose(ds.tmpw.values, temp_real - 273.15, decimal=11)
    pass
def test_double_ended_fix_alpha_matching_sections_and_one_asym_att():
    """First calibrate freely with a matching section and one asymmetrical
    attenuation point, then recalibrate with ``fix_alpha`` set to the alpha
    estimated in the first pass; the temperatures must still be recovered."""
    from dtscalibration import DataStore

    cable_len = 100.
    nt = 3
    time = np.arange(nt)
    nx_per_sec = 2
    nx = nx_per_sec * 8
    x = np.linspace(0., cable_len, nx)
    ts_cold = 4. + np.cos(time) * 4
    ts_warm = 20. + -np.sin(time) * 4
    ts_ground = 6.

    C_p = 1324  # 1/2 * E0 * v * K_+/lam_+^4
    eta_pf = np.cos(time) / 10 + 1  # eta_+ (gain factor forward channel)
    eta_pb = np.sin(time) / 10 + 1  # eta_- (gain factor backward channel)
    C_m = 5000.
    eta_mf = np.cos(time + np.pi / 8) / 10 + 1
    eta_mb = np.sin(time + np.pi / 8) / 10 + 1
    dalpha_r = 0.005284
    dalpha_m = 0.004961
    dalpha_p = 0.005607
    gamma = 482.6
    talph_fw = 0.9
    talph_bw = 0.8

    temp_real_kelvin = np.zeros((len(x), nt)) + 273.15
    temp_real_kelvin[:nx_per_sec] += ts_cold[None]
    temp_real_kelvin[nx_per_sec:2 * nx_per_sec] += ts_warm[None]
    temp_real_kelvin[-nx_per_sec:] += ts_cold[None]
    temp_real_kelvin[-2 * nx_per_sec:-nx_per_sec] += ts_warm[None]
    temp_real_kelvin[2 * nx_per_sec:-2 * nx_per_sec] += ts_ground
    temp_real_celsius = temp_real_kelvin - 273.15

    st = eta_pf[None] * C_p * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_p * x[:, None]) * np.exp(gamma / temp_real_kelvin) / \
        (np.exp(gamma / temp_real_kelvin) - 1)
    st[4 * nx_per_sec:] *= talph_fw  # loss after the connector (forward)
    ast = eta_mf[None] * C_m * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_m * x[:, None]) / (
            np.exp(gamma / temp_real_kelvin) - 1)
    rst = eta_pb[None] * C_p * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * \
        np.exp(-dalpha_p * (-x[:, None] + cable_len)) * \
        np.exp(gamma / temp_real_kelvin) / (
            np.exp(gamma / temp_real_kelvin) - 1)
    rst[:4 * nx_per_sec] *= talph_bw  # loss before the connector (backward)
    rast = eta_mb[None] * C_m * np.exp(
        -dalpha_r * (-x[:, None] + cable_len)) * np.exp(
            -dalpha_m * (-x[:, None] + cable_len)) / \
        (np.exp(gamma / temp_real_kelvin) - 1)

    ds = DataStore(
        {
            'TMPR': (['x', 'time'], temp_real_celsius),
            'st': (['x', 'time'], st),
            'ast': (['x', 'time'], ast),
            'rst': (['x', 'time'], rst),
            'rast': (['x', 'time'], rast),
            'userAcquisitionTimeFW': (['time'], np.ones(nt)),
            'userAcquisitionTimeBW': (['time'], np.ones(nt)),
            'cold': (['time'], ts_cold),
            'warm': (['time'], ts_warm)},
        coords={
            'x': x,
            'time': time},
        attrs={'isDoubleEnded': '1'})

    ds.sections = {
        'cold': [slice(0., x[nx_per_sec - 1])],
        'warm': [slice(x[nx_per_sec], x[2 * nx_per_sec - 1])]}

    # First pass: free calibration to obtain an alpha estimate.
    ds.calibration_double_ended(
        st_var=1.5,
        ast_var=1.5,
        rst_var=1.,
        rast_var=1.,
        method='wls',
        solver='sparse',
        tmpw_mc_size=3,
        remove_mc_set_flag=True,
        trans_att=[50.],
        matching_sections=[
            (
                slice(x[3 * nx_per_sec], x[4 * nx_per_sec - 1]),
                slice(x[4 * nx_per_sec], x[5 * nx_per_sec - 1]), True)])

    # remove TA vars so the second calibration starts from a clean slate
    k = [
        'talpha_fw', 'talpha_bw', 'talpha_fw_var', 'talpha_bw_var',
        'trans_att']
    for ki in k:
        del ds[ki]

    # Keep the first-pass alpha estimate to feed into fix_alpha.
    alpha_adj = ds.alpha.values.copy()
    alpha_var_adj = ds.alpha_var.values.copy()

    # Second pass: recalibrate with alpha fixed to the first-pass estimate.
    ds.calibration_double_ended(
        st_var=1.5,
        ast_var=1.5,
        rst_var=1.,
        rast_var=1.,
        method='wls',
        solver='sparse',
        tmpw_mc_size=3,
        remove_mc_set_flag=True,
        fix_alpha=(alpha_adj, alpha_var_adj),
        trans_att=[50.],
        matching_sections=[
            (
                slice(x[3 * nx_per_sec], x[4 * nx_per_sec - 1]),
                slice(x[4 * nx_per_sec], x[5 * nx_per_sec - 1]), True)])

    assert_almost_equal_verbose(temp_real_celsius, ds.tmpf.values, decimal=7)
    assert_almost_equal_verbose(temp_real_celsius, ds.tmpb.values, decimal=7)
    assert_almost_equal_verbose(temp_real_celsius, ds.tmpw.values, decimal=7)
    pass
def test_double_ended_fix_alpha_gamma_matching_sections_and_one_asym_att():
    """Double-ended calibration with both alpha and gamma fixed, matching
    sections, and a single asymmetric step loss halfway the cable.

    A noise-free synthetic double-ended measurement set is generated that
    contains one direction-dependent attenuation point (e.g., a bad splice)
    at x = 50 m. A first calibration estimates alpha; the transient-
    attenuation variables are then deleted and the calibration is repeated
    with ``fix_alpha`` and ``fix_gamma``. The retrieved temperatures must
    reproduce the synthetic truth for tmpf, tmpb, and tmpw.
    """
    from dtscalibration import DataStore
    cable_len = 100.
    nt = 3
    time = np.arange(nt)
    nx_per_sec = 2
    nx = nx_per_sec * 8
    x = np.linspace(0., cable_len, nx)
    ts_cold = 4. + np.cos(time) * 4
    ts_warm = 20. + -np.sin(time) * 4
    ts_ground = 6.
    C_p = 1324  # 1/2 * E0 * v * K_+/lam_+^4
    eta_pf = np.cos(time) / 10 + 1  # eta_+ (gain factor forward channel)
    eta_pb = np.sin(time) / 10 + 1  # eta_- (gain factor backward channel)
    C_m = 5000.
    eta_mf = np.cos(time + np.pi / 8) / 10 + 1
    eta_mb = np.sin(time + np.pi / 8) / 10 + 1
    dalpha_r = 0.005284
    dalpha_m = 0.004961
    dalpha_p = 0.005607
    gamma = 482.6
    talph_fw = 0.9  # forward transmission across the asymmetric connector
    talph_bw = 0.8  # backward transmission across the asymmetric connector
    # Reference sections at both cable ends; ambient ground in between.
    temp_real_kelvin = np.zeros((len(x), nt)) + 273.15
    temp_real_kelvin[:nx_per_sec] += ts_cold[None]
    temp_real_kelvin[nx_per_sec:2 * nx_per_sec] += ts_warm[None]
    temp_real_kelvin[-nx_per_sec:] += ts_cold[None]
    temp_real_kelvin[-2 * nx_per_sec:-nx_per_sec] += ts_warm[None]
    temp_real_kelvin[2 * nx_per_sec:-2 * nx_per_sec] += ts_ground
    temp_real_celsius = temp_real_kelvin - 273.15
    st = eta_pf[None] * C_p * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_p * x[:, None]) * np.exp(gamma / temp_real_kelvin) / \
        (np.exp(gamma / temp_real_kelvin) - 1)
    st[4 * nx_per_sec:] *= talph_fw  # step loss beyond the connector (fw)
    ast = eta_mf[None] * C_m * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_m * x[:, None]) / (
            np.exp(gamma / temp_real_kelvin) - 1)
    rst = eta_pb[None] * C_p * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * \
        np.exp(-dalpha_p * (-x[:, None] + cable_len)) * \
        np.exp(gamma / temp_real_kelvin) / (
            np.exp(gamma / temp_real_kelvin) - 1)
    rst[:4 * nx_per_sec] *= talph_bw  # step loss before the connector (bw)
    rast = eta_mb[None] * C_m * np.exp(
        -dalpha_r * (-x[:, None] + cable_len)) * np.exp(
            -dalpha_m * (-x[:, None] + cable_len)) / \
        (np.exp(gamma / temp_real_kelvin) - 1)
    ds = DataStore(
        {
            'TMPR': (['x', 'time'], temp_real_celsius),
            'st': (['x', 'time'], st),
            'ast': (['x', 'time'], ast),
            'rst': (['x', 'time'], rst),
            'rast': (['x', 'time'], rast),
            'userAcquisitionTimeFW': (['time'], np.ones(nt)),
            'userAcquisitionTimeBW': (['time'], np.ones(nt)),
            'cold': (['time'], ts_cold),
            'warm': (['time'], ts_warm)},
        coords={
            'x': x,
            'time': time},
        attrs={'isDoubleEnded': '1'})
    ds.sections = {
        'cold': [slice(0., x[nx_per_sec - 1])],
        'warm': [slice(x[nx_per_sec], x[2 * nx_per_sec - 1])]}
    # First calibration: solve for all parameters, including the transient
    # attenuation at x = 50 m, to obtain an alpha estimate.
    ds.calibration_double_ended(
        st_var=1.5,
        ast_var=1.5,
        rst_var=1.,
        rast_var=1.,
        method='wls',
        solver='sparse',
        tmpw_mc_size=3,
        remove_mc_set_flag=True,
        trans_att=[50.],
        matching_sections=[
            (
                slice(x[3 * nx_per_sec], x[4 * nx_per_sec - 1]),
                slice(x[4 * nx_per_sec], x[5 * nx_per_sec - 1]), True)])
    # remove TA vars so the second calibration starts from a clean slate
    k = [
        'talpha_fw', 'talpha_bw', 'talpha_fw_var', 'talpha_bw_var',
        'trans_att']
    for ki in k:
        del ds[ki]
    alpha_adj = ds.alpha.values.copy()
    alpha_var_adj = ds.alpha_var.values.copy()
    # Second calibration: alpha and gamma are kept fixed.
    ds.calibration_double_ended(
        st_var=1.5,
        ast_var=1.5,
        rst_var=1.,
        rast_var=1.,
        method='wls',
        solver='sparse',
        tmpw_mc_size=3,
        remove_mc_set_flag=True,
        fix_alpha=(alpha_adj, alpha_var_adj),
        fix_gamma=(gamma, 0.),
        trans_att=[50.],
        matching_sections=[
            (
                slice(x[3 * nx_per_sec], x[4 * nx_per_sec - 1]),
                slice(x[4 * nx_per_sec], x[5 * nx_per_sec - 1]), True)])
    assert_almost_equal_verbose(temp_real_celsius, ds.tmpf.values, decimal=7)
    assert_almost_equal_verbose(temp_real_celsius, ds.tmpb.values, decimal=7)
    assert_almost_equal_verbose(temp_real_celsius, ds.tmpw.values, decimal=7)
def test_double_ended_fix_gamma_matching_sections_and_one_asym_att():
    """Double-ended calibration with ``fix_gamma``, matching sections, and a
    single asymmetric step loss halfway the cable.

    A noise-free synthetic double-ended measurement set is generated that
    contains one direction-dependent attenuation point at x = 50 m. The
    calibration fixes gamma to its true value and must reproduce the
    synthetic temperature for tmpf, tmpb, and tmpw.
    """
    from dtscalibration import DataStore
    cable_len = 100.
    nt = 3
    time = np.arange(nt)
    nx_per_sec = 2
    nx = nx_per_sec * 8
    x = np.linspace(0., cable_len, nx)
    ts_cold = 4. + np.cos(time) * 4
    ts_warm = 20. + -np.sin(time) * 4
    ts_ground = 6.
    C_p = 1324  # 1/2 * E0 * v * K_+/lam_+^4
    eta_pf = np.cos(time) / 10 + 1  # eta_+ (gain factor forward channel)
    eta_pb = np.sin(time) / 10 + 1  # eta_- (gain factor backward channel)
    C_m = 5000.
    eta_mf = np.cos(time + np.pi / 8) / 10 + 1
    eta_mb = np.sin(time + np.pi / 8) / 10 + 1
    dalpha_r = 0.005284
    dalpha_m = 0.004961
    dalpha_p = 0.005607
    gamma = 482.6
    talph_fw = 0.9  # forward transmission across the asymmetric connector
    talph_bw = 0.8  # backward transmission across the asymmetric connector
    # Reference sections at both cable ends; ambient ground in between.
    temp_real_kelvin = np.zeros((len(x), nt)) + 273.15
    temp_real_kelvin[:nx_per_sec] += ts_cold[None]
    temp_real_kelvin[nx_per_sec:2 * nx_per_sec] += ts_warm[None]
    temp_real_kelvin[-nx_per_sec:] += ts_cold[None]
    temp_real_kelvin[-2 * nx_per_sec:-nx_per_sec] += ts_warm[None]
    temp_real_kelvin[2 * nx_per_sec:-2 * nx_per_sec] += ts_ground
    temp_real_celsius = temp_real_kelvin - 273.15
    st = eta_pf[None] * C_p * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_p * x[:, None]) * np.exp(gamma / temp_real_kelvin) / \
        (np.exp(gamma / temp_real_kelvin) - 1)
    st[4 * nx_per_sec:] *= talph_fw  # step loss beyond the connector (fw)
    ast = eta_mf[None] * C_m * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_m * x[:, None]) / (
            np.exp(gamma / temp_real_kelvin) - 1)
    rst = eta_pb[None] * C_p * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * \
        np.exp(-dalpha_p * (-x[:, None] + cable_len)) * \
        np.exp(gamma / temp_real_kelvin) / (
            np.exp(gamma / temp_real_kelvin) - 1)
    rst[:4 * nx_per_sec] *= talph_bw  # step loss before the connector (bw)
    rast = eta_mb[None] * C_m * np.exp(
        -dalpha_r * (-x[:, None] + cable_len)) * np.exp(
            -dalpha_m * (-x[:, None] + cable_len)) / \
        (np.exp(gamma / temp_real_kelvin) - 1)
    ds = DataStore(
        {
            'TMPR': (['x', 'time'], temp_real_celsius),
            'st': (['x', 'time'], st),
            'ast': (['x', 'time'], ast),
            'rst': (['x', 'time'], rst),
            'rast': (['x', 'time'], rast),
            'userAcquisitionTimeFW': (['time'], np.ones(nt)),
            'userAcquisitionTimeBW': (['time'], np.ones(nt)),
            'cold': (['time'], ts_cold),
            'warm': (['time'], ts_warm)},
        coords={
            'x': x,
            'time': time},
        attrs={'isDoubleEnded': '1'})
    ds.sections = {
        'cold': [slice(0., x[nx_per_sec - 1])],
        'warm': [slice(x[nx_per_sec], x[2 * nx_per_sec - 1])]}
    ds.calibration_double_ended(
        st_var=1.5,
        ast_var=1.5,
        rst_var=1.,
        rast_var=1.,
        method='wls',
        solver='sparse',
        tmpw_mc_size=3,
        fix_gamma=(gamma, 0.),
        remove_mc_set_flag=True,
        trans_att=[50.],
        matching_sections=[
            (
                slice(x[3 * nx_per_sec], x[4 * nx_per_sec - 1]),
                slice(x[4 * nx_per_sec], x[5 * nx_per_sec - 1]), True)])
    assert_almost_equal_verbose(temp_real_celsius, ds.tmpf.values, decimal=7)
    assert_almost_equal_verbose(temp_real_celsius, ds.tmpb.values, decimal=7)
    assert_almost_equal_verbose(temp_real_celsius, ds.tmpw.values, decimal=7)
@pytest.mark.skip(
    reason="Superseded by "
    "test_estimate_variance_of_temperature_estimate")
def test_double_ended_exponential_variance_estimate_synthetic():
    """Validate the Monte Carlo variance estimate of a double-ended
    calibration against the actual temperature error of a noisy synthetic
    measurement set.

    Skipped: superseded by ``test_estimate_variance_of_temperature_estimate``.
    """
    import dask.array as da
    from dtscalibration import DataStore
    state = da.random.RandomState(0)
    stokes_m_var = 4.
    cable_len = 100.
    nt = 5
    time = np.arange(nt)
    x = np.linspace(0., cable_len, 100)
    ts_cold = np.ones(nt) * 4.
    ts_warm = np.ones(nt) * 20.
    C_p = 15246
    C_m = 2400.
    dalpha_r = 0.0005284
    dalpha_m = 0.0004961
    dalpha_p = 0.0005607
    gamma = 482.6
    cold_mask = x < 0.5 * cable_len
    warm_mask = np.invert(cold_mask)  # == False
    temp_real = np.ones((len(x), nt))
    temp_real[cold_mask] *= ts_cold + 273.15
    temp_real[warm_mask] *= ts_warm + 273.15
    st = C_p * np.exp(-(dalpha_r + dalpha_p) * x[:, None]) * \
        np.exp(gamma / temp_real) / (np.exp(gamma / temp_real) - 1)
    ast = C_m * np.exp(-(dalpha_r + dalpha_m) * x[:, None]) / \
        (np.exp(gamma / temp_real) - 1)
    rst = C_p * np.exp(-(dalpha_r + dalpha_p) * (-x[:, None] + cable_len)) * \
        np.exp(gamma / temp_real) / (np.exp(gamma / temp_real) - 1)
    rast = C_m * np.exp(-(dalpha_r + dalpha_m) * (-x[:, None] + cable_len)) / \
        (np.exp(gamma / temp_real) - 1)
    # Each Stokes channel gets a different (known) noise variance.
    mst_var = 1. * stokes_m_var
    mast_var = 1.5 * stokes_m_var
    mrst_var = 0.8 * stokes_m_var
    mrast_var = 0.5 * stokes_m_var
    st_m = st + stats.norm.rvs(size=st.shape, scale=mst_var**0.5)
    ast_m = ast + stats.norm.rvs(size=ast.shape, scale=mast_var**0.5)
    rst_m = rst + stats.norm.rvs(size=rst.shape, scale=mrst_var**0.5)
    rast_m = rast + stats.norm.rvs(size=rast.shape, scale=mrast_var**0.5)
    print('alphaint', cable_len * (dalpha_p - dalpha_m))
    print('alpha', dalpha_p - dalpha_m)
    print('C', np.log(C_p / C_m))
    print('x0', x.max())
    ds = DataStore(
        {
            # 'st': (['x', 'time'], st),
            # 'ast': (['x', 'time'], ast),
            # 'rst': (['x', 'time'], rst),
            # 'rast': (['x', 'time'], rast),
            'st': (['x', 'time'], st_m),
            'ast': (['x', 'time'], ast_m),
            'rst': (['x', 'time'], rst_m),
            'rast': (['x', 'time'], rast_m),
            'userAcquisitionTimeFW': (['time'], np.ones(nt)),
            'userAcquisitionTimeBW': (['time'], np.ones(nt)),
            'cold': (['time'], ts_cold),
            'warm': (['time'], ts_warm)},
        coords={
            'x': x,
            'time': time},
        attrs={'isDoubleEnded': '1'})
    sections = {
        'cold': [slice(0., 0.5 * cable_len)],
        'warm': [slice(0.5 * cable_len, cable_len)]}
    st_label = 'st'
    ast_label = 'ast'
    rst_label = 'rst'
    rast_label = 'rast'
    # MC variance
    ds.calibration_double_ended(
        sections=sections,
        st_label=st_label,
        ast_label=ast_label,
        rst_label=rst_label,
        rast_label=rast_label,
        st_var=mst_var,
        ast_var=mast_var,
        rst_var=mrst_var,
        rast_var=mrast_var,
        method='wls',
        solver='sparse')
    ds.conf_int_double_ended(
        p_val='p_val',
        p_cov='p_cov',
        st_label=st_label,
        ast_label=ast_label,
        rst_label=rst_label,
        rast_label=rast_label,
        st_var=mst_var,
        ast_var=mast_var,
        rst_var=mrst_var,
        rast_var=mrast_var,
        store_tmpf='tmpf',
        store_tmpb='tmpb',
        store_tmpw='tmpw',
        store_tempvar='_var',
        conf_ints=[2.5, 50., 97.5],
        mc_sample_size=100,
        da_random_state=state)
    # Calibrated variance: spread of the temperature error per stretch.
    stdsf1 = ds.ufunc_per_section(
        label='tmpf', func=np.std, temp_err=True, calc_per='stretch')
    stdsb1 = ds.ufunc_per_section(
        label='tmpb', func=np.std, temp_err=True, calc_per='stretch')
    # Use a single timestep to better check if the parameter uncertainties propagate
    ds1 = ds.isel(time=1)
    # Estimated VAR
    stdsf2 = ds1.ufunc_per_section(
        label='tmpf_mc_var', func=np.mean, temp_err=False, calc_per='stretch')
    stdsb2 = ds1.ufunc_per_section(
        label='tmpb_mc_var', func=np.mean, temp_err=False, calc_per='stretch')
    for (_, v1), (_, v2) in zip(stdsf1.items(), stdsf2.items()):
        for v1i, v2i in zip(v1, v2):
            print('Real VAR: ', v1i**2, 'Estimated VAR: ', v2i)
            assert_almost_equal_verbose(v1i**2, v2i, decimal=1)
    for (_, v1), (_, v2) in zip(stdsb1.items(), stdsb2.items()):
        for v1i, v2i in zip(v1, v2):
            print('Real VAR: ', v1i**2, 'Estimated VAR: ', v2i)
            assert_almost_equal_verbose(v1i**2, v2i, decimal=1)
def test_estimate_variance_of_temperature_estimate():
    """Validate the Monte Carlo uncertainty estimates of a double-ended
    calibration against the actual temperature error.

    Known noise is added to a synthetic double-ended measurement set. After
    calibration and ``conf_int_double_ended`` the test checks that (1) the
    Monte Carlo samples of the Stokes signals reproduce the noise variances,
    (2) the parameter MC variances match their analytic counterparts, and
    (3) the predicted tmpf/tmpb variance matches the mean squared error on
    sections that were NOT used for calibration.
    """
    import dask.array as da
    from dtscalibration import DataStore
    state = da.random.RandomState(0)
    stokes_m_var = 0.1
    cable_len = 10.
    nt = 150
    time = np.arange(nt)
    nmc = 201  # Monte Carlo sample size
    x = np.linspace(0., cable_len, 64)
    ts_cold = np.ones(nt) * 4.
    ts_warm = np.ones(nt) * 20.
    C_p = 1524.
    C_m = 2400.
    dalpha_r = 0.0005284
    dalpha_m = 0.0004961
    dalpha_p = 0.0005607
    gamma = 482.6
    cold_mask = x < 0.5 * cable_len
    warm_mask = np.invert(cold_mask)  # == False
    temp_real = np.ones((len(x), nt))
    temp_real[cold_mask] *= ts_cold + 273.15
    temp_real[warm_mask] *= ts_warm + 273.15
    # alpha_int = cable_len * (dalpha_p - dalpha_m)
    # alpha = x * (dalpha_p - dalpha_m)
    st = C_p * np.exp(-(dalpha_r + dalpha_p) * x[:, None]) * \
        np.exp(gamma / temp_real) / (np.exp(gamma / temp_real) - 1)
    ast = C_m * np.exp(-(dalpha_r + dalpha_m) * x[:, None]) / \
        (np.exp(gamma / temp_real) - 1)
    rst = C_p * np.exp(-(dalpha_r + dalpha_p) * (-x[:, None] + cable_len)) * \
        np.exp(gamma / temp_real) / (np.exp(gamma / temp_real) - 1)
    rast = C_m * np.exp(-(dalpha_r + dalpha_m) * (-x[:, None] + cable_len)) / \
        (np.exp(gamma / temp_real) - 1)
    # Each Stokes channel gets a different (known) noise variance.
    mst_var = 1. * stokes_m_var
    mast_var = 1.5 * stokes_m_var
    mrst_var = 0.8 * stokes_m_var
    mrast_var = 0.5 * stokes_m_var
    st_m = st + stats.norm.rvs(size=st.shape, scale=mst_var**0.5)
    ast_m = ast + stats.norm.rvs(size=ast.shape, scale=mast_var**0.5)
    rst_m = rst + stats.norm.rvs(size=rst.shape, scale=mrst_var**0.5)
    rast_m = rast + stats.norm.rvs(size=rast.shape, scale=mrast_var**0.5)
    print('alphaint', cable_len * (dalpha_p - dalpha_m))
    print('alpha', dalpha_p - dalpha_m)
    print('C', np.log(C_p / C_m))
    print('x0', x.max())
    ds = DataStore(
        {
            # 'st': (['x', 'time'], st),
            # 'ast': (['x', 'time'], ast),
            # 'rst': (['x', 'time'], rst),
            # 'rast': (['x', 'time'], rast),
            'st': (['x', 'time'], st_m),
            'ast': (['x', 'time'], ast_m),
            'rst': (['x', 'time'], rst_m),
            'rast': (['x', 'time'], rast_m),
            'userAcquisitionTimeFW': (['time'], np.ones(nt)),
            'userAcquisitionTimeBW': (['time'], np.ones(nt)),
            'cold': (['time'], ts_cold),
            'warm': (['time'], ts_warm)},
        coords={
            'x': x,
            'time': time},
        attrs={'isDoubleEnded': '1'})
    # Only part of the cable is used for calibration, leaving
    # x in [0.25, 0.5] and [0.75, 1.0] * cable_len for validation.
    sections = {
        'cold': [slice(0., 0.25 * cable_len)],
        'warm': [slice(0.5 * cable_len, 0.75 * cable_len)]}
    # MC variance
    ds.calibration_double_ended(
        sections=sections,
        st_var=mst_var,
        ast_var=mast_var,
        rst_var=mrst_var,
        rast_var=mrast_var,
        # fix_gamma=(gamma, 0.),
        # fix_alpha=(alpha, 0. * alpha),
        method='wls',
        solver='stats',
        tmpw_mc_size=nmc)
    ds.conf_int_double_ended(
        p_val='p_val',
        p_cov='p_cov',
        st_var=mst_var,
        ast_var=mast_var,
        rst_var=mrst_var,
        rast_var=mrast_var,
        store_tmpf='tmpf',
        store_tmpb='tmpb',
        store_tmpw='tmpw',
        store_tempvar='_var',
        conf_ints=[20., 80.],
        mc_sample_size=nmc,
        da_random_state=state,
        remove_mc_set_flag=False,
        reduce_memory_usage=1)
    # The MC-drawn Stokes signals should have the prescribed variances.
    assert_almost_equal_verbose(
        (ds.r_st - ds.st).var(dim=['mc', 'time']), mst_var, decimal=2)
    assert_almost_equal_verbose(
        (ds.r_ast - ds.ast).var(dim=['mc', 'time']), mast_var, decimal=2)
    assert_almost_equal_verbose(
        (ds.r_rst - ds.rst).var(dim=['mc', 'time']), mrst_var, decimal=2)
    assert_almost_equal_verbose(
        (ds.r_rast - ds.rast).var(dim=['mc', 'time']), mrast_var, decimal=3)
    # Parameter MC variances should agree with the analytic estimates.
    assert_almost_equal_verbose(ds.gamma_mc.var(dim='mc'), 0., decimal=2)
    assert_almost_equal_verbose(ds.alpha_mc.var(dim='mc'), 0., decimal=8)
    assert_almost_equal_verbose(ds.df_mc.var(dim='mc'), ds.df_var, decimal=7)
    assert_almost_equal_verbose(ds.db_mc.var(dim='mc'), ds.db_var, decimal=8)
    # tmpf
    temp_real2 = temp_real[:, 0] - 273.15
    actual = (
        np.square(ds.tmpf - temp_real2[:, None]).sum(dim='time')
        / ds.time.size)
    desire = ds.tmpf_mc_var.values
    # Validate on sections that were not used for calibration.
    assert_almost_equal_verbose(
        actual[16:32].mean(), desire[16:32].mean(), decimal=3)
    assert_almost_equal_verbose(
        actual[48:].mean(), desire[48:].mean(), decimal=3)
    # tmpb
    actual = (
        np.square(ds.tmpb - temp_real2[:, None]).sum(dim='time')
        / ds.time.size)
    desire = ds.tmpb_mc_var.values
    # Validate on sections that were not used for calibration.
    assert_almost_equal_verbose(
        actual[16:32].mean(), desire[16:32].mean(), decimal=4)
    assert_almost_equal_verbose(
        actual[48:].mean(), desire[48:].mean(), decimal=4)
def test_single_ended_ols_wls_estimate_synthetic():
    """Checks whether the coefficients are correctly defined by creating a
    synthetic measurement set, and derive the parameters from this set.
    Without variance.
    They should be the same as the parameters used to create the synthetic
    measurement set."""
    from dtscalibration import DataStore
    cable_len = 100.
    nt = 50
    time = np.arange(nt)
    x = np.linspace(0., cable_len, 500)
    ts_cold = np.ones(nt) * 4.
    ts_warm = np.ones(nt) * 20.
    C_p = 15246
    C_m = 2400.
    dalpha_r = 0.0005284
    dalpha_m = 0.0004961
    dalpha_p = 0.0005607
    gamma = 482.6
    # Cold bath on the first half of the cable, warm bath on the second.
    cold_mask = x < 0.5 * cable_len
    warm_mask = np.invert(cold_mask)  # == False
    temp_real = np.ones((len(x), nt))
    temp_real[cold_mask] *= ts_cold + 273.15
    temp_real[warm_mask] *= ts_warm + 273.15
    st = C_p * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_p * x[:, None]) * \
        np.exp(gamma / temp_real) / (np.exp(gamma / temp_real) - 1)
    ast = C_m * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_m * x[:, None]) / (np.exp(gamma / temp_real) - 1)
    print('alphaint', cable_len * (dalpha_p - dalpha_m))
    print('alpha', dalpha_p - dalpha_m)
    print('C', np.log(C_p / C_m))
    print('x0', x.max())
    ds = DataStore(
        {
            'st': (['x', 'time'], st),
            'ast': (['x', 'time'], ast),
            'userAcquisitionTimeFW': (['time'], np.ones(nt)),
            'cold': (['time'], ts_cold),
            'warm': (['time'], ts_warm)},
        coords={
            'x': x,
            'time': time},
        attrs={'isDoubleEnded': '0'})
    sections = {
        'cold': [slice(0., 0.5 * cable_len)],
        'warm': [slice(0.5 * cable_len, cable_len)]}
    # OLS
    ds.calibration_single_ended(
        sections=sections, method='ols', solver='sparse')
    assert_almost_equal_verbose(ds.gamma.values, gamma, decimal=6)
    assert_almost_equal_verbose(
        ds.dalpha.values, dalpha_p - dalpha_m, decimal=8)
    assert_almost_equal_verbose(ds.tmpf.values, temp_real - 273.15, decimal=4)
    # WLS
    ds.calibration_single_ended(
        sections=sections,
        st_var=1.,
        ast_var=1.,
        method='wls',
        solver='sparse')
    assert_almost_equal_verbose(ds.gamma.values, gamma, decimal=6)
    assert_almost_equal_verbose(
        ds.dalpha.values, dalpha_p - dalpha_m, decimal=8)
    assert_almost_equal_verbose(ds.tmpf.values, temp_real - 273.15, decimal=4)
def test_single_ended_ols_wls_fix_dalpha_synthetic():
    """Checks whether the coefficients are correctly defined by creating a
    synthetic measurement set, and derive the parameters from this set.
    Without variance. ``dalpha`` is fixed to its true value.
    They should be the same as the parameters used to create the synthetic
    measurement set."""
    from dtscalibration import DataStore
    cable_len = 100.
    nt = 50
    time = np.arange(nt)
    x = np.linspace(0., cable_len, 500)
    ts_cold = np.ones(nt) * 4.
    ts_warm = np.ones(nt) * 20.
    C_p = 15246
    C_m = 2400.
    dalpha_r = 0.0005284
    dalpha_m = 0.0004961
    dalpha_p = 0.0005607
    gamma = 482.6
    # Cold bath on the first half of the cable, warm bath on the second.
    cold_mask = x < 0.5 * cable_len
    warm_mask = np.invert(cold_mask)  # == False
    temp_real = np.ones((len(x), nt))
    temp_real[cold_mask] *= ts_cold + 273.15
    temp_real[warm_mask] *= ts_warm + 273.15
    st = C_p * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_p * x[:, None]) * \
        np.exp(gamma / temp_real) / (np.exp(gamma / temp_real) - 1)
    ast = C_m * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_m * x[:, None]) / (np.exp(gamma / temp_real) - 1)
    print('alphaint', cable_len * (dalpha_p - dalpha_m))
    print('alpha', dalpha_p - dalpha_m)
    print('C', np.log(C_p / C_m))
    print('x0', x.max())
    ds = DataStore(
        {
            'st': (['x', 'time'], st),
            'ast': (['x', 'time'], ast),
            'userAcquisitionTimeFW': (['time'], np.ones(nt)),
            'cold': (['time'], ts_cold),
            'warm': (['time'], ts_warm)},
        coords={
            'x': x,
            'time': time},
        attrs={'isDoubleEnded': '0'})
    sections = {
        'cold': [slice(0., 0.5 * cable_len)],
        'warm': [slice(0.5 * cable_len, cable_len)]}
    # OLS
    ds.calibration_single_ended(
        sections=sections,
        method='ols',
        solver='sparse',
        fix_dalpha=(dalpha_p - dalpha_m, 0.))
    assert_almost_equal_verbose(ds.gamma.values, gamma, decimal=11)
    assert_almost_equal_verbose(
        ds.dalpha.values, dalpha_p - dalpha_m, decimal=18)
    assert_almost_equal_verbose(ds.tmpf.values, temp_real - 273.15, decimal=12)
    # WLS
    ds.calibration_single_ended(
        sections=sections,
        st_var=1.,
        ast_var=1.,
        method='wls',
        solver='sparse',
        fix_dalpha=(dalpha_p - dalpha_m, 0.))
    assert_almost_equal_verbose(ds.gamma.values, gamma, decimal=12)
    assert_almost_equal_verbose(
        ds.dalpha.values, dalpha_p - dalpha_m, decimal=14)
    assert_almost_equal_verbose(ds.tmpf.values, temp_real - 273.15, decimal=10)
def test_single_ended_ols_wls_fix_gamma_synthetic():
    """Checks whether the coefficients are correctly defined by creating a
    synthetic measurement set, and derive the parameters from this set.
    Without variance. ``gamma`` is fixed to its true value.
    They should be the same as the parameters used to create the synthetic
    measurement set."""
    from dtscalibration import DataStore
    cable_len = 100.
    nt = 50
    time = np.arange(nt)
    x = np.linspace(0., cable_len, 500)
    ts_cold = np.ones(nt) * 4.
    ts_warm = np.ones(nt) * 20.
    C_p = 15246
    C_m = 2400.
    dalpha_r = 0.0005284
    dalpha_m = 0.0004961
    dalpha_p = 0.0005607
    gamma = 482.6
    # Cold bath on the first half of the cable, warm bath on the second.
    cold_mask = x < 0.5 * cable_len
    warm_mask = np.invert(cold_mask)  # == False
    temp_real = np.ones((len(x), nt))
    temp_real[cold_mask] *= ts_cold + 273.15
    temp_real[warm_mask] *= ts_warm + 273.15
    st = C_p * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_p * x[:, None]) * \
        np.exp(gamma / temp_real) / (np.exp(gamma / temp_real) - 1)
    ast = C_m * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_m * x[:, None]) / (np.exp(gamma / temp_real) - 1)
    print('alphaint', cable_len * (dalpha_p - dalpha_m))
    print('alpha', dalpha_p - dalpha_m)
    print('C', np.log(C_p / C_m))
    print('x0', x.max())
    ds = DataStore(
        {
            'st': (['x', 'time'], st),
            'ast': (['x', 'time'], ast),
            'userAcquisitionTimeFW': (['time'], np.ones(nt)),
            'cold': (['time'], ts_cold),
            'warm': (['time'], ts_warm)},
        coords={
            'x': x,
            'time': time},
        attrs={'isDoubleEnded': '0'})
    sections = {
        'cold': [slice(0., 0.5 * cable_len)],
        'warm': [slice(0.5 * cable_len, cable_len)]}
    # OLS
    ds.calibration_single_ended(
        sections=sections,
        method='ols',
        solver='sparse',
        fix_gamma=(gamma, 0.))
    assert_almost_equal_verbose(ds.gamma.values, gamma, decimal=18)
    assert_almost_equal_verbose(
        ds.dalpha.values, dalpha_p - dalpha_m, decimal=10)
    assert_almost_equal_verbose(ds.tmpf.values, temp_real - 273.15, decimal=8)
    # WLS
    ds.calibration_single_ended(
        sections=sections,
        st_var=1.,
        ast_var=1.,
        method='wls',
        solver='sparse',
        fix_gamma=(gamma, 0.))
    assert_almost_equal_verbose(ds.gamma.values, gamma, decimal=18)
    assert_almost_equal_verbose(
        ds.dalpha.values, dalpha_p - dalpha_m, decimal=10)
    assert_almost_equal_verbose(ds.tmpf.values, temp_real - 273.15, decimal=8)
def test_single_ended_ols_wls_fix_gamma_fix_dalpha_synthetic():
    """Checks whether the coefficients are correctly defined by creating a
    synthetic measurement set, and derive the parameters from this set.
    Without variance. Both ``gamma`` and ``dalpha`` are fixed to their true
    values.
    They should be the same as the parameters used to create the synthetic
    measurement set."""
    from dtscalibration import DataStore
    cable_len = 100.
    nt = 50
    time = np.arange(nt)
    x = np.linspace(0., cable_len, 500)
    ts_cold = np.ones(nt) * 4.
    ts_warm = np.ones(nt) * 20.
    C_p = 15246
    C_m = 2400.
    dalpha_r = 0.0005284
    dalpha_m = 0.0004961
    dalpha_p = 0.0005607
    gamma = 482.6
    # Cold bath on the first half of the cable, warm bath on the second.
    cold_mask = x < 0.5 * cable_len
    warm_mask = np.invert(cold_mask)  # == False
    temp_real = np.ones((len(x), nt))
    temp_real[cold_mask] *= ts_cold + 273.15
    temp_real[warm_mask] *= ts_warm + 273.15
    st = C_p * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_p * x[:, None]) * \
        np.exp(gamma / temp_real) / (np.exp(gamma / temp_real) - 1)
    ast = C_m * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_m * x[:, None]) / (np.exp(gamma / temp_real) - 1)
    print('alphaint', cable_len * (dalpha_p - dalpha_m))
    print('alpha', dalpha_p - dalpha_m)
    print('C', np.log(C_p / C_m))
    print('x0', x.max())
    ds = DataStore(
        {
            'st': (['x', 'time'], st),
            'ast': (['x', 'time'], ast),
            'userAcquisitionTimeFW': (['time'], np.ones(nt)),
            'cold': (['time'], ts_cold),
            'warm': (['time'], ts_warm)},
        coords={
            'x': x,
            'time': time},
        attrs={'isDoubleEnded': '0'})
    sections = {
        'cold': [slice(0., 0.5 * cable_len)],
        'warm': [slice(0.5 * cable_len, cable_len)]}
    # OLS
    ds.calibration_single_ended(
        sections=sections,
        method='ols',
        solver='sparse',
        fix_gamma=(gamma, 0.),
        fix_dalpha=(dalpha_p - dalpha_m, 0.))
    assert_almost_equal_verbose(ds.gamma.values, gamma, decimal=18)
    assert_almost_equal_verbose(
        ds.dalpha.values, dalpha_p - dalpha_m, decimal=18)
    assert_almost_equal_verbose(
        ds.tmpf.values, temp_real - 273.15, decimal=8)  # 11 on 64-bit
    # WLS
    ds.calibration_single_ended(
        sections=sections,
        st_var=1.,
        ast_var=1.,
        method='wls',
        solver='sparse',
        fix_gamma=(gamma, 0.),
        fix_dalpha=(dalpha_p - dalpha_m, 0.))
    assert_almost_equal_verbose(ds.gamma.values, gamma, decimal=18)
    assert_almost_equal_verbose(
        ds.dalpha.values, dalpha_p - dalpha_m, decimal=18)
    assert_almost_equal_verbose(ds.tmpf.values, temp_real - 273.15, decimal=8)
def test_single_ended_trans_att_synthetic():
    """Checks whether the transient attenuation routines perform as intended,
    and calibrate to the correct temperature"""
    from dtscalibration import DataStore
    cable_len = 100.
    nt = 50
    nx = 200
    time = np.arange(nt)
    x = np.linspace(0., cable_len, nx)
    ts_cold = np.ones(nt) * 4.
    ts_warm = np.ones(nt) * 20.
    ts_ambient = np.ones(nt) * 12
    ts_valid = np.ones(nt) * 16
    C_p = 15246
    C_m = 2400.
    dalpha_r = 0.0005284
    dalpha_m = 0.0004961
    dalpha_p = 0.0005607
    gamma = 482.6
    # Two cold and two warm stretches; the valid stretch (16 degC) is only
    # used to check calibrated temperatures, never for calibration.
    cold_mask1 = np.logical_and(x > 0.125 * cable_len, x < 0.25 * cable_len)
    cold_mask2 = np.logical_and(x > 0.625 * cable_len, x < 0.75 * cable_len)
    warm_mask1 = np.logical_and(x > 0.75 * cable_len, x < 0.875 * cable_len)
    warm_mask2 = np.logical_and(x > 0.25 * cable_len, x < 0.375 * cable_len)
    valid_mask = np.logical_and(x > 0.40 * cable_len, x < 0.50 * cable_len)
    temp_real = np.ones((len(x), nt)) * 12 + 273.15
    temp_real[cold_mask1 + cold_mask2] = ts_cold + 273.15
    temp_real[warm_mask1 + warm_mask2] = ts_warm + 273.15
    temp_real[valid_mask] = ts_valid + 273.15
    st = C_p * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_p * x[:, None]) * \
        np.exp(gamma / temp_real) / (np.exp(gamma / temp_real) - 1)
    ast = C_m * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_m * x[:, None]) / (np.exp(gamma / temp_real) - 1)
    # Add attenuation
    # Two time-varying step losses in the Stokes channel only, at 40% and
    # 60% of the cable (x = 40 m and x = 60 m).
    tr_att = np.random.rand(nt) * .2 + 0.8
    st[int(x.size * 0.4):] *= tr_att
    tr_att2 = np.random.rand(nt) * .2 + 0.8
    st[int(x.size * 0.6):] *= tr_att2
    ds = DataStore(
        {
            'st': (['x', 'time'], st),
            'ast': (['x', 'time'], ast),
            'userAcquisitionTimeFW': (['time'], np.ones(nt)),
            'cold': (['time'], ts_cold),
            'warm': (['time'], ts_warm),
            'ambient': (['time'], ts_ambient)},
        coords={
            'x': x,
            'time': time},
        attrs={'isDoubleEnded': '0'})
    # Reference sections chosen such that each segment between the two
    # attenuation points contains at least one reference section.
    sections = {
        'ambient': [slice(.52 * cable_len, .58 * cable_len)],
        'cold':
            [
                slice(0.125 * cable_len, 0.25 * cable_len),
                slice(0.65 * cable_len, 0.70 * cable_len)],
        'warm': [slice(0.25 * cable_len, 0.375 * cable_len)]}
    ds_test = ds.copy(deep=True)
    # OLS
    ds_test.calibration_single_ended(
        sections=sections,
        method='ols',
        trans_att=[40, 60],
        solver='sparse')
    assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=8)
    assert_almost_equal_verbose(
        ds_test.tmpf.values, temp_real - 273.15, decimal=8)
    # The estimated attenuation should equal -ln of the applied transmission.
    assert_almost_equal_verbose(
        ds_test.isel(trans_att=0).talpha, -np.log(tr_att), decimal=8)
    assert_almost_equal_verbose(
        ds_test.isel(trans_att=1).talpha, -np.log(tr_att2), decimal=8)
    ds_test = ds.copy(deep=True)
    # WLS
    ds_test.calibration_single_ended(
        sections=sections,
        st_var=1.0,
        ast_var=1.0,
        method='wls',
        trans_att=[40, 60],
        solver='sparse')
    assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=8)
    assert_almost_equal_verbose(
        ds_test.tmpf.values, temp_real - 273.15, decimal=8)
    assert_almost_equal_verbose(
        ds_test.isel(trans_att=0).talpha, -np.log(tr_att), decimal=8)
    assert_almost_equal_verbose(
        ds_test.isel(trans_att=1).talpha, -np.log(tr_att2), decimal=8)
    # test `trans_att` related functions
    # Clear out old results
    ds_test.set_trans_att([])
    assert ds_test.trans_att.size == 0, 'clear out trans_att config'
    # All variables along the trans_att dimension must be gone as well.
    del_keys = []
    for k, v in ds_test.data_vars.items():
        if 'trans_att' in v.dims:
            del_keys.append(k)
    assert len(del_keys) == 0, 'clear out trans_att config'
    # Deprecated kwarg alias `transient_att_x` must still work.
    ds_test.calibration_single_ended(
        sections=sections,
        st_var=1.0,
        ast_var=1.0,
        method='wls',
        transient_att_x=[40, 60],
        solver='sparse')
    assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=8)
    assert_almost_equal_verbose(
        ds_test.tmpf.values, temp_real - 273.15, decimal=8)
    assert_almost_equal_verbose(
        ds_test.isel(trans_att=0).talpha, -np.log(tr_att), decimal=8)
    assert_almost_equal_verbose(
        ds_test.isel(trans_att=1).talpha, -np.log(tr_att2), decimal=8)
    ds_test = ds.copy(deep=True)
    # Test fixing gamma + transient att.
    ds_test.calibration_single_ended(
        sections=sections,
        st_var=1.0,
        ast_var=1.0,
        method='wls',
        fix_gamma=(482.6, 0),
        trans_att=[40, 60],
        solver='sparse')
    assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=10)
    assert_almost_equal_verbose(
        ds_test.tmpf.values, temp_real - 273.15, decimal=8)
    assert_almost_equal_verbose(
        ds_test.isel(trans_att=0).talpha, -np.log(tr_att), decimal=8)
    assert_almost_equal_verbose(
        ds_test.isel(trans_att=1).talpha, -np.log(tr_att2), decimal=8)
    ds_test = ds.copy(deep=True)
    # Test fixing alpha + transient att.
    ds_test.calibration_single_ended(
        sections=sections,
        st_var=1.0,
        ast_var=1.0,
        method='wls',
        fix_dalpha=(6.46e-05, 0),  # = dalpha_p - dalpha_m
        trans_att=[40, 60],
        solver='sparse')
    assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=8)
    assert_almost_equal_verbose(
        ds_test.tmpf.values, temp_real - 273.15, decimal=8)
    assert_almost_equal_verbose(
        ds_test.isel(trans_att=0).talpha, -np.log(tr_att), decimal=8)
    assert_almost_equal_verbose(
        ds_test.isel(trans_att=1).talpha, -np.log(tr_att2), decimal=8)
def test_single_ended_matching_sections_synthetic():
    """Checks whether the matching sections routines perform as intended,
    and calibrate to the correct temperature"""
    from dtscalibration import DataStore
    cable_len = 100.
    nt = 50
    nx = 200
    time = np.arange(nt)
    x = np.linspace(0., cable_len, nx)
    ts_cold = np.ones(nt) * 4.
    ts_warm = np.ones(nt) * 20.
    ts_ambient = np.ones(nt) * 12
    ts_valid = np.ones(nt) * 16
    C_p = 15246
    C_m = 2400.
    dalpha_r = 0.0005284
    dalpha_m = 0.0004961
    dalpha_p = 0.0005607
    gamma = 482.6
    # Two cold and two warm stretches; the valid stretch (16 degC) is only
    # used to check calibrated temperatures, never for calibration.
    cold_mask1 = np.logical_and(x > 0.125 * cable_len, x < 0.25 * cable_len)
    cold_mask2 = np.logical_and(x > 0.625 * cable_len, x < 0.75 * cable_len)
    warm_mask1 = np.logical_and(x > 0.75 * cable_len, x < 0.875 * cable_len)
    warm_mask2 = np.logical_and(x > 0.25 * cable_len, x < 0.375 * cable_len)
    valid_mask = np.logical_and(x > 0.40 * cable_len, x < 0.50 * cable_len)
    temp_real = np.ones((len(x), nt)) * 12 + 273.15
    temp_real[cold_mask1 + cold_mask2] = ts_cold + 273.15
    temp_real[warm_mask1 + warm_mask2] = ts_warm + 273.15
    temp_real[valid_mask] = ts_valid + 273.15
    st = C_p * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_p * x[:, None]) * \
        np.exp(gamma / temp_real) / (np.exp(gamma / temp_real) - 1)
    ast = C_m * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_m * x[:, None]) / (np.exp(gamma / temp_real) - 1)
    # Add attenuation
    # Two time-varying step losses in the Stokes channel only, at 40% and
    # 60% of the cable (x = 40 m and x = 60 m).
    tr_att = np.random.rand(nt) * .2 + 0.8
    st[int(x.size * 0.4):] *= tr_att
    tr_att2 = np.random.rand(nt) * .2 + 0.8
    st[int(x.size * 0.6):] *= tr_att2
    ds = DataStore(
        {
            'st': (['x', 'time'], st),
            'ast': (['x', 'time'], ast),
            'userAcquisitionTimeFW': (['time'], np.ones(nt)),
            'cold': (['time'], ts_cold),
            'warm': (['time'], ts_warm),
            'ambient': (['time'], ts_ambient)},
        coords={
            'x': x,
            'time': time},
        attrs={'isDoubleEnded': '0'})
    # Reference sections are all located before the first attenuation point;
    # the segments behind the attenuation points are constrained through the
    # matching sections below.
    sections = {
        'cold': [slice(0.13 * cable_len, 0.24 * cable_len)],
        'warm': [slice(0.26 * cable_len, 0.365 * cable_len)]}
    # The leading ambient stretch is matched to ambient stretches behind
    # each attenuation point (True: same temperature assumed).
    matching_sections = [
        (
            slice(.01 * cable_len,
                  .09 * cable_len), slice(.51 * cable_len,
                                          .59 * cable_len), True),
        (
            slice(.01 * cable_len,
                  .09 * cable_len), slice(.91 * cable_len,
                                          .99 * cable_len), True)]
    ds_test = ds.copy(deep=True)
    # OLS
    ds_test.calibration_single_ended(
        sections=sections,
        method='ols',
        matching_sections=matching_sections,
        trans_att=[40, 60],
        solver='sparse')
    assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=8)
    assert_almost_equal_verbose(
        ds_test.tmpf.values, temp_real - 273.15, decimal=8)
    # The estimated attenuation should equal -ln of the applied transmission.
    assert_almost_equal_verbose(
        ds_test.isel(trans_att=0).talpha, -np.log(tr_att), decimal=8)
    assert_almost_equal_verbose(
        ds_test.isel(trans_att=1).talpha, -np.log(tr_att2), decimal=8)
    ds_test = ds.copy(deep=True)
    # WLS
    ds_test.calibration_single_ended(
        sections=sections,
        st_var=1.0,
        ast_var=1.0,
        method='wls',
        matching_sections=matching_sections,
        trans_att=[40, 60],
        solver='sparse')
    assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=8)
    assert_almost_equal_verbose(
        ds_test.tmpf.values, temp_real - 273.15, decimal=8)
    assert_almost_equal_verbose(
        ds_test.isel(trans_att=0).talpha, -np.log(tr_att), decimal=8)
    assert_almost_equal_verbose(
        ds_test.isel(trans_att=1).talpha, -np.log(tr_att2), decimal=8)
    ds_test = ds.copy(deep=True)
    # Test fixing gamma + transient att.
    ds_test.calibration_single_ended(
        sections=sections,
        st_var=1.0,
        ast_var=1.0,
        method='wls',
        fix_gamma=(482.6, 0),
        matching_sections=matching_sections,
        trans_att=[40, 60],
        solver='sparse')
    assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=10)
    assert_almost_equal_verbose(
        ds_test.tmpf.values, temp_real - 273.15, decimal=8)
    assert_almost_equal_verbose(
        ds_test.isel(trans_att=0).talpha, -np.log(tr_att), decimal=8)
    assert_almost_equal_verbose(
        ds_test.isel(trans_att=1).talpha, -np.log(tr_att2), decimal=8)
    ds_test = ds.copy(deep=True)
    # Test fixing dalpha + transient att.
    ds_test.calibration_single_ended(
        sections=sections,
        st_var=1.0,
        ast_var=1.0,
        method='wls',
        fix_dalpha=(6.46e-05, 0),  # = dalpha_p - dalpha_m
        matching_sections=matching_sections,
        trans_att=[40, 60],
        solver='sparse')
    assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=10)
    assert_almost_equal_verbose(
        ds_test.tmpf.values, temp_real - 273.15, decimal=8)
    assert_almost_equal_verbose(
        ds_test.isel(trans_att=0).talpha, -np.log(tr_att), decimal=8)
    assert_almost_equal_verbose(
        ds_test.isel(trans_att=1).talpha, -np.log(tr_att2), decimal=8)
    ds_test = ds.copy(deep=True)
    # Test fixing gamma & dalpha + transient att.
    ds_test.calibration_single_ended(
        sections=sections,
        st_var=1.0,
        ast_var=1.0,
        method='wls',
        fix_gamma=(482.6, 0),
        fix_dalpha=(6.46e-05, 0),
        matching_sections=matching_sections,
        trans_att=[40, 60],
        solver='sparse')
    assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=10)
    assert_almost_equal_verbose(
        ds_test.tmpf.values, temp_real - 273.15, decimal=8)
    assert_almost_equal_verbose(
        ds_test.isel(trans_att=0).talpha, -np.log(tr_att), decimal=8)
    assert_almost_equal_verbose(
        ds_test.isel(trans_att=1).talpha, -np.log(tr_att2), decimal=8)
    # Test conf. ints. for the combination of everything
    ds_test.conf_int_single_ended(
        p_val='p_val',
        p_cov='p_cov',
        st_var=1.0,
        ast_var=1.0,
        store_tmpf='tmpf',
        store_tempvar='_var',
        conf_ints=[2.5, 50., 97.5],
        mc_sample_size=50)
    ds_test_1 = ds_test.isel(time=-1)
    # Smoke-access the results before asserting on them.
    ds_test_1.tmpf
    ds_test_1.tmpf_mc.isel(CI=0).values
    ds_test_1.tmpf_mc.isel(CI=2).values
    # The 2.5% / 97.5% bounds must bracket the point estimate.
    assert np.all(
        np.less(ds_test_1.tmpf_mc.isel(CI=0).values, ds_test_1.tmpf)
    ), 'Single-ended, trans. att.; 2.5% confidence interval is incorrect'
    assert np.all(
        np.greater(ds_test_1.tmpf_mc.isel(CI=2).values, ds_test_1.tmpf)
    ), 'Single-ended, trans. att.; 97.5% confidence interval is incorrect'
def test_single_ended_exponential_variance_estimate_synthetic():
    """Checks whether the coefficients are correctly defined by creating a
    synthetic measurement set, and derive the parameters from this set.
    With variance.
    They should be the same as the parameters used to create the synthetic
    measurement set.
    """
    import dask.array as da
    from dtscalibration import DataStore

    # Seeded dask random state so the Monte Carlo draws are reproducible.
    state = da.random.RandomState(0)

    # Measurement-noise variances added to the synthetic Stokes signals.
    stokes_m_var = 40.
    astokes_m_var = 60.
    cable_len = 100.
    nt = 50
    time = np.arange(nt)
    x = np.linspace(0., cable_len, 500)
    ts_cold = np.ones(nt) * 4.
    ts_warm = np.ones(nt) * 20.

    # Ground-truth calibration parameters used to generate the signals.
    C_p = 15246
    C_m = 2400.
    dalpha_r = 0.0005284
    dalpha_m = 0.0004961
    dalpha_p = 0.0005607
    gamma = 482.6

    # First half of the fibre sits in the cold bath, second half in the warm.
    cold_mask = x < 0.5 * cable_len
    warm_mask = np.invert(cold_mask)  # == False
    temp_real = np.ones((len(x), nt))
    temp_real[cold_mask] *= ts_cold + 273.15
    temp_real[warm_mask] *= ts_warm + 273.15

    # Noise-free (anti-)Stokes intensities from the single-ended model,
    # then add Gaussian measurement noise with the variances above.
    st = C_p * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_p * x[:, None]) * \
        np.exp(gamma / temp_real) / (np.exp(gamma / temp_real) - 1)
    ast = C_m * np.exp(-dalpha_r * x[:, None]) * \
        np.exp(-dalpha_m * x[:, None]) / (np.exp(gamma / temp_real) - 1)
    st_m = st + stats.norm.rvs(size=st.shape, scale=stokes_m_var**0.5)
    ast_m = ast + stats.norm.rvs(size=ast.shape, scale=astokes_m_var**0.5)

    ds = DataStore(
        {
            'st': (['x', 'time'], st_m),
            'ast': (['x', 'time'], ast_m),
            'userAcquisitionTimeFW': (['time'], np.ones(nt)),
            'cold': (['time'], ts_cold),
            'warm': (['time'], ts_warm)},
        coords={
            'x': x,
            'time': time},
        attrs={'isDoubleEnded': '0'})

    sections = {
        'cold': [slice(0., 0.5 * cable_len)],
        'warm': [slice(0.5 * cable_len, cable_len)]}

    # Estimate the Stokes variances from the reference sections.
    st_label = 'st'
    ast_label = 'ast'
    mst_var, _ = ds.variance_stokes_exponential(
        st_label=st_label, sections=sections)
    mast_var, _ = ds.variance_stokes_exponential(
        st_label=ast_label, sections=sections)

    # MC variance
    ds.calibration_single_ended(
        sections=sections,
        st_var=mst_var,
        ast_var=mast_var,
        method='wls',
        solver='sparse')
    ds.conf_int_single_ended(
        p_val='p_val',
        p_cov='p_cov',
        st_var=mst_var,
        ast_var=mast_var,
        store_tmpf='tmpf',
        store_tempvar='_var',
        conf_ints=[2.5, 50., 97.5],
        mc_sample_size=50,
        da_random_state=state)

    # Calibrated variance
    stdsf1 = ds.ufunc_per_section(
        label='tmpf', func=np.var, temp_err=True, calc_per='stretch', ddof=1)

    # Use a single timestep to better check if the parameter uncertainties
    # propagate
    ds1 = ds.isel(time=1)
    # Estimated VAR
    stdsf2 = ds1.ufunc_per_section(
        label='tmpf_mc_var', func=np.mean, temp_err=False, calc_per='stretch')

    # The MC-estimated variance should match the realised variance of the
    # temperature error, per reference stretch, to one decimal.
    for (_, v1), (_, v2) in zip(stdsf1.items(), stdsf2.items()):
        for v1i, v2i in zip(v1, v2):
            v2i_c = float(v2i)
            print('Real VAR: ', v1i, 'Estimated VAR: ', v2i_c)
            assert_almost_equal_verbose(v1i, v2i_c, decimal=1)
def test_calibration_ols():
    """Testing ordinary least squares procedure. And compare with device calibrated temperature.
    The measurements were calibrated by the device using only section 8--17.m. Those temperatures
    are compared up to 2 decimals. Silixa only uses a single calibration constant (I think they
    fix gamma), or a different formulation, see Shell primer.
    """
    filepath = data_dir_double_ended2
    ds = read_silixa_files(
        directory=filepath, timezone_netcdf='UTC', file_ext='*.xml')
    # Restrict to the first 100 m of fibre to keep the problem small.
    ds100 = ds.sel(x=slice(0, 100))
    # Same (single) reference section the device used for its calibration.
    sections_ultima = {
        'probe1Temperature': [slice(8., 17.)],  # cold bath
    }
    ds100.calibration_double_ended(
        sections=sections_ultima, store_tmpw='tmpw', method='ols')
    # Over the whole fibre the device temperature should agree to ~0.1 degC.
    np.testing.assert_array_almost_equal(
        ds100['tmpw'].data, ds100.tmp.data, decimal=1)
    # Inside the calibration section agreement should be tighter (~0.01 degC).
    ds009 = ds100.sel(x=sections_ultima['probe1Temperature'][0])
    np.testing.assert_array_almost_equal(
        ds009['tmpw'].data, ds009.tmp.data, decimal=2)
    pass
def test_calibrate_wls_procedures():
    """Compare the two weighted-least-squares solvers on a synthetic system.

    ``wls_stats`` and ``wls_sparse`` should both recover the known
    coefficient vector of a small linear system, and should agree with
    each other (solution, variances, covariance) when weights are used.
    A scipy sparse design matrix should give the same result as dense.
    """
    # 25 observations of 4 regressors with a known coefficient vector.
    x = np.linspace(0, 10, 25 * 4)
    np.random.shuffle(x)
    X = x.reshape((25, 4))
    beta = np.array([1, 0.1, 10, 5])
    # NOTE(review): all 25 weights equal 1.0, so the "weighted" fit is
    # effectively unweighted -- presumably intentional; confirm.
    beta_w = np.concatenate((np.ones(10), np.ones(15) * 1.0))
    beta_0 = np.array([1, 1, 1, 1])
    y = np.dot(X, beta)
    y_meas = y + np.random.normal(size=y.size)
    # first check unweighted convergence
    beta_numpy = np.linalg.lstsq(X, y, rcond=None)[0]
    np.testing.assert_array_almost_equal(beta, beta_numpy, decimal=8)
    ps_sol, ps_var = wls_stats(X, y, w=1, calc_cov=0)
    p_sol, p_var = wls_sparse(X, y, w=1, calc_cov=0, x0=beta_0)
    np.testing.assert_array_almost_equal(beta, ps_sol, decimal=8)
    np.testing.assert_array_almost_equal(beta, p_sol, decimal=8)
    # now with weights
    dec = 8
    ps_sol, ps_var, ps_cov = wls_stats(
        X, y_meas, w=beta_w, calc_cov=True, x0=beta_0)
    p_sol, p_var, p_cov = wls_sparse(
        X, y_meas, w=beta_w, calc_cov=True, x0=beta_0)
    np.testing.assert_array_almost_equal(p_sol, ps_sol, decimal=dec)
    np.testing.assert_array_almost_equal(p_var, ps_var, decimal=dec)
    np.testing.assert_array_almost_equal(p_cov, ps_cov, decimal=dec)
    # Test array sparse
    Xsp = sp.coo_matrix(X)
    psp_sol, psp_var, psp_cov = wls_stats(Xsp, y_meas, w=beta_w, calc_cov=True)
    np.testing.assert_array_almost_equal(p_sol, psp_sol, decimal=dec)
    np.testing.assert_array_almost_equal(p_var, psp_var, decimal=dec)
    np.testing.assert_array_almost_equal(p_cov, psp_cov, decimal=dec)
    pass
def test_average_measurements_single_ended():
    """Smoke test for ``average_single_ended``.

    Calibrates a measured single-ended dataset and exercises the four
    averaging modes (over x by selection and by index; over time by
    selection and by index). Only checks that the calls run.
    """
    filepath = data_dir_single_ended
    ds_ = read_silixa_files(
        directory=filepath, timezone_netcdf='UTC', file_ext='*.xml')
    ds = ds_.sel(x=slice(0, 100))  # only calibrate parts of the fiber
    sections = {
        'probe2Temperature': [slice(6., 14.)]  # warm bath
    }
    ds.sections = sections
    st_var, ast_var = 5., 5.
    ds.calibration_single_ended(
        st_var=st_var, ast_var=ast_var, method='wls', solver='sparse')
    # Average over x, section chosen by position (flag1 + x_sel).
    ds.average_single_ended(
        p_val='p_val',
        p_cov='p_cov',
        st_var=st_var,
        ast_var=ast_var,
        store_tmpf='tmpf',
        store_tempvar='_var',
        conf_ints=[2.5, 97.5],
        mc_sample_size=50,  # <- choose a much larger sample size
        ci_avg_x_flag1=True,
        ci_avg_x_sel=slice(6., 14.))
    # Average over x, section chosen by integer indices (flag2 + x_isel).
    ix = ds.get_section_indices(slice(6, 14))
    ds.average_single_ended(
        p_val='p_val',
        p_cov='p_cov',
        st_var=st_var,
        ast_var=ast_var,
        store_tmpf='tmpf',
        store_tempvar='_var',
        conf_ints=[2.5, 97.5],
        mc_sample_size=50,  # <- choose a much larger sample size
        ci_avg_x_flag2=True,
        ci_avg_x_isel=ix)
    # Average over time, window chosen by timestamps.
    sl = slice(
        np.datetime64('2018-05-04T12:22:17.710000000'),
        np.datetime64('2018-05-04T12:22:47.702000000'))
    ds.average_single_ended(
        p_val='p_val',
        p_cov='p_cov',
        st_var=st_var,
        ast_var=ast_var,
        store_tmpf='tmpf',
        store_tempvar='_var',
        conf_ints=[2.5, 97.5],
        mc_sample_size=50,  # <- choose a much larger sample size
        ci_avg_time_flag1=True,
        ci_avg_time_flag2=False,
        ci_avg_time_sel=sl)
    # Average over time, window chosen by integer indices.
    ds.average_single_ended(
        p_val='p_val',
        p_cov='p_cov',
        st_var=st_var,
        ast_var=ast_var,
        store_tmpf='tmpf',
        store_tempvar='_var',
        conf_ints=[2.5, 97.5],
        mc_sample_size=50,  # <- choose a much larger sample size
        ci_avg_time_flag1=False,
        ci_avg_time_flag2=True,
        ci_avg_time_isel=range(3))
    pass
def test_average_measurements_double_ended():
    """Smoke test for ``average_double_ended``.

    Calibrates a measured double-ended dataset and exercises the four
    averaging modes (over x by selection and by index; over time by
    selection and by index). Only checks that the calls run.
    """
    filepath = data_dir_double_ended2
    ds_ = read_silixa_files(
        directory=filepath, timezone_netcdf='UTC', file_ext='*.xml')
    ds = ds_.sel(x=slice(0, 100))  # only calibrate parts of the fiber
    sections = {
        'probe1Temperature': [slice(7.5, 17.),
                              slice(70., 80.)],  # cold bath
        'probe2Temperature': [slice(24., 34.),
                              slice(85., 95.)],  # warm bath
    }
    ds.sections = sections
    st_var, ast_var, rst_var, rast_var = 5., 5., 5., 5.
    ds.calibration_double_ended(
        st_var=st_var,
        ast_var=ast_var,
        rst_var=rst_var,
        rast_var=rast_var,
        store_tmpw='tmpw',
        method='wls',
        solver='sparse')
    # Average over x, section chosen by position (flag1 + x_sel).
    ds.average_double_ended(
        p_val='p_val',
        p_cov='p_cov',
        st_var=st_var,
        ast_var=ast_var,
        rst_var=rst_var,
        rast_var=rast_var,
        store_tmpf='tmpf',
        store_tmpb='tmpb',
        store_tmpw='tmpw',
        store_tempvar='_var',
        conf_ints=[2.5, 97.5],
        mc_sample_size=50,  # <- choose a much larger sample size
        ci_avg_x_flag1=True,
        ci_avg_x_sel=slice(6, 10))
    # Average over x, section chosen by integer indices (flag2 + x_isel).
    ix = ds.get_section_indices(slice(6, 10))
    ds.average_double_ended(
        p_val='p_val',
        p_cov='p_cov',
        st_var=st_var,
        ast_var=ast_var,
        rst_var=rst_var,
        rast_var=rast_var,
        store_tmpf='tmpf',
        store_tmpb='tmpb',
        store_tmpw='tmpw',
        store_tempvar='_var',
        conf_ints=[2.5, 97.5],
        mc_sample_size=50,  # <- choose a much larger sample size
        ci_avg_x_flag2=True,
        ci_avg_x_isel=ix)
    # Average over time, window chosen by timestamps.
    sl = slice(
        np.datetime64('2018-03-28T00:40:54.097000000'),
        np.datetime64('2018-03-28T00:41:12.084000000'))
    ds.average_double_ended(
        p_val='p_val',
        p_cov='p_cov',
        st_var=st_var,
        ast_var=ast_var,
        rst_var=rst_var,
        rast_var=rast_var,
        store_tmpf='tmpf',
        store_tmpb='tmpb',
        store_tmpw='tmpw',
        store_tempvar='_var',
        conf_ints=[2.5, 97.5],
        mc_sample_size=50,  # <- choose a much larger sample size
        ci_avg_time_flag1=True,
        ci_avg_time_flag2=False,
        ci_avg_time_sel=sl)
    # Average over time, window chosen by integer indices.
    ds.average_double_ended(
        p_val='p_val',
        p_cov='p_cov',
        st_var=st_var,
        ast_var=ast_var,
        rst_var=rst_var,
        rast_var=rast_var,
        store_tmpf='tmpf',
        store_tmpb='tmpb',
        store_tmpw='tmpw',
        store_tempvar='_var',
        conf_ints=[2.5, 97.5],
        mc_sample_size=50,  # <- choose a much larger sample size
        ci_avg_time_flag1=False,
        ci_avg_time_flag2=True,
        ci_avg_time_isel=range(3))
    pass
| 33.409078
| 97
| 0.584567
| 17,198
| 117,032
| 3.715548
| 0.034597
| 0.040313
| 0.04975
| 0.069859
| 0.935759
| 0.91939
| 0.907981
| 0.898685
| 0.892207
| 0.879844
| 0
| 0.049377
| 0.26315
| 117,032
| 3,502
| 98
| 33.418618
| 0.691622
| 0.069255
| 0
| 0.866762
| 0
| 0
| 0.054053
| 0.012291
| 0
| 0
| 0
| 0
| 0.074499
| 1
| 0.013968
| false
| 0.011819
| 0.015401
| 0
| 0.030086
| 0.017192
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
207fbbcac03bd201df963127dc8b0d314f47b4ab
| 102,712
|
py
|
Python
|
student_registration.py
|
Diptyangshu-code/Student-Management-System
|
e8e6e2fea3c2ac8d9faa32c8ee8163bdf9b3d4e6
|
[
"Apache-2.0"
] | null | null | null |
student_registration.py
|
Diptyangshu-code/Student-Management-System
|
e8e6e2fea3c2ac8d9faa32c8ee8163bdf9b3d4e6
|
[
"Apache-2.0"
] | null | null | null |
student_registration.py
|
Diptyangshu-code/Student-Management-System
|
e8e6e2fea3c2ac8d9faa32c8ee8163bdf9b3d4e6
|
[
"Apache-2.0"
] | null | null | null |
import os
class check:
    """Interactive admission-eligibility check.

    A student qualifies when the sum of the best five of their eight
    Higher Secondary subject marks reaches the minimum for their
    reservation category; the fee also depends on the category.

    The original repeated the same 35-line marks-entry block once per
    category -- collapsed here into a criteria table plus one helper.
    """

    # category menu choice -> (minimum top-5 total, fee in Rs as printed)
    _CRITERIA = {
        1: (225, "45000"),  # ST
        2: (225, "45000"),  # SC
        3: (275, "54000"),  # OBC
        4: (300, "65000"),  # General
    }

    # The eight Higher Secondary subjects, in the original prompt order.
    _SUBJECTS = (
        "English", "Mathematics", "Physics", "Chemistry",
        "Computer Science", "Biology", "Physical Education",
        "Alternative English",
    )

    def _best_five_total(s):
        """Prompt for the eight subject marks; return the sum of the best five."""
        print("\n Enter marks of Higher Secondary \n")
        marks = []
        for i, subject in enumerate(s._SUBJECTS):
            marks.append(int(input(" " + subject + " : ")))
            # Original printed a blank line between prompts, not after the last.
            if i < len(s._SUBJECTS) - 1:
                print("\n")
        return sum(sorted(marks, reverse=True)[:5])

    def eligibility(s):
        """Show the category menu, read the marks and report eligibility.

        Exits the program on choice 0 and prints an error message on an
        unknown choice, matching the original behaviour.
        """
        print(" Select the caste of the student \n")
        print("1. ST (Schedule Tribe) \n")
        print("2. SC (Schedule Caste) \n")
        print("3. OBC (Other Backword Classes \n ")
        print("4. General \n")
        print("0. EXIT \n")
        c = int(input(" Enter your choice : "))
        if c == 0:
            exit(0)
        if c not in s._CRITERIA:
            print("\n !!! Enter correct option !!!\n")
            return
        minimum, fee = s._CRITERIA[c]
        if s._best_five_total() >= minimum:
            print("\n Student is eligible for admission \n")
            print("Fees- Rs", fee)
        else:
            print(" Sorry !! Student is not eligible for Admission \n")
class student:
    """File-backed student registration, billing and lookup.

    Each registered student gets two plain-text files in the current
    working directory: ``<first name>.txt`` (registration record) and
    ``<first name>_bill.txt`` (bill).

    NOTE(review): ``registration`` reads the module-level globals
    ``branch``, ``e1``, ``c7`` (fee as string) and ``c8`` (fee as int)
    that the admission menu assigns before calling it -- confirm they
    are set on every path that reaches this method.
    """

    def registration(s):
        """Interactively register one student and write both record files.

        Files are opened in ``"x"`` mode, so registering a name that
        already has a record raises FileExistsError rather than silently
        overwriting it. (The original opened each file twice -- first
        ``"x"`` then ``"w"`` -- leaking the first handle; ``with`` now
        guarantees the handles are closed.)
        """
        a = input(" Enter student's First Name : ")
        print("\n")
        b = input(" Enter student's Last Name : ")
        print("\n")
        c = input(" Enter student's Address : ")
        print("\n")
        d = input(" Enter student's Contact-Number : ")
        print("\n")
        e = input(" Have the student paid the bill ? y/n ")
        if e == 'y':
            pay = "paid full"
            pay1 = "00.00"
        elif e == 'n':
            pay = "unpaid"
            # Due amount = total fee (global c8) minus what is paid now.
            paid_now = int(input(" Enter the amount student paid ? "))
            pay1 = str(c8 - paid_now)
        else:
            # The original crashed with a NameError on any other answer.
            print("\n !!! Please answer y or n !!! \n")
            return
        with open(a + ".txt", "x") as abc:
            abc.write("-----------------------------------------------------------******----CENTRAL INSTITUTE OF TECHNOLOGY, KOKRAJHAR----******--------------------------------------------------------------------------------")
            abc.write("\n")
            abc.write("----------------------------------------------------------(A Centrally Funded Institute under ministry of HRD, Govt.of India)-------------------------------------------------------------------------------")
            abc.write("\n")
            abc.write("---------------------------------------------------------------------Kokrajhar, BTAD, Assam, India, PIN-783370---------------------------------------------------------------------------------------------------")
            abc.write("\n")
            abc.write("\n")
            abc.write("\n")
            abc.write("----------------------------------------------------------------------******----Student's Registration-----*****--------------------------------------------------------------------")
            abc.write("\n")
            abc.write("\n")
            abc.write("First Name : " + a)
            abc.write("\n")
            abc.write("Last Name : " + b)
            abc.write("\n")
            abc.write("Address : " + c)
            abc.write("\n")
            abc.write("Contact-Number : " + d)
            abc.write("\n")
            abc.write(" Branch : " + branch)
            abc.write("\n")
            abc.write("Elective : " + e1)
            abc.write("\n")
            abc.write("Payment Status- Rs : " + pay)
            abc.write("\n")
            abc.write("Due Amount - Rs : " + pay1)
            abc.write("\n\n")
            abc.write("\n")
            abc.write("\n")
            abc.write("-------------------------------------------------------------------------------------THANK YOU--------------------------------------------------------------------------------------------------------------------------------")
        with open(a + "_bill.txt", "x") as billing:
            billing.write("---------------------------------------------------********--CENTRAL INSTITUTE OF TECHNOLOGY, KOKRAJHAR--********---------------------------------------------------------------------------------------")
            billing.write("\n")
            billing.write("-------------------------------------------------(A Centrally Funded Institute under ministry of HRD, Govt.of India)----------------------------------------------------------------------------")
            billing.write("\n")
            billing.write("------------------------------------------------------------Kokrajhar, BTAD, Assam, India, PIN-783370------------------------------------------------------------------------------------------")
            billing.write("\n")
            billing.write("\n")
            billing.write("\n")
            billing.write("-----------------------------------------------------------------****---Student's Billing---****-----------------------------------------------------------------------------------------")
            billing.write("\n")
            billing.write("\n")
            billing.write("First Name : " + a)
            billing.write("\n")
            billing.write("Last Name : " + b)
            billing.write("\n")
            billing.write("Address : " + c)
            billing.write("\n")
            billing.write("Contact-Number : " + d)
            billing.write("\n")
            billing.write("Branch : " + branch)
            billing.write("\n")
            billing.write("Elective : " + e1)
            billing.write("\n")
            billing.write("Total Amount : " + c7)
            billing.write("\n")
            billing.write(" Payment Status - Rs : " + pay)
            billing.write("\n")
            billing.write("Due Amount - Rs : " + pay1)
            billing.write("\n")
            billing.write("\n")
            billing.write("\n")
            billing.write("\n")
            billing.write("-------------------------------------------------------------------------------THANK YOU------------------------------------------------------------------------------------------")
        # Echo the freshly written bill to the console.
        with open(a + "_bill.txt", "r") as billing:
            print(billing.read())

    def search(s):
        """Look up a registration record by first name and optionally show it."""
        s1 = input("Type student's name to search : ")
        if os.path.exists(s1 + ".txt"):
            print(s1, " is a registered student ")
            print("\n")
            print(" Do you want to view the details of ", s1, " ? y/n")
            s2 = input()
            print("\n")
            if s2 == 'y':
                with open(s1 + ".txt", "r") as abc:
                    print(abc.read())
            else:
                print(" Thank You ")
        else:
            print(" No student is registered by ", s1)
            print("\n Thank You \n")

    def search_bill(s):
        """Look up a bill by first name and optionally show it."""
        s1 = input("Type student's name to search : ")
        if os.path.exists(s1 + "_bill.txt"):
            print(s1, " is a registered student ")
            print("\n")
            print(" Do you want to view the bill of ", s1, " ? y/n")
            s2 = input()
            print("\n")
            if s2 == 'y':
                with open(s1 + "_bill.txt", "r") as billing:
                    print(billing.read())
            else:
                print(" Thank You ")
        else:
            print(" No student is registered by ", s1)
            print("\n Thank You \n")
print("\n")
print("\n")
print("-----------------------------------------------------------------CENTRAL INSTITUTE OF TECHNOLOGY, KOKRAJHAR----------------------------------------------------------------------------------------------")
print("----------------------------------------------------(A Centrally Funded Institute under ministry of HRD, Govt. of India)---------------------------------------------------------------------------------")
print("-----------------------------------------------------------------Kokrajhar, BTAD, Assam, India, PIN-783370------------------------------------------------------------------------------------------------")
print("\n")
print("\n")
print("------------------------------------------------------------------Welcome to Student's Registration System--------------------------------------------------------------------------------------------")
print("\n")
print("\n")
obj1=student()
obj2=check()
f=0
while(1):
print(" Select Your Option from below : \n")
print(" 1. Registration \n")
print(" 2. Search Student Info \n")
print(" 3.Eligibility \n")
print(" 4. Search bill of students \n")
print(" 0. EXIT \n ")
s3=int(input(" Enter your choice : "))
print("\n")
if(s3==1):
while(1):
print(" Which Department Student want to take admission in ? ")
print("\n")
print(" 1. Computer Science Engineering ")
print(" 2. Electrical Engineering ")
print(" 3. Mechanical Engineering ")
print(" 4. BACK TO PREVIOUS OPTIONS ")
print(" 0. EXIT \n")
print("\n")
s4=int(input(" Enter your choice : "))
if(s4==1):
print("Enter total number of seats allocation for B.Tech of Computer Science : ")
seats=int(input())
for i in range(0,seats):
if(f<=seats):
print(" Select the caste of the student \n ")
print("1. ST (Schedule Tribe) \n ")
print("2. SC (Schedule Caste) \n ")
print("3. OBC (Other Backword Classes) \n ")
print("4. General(Unreserved) \n ")
print("5. BACK TO PREVIOUS OPTIONS \n")
print("0. EXIT \n")
c=int(input())
if(c==1):
print("\n Enter marks of Higher Secondary \n")
y1=int(input(" English : "))
print("\n")
y2=int(input(" Mathematics : "))
print("\n")
y3=int(input(" Physics : "))
print("\n")
y4=int(input(" Chemistry : "))
print("\n")
y5=int(input(" Computer Science : "))
print("\n")
y6=int(input(" Biology : "))
print("\n")
y7=int(input(" Physical Education : "))
print("\n")
y8=int(input(" Alternative English : "))
a=list()
a.append(y1)
a.append(y2)
a.append(y3)
a.append(y4)
a.append(y5)
a.append(y6)
a.append(y7)
a.append(y8)
a.sort()
a.reverse()
c6=a[0]+a[1]+a[2]+a[3]+a[4]
if(c6>=225):
print("\nStudent is eligible for admission \n")
c7="45000"
c8=45000
print("Select Elective from below : ")
print("1.Cyber Security Majors \n")
print("2.Internet of things (IOT) \n ")
print("3. Robotics \n")
print("4. BACK TO PREVIOUS OPTIONS \n")
print("0. EXIT \n")
e=int(input(" Enter your choice : "))
if(e==1):
branch=" Computer Science Engineering "
e1="Cyber Security Majors"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==2):
branch=" Computer Science Engineering "
e1="Internet of things (IOT)"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==3):
branch=" Computer Science Engineering "
e1="Robotics"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==4):
break
elif(e==0):
exit(0)
else:
print("\n !!! Enter correct input !!! \n")
break
else:
print(" Sorry !! Student is not eligible for Admission \n")
break
elif(c==2):
print(" \nEnter marks of Higher Secondary \n")
y1=int(input(" English : "))
print("\n")
y2=int(input(" Mathematics : "))
print("\n")
y3=int(input(" Physics : "))
print("\n")
y4=int(input(" Chemistry : "))
print("\n")
y5=int(input(" Computer Science : "))
print("\n")
y6=int(input(" Biology : "))
print("\n")
y7=int(input(" Physical Education : "))
print("\n")
y8=int(input(" Alternative English : "))
a=list()
a.append(y1)
a.append(y2)
a.append(y3)
a.append(y4)
a.append(y5)
a.append(y6)
a.append(y7)
a.append(y8)
a.sort()
a.reverse()
c6=a[0]+a[1]+a[2]+a[3]+a[4]
c7="45000"
c8=45000
if(c6>=225):
print("\nStudent is eligible for admission \n")
c7="45000"
c8=45000
print("Select Elective from below : ")
print("1. Cyber Security Majors \n")
print("2. Internet of things (IOT) \n ")
print("3. Robotics \n")
print("4. BACK TO PREVIOUS OPTIONS \n")
print("0. EXIT \n")
e=int(input(" Enter your choice : "))
if(e==1):
branch=" Computer Science Engineering "
e1="Cyber Security Majors"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==2):
branch=" Computer Science Engineering "
e1="Internet of things (IOT)"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==3):
branch=" Computer Science Engineering "
e1="Robotics"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==4):
break
elif(e==0):
exit(0)
else:
print("\n !!! Enter correct option !!!\n ")
break
else:
print(" Sorry !! Student is not eligible for Admission \n")
break
elif(c==3):
print("\n Enter marks of Higher Secondary \n")
y1=int(input(" English : "))
print("\n")
y2=int(input(" Mathematics : "))
print("\n")
y3=int(input(" Physics : "))
print("\n")
y4=int(input(" Chemistry : "))
print("\n")
y5=int(input(" Computer Science : "))
print("\n")
y6=int(input(" Biology : "))
print("\n")
y7=int(input(" Physical Education : "))
print("\n")
y8=int(input(" Alternative English : "))
a=list()
a.append(y1)
a.append(y2)
a.append(y3)
a.append(y4)
a.append(y5)
a.append(y6)
a.append(y7)
a.append(y8)
a.sort()
a.reverse()
c6=a[0]+a[1]+a[2]+a[3]+a[4]
c7="54000"
if(c6>=275):
print("Student is eligible for admission \n")
c7="54000"
c8=54000
print("Select Elective from below : ")
print("1. Cyber Security Majors \n")
print("2. Internet of things (IOT) \n ")
print("3. Robotics \n")
print("4. BACK TO PREVIOUS OPTIONS \n")
print("0. EXIT \n")
e=int(input(" Enter your choice : "))
if(e==1):
branch=" Computer Science Engineering "
e1="Cyber Security Majors"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==2):
branch=" Computer Science Engineering "
e1="Internet of things (IOT)"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==3):
branch=" Computer Science Engineering "
e1="Robotics"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==4):
break
elif(e==0):
exit(0)
else:
print(" \n !!! Enter correct option !!!\n ")
break
else:
print(" Sorry !! Student is not eligible for Admission \n")
break
elif(c==4):
print(" Enter marks of Higher Secondary \n")
y1=int(input(" English : "))
print("\n")
y2=int(input(" Mathematics : "))
print("\n")
y3=int(input(" Physics : "))
print("\n")
y4=int(input(" Chemistry : "))
print("\n")
y5=int(input(" Computer Science : "))
print("\n")
y6=int(input(" Biology : "))
print("\n")
y7=int(input(" Physical Education : "))
print("\n")
y8=int(input(" Alternative English : "))
a=list()
a.append(y1)
a.append(y2)
a.append(y3)
a.append(y4)
a.append(y5)
a.append(y6)
a.append(y7)
a.append(y8)
a.sort()
a.reverse()
c6=a[0]+a[1]+a[2]+a[3]+a[4]
c7="65000"
if(c6>=300):
print("\nStudent is eligible for admission \n")
c7="65000"
c8=65000
print("Select Elective from below : ")
print("1.Cyber Security Majors \n")
print("2.Internet of things (IOT) \n ")
print("3. Robotics \n")
print("4. BACK TO PREVIOUS OPTIONS \n")
print("0. EXIT \n")
e=int(input("Enter your choice : "))
if(e==1):
branch=" Computer Science Engineering "
e1="Cyber Security Majors"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==2):
branch=" Computer Science Engineering "
e1="Internet of things (IOT)"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==3):
branch=" Computer Science Engineering "
e1="Robotics"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==4):
break
elif(e==0):
exit(0)
else:
print("\n !!! Enter correct option !!!\n")
break
else:
print(" Sorry !! Student is not eligible for Admission \n")
break
elif(c==5):
break
elif(c==0):
exit(0)
else:
print(" \n!!! Enter correct option !!! \n")
break
elif(s4==2):
print("Enter total number of seats allocation for B.Tech of Electrical Engineering : ")
seats=int(input())
for i in range(0,seats):
if(f<=seats):
print(" Select the caste of the student \n ")
print("1. ST (Schedule Tribe) \n ")
print("2. SC (Schedule Caste) \n ")
print("3. OBC (Other Backword Classes) \n ")
print("4. General(Unreserved) \n ")
print("5.BACK TO PREVIOUS OPTIONS \n")
print("0. EXIT \n")
c=int(input(" Enter your choice : "))
if(c==1):
print(" Enter marks of Higher Secondary \n")
y1=int(input(" English : "))
print("\n")
y2=int(input(" Mathematics : "))
print("\n")
y3=int(input(" Physics : "))
print("\n")
y4=int(input(" Chemistry : "))
print("\n")
y5=int(input(" Computer Science : "))
print("\n")
y6=int(input(" Biology : "))
print("\n")
y7=int(input(" Physical Education : "))
print("\n")
y8=int(input(" Alternative English : "))
a=list()
a.append(y1)
a.append(y2)
a.append(y3)
a.append(y4)
a.append(y5)
a.append(y6)
a.append(y7)
a.append(y8)
a.sort()
a.reverse()
c6=a[0]+a[1]+a[2]+a[3]+a[4]
if(c6>=225):
print("\nStudent is eligible for admission \n")
c7="45000"
c8=45000
print("Select Elective from below : ")
print("1.Environmental Studies \n")
print("2.Computer Programming \n ")
print("3.Design Thinking \n")
print("4. BACK TO PREVIOUS OPTIONS \n")
print("0. EXIT \n")
e=int(input("Enter your choice : "))
if(e==1):
branch=" Electrical Engineering "
e1="Environmental Studies"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==2):
branch=" Electrical Engineering "
e1="Computer Programming"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==3):
branch=" Electrical Engineering "
e1="Design Thinking"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==4):
break
elif(e==0):
exit(0)
else:
print(" \n!!! Enter correct option !!!\n")
break
else:
print(" Sorry !! Student is not eligible for Admission \n")
break
elif(c==2):
print(" Enter marks of Higher Secondary \n")
y1=int(input(" English : "))
print("\n")
y2=int(input(" Mathematics : "))
print("\n")
y3=int(input(" Physics : "))
print("\n")
y4=int(input(" Chemistry : "))
print("\n")
y5=int(input(" Computer Science : "))
print("\n")
y6=int(input(" Biology : "))
print("\n")
y7=int(input(" Physical Education : "))
print("\n")
y8=int(input(" Alternative English : "))
a=list()
a.append(y1)
a.append(y2)
a.append(y3)
a.append(y4)
a.append(y5)
a.append(y6)
a.append(y7)
a.append(y8)
a.sort()
a.reverse()
c6=a[0]+a[1]+a[2]+a[3]+a[4]
c7="45000"
c8=45000
if(c6>=225):
print("Student is eligible for admission \n")
c7="45000"
c8=45000
print("Select Elective from below : ")
print("1.Environmental Studies \n")
print("2.Computer Programming \n ")
print("3.Design Thinking \n")
print("4.BACK TO PREVIOUS OPTIONS \n ")
print("0. EXIT \n")
e=int(input(" Enter your choice : "))
if(e==1):
branch=" Electrical Engineering "
e1="Environmental Studies"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
elif(e==2):
branch=" Electrical Engineering "
e1="Computer Programming"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
elif(e==3):
branch=" Electrical Engineering "
e1="Design Thinking"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
elif(e==4):
break
elif(e==0):
exit(0)
else:
print("\n !!! Enter correct option !!! \n")
break
else:
print(" Sorry !! Student is not eligible for Admission \n")
break
elif(c==3):
print("\n Enter marks of Higher Secondary \n")
y1=int(input(" English : "))
print("\n")
y2=int(input(" Mathematics : "))
print("\n")
y3=int(input(" Physics : "))
print("\n")
y4=int(input(" Chemistry : "))
print("\n")
y5=int(input(" Computer Science : "))
print("\n")
y6=int(input(" Biology : "))
print("\n")
y7=int(input(" Physical Education : "))
print("\n")
y8=int(input(" Alternative English : "))
a=list()
a.append(y1)
a.append(y2)
a.append(y3)
a.append(y4)
a.append(y5)
a.append(y6)
a.append(y7)
a.append(y8)
a.sort()
a.reverse()
c6=a[0]+a[1]+a[2]+a[3]+a[4]
c7="54000"
if(c6>=275):
print("\nStudent is eligible for admission \n")
c7="54000"
c8=54000
print("Select Elective from below : ")
print("1.Environmental Studies \n")
print("2.Computer Programming \n ")
print("3.Design Thinking \n")
print("4.BACK TO PREVIOUS OPTIONS \n")
print("0. EXIT \n")
e=int(input("Enter your choice : "))
if(e==1):
branch=" Electrical Engineering "
e1="Environmental Studies"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==2):
branch=" Electrical Engineering "
e1="Computer Programming"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==3):
branch=" Electrical Engineering "
e1="Design Thinking"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==4):
break
elif(e==0):
exit(0)
else:
print("\n !!! Enter correct choice !!!\n")
break
else:
print(" Sorry !! Student is not eligible for Admission \n")
break
elif(c==4):
print(" \nEnter marks of Higher Secondary \n")
y1=int(input(" English : "))
print("\n")
y2=int(input(" Mathematics : "))
print("\n")
y3=int(input(" Physics : "))
print("\n")
y4=int(input(" Chemistry : "))
print("\n")
y5=int(input(" Computer Science : "))
print("\n")
y6=int(input(" Biology : "))
print("\n")
y7=int(input(" Physical Education : "))
print("\n")
y8=int(input(" Alternative English : "))
a=list()
a.append(y1)
a.append(y2)
a.append(y3)
a.append(y4)
a.append(y5)
a.append(y6)
a.append(y7)
a.append(y8)
a.sort()
a.reverse()
c6=a[0]+a[1]+a[2]+a[3]+a[4]
c7="65000"
if(c6>=300):
print("\n Student is eligible for admission \n")
c7="65000"
c8=65000
print("Select Elective from below : ")
print("1.Environmental Studies \n")
print("2.Computer Programming \n ")
print("3.Design Thinking \n")
print("4. BACK TO PREVIOUS OPTIONS \n")
print("0. EXIT \n")
e=int(input(" Enter your choice : "))
if(e==1):
branch=" Electrical Engineering "
e1="Environmental Studies"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
elif(e==2):
branch=" Electrical Engineering "
e1="Computer Programming"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
elif(e==3):
branch=" Electrical Engineering "
e1="Design Thinking"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
elif(e==4):
break
elif(e==0):
exit(0)
else:
print("\n !!! Enter correct option!!! \n ")
break
else:
print(" Sorry !! Student is not eligible for Admission \n")
break
elif(c==5):
break
elif(c==0):
exit(0)
else:
print(" \n!!! Enter correct option !!!\n")
break
elif(s4==3):
print("Enter total number of seats allocation for B.Tech of Mechanical Engineering : ")
seats=int(input(" Enter number of seats : "))
for i in range(0,seats):
if(f<=seats):
print(" Select the caste of the student \n ")
print("1. ST (Schedule Tribe) \n ")
print("2. SC (Schedule Caste) \n ")
print("3. OBC (Other Backword Classes) \n ")
print("4. General(Unreserved) \n ")
print("5.BACK TO PREVIOUS OPTIONS \n ")
print("0. EXIT \n")
c=int(input(" Enter your choice : "))
if(c==1):
print(" Enter marks of Higher Secondary \n")
y1=int(input(" English : "))
print("\n")
y2=int(input(" Mathematics : "))
print("\n")
y3=int(input(" Physics : "))
print("\n")
y4=int(input(" Chemistry : "))
print("\n")
y5=int(input(" Computer Science : "))
print("\n")
y6=int(input(" Biology : "))
print("\n")
y7=int(input(" Physical Education : "))
print("\n")
y8=int(input(" Alternative English : "))
a=list()
a.append(y1)
a.append(y2)
a.append(y3)
a.append(y4)
a.append(y5)
a.append(y6)
a.append(y7)
a.append(y8)
a.sort()
a.reverse()
c6=a[0]+a[1]+a[2]+a[3]+a[4]
if(c6>=225):
print("\nStudent is eligible for admission \n")
c7="45000"
c8=45000
print("Select Elective from below : ")
print("1.Robotics \n")
print("2.Intermediate Fluid Mechanics \n ")
print("3.Computer Aided Design and Manufacturing \n")
print("4.BACK TO PREVIOUS OPTIONS\n")
print("0. EXIT \n")
e=int(input(" Enter your choice "))
if(e==1):
branch=" Mechanical Engineering "
e1="Robotics"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==2):
branch=" Mechanical Engineering "
e1="Intermediate Fluid Mechanics"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==3):
branch=" Mechanical Engineering "
e1="Computer Aided Design and Manufacturing "
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==0):
exit(0)
elif(e==4):
break
else:
print("\n !!! Enter correct option !!!\n ")
break
else:
print(" Sorry !! Student is not eligible for Admission \n")
break
elif(c==2):
print(" Enter marks of Higher Secondary \n")
y1=int(input(" English : "))
print("\n")
y2=int(input(" Mathematics : "))
print("\n")
y3=int(input(" Physics : "))
print("\n")
y4=int(input(" Chemistry : "))
print("\n")
y5=int(input(" Computer Science : "))
print("\n")
y6=int(input(" Biology : "))
print("\n")
y7=int(input(" Physical Education : "))
print("\n")
y8=int(input(" Alternative English : "))
a=list()
a.append(y1)
a.append(y2)
a.append(y3)
a.append(y4)
a.append(y5)
a.append(y6)
a.append(y7)
a.append(y8)
a.sort()
a.reverse()
c6=a[0]+a[1]+a[2]+a[3]+a[4]
c7="45000"
c8=45000
if(c6>=225):
print("Student is eligible for admission \n")
c7="45000"
c8=45000
print("Select Elective from below : ")
print("1.Robotics \n")
print("2.Intermediate Fluid Mechanics \n ")
print("3.Computer Aided Design and Manufacturing \n")
print("4.BACK TO PREVIOUS OPTIONS\n")
print("0. EXIT \n")
e=int(input(" Enter your choice : "))
if(e==1):
branch=" Mechanical Engineering "
e1="Robotics"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==2):
branch=" Mechanical Engineering "
e1="Intermediate Fluid Mechanics"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==3):
branch=" Mechanical Engineering "
e1="Computer Aided Design and Manufacturing"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==4):
break
elif(e==0):
exit(0)
else:
print("\n !!! Enter correct option !!! \n")
break
else:
print(" Sorry !! Student is not eligible for Admission \n")
break
elif(c==3):
print(" Enter marks of Higher Secondary \n")
y1=int(input(" English : "))
print("\n")
y2=int(input(" Mathematics : "))
print("\n")
y3=int(input(" Physics : "))
print("\n")
y4=int(input(" Chemistry : "))
print("\n")
y5=int(input(" Computer Science : "))
print("\n")
y6=int(input(" Biology : "))
print("\n")
y7=int(input(" Physical Education : "))
print("\n")
y8=int(input(" Alternative English : "))
a=list()
a.append(y1)
a.append(y2)
a.append(y3)
a.append(y4)
a.append(y5)
a.append(y6)
a.append(y7)
a.append(y8)
a.sort()
a.reverse()
c6=a[0]+a[1]+a[2]+a[3]+a[4]
c7="54000"
if(c6>=275):
print("Student is eligible for admission \n")
c7="54000"
c8=54000
print("Select Elective from below : ")
print("1.Robotics \n")
print("2.Intermediate Fluid Mechanics \n ")
print("3.Computer Aided Design and Manufacturing \n")
print("4.BACK TO PREVIOUS OPTIONS\n")
print("0. EXIT \n")
e=int(input("Enter your choice : "))
if(e==1):
branch=" Mechanical Engineering "
e1="Robotics"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==2):
branch=" Mechanical Engineering "
e1="Intermediate Fluid Mechanics"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==3):
branch=" Mechanical Engineering "
e1="Computer Aided Design and Manufacturing"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==4):
break
elif(e==0):
exit(0)
else:
print(" \n !!! Enter correct option !!! \n ")
else:
print(" Sorry !! Student is not eligible for Admission \n")
break
elif(c==4):
print(" Enter marks of Higher Secondary \n")
y1=int(input(" English : "))
print("\n")
y2=int(input(" Mathematics : "))
print("\n")
y3=int(input(" Physics : "))
print("\n")
y4=int(input(" Chemistry : "))
print("\n")
y5=int(input(" Computer Science : "))
print("\n")
y6=int(input(" Biology : "))
print("\n")
y7=int(input(" Physical Education : "))
print("\n")
y8=int(input(" Alternative English : "))
a=list()
a.append(y1)
a.append(y2)
a.append(y3)
a.append(y4)
a.append(y5)
a.append(y6)
a.append(y7)
a.append(y8)
a.sort()
a.reverse()
c6=a[0]+a[1]+a[2]+a[3]+a[4]
c7="65000"
if(c6>=300):
print("Student is eligible for admission \n")
c7="65000"
c8=65000
print("Select Elective from below : ")
print("1.Robotics \n")
print("2.Intermediate Fluid Mechanics \n ")
print("3.Computer Aided Design and Manufacturing \n")
print("4.BACK TO PREVIOUS OPTIONS\n")
print("0. EXIT \n")
e=int(input(" Enter your choice : "))
if(e==1):
branch=" Mechanical Engineering "
e1="Robotics"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==2):
branch=" Mechanical Engineering "
e1="Intermediate Fluid Mechanics"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==3):
branch=" Mechanical Engineering "
e1="Computer Aided Design and Manufacturing"
obj1.registration()
f+=1
print(f," seats are filled up out of ",seats)
print("\n")
elif(e==4):
break
elif(e==0):
exit(0)
else:
print(" \n!!! Enter correct option !!! \n")
break
else:
print(" Sorry !! Student is not eligible for Admission \n")
break
elif(c==5):
break
elif(c==0):
exit(0)
else:
print("\n !!! Enter correct option !!!\n ")
break
else:
print(" !!Sorry no Seats are available !!")
elif(s4==4):
break
elif(s4==0):
exit(0)
else:
print(" \n !!! Enter correct option !!!\n")
break
elif(s3==2):
obj1.search()
elif(s3==3):
obj2.eligibility()
elif(s3==4):
obj1.search_bill()
elif(s3==0):
break
print("\n")
print("\n")
print("------------------------------------------------------------------------THANK YOU----------------------------------------------------------------------------")
print("\n")
print("-------------------------------------------------Copyright © 2019 CIT,KOKRAJHAR INDIA, Inc. All rights reserved----------------------------------------------------------")
print("\n")
print("\n")
print("\n")
print("\n")
| 60.992874
| 259
| 0.1792
| 4,663
| 102,712
| 3.945958
| 0.046322
| 0.062935
| 0.033261
| 0.035217
| 0.928967
| 0.897446
| 0.881467
| 0.877446
| 0.872554
| 0.86962
| 0
| 0.038658
| 0.737071
| 102,712
| 1,683
| 260
| 61.029115
| 0.642635
| 0
| 0
| 0.913807
| 0
| 0
| 0.147493
| 0.026229
| 0
| 0
| 0
| 0
| 0
| 1
| 0.003192
| false
| 0
| 0.000798
| 0
| 0.005587
| 0.323224
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
2094bda4b85ab52673bdd68dad8cf2b7efad4452
| 133
|
py
|
Python
|
src/utils/paths.py
|
OpenXAIProject/Variational_Dropouts
|
61ac484edb2101115bd5f5a1a0053014e6669183
|
[
"Apache-2.0"
] | 13
|
2018-11-30T01:16:32.000Z
|
2021-09-10T21:16:22.000Z
|
src/utils/paths.py
|
OpenXAIProject/Variational_Dropouts
|
61ac484edb2101115bd5f5a1a0053014e6669183
|
[
"Apache-2.0"
] | null | null | null |
src/utils/paths.py
|
OpenXAIProject/Variational_Dropouts
|
61ac484edb2101115bd5f5a1a0053014e6669183
|
[
"Apache-2.0"
] | 6
|
2018-11-30T01:44:20.000Z
|
2020-04-26T19:51:05.000Z
|
MNIST_PATH = "/home/john/datasets/mnist"
CIFAR10_PATH = "/home/john/datasets/cifar10"
CIFAR100_PATH = "/home/john/datasets/cifar100"
| 33.25
| 46
| 0.774436
| 18
| 133
| 5.555556
| 0.388889
| 0.24
| 0.36
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.080645
| 0.067669
| 133
| 3
| 47
| 44.333333
| 0.725806
| 0
| 0
| 0
| 0
| 0
| 0.601504
| 0.601504
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
45d8907139c1384c8747f01b635704a11877910a
| 2,146
|
py
|
Python
|
tests/asp/AllAnswerSets/aggregates/count.example9.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 19
|
2015-12-03T08:53:45.000Z
|
2022-03-31T02:09:43.000Z
|
tests/asp/AllAnswerSets/aggregates/count.example9.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 80
|
2017-11-25T07:57:32.000Z
|
2018-06-10T19:03:30.000Z
|
tests/asp/AllAnswerSets/aggregates/count.example9.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 6
|
2015-01-15T07:51:48.000Z
|
2020-06-18T14:47:48.000Z
|
input = """
1 2 3 2 3 4 5
1 3 3 2 2 4 5
1 4 3 2 2 3 5
1 5 0 0
1 6 3 2 7 8 9
1 7 3 2 6 8 9
1 8 3 2 6 7 9
1 9 0 0
1 10 3 2 11 12 13
1 11 3 2 10 12 13
1 12 3 2 10 11 13
1 13 0 0
1 14 1 0 4
1 15 1 0 3
1 16 1 0 2
1 17 1 0 8
1 18 1 0 7
1 19 1 0 6
2 21 3 0 1 16 15 14
1 20 1 0 21
2 23 3 0 1 16 15 14
2 24 3 0 3 16 15 14
1 22 2 1 24 23
2 26 3 0 1 19 18 17
1 25 1 0 26
2 28 3 0 1 12 11 10
1 27 1 0 28
0
25 okay2
14 a(3)
15 a(2)
16 a(1)
20 okay
27 okay3
10 e(1)
11 e(2)
12 e(3)
6 d(1)
7 d(2)
8 d(3)
17 b(3)
18 b(2)
19 b(1)
22 okay1
2 c(1)
3 c(2)
4 c(3)
0
B+
0
B-
1
0
1
"""
output = """
{c(1), d(1), e(1), a(1), b(1), okay, okay1, okay2, okay3}
{c(1), d(1), e(2), a(1), b(1), okay, okay1, okay2, okay3}
{c(1), d(1), e(3), a(1), b(1), okay, okay1, okay2, okay3}
{c(1), d(2), e(1), a(1), b(2), okay, okay1, okay2, okay3}
{c(1), d(2), e(2), a(1), b(2), okay, okay1, okay2, okay3}
{c(1), d(2), e(3), a(1), b(2), okay, okay1, okay2, okay3}
{c(1), d(3), e(1), a(1), b(3), okay, okay1, okay2, okay3}
{c(1), d(3), e(2), a(1), b(3), okay, okay1, okay2, okay3}
{c(1), d(3), e(3), a(1), b(3), okay, okay1, okay2, okay3}
{c(2), d(1), e(1), a(2), b(1), okay, okay1, okay2, okay3}
{c(2), d(1), e(2), a(2), b(1), okay, okay1, okay2, okay3}
{c(2), d(1), e(3), a(2), b(1), okay, okay1, okay2, okay3}
{c(2), d(2), e(1), a(2), b(2), okay, okay1, okay2, okay3}
{c(2), d(2), e(2), a(2), b(2), okay, okay1, okay2, okay3}
{c(2), d(2), e(3), a(2), b(2), okay, okay1, okay2, okay3}
{c(2), d(3), e(1), a(2), b(3), okay, okay1, okay2, okay3}
{c(2), d(3), e(2), a(2), b(3), okay, okay1, okay2, okay3}
{c(2), d(3), e(3), a(2), b(3), okay, okay1, okay2, okay3}
{c(3), d(1), e(1), a(3), b(1), okay, okay1, okay2, okay3}
{c(3), d(1), e(2), a(3), b(1), okay, okay1, okay2, okay3}
{c(3), d(1), e(3), a(3), b(1), okay, okay1, okay2, okay3}
{c(3), d(2), e(1), a(3), b(2), okay, okay1, okay2, okay3}
{c(3), d(2), e(2), a(3), b(2), okay, okay1, okay2, okay3}
{c(3), d(2), e(3), a(3), b(2), okay, okay1, okay2, okay3}
{c(3), d(3), e(1), a(3), b(3), okay, okay1, okay2, okay3}
{c(3), d(3), e(2), a(3), b(3), okay, okay1, okay2, okay3}
{c(3), d(3), e(3), a(3), b(3), okay, okay1, okay2, okay3}
"""
| 24.953488
| 57
| 0.511184
| 607
| 2,146
| 1.807249
| 0.065898
| 0.221513
| 0.344576
| 0.467639
| 0.730173
| 0.701003
| 0.684594
| 0.684594
| 0.663628
| 0.601641
| 0
| 0.28877
| 0.21575
| 2,146
| 85
| 58
| 25.247059
| 0.363042
| 0
| 0
| 0.023529
| 0
| 0.317647
| 0.985555
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
b3042207f6a1b237db2792be700fe67f8a868c36
| 150
|
py
|
Python
|
src/survivor_scraping/season/season_load.py
|
SeanAmmirati/survivor_processing
|
b4a032ee9e9ad55cf0b15c46ee9a8028be4bafe8
|
[
"MIT"
] | null | null | null |
src/survivor_scraping/season/season_load.py
|
SeanAmmirati/survivor_processing
|
b4a032ee9e9ad55cf0b15c46ee9a8028be4bafe8
|
[
"MIT"
] | null | null | null |
src/survivor_scraping/season/season_load.py
|
SeanAmmirati/survivor_processing
|
b4a032ee9e9ad55cf0b15c46ee9a8028be4bafe8
|
[
"MIT"
] | null | null | null |
from ..helpers.load_helpers import upsert
def load_seasons(transformed_seasons, eng):
upsert(transformed_seasons, eng, 'season', ['season_id'])
| 25
| 61
| 0.766667
| 19
| 150
| 5.789474
| 0.578947
| 0.327273
| 0.381818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113333
| 150
| 5
| 62
| 30
| 0.827068
| 0
| 0
| 0
| 0
| 0
| 0.1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b30a989f69be5139d101f877b771780c49549dab
| 2,544
|
py
|
Python
|
Compile-pyc-main/compile-v7.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2
|
2021-11-17T03:35:03.000Z
|
2021-12-08T06:00:31.000Z
|
Compile-pyc-main/compile-v7.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | null | null | null |
Compile-pyc-main/compile-v7.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2
|
2021-11-05T18:07:48.000Z
|
2022-02-24T21:25:07.000Z
|
# ENCYPT BY ALDI
# YOUTUBE MR.1557
exec((lambda _ : (__import__('base64').b85decode(_)))(b'BOxz!b8;_YX>KoYd30!RZVDqHEh;S_V{c?>Zf80mb#!JeI3O)5Eed9GZ*3rOd0%61ZE$I9Wguy7aBp&SAY*TBaA|C13TbU{Z*p`XZ*wdlb9r+tAarSMWeN%-EiElAEiElAEiElAEiElAEh7pFBOq#JZfSEMcVTjFVIVOyARr?O3TS5_Js=`nFf%h-Fgr6cZ6Y8e3TSj7Js=`nFf%h-F*`FdZ6Y8e3S(y=Js=`nFf%h-Fgr6gZ6Y8e3S)F2Js=`nFf%h-F*`FhZ6Y8e3Uy~7Js=`nFf%h-Fgr6fZ6Y8e3UzcKJs=`nFf%h-F*`FgZ6Y8e3UFs2Js=`nFf%h-Fgr6hZ6Y8e3UG8FJs=`nFf%h-F*`FiZ6Y8e3Sws<Js=`nFf%h-Fgr6eZ6Y8e3Sx91Js=`nFf%h-F*`FfZ6Y8e3T<Z~Js=`nFf%h-Fgr6bZ6Y8e3T<>CJs=`nFf%h-F*`FcZ6Y8e3Tt#AJs=`nFf%h-F*`FeZ6Y8e3TtN|Js=`nFf%h-Fgr6dZ6Y8e3L`BoEiElAEiElAEiElAEiEl03S?zwAarSKC@DG$33O?6X=@-oAX_IOCoCr}ASWy*E-oM^EGI55E+8i?CoV1^CoCr}ASWy*ASWy*E+8i?CoV1^CoCr}E-oM^T?z?iZ*m}SAZczObZK;HYdQ)bARr(hARr(hAaHVNZge0hXml$gTyk3?D{XWuA|WCxXml$gT_P)RbSoktQ)OgfZf785X&`WNZ*ygHA}enxEIV^~b1rjqWN&qJE@o_Xb7&|jJ9KGnWiE4UWo2+EF)0dUWo964VQFqCDLM)uARr)bb1rjvb97~GC?aEQWnpq6DGDGUARusZX>N2NA|fJOZY6UHAmWvj;+2r%m6zg`l;V|;;+2=;m6YO@rs9>C;+2%*m5}0<m*SPC;+3W#AmWvj;+2r%m6sqO;+2%*m5}0<m*SPCAmWv#;+2%*m5}0<mkJ=_m60GI;+2sg;+2u&m676=k>Zt+;+3G{m5}0<o#K^|;+2sgAmWvg;+3NyARywEpyHK~;+37^m73y}rs9>I;+2sgAPOMjm73y}km8k{;+2}>m5}0<o#K_LAmWv&;+3f&AmWv&;+3i5m5}0<o#K_6;+2r%m7O3U;+3f&ARywEsUYH&n&Op^;+35WC37HKC37n!b6p@Mb3t`<Xm4^LC37G;ASH7_Y-DL5Vqs%wbZKF7AaZGDVQC5_b0AwKb1NltT_7cMLvL+xX>4U6C389;C3A3jbZBpGASH7#ASH7qASH8fd30!RZXhLdGYTLeARr<lA|)tjbS!9eENgTuXml)abS!OWEO2xzXml)VbS!9eEO2xzZD%ZSbS!OjENgTuaC9tfbSVlTARr)SX>4U6Js@&ncVB64aCLMjA|-Pm;+2%*m5?QKTOcKKO<{9&YinU{AWmUzVIU=QW@&6?E^v7uC39U|ZY6Ud;+2}>m5?QKTOcKKNp5g;bRZ>jT_tlqC37GmB`9@tENFBraC9tdbS!9eEOm4&Xml)abS!9eENgTuXmlwl3LqdLAaHVNZge0bA_^cNARu&UYbYrSARr(haB^vGbRZ%k3LqdLAaHVNZge0bA_^cNARu&UZDlTVY-MF|C^9JuARr(haB^vGbRZ%nb6X{IAtiHNASH8CWn^J)XCO^wZf9j<Wosa2X>4U6C39OPb08&iASH8MAR;9wXml)XbS!9eEO2xzXml)XbS!3RY-KEHbSVlTARr)gX>DaLb8Ka0a40h=3LqdLAaHVNZge0bA_^cNARusZX>N2NA|-QMC37Jqb6p@Mb5mtxVQyz2V{dJ6X>4U6W@&6?ASH8KC37Gpb08&iT_7SQC}?ynZFDSXbS!XmENFBrZFDSVX>4UIXmlwGARr(hV{dJ6X>4UEW@&6?DGDGUARu&UZDlTVY-MF|C^IPvARr(haB^vGbRZ%k3LqdLAaHVNZge0bC39OPbK;znC39URb09)
xa%f?5X>1@vZ*6dCY-J!zWgup0Y-J!Nb6X{IASH8SASH8MA|)tjbS!IhENFBraC9tabS!IhEM{qJWh`iPDGDGUARusZX>N2NA|eWHVQFqCDGC'))
# open source
# mentahan compile
# kalian salin ajh buat ricod wkwkwk
#!/usr/bin/python2
# print "\nMasukkan File Yang Mau Di EnPyc"
# py_file = raw_input("\033[33m[File.Py]:> \033[0m")
# from py_compile import compile
# compile(py_file)
# print
# print "\033[32mSukses file Di Di Simpan:\033[0m {}c".format(py_file)
# exit()
# Semoga yang ricod besok nya jadi jago Amin
# subscibe MR.1557
| 115.636364
| 2,112
| 0.786164
| 405
| 2,544
| 4.869136
| 0.681481
| 0.028398
| 0.017748
| 0.005071
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125152
| 0.029481
| 2,544
| 21
| 2,113
| 121.142857
| 0.673552
| 0.154874
| 0
| 0
| 0
| 1
| 0.965307
| 0.962494
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
b30b70c07dd09d9f9b2f71771c8428b31e140523
| 54
|
py
|
Python
|
src/metafeatures/__init__.py
|
mateBakos/autoxgb
|
c3b3888492b12629a5b196afe97ae667578362ae
|
[
"BSD-3-Clause"
] | null | null | null |
src/metafeatures/__init__.py
|
mateBakos/autoxgb
|
c3b3888492b12629a5b196afe97ae667578362ae
|
[
"BSD-3-Clause"
] | null | null | null |
src/metafeatures/__init__.py
|
mateBakos/autoxgb
|
c3b3888492b12629a5b196afe97ae667578362ae
|
[
"BSD-3-Clause"
] | null | null | null |
from .test_function_square import test_function_square
| 54
| 54
| 0.925926
| 8
| 54
| 5.75
| 0.625
| 0.521739
| 0.782609
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 54
| 1
| 54
| 54
| 0.901961
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b35598384808f6de5ef836e79811877dde5ba129
| 31
|
py
|
Python
|
fa04bb71-5a49-479b-9683-687a3c24908a/456d3bba-1b53-4928-9447-f33107e746e1.py
|
copyleftdev/top_contrib_lol
|
362a5849b04dc2b52a9b0da138d4e9a32eade207
|
[
"Apache-2.0"
] | null | null | null |
fa04bb71-5a49-479b-9683-687a3c24908a/456d3bba-1b53-4928-9447-f33107e746e1.py
|
copyleftdev/top_contrib_lol
|
362a5849b04dc2b52a9b0da138d4e9a32eade207
|
[
"Apache-2.0"
] | null | null | null |
fa04bb71-5a49-479b-9683-687a3c24908a/456d3bba-1b53-4928-9447-f33107e746e1.py
|
copyleftdev/top_contrib_lol
|
362a5849b04dc2b52a9b0da138d4e9a32eade207
|
[
"Apache-2.0"
] | null | null | null |
#N canvas 1029 457 450 300 10;
| 15.5
| 30
| 0.709677
| 7
| 31
| 3.142857
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.625
| 0.225806
| 31
| 1
| 31
| 31
| 0.291667
| 0.935484
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2fdc4f06cda900905eeb84bba9c0e7c37d042623
| 12,268
|
py
|
Python
|
testing/mesh_test.py
|
mateusazis/pyctm
|
7851d3279ea2ce790e24ea996cf8ce3637823ee7
|
[
"MIT"
] | null | null | null |
testing/mesh_test.py
|
mateusazis/pyctm
|
7851d3279ea2ce790e24ea996cf8ce3637823ee7
|
[
"MIT"
] | null | null | null |
testing/mesh_test.py
|
mateusazis/pyctm
|
7851d3279ea2ce790e24ea996cf8ce3637823ee7
|
[
"MIT"
] | null | null | null |
import io
import unittest
import struct
import sys
from pyctm import mesh
from pyctm import uv_map
from pyctm import attribute_map
from pyctm import mesh_writer
class BufferReader(object):
def __init__(self, buffer):
self.buffer = buffer
def seek(self, position):
self.buffer.seek(position)
def read_int(self):
result = struct.unpack('i', self.buffer.read(4))[0]
return result
def read_float(self):
result = struct.unpack('f', self.buffer.read(4))[0]
return result
def read_string(self, size):
result = self.buffer.read(size).decode('ASCII')
return result
VERTICES = [
0.0, 0.1, 0.2,
1.1, 1.2, 1.3,
2.2, 2.3, 2.4,
3.3, 3.4, 3.5,
]
INDEXES = [0, 1, 2, 1, 2, 3]
NORMALS = [
1.0, 0.0, 0.0,
0.0, 1.0, 0.0,
0.0, 0.0, 1.0,
0.4, -0.7, 0.5,
]
COORDS_UV_MAP_0 = [
0.0, 0.0,
0.1, 0.1,
0.2, 0.2,
0.3, 0.3,
]
COORDS_UV_MAP_1 = [
0.1, 0.5,
0.3, 0.2,
-0.7, 0.3,
0.4, 0.3,
]
UV_MAPS = [
uv_map.UvMap(coords=COORDS_UV_MAP_0, name='map_0', texture_file_name='tex0.png'),
uv_map.UvMap(coords=COORDS_UV_MAP_1, name='another_map', texture_file_name='t1.png'),
]
ATTRIBUTES_0 = [
1, 2, 3, 4,
8, 7, 6, 5,
10, 15, 20, 25,
400, 200, 100, 300,
]
ATTRIBUTES_1 = [
15, 14, 13, 12,
11, 10, 9, 8,
7, 6, 5, 4,
3, 2, 1, 0,
]
ATTRIBUTES_2 = [
0, 4, 8, 12,
1, 5, 9, 13,
2, 6, 10, 14,
3, 7, 11, 15,
]
ATTRIBUTE_MAPS = [
attribute_map.AttributeMap(values=ATTRIBUTES_0, name='attr0'),
attribute_map.AttributeMap(values=ATTRIBUTES_1, name='a1'),
attribute_map.AttributeMap(values=ATTRIBUTES_2, name='attributes2'),
]
class MeshTest(unittest.TestCase):
def testWritesHeader(self):
test_mesh = mesh.Mesh(VERTICES, INDEXES, uv_maps=UV_MAPS, attribute_maps=ATTRIBUTE_MAPS)
writer = mesh_writer.MeshWriter(mesh_writer.CompressionMethod.RAW)
with io.BytesIO() as out:
writer.write(test_mesh, out)
out.seek(0)
reader = BufferReader(out)
self.assertEqual(reader.read_int(), 0x4d54434f) # 'OCTM' in ASCII
self.assertEqual(reader.read_int(), 5) # version
self.assertEqual(reader.read_int(), 0x00574152) # compression method
self.assertEqual(reader.read_int(), 4) # vertex count
self.assertEqual(reader.read_int(), 2) # triangle count
self.assertEqual(reader.read_int(), 2) # UV map count
self.assertEqual(reader.read_int(), 3) # attribute map count
self.assertEqual(reader.read_int(), 0) # boolean flags
self.assertEqual(reader.read_int(), 0) # comment length
def testWritesHeaderMissingOptionalFields(self):
test_mesh = mesh.Mesh(VERTICES, INDEXES)
writer = mesh_writer.MeshWriter(mesh_writer.CompressionMethod.RAW)
with io.BytesIO() as out:
writer.write(test_mesh, out)
reader = BufferReader(out)
out.seek(0)
self.assertEqual(reader.read_int(), 0x4d54434f) # 'OCTM' in ASCII
self.assertEqual(reader.read_int(), 5) # version
self.assertEqual(reader.read_int(), 0x00574152) # compression method
self.assertEqual(reader.read_int(), 4) # vertex count
self.assertEqual(reader.read_int(), 2) # triangle count
self.assertEqual(reader.read_int(), 0) # UV map count
self.assertEqual(reader.read_int(), 0) # attribute map count
self.assertEqual(reader.read_int(), 0) # boolean flags
self.assertEqual(reader.read_int(), 0) # comment length
def testWritesBody(self):
test_mesh = mesh.Mesh(VERTICES, INDEXES, normals=NORMALS, uv_maps=UV_MAPS, attribute_maps=ATTRIBUTE_MAPS)
writer = mesh_writer.MeshWriter(mesh_writer.CompressionMethod.RAW)
body_index = 36 + 0 # 36 + comment length
with io.BytesIO() as out:
writer.write(test_mesh, out)
reader = BufferReader(out)
reader.seek(body_index)
# Indexes
self.assertEqual(reader.read_int(), 0x58444e49) # 'INDX'
self.assertEqual(reader.read_int(), INDEXES[0])
self.assertEqual(reader.read_int(), INDEXES[1])
self.assertEqual(reader.read_int(), INDEXES[2])
self.assertEqual(reader.read_int(), INDEXES[3])
self.assertEqual(reader.read_int(), INDEXES[4])
self.assertEqual(reader.read_int(), INDEXES[5])
# Vertices
self.assertEqual(reader.read_int(), 0x54524556) # 'VERT'
self.assertAlmostEqual(reader.read_float(), 0.0, places=1)
self.assertAlmostEqual(reader.read_float(), 0.1, places=1)
self.assertAlmostEqual(reader.read_float(), 0.2, places=1)
self.assertAlmostEqual(reader.read_float(), 1.1, places=1)
self.assertAlmostEqual(reader.read_float(), 1.2, places=1)
self.assertAlmostEqual(reader.read_float(), 1.3, places=1)
self.assertAlmostEqual(reader.read_float(), 2.2, places=1)
self.assertAlmostEqual(reader.read_float(), 2.3, places=1)
self.assertAlmostEqual(reader.read_float(), 2.4, places=1)
self.assertAlmostEqual(reader.read_float(), 3.3, places=1)
self.assertAlmostEqual(reader.read_float(), 3.4, places=1)
self.assertAlmostEqual(reader.read_float(), 3.5, places=1)
# Normals
self.assertEqual(reader.read_int(), 0x4d524f4e) # 'NORM'
self.assertAlmostEqual(reader.read_float(), NORMALS[0], places=1)
self.assertAlmostEqual(reader.read_float(), NORMALS[1], places=1)
self.assertAlmostEqual(reader.read_float(), NORMALS[2], places=1)
self.assertAlmostEqual(reader.read_float(), NORMALS[3], places=1)
self.assertAlmostEqual(reader.read_float(), NORMALS[4], places=1)
self.assertAlmostEqual(reader.read_float(), NORMALS[5], places=1)
self.assertAlmostEqual(reader.read_float(), NORMALS[6], places=1)
self.assertAlmostEqual(reader.read_float(), NORMALS[7], places=1)
self.assertAlmostEqual(reader.read_float(), NORMALS[8], places=1)
self.assertAlmostEqual(reader.read_float(), NORMALS[9], places=1)
self.assertAlmostEqual(reader.read_float(), NORMALS[10], places=1)
self.assertAlmostEqual(reader.read_float(), NORMALS[11], places=1)
# Textures
self.assertEqual(reader.read_int(), 0x43584554) # 'TEXC'
self.assertEqual(reader.read_int(), 5) # length of 'map_0'
self.assertEqual(reader.read_string(5), 'map_0')
self.assertEqual(reader.read_int(), 8) # length of 'tex0.png'
self.assertEqual(reader.read_string(8), 'tex0.png')
self.assertAlmostEqual(reader.read_float(), COORDS_UV_MAP_0[0], places=1)
self.assertAlmostEqual(reader.read_float(), COORDS_UV_MAP_0[1], places=1)
self.assertAlmostEqual(reader.read_float(), COORDS_UV_MAP_0[2], places=1)
self.assertAlmostEqual(reader.read_float(), COORDS_UV_MAP_0[3], places=1)
self.assertAlmostEqual(reader.read_float(), COORDS_UV_MAP_0[4], places=1)
self.assertAlmostEqual(reader.read_float(), COORDS_UV_MAP_0[5], places=1)
self.assertAlmostEqual(reader.read_float(), COORDS_UV_MAP_0[6], places=1)
self.assertAlmostEqual(reader.read_float(), COORDS_UV_MAP_0[7], places=1)
self.assertEqual(reader.read_int(), 0x43584554) # 'TEXC'
self.assertEqual(reader.read_int(), 11) # length of 'another_map'
self.assertEqual(reader.read_string(11), 'another_map')
self.assertEqual(reader.read_int(), 6) # length of 't1.png'
self.assertEqual(reader.read_string(6), 't1.png')
self.assertAlmostEqual(reader.read_float(), COORDS_UV_MAP_1[0], places=1)
self.assertAlmostEqual(reader.read_float(), COORDS_UV_MAP_1[1], places=1)
self.assertAlmostEqual(reader.read_float(), COORDS_UV_MAP_1[2], places=1)
self.assertAlmostEqual(reader.read_float(), COORDS_UV_MAP_1[3], places=1)
self.assertAlmostEqual(reader.read_float(), COORDS_UV_MAP_1[4], places=1)
self.assertAlmostEqual(reader.read_float(), COORDS_UV_MAP_1[5], places=1)
self.assertAlmostEqual(reader.read_float(), COORDS_UV_MAP_1[6], places=1)
self.assertAlmostEqual(reader.read_float(), COORDS_UV_MAP_1[7], places=1)
self.assertEqual(reader.read_int(), 0x52545441) # 'ATTR'
self.assertEqual(reader.read_int(), 5) # length of 'attr0'
self.assertEqual(reader.read_string(5), 'attr0')
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_0[0], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_0[1], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_0[2], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_0[3], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_0[4], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_0[5], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_0[6], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_0[7], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_0[8], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_0[9], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_0[10], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_0[11], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_0[12], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_0[13], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_0[14], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_0[15], places=1)
self.assertEqual(reader.read_int(), 0x52545441) # 'ATTR'
self.assertEqual(reader.read_int(), 2) # length of 'a1'
self.assertEqual(reader.read_string(2), 'a1')
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_1[0], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_1[1], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_1[2], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_1[3], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_1[4], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_1[5], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_1[6], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_1[7], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_1[8], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_1[9], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_1[10], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_1[11], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_1[12], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_1[13], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_1[14], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_1[15], places=1)
self.assertEqual(reader.read_int(), 0x52545441) # 'ATTR'
self.assertEqual(reader.read_int(), 11) # length of 'attributes2'
self.assertEqual(reader.read_string(11), 'attributes2')
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_2[0], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_2[1], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_2[2], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_2[3], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_2[4], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_2[5], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_2[6], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_2[7], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_2[8], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_2[9], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_2[10], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_2[11], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_2[12], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_2[13], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_2[14], places=1)
self.assertAlmostEqual(reader.read_float(), ATTRIBUTES_2[15], places=1)
if __name__ == '__main__':
unittest.main()
| 46.29434
| 109
| 0.704027
| 1,692
| 12,268
| 4.92435
| 0.072695
| 0.160826
| 0.285166
| 0.327412
| 0.8741
| 0.842055
| 0.773404
| 0.737398
| 0.648704
| 0.274844
| 0
| 0.058315
| 0.151532
| 12,268
| 264
| 110
| 46.469697
| 0.742146
| 0.041898
| 0
| 0.188841
| 0
| 0
| 0.009475
| 0
| 0
| 0
| 0.010243
| 0
| 0.575107
| 1
| 0.034335
| false
| 0
| 0.034335
| 0
| 0.090129
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2febf0abbfad61a15d652f506894d9041b24bc49
| 48,241
|
py
|
Python
|
tests/panoramic/cli/husky/test/mocks/core/taxonomy.py
|
kubamahnert/panoramic-cli
|
036f45a05d39f5762088ce23dbe367b938192f79
|
[
"MIT"
] | 5
|
2020-11-13T17:26:59.000Z
|
2021-03-19T15:11:26.000Z
|
tests/panoramic/cli/husky/test/mocks/core/taxonomy.py
|
kubamahnert/panoramic-cli
|
036f45a05d39f5762088ce23dbe367b938192f79
|
[
"MIT"
] | 5
|
2020-10-28T10:22:35.000Z
|
2021-01-27T17:33:58.000Z
|
tests/panoramic/cli/husky/test/mocks/core/taxonomy.py
|
kubamahnert/panoramic-cli
|
036f45a05d39f5762088ce23dbe367b938192f79
|
[
"MIT"
] | 3
|
2021-01-26T07:58:03.000Z
|
2021-03-11T13:28:34.000Z
|
from collections import OrderedDict
from typing import Dict, Iterable, List, Optional, Tuple
from panoramic.cli.husky.core.taxonomy.enums import AggregationType
from panoramic.cli.husky.core.taxonomy.exceptions import TaxonsNotFound
from panoramic.cli.husky.core.taxonomy.getters import get_taxon_tel_metadata
from panoramic.cli.husky.core.taxonomy.models import Taxon
from panoramic.cli.husky.service.utils.taxon_slug_expression import (
TaxonExpressionStr,
TaxonMap,
TaxonSlugExpression,
)
from tests.panoramic.cli.husky.test.mocks.husky_model import MOCK_DATA_SOURCE_NAME
taxon_mocks = [
Taxon.create(
slug='enhanced_cpm',
display_name='Enhanced Spend for link clicks objective',
taxon_description='Total amount of money invested into marketing',
taxon_group='Billing',
taxon_type='metric',
calculation='1000 * enhanced_spend / generic_impressions',
validation_type='money',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
),
Taxon.create(
slug='enhanced_spend',
display_name='Enhanced Spend for link clicks objective',
taxon_description='Total amount of money invested into marketing',
taxon_group='Billing',
taxon_type='metric',
calculation='iff(objective == "LINK_CLICKS",generic_spend*1.5,generic_spend)',
validation_type='money',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
),
Taxon.create(
slug='ad_id',
display_name='Ad Id',
taxon_description='Id of an Ad',
taxon_group='Entity',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym='Ad Id',
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='adgroup_id',
display_name='Adgroup Id',
taxon_description='Id of an Adgroup',
taxon_group='Entity',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym='Adgroup Id',
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='gender',
display_name='Gender',
taxon_description='Distribution of metrics by gender',
taxon_group='Audience',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym='Gender',
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='facebook_ads|gender',
display_name='Gender',
taxon_description='Distribution of metrics by gender',
taxon_group='Audience',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
data_source='facebook_ads',
display_state='visible',
display_settings=None,
acronym='Gender',
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='twitter|gender',
display_name='Gender',
taxon_description='Distribution of metrics by gender',
taxon_group='Audience',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
data_source='twitter',
display_state='visible',
display_settings=None,
acronym='Gender',
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='age_bucket',
display_name='Age Bucket',
taxon_description='Distribution of metrics by age',
taxon_group='Audience',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym='Age',
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='ad_name',
display_name='Ad Name',
taxon_description='Name of ad in platform',
taxon_group='Entity',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='impressions',
display_name='Impressions',
taxon_description='impressions',
taxon_group='Exposure',
taxon_type='metric',
validation_type='integer',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
aggregation={'type': 'sum', 'params': None},
),
Taxon.create(
slug='impressions_numeric',
display_name='Impressions_numeric',
taxon_description='Impressions_numeric',
taxon_group='Exposure',
taxon_type='metric',
validation_type='numeric',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
aggregation={'type': 'sum', 'params': None},
),
Taxon.create(
slug='spend',
display_name='Marketing Spend',
taxon_description='Total amount of money invested into marketing',
taxon_group='Billing',
taxon_type='metric',
validation_type='money',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym='Spend',
aggregation={'type': 'sum', 'params': None},
),
Taxon.create(
slug='avg_spend',
display_name='Average Marketing Spend',
taxon_description='Average amount of money invested into marketing',
taxon_group='Billing',
taxon_type='metric',
validation_type='money',
company_id='50',
aggregation={'type': 'avg'},
settings=None,
display_state='visible',
display_settings=None,
acronym='Spend',
),
Taxon.create(
slug='views',
display_name='Video Views',
taxon_description='number of views',
taxon_group='Billing',
taxon_type='metric',
validation_type='integer',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
aggregation={'type': 'sum', 'params': None},
),
Taxon.create(
slug='link_clicks',
display_name='Link Clicks',
taxon_description='number of link clicks',
taxon_group='Billing',
taxon_type='metric',
validation_type='integer',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
aggregation={'type': 'sum', 'params': None},
),
Taxon.create(
slug='conversions',
display_name='Conversions',
taxon_description='number of Conversions',
taxon_group='Billing',
taxon_type='metric',
validation_type='integer',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
aggregation={'type': 'sum', 'params': None},
),
Taxon.create(
slug='conversion_rate',
display_name='CVR',
taxon_description='Conversion rate',
taxon_group='Attribution',
taxon_type='metric',
calculation='conversions / link_clicks',
validation_type='numeric',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
),
Taxon.create(
slug='account_id',
display_name='Account ID',
taxon_description='Account ID',
taxon_group='Entity',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='company_id',
display_name='Company ID',
taxon_description='Company ID',
taxon_group='Entity',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='project_id',
display_name='Project ID',
taxon_description='Project ID',
taxon_group='Entity',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='campaign_id',
display_name='Campaign ID',
taxon_description='Campaign ID',
taxon_group='Entity',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='raw_data',
display_name='Account ID',
taxon_description='Account ID',
taxon_group='Exposure',
taxon_type='metric',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='computed_simple',
display_name='Simple computed taxon',
taxon_description='Simple computed taxon',
taxon_group='Exposure',
taxon_type='metric',
calculation='spend * impressions + 10',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
),
Taxon.create(
slug='computed_division',
display_name='Computed taxon with division',
taxon_description='Computex taxon with division',
taxon_group='Exposure',
taxon_type='metric',
calculation='spend/impressions * 20',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
),
Taxon.create(
slug='computed_const',
display_name='Computed taxon with constant',
taxon_description='Computed taxon with constant',
taxon_group='Exposure',
taxon_type='metric',
calculation='2',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
),
Taxon.create(
slug='objective',
display_name='Objective',
taxon_description='Distribution of metrics by objective',
taxon_group='Audience',
taxon_type='dimension',
validation_type='enum',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='cpm',
display_name='CPM',
taxon_description='CPM metric',
taxon_group='Exposure',
taxon_type='metric',
calculation='1000 * spend / impressions',
validation_type='numeric',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
),
Taxon.create(
slug='cpm_no_agg',
display_name='CPM no aggregation defined',
taxon_description='CPM metric no aggregation defined',
taxon_group='Exposure',
taxon_type='metric',
calculation='1000 * spend / impressions',
validation_type='numeric',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
),
Taxon.create(
slug='date',
display_name='Date',
taxon_description='date',
taxon_group='Time',
taxon_type='dimension',
validation_type='datetime',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
aggregation={'type': 'group_by', 'params': None},
acronym=None,
),
Taxon.create(
slug='week_of_year',
display_name='Week of year',
taxon_description='Time bucketed by week of the year',
taxon_group='Time',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='date_hour',
display_name='Hour',
taxon_description='Time with hour precision.',
taxon_group='Time',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='facebook_ads|spend',
data_source='facebook_ads',
display_name='Facebook Marketing Spend',
taxon_description='Total amount of money invested into marketing',
taxon_group='Billing',
taxon_type='metric',
validation_type='money',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
aggregation={'type': 'sum', 'params': None},
),
Taxon.create(
slug='facebook|ads_spend',
data_source='facebook',
display_name='Facebook Marketing Spend',
taxon_description='Total amount of money invested into marketing',
taxon_group='Billing',
taxon_type='metric',
validation_type='money',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
aggregation={'type': 'sum', 'params': None},
),
Taxon.create(
slug='adwords|spend',
data_source='adwords',
display_name='Adwords Marketing Spend',
taxon_description='Total amount of money invested into marketing',
taxon_group='Billing',
taxon_type='metric',
validation_type='money',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
aggregation={'type': 'sum', 'params': None},
),
Taxon.create(
slug='twitter|spend',
data_source='twitter',
display_name='Twitter Marketing Spend',
taxon_description='Total amount of money invested into marketing',
taxon_group='Billing',
taxon_type='metric',
validation_type='money',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
aggregation={'type': 'sum', 'params': None},
),
Taxon.create(
slug='facebook_ads|impressions',
data_source='facebook_ads',
display_name='Facebook Impressions',
taxon_description='Impr',
taxon_group='Billing',
taxon_type='metric',
validation_type='money',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
aggregation={'type': 'sum', 'params': None},
),
Taxon.create(
slug='adwords|impressions',
data_source='adwords',
display_name='Adwords Impressions',
taxon_description='Impr',
taxon_group='Billing',
taxon_type='metric',
validation_type='money',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
aggregation={'type': 'sum', 'params': None},
),
Taxon.create(
slug='twitter|impressions',
data_source='twitter',
display_name='Twitter Impressions',
taxon_description='Impr',
taxon_group='Billing',
taxon_type='metric',
validation_type='money',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
aggregation={'type': 'sum', 'params': None},
),
Taxon.create(
slug='generic_spend',
display_name='Generic Marketing Spend',
taxon_description='Total amount of money invested into marketing',
taxon_group='Billing',
taxon_type='metric',
calculation='?facebook_ads|spend + ?adwords|spend + ?twitter|spend',
validation_type='money',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
),
Taxon.create(
slug='generic_impressions',
display_name='Generic Impressions',
taxon_description='Total amount of money invested into marketing',
taxon_group='Billing',
taxon_type='metric',
calculation='?facebook_ads|impressions + ?adwords|impressions + ?twitter|impressions',
validation_type='money',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
),
Taxon.create(
slug='generic_spend2',
display_name='Generic Marketing Spend 2',
taxon_description='Total amount of money invested into marketing',
taxon_group='Billing',
taxon_type='metric',
calculation='generic_spend',
validation_type='money',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
),
Taxon.create(
slug='generic_cpm',
display_name='Generic Marketing Spend 2',
taxon_description='Total amount of money invested into marketing',
taxon_group='Billing',
taxon_type='metric',
calculation='1000 * generic_spend / generic_impressions',
validation_type='money',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
),
Taxon.create(
slug='generic_cpm_at_least_value_10',
display_name='Generic CPM',
taxon_description='formula is: iff(generic_cpm>10, generic_cpm, 10)',
taxon_group='Billing',
taxon_type='metric',
calculation='iff(generic_cpm>10, generic_cpm, 10)',
validation_type='numeric',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
),
Taxon.create(
slug='fb_tw_spend_all_required',
display_name='fb_tw_spend_all_required',
taxon_description='',
taxon_group='Billing',
taxon_type='metric',
calculation='facebook_ads|spend + twitter|spend',
validation_type='numeric',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
),
Taxon.create(
slug='fb_adwords_spend_all_required',
display_name='fb_adwords_spend_all_required',
taxon_description='',
taxon_group='Billing',
taxon_type='metric',
calculation='facebook_ads|spend + adwords|spend',
validation_type='numeric',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
),
Taxon.create(
slug='fb_tw_spend_all_optional',
display_name='fb_tw_spend_all_optional',
taxon_description='',
taxon_group='Billing',
taxon_type='metric',
calculation='?facebook_ads|spend + ?twitter|spend',
validation_type='numeric',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
),
Taxon.create(
slug='fb_tw_adwords_spend_all_required',
display_name='fb_tw_adwords_spend_all_required',
taxon_description='',
taxon_group='Billing',
taxon_type='metric',
calculation='facebook_ads|spend + twitter|spend + adwords|spend',
validation_type='numeric',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
),
Taxon.create(
slug='fb_tw_adwords_spend_all_optional',
display_name='fb_tw_adwords_spend_all_optional',
taxon_description='',
taxon_group='Billing',
taxon_type='metric',
calculation='?facebook_ads|spend + ?twitter|spend + ?adwords|spend',
validation_type='numeric',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
),
Taxon.create(
slug='fb_tw_adwords_impressions_all_optional',
display_name='fb_tw_adwords_impressions_all_optional',
taxon_description='',
taxon_group='Billing',
taxon_type='metric',
calculation='?facebook_ads|impressions + ?twitter|impressions + ?adwords|impressions',
validation_type='numeric',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
),
Taxon.create(
slug='facebook_ads|ad_id',
display_name='facebook_ads|ad_id',
data_source='facebook_ads',
taxon_description='',
taxon_group='Billing',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='fb_ad_id',
display_name='fb_ad_id',
data_source='facebook_ads',
taxon_description='',
taxon_group='Billing',
taxon_type='dimension',
calculation='facebook_ads|ad_id',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
),
Taxon.create(
slug='facebook_ads|ad_name',
display_name='facebook_ads|ad_name',
data_source='facebook_ads',
taxon_description='',
taxon_group='Billing',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='twitter|ad_id',
display_name='twitter|ad_id',
data_source='twitter',
taxon_description='',
taxon_group='Billing',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='tw_ad_id',
display_name='tw_ad_id',
data_source='twitter',
taxon_description='',
taxon_group='Billing',
taxon_type='dimension',
calculation='twitter|ad_id',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
),
Taxon.create(
slug='twitter|ad_name',
display_name='twitter|ad_name',
data_source='twitter',
taxon_description='',
taxon_group='Billing',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='fb_tw_merged_ad_id',
display_name='fb_tw_merged_ad_id',
taxon_description='',
taxon_group='Billing',
taxon_type='dimension',
calculation='merge(?facebook_ads|ad_id, ?twitter|ad_id)',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
),
Taxon.create(
slug='simple_concat_dimension',
display_name='simple_concat_dimension',
taxon_description='',
taxon_group='Billing',
taxon_type='dimension',
calculation='concat("a", ad_id)',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
),
Taxon.create(
data_source='twitter',
slug='twitter|objective',
display_name='twitter|objective',
taxon_description='Distribution of metrics by objective',
taxon_group='Audience',
taxon_type='dimension',
validation_type='enum',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
data_source='facebook_ads',
slug='facebook_ads|objective',
display_name='facebook_ads|objective',
taxon_description='Distribution of metrics by objective',
taxon_group='Audience',
taxon_type='dimension',
validation_type='enum',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
aggregation={'type': AggregationType.group_by.value},
),
Taxon.create(
slug='fb_tw_merged_objective',
display_name='fb_tw_merged_objective',
taxon_description='',
taxon_group='Billing',
taxon_type='dimension',
calculation='merge(?facebook_ads|objective, ?twitter|objective)',
validation_type='numeric',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
),
Taxon.create(
slug='fb_tw_merged_objective_concat_xxx',
display_name='fb_tw_merged_objective_concat_xxx',
taxon_description='',
taxon_group='Billing',
taxon_type='dimension',
calculation='concat(merge(?facebook_ads|objective, ?twitter|objective),"_xxx")',
validation_type='numeric',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
),
Taxon.create(
slug='fb_tw_merged_objective_concat_xxx',
display_name='fb_tw_merged_objective',
taxon_description='',
taxon_group='Billing',
taxon_type='dimension',
calculation='concat(fb_tw_merged_objective,"xxx")',
validation_type='numeric',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
),
Taxon.create(
slug='fb_tw_merged_objective_override_unknowns',
display_name='fb_tw_merged_objective_override',
taxon_description='',
taxon_group='Billing',
taxon_type='dimension',
calculation='override(fb_tw_merged_objective,"om-slug")',
validation_type='numeric',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
),
Taxon.create(
slug='fb_tw_merged_objective_override_no_unknowns',
display_name='fb_tw_merged_objective_override_no_unknowns',
taxon_description='',
taxon_group='Billing',
taxon_type='dimension',
calculation='override(fb_tw_merged_objective,"om-slug", false)',
validation_type='numeric',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
),
Taxon.create(
slug='sumiff_spend_with_merged_objective',
display_name='sumiff_spend_with_merged_objective',
taxon_description='',
taxon_group='Billing',
taxon_type='metric',
calculation='iff(fb_tw_merged_objective == "LINK_CLICKS",generic_spend*1.5,generic_spend)',
validation_type='numeric',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
),
Taxon.create(
slug='data_source',
display_name='Data Source',
taxon_description='Name of data source',
taxon_group='INTERNAL',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym='Data source',
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='facebook_ads|date',
display_name='Date',
data_source='facebook_ads',
taxon_description='date',
taxon_group='Time',
taxon_type='dimension',
validation_type='datetime',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='facebook_ads|done',
display_name='Done',
data_source='facebook_ads',
taxon_description='done',
taxon_group='Time',
taxon_type='dimension',
validation_type='boolean',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug=f'{MOCK_DATA_SOURCE_NAME}|dimension',
display_name='Dimension',
data_source=MOCK_DATA_SOURCE_NAME,
taxon_description='dimension',
taxon_group='INTERNAL',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug=f'{MOCK_DATA_SOURCE_NAME}|metric',
display_name='Metric',
data_source=MOCK_DATA_SOURCE_NAME,
taxon_description='metric',
taxon_group='INTERNAL',
taxon_type='metric',
validation_type='numeric',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
aggregation={'type': 'sum', 'params': None},
),
Taxon.create(
slug='twitter|date',
display_name='Date',
data_source='twitter',
taxon_description='date',
taxon_group='Time',
taxon_type='dimension',
validation_type='datetime',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='merged_date',
display_name='Date',
data_source='twitter',
taxon_description='date',
taxon_group='Time',
taxon_type='dimension',
calculation='merge(?facebook_ads|date,?twitter|date)',
validation_type='datetime',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
),
Taxon.create(
slug='pinterest|total_conversions_value',
display_name='Total Conversions Value',
data_source='pinterest',
taxon_description='Total Conversions Value',
taxon_group='Namespaced',
taxon_type='metric',
calculation='pinterest|total_conversions_value_in_micro_dollar_microdollars * 1000000',
validation_type='money',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
),
Taxon.create(
slug='pinterest|total_conversions_value_in_micro_dollar_microdollars',
display_name='Total Conversions Value In Micro Dollar Microdollars',
data_source='pinterest',
taxon_description='Total Conversions Value In Micro Dollar Microdollars',
taxon_group='Namespaced',
taxon_type='metric',
validation_type='money',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
aggregation={'type': 'sum', 'params': None},
),
Taxon.create(
slug='city',
display_name='City',
data_source=None,
taxon_description='The city in which a user views or interacts with your ad. ',
taxon_group='Location',
taxon_type='dimension',
calculation='merge(?bing|city)',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
),
Taxon.create(
slug='bing|city',
display_name='City',
data_source='bing',
taxon_description='The city in which a user views or interacts with your ad. ',
taxon_group='Namespaced',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='campaign_id2',
display_name='Campaign ID',
taxon_description='Campaign ID',
taxon_group='Entity',
taxon_type='dimension',
calculation='merge(?facebook_ads|campaign_id, ?doubleclick_campaign|campaign_id_varchar, '
'?pinterest|campaign_id, ?adwords|campaign_id, ?doubleclick_bid|campaign_id, '
'?the_trade_desk|campaign_id, ?linkedin|campaign_group_id, ?twitter|campaign_id, '
'?snapchat|campaign_id, ?appnexus|line_item_id, ?verizon_dsp|order_id, ?bing|campaign_id)',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
),
Taxon.create(
slug='facebook_ads|campaign_id',
display_name='Campaign ID',
taxon_description='Campaign ID',
taxon_group='Entity',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
data_source='facebook_ads',
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='doubleclick_campaign|campaign_id_varchar',
display_name='Campaign ID',
taxon_description='Campaign ID',
taxon_group='Entity',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
data_source='doubleclick_campaign',
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='pinterest|campaign_id',
display_name='Campaign ID',
taxon_description='Campaign ID',
taxon_group='Entity',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
data_source='pinterest',
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='adwords|campaign_id',
display_name='Campaign ID',
taxon_description='Campaign ID',
taxon_group='Entity',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
data_source='adwords',
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='doubleclick_bid|campaign_id',
display_name='Campaign ID',
taxon_description='Campaign ID',
taxon_group='Entity',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
data_source='doubleclick_bid',
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='the_trade_desk|campaign_id',
display_name='Campaign ID',
taxon_description='Campaign ID',
taxon_group='Entity',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
data_source='the_trade_desk',
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='linkedin|campaign_group_id',
display_name='Campaign ID',
taxon_description='Campaign ID',
taxon_group='Entity',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
data_source='linkedin',
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='twitter|campaign_id',
display_name='Campaign ID',
taxon_description='Campaign ID',
taxon_group='Entity',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
data_source='twitter',
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='snapchat|campaign_id',
display_name='Campaign ID',
taxon_description='Campaign ID',
taxon_group='Entity',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
data_source='snapchat',
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='appnexus|line_item_id',
display_name='Campaign ID',
taxon_description='Campaign ID',
taxon_group='Entity',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
data_source='appnexus',
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='verizon_dsp|order_id',
display_name='Campaign ID',
taxon_description='Campaign ID',
taxon_group='Entity',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
data_source='verizon_dsp',
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='bing|campaign_id',
display_name='Campaign ID',
taxon_description='Campaign ID',
taxon_group='Entity',
taxon_type='dimension',
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
data_source='bing',
aggregation={'type': 'group_by', 'params': None},
),
Taxon.create(
slug='spend_w_impr',
display_name='Enhanced Spend for link clicks objective',
taxon_description='Total amount of money invested into marketing',
taxon_group='Billing',
taxon_type='metric',
calculation='spend + impressions',
validation_type='money',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
),
Taxon.create(
slug='enhanced_cpm_2',
display_name='Enhanced Cpm 2 for link clicks objective',
taxon_description='Total amount of money invested into marketing',
taxon_group='Billing',
taxon_type='metric',
calculation='enhanced_cpm',
validation_type='money',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
),
Taxon.create(
slug='enhanced_cpm_no_agg',
display_name='Enhanced Cpm 2 for link clicks objective',
taxon_description='Total amount of money invested into marketing',
taxon_group='Billing',
taxon_type='metric',
calculation='enhanced_cpm',
validation_type='money',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
),
Taxon.create(
slug='computed_metric_avg',
display_name='Computed metric with avg taxons',
taxon_description='Computed metric with avg taxons',
taxon_group='Exposure',
taxon_type='metric',
calculation='1000 * avg_spend',
validation_type='numeric',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
),
Taxon.create(
slug='simple_count_all',
display_name='Simple count all taxon',
taxon_description='Simple count all taxon',
taxon_group='custom',
taxon_type='dimension',
calculation=None,
validation_type='numeric',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
aggregation={'type': AggregationType.count_all.value, 'params': None},
),
Taxon.create(
slug='simple_min',
display_name='Simple min taxon',
taxon_description='Simple min taxon',
taxon_group='custom',
taxon_type='dimension',
calculation=None,
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
aggregation={'type': AggregationType.min.value, 'params': None},
),
Taxon.create(
slug='simple_max',
display_name='Simple max taxon',
taxon_description='Simple max taxon',
taxon_group='custom',
taxon_type='dimension',
calculation=None,
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
aggregation={'type': AggregationType.max.value, 'params': None},
),
Taxon.create(
slug='simple_count_distinct',
display_name='Simple count distinct taxon',
taxon_description='Simple count distinct taxon',
taxon_group='custom',
taxon_type='dimension',
calculation=None,
validation_type='numeric',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
aggregation={'type': AggregationType.count_distinct.value, 'params': {'relevant_fields': []}},
),
Taxon.create(
slug='simple_first_by',
display_name='Simple first_by taxon',
taxon_description='Simple first_by taxon',
taxon_group='custom',
taxon_type='dimension',
calculation=None,
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
aggregation={
'type': AggregationType.first_by.value,
'params': {'sort_dimensions': [{'taxon': 'objective', 'order_by': 'asc'}]},
},
),
Taxon.create(
slug='simple_last_by',
display_name='Simple last by taxon',
taxon_description='Simple last by taxon',
taxon_group='custom',
taxon_type='dimension',
calculation=None,
validation_type='text',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
acronym=None,
aggregation={
'type': AggregationType.last_by.value,
'params': {'sort_dimensions': [{'taxon': 'objective', 'order_by': 'asc'}]},
},
),
Taxon.create(
slug='required_spend',
display_name='Required Marketing Spend',
taxon_description='Total amount of money invested into marketing',
taxon_group='Billing',
taxon_type='metric',
calculation='facebook_ads|spend + adwords|spend + twitter|spend',
validation_type='money',
company_id='50',
settings=None,
display_state='visible',
display_settings=None,
),
]
def _add_tel_metadata_to_taxons(taxon_map) -> TaxonMap:
    """Attach computed TEL metadata to every taxon in *taxon_map*.

    Mutates the taxons in place and returns the same mapping for convenience.
    """
    # Local import — presumably to avoid a circular import at module load
    # time (NOTE(review): confirm).
    from panoramic.cli.husky.core.taxonomy.getters import Taxonomy

    Taxonomy.preload_taxons(taxon_mocks)
    for mock_taxon in taxon_map.values():
        mock_taxon.tel_metadata = get_taxon_tel_metadata(mock_taxon)
    return taxon_map
# Not to be used directly — use mock_get_taxons_map() instead.
# (Previously this note was a bare triple-quoted string *after* the call
# below; a bare string statement there is a no-op and attaches to nothing,
# so it is now a comment on the mapping it describes.)
TAXON_MAP: Dict[TaxonExpressionStr, Taxon] = {taxon.slug_expr: taxon for taxon in taxon_mocks}
_add_tel_metadata_to_taxons(TAXON_MAP)
def mock_get_taxons_map(_company_id: Optional[int], _taxon_slugs: Iterable[str]) -> Dict[TaxonExpressionStr, Taxon]:
    """
    Fn that should be used to get mocked taxons.

    :param _company_id: ignored; kept to mirror the real getter's signature.
    :param _taxon_slugs: slugs to resolve against TAXON_MAP.
    :raises TaxonsNotFound: if any requested slug has no mocked taxon.
    """
    # Materialize once: the original iterated the argument twice, which
    # silently skipped the missing-slug check when given a one-shot iterator.
    slugs = list(_taxon_slugs)
    taxon_map = {slug: TAXON_MAP[slug] for slug in slugs if slug in TAXON_MAP}
    not_found_taxons = [slug for slug in slugs if slug not in taxon_map]
    if not_found_taxons:
        raise TaxonsNotFound(taxon_slugs=not_found_taxons)
    return taxon_map
def get_specific_select_mocked_taxons(taxon_slugs: List[str]) -> Dict[TaxonSlugExpression, Taxon]:
    """Map each slug's TaxonSlugExpression to its mocked Taxon, preserving order.

    Raises TaxonsNotFound (via mock_get_taxons_map) for unknown slugs.
    """
    mapped = OrderedDict()
    for requested_slug in taxon_slugs:
        resolved = mock_get_taxons_map(None, [requested_slug])[requested_slug]
        mapped[TaxonSlugExpression(requested_slug)] = resolved
    return mapped
def get_specific_projection_mocked_taxons(taxon_slugs: List[str]) -> List[Tuple[TaxonExpressionStr, Taxon]]:
    """Like get_specific_select_mocked_taxons, but as (expression-str, taxon) pairs."""
    pairs: List[Tuple[TaxonExpressionStr, Taxon]] = []
    for slug_expression, taxon in get_specific_select_mocked_taxons(taxon_slugs).items():
        pairs.append((TaxonExpressionStr(slug_expression.slug), taxon))
    return pairs
def get_mocked_taxons_by_slug(taxon_slugs: Iterable[str]) -> List[Taxon]:
    """Resolve slugs directly against TAXON_MAP; raises KeyError for unknown slugs."""
    return [TAXON_MAP[requested] for requested in taxon_slugs]
def mock_get_taxons(
    company_ids: Optional[Iterable[str]] = None,
    taxon_slugs: Optional[Iterable[str]] = None,
    only_computed: Optional[bool] = None,
    data_sources: Optional[Iterable[Optional[str]]] = None,
    used_taxons: Optional[Iterable[str]] = None,
):
    """
    Mock counterpart to get_taxons()

    NOTE(review): company_ids is accepted but never used for filtering —
    confirm against the real get_taxons() whether that is intentional.
    """

    def _matches(candidate: Taxon) -> bool:
        # Each filter applies only when its argument is truthy (non-empty).
        if taxon_slugs and candidate.slug not in taxon_slugs:
            return False
        if only_computed is True and candidate.is_computed_metric is False:
            return False
        if data_sources and candidate.data_source not in data_sources:
            return False
        if used_taxons and candidate.slug not in used_taxons:
            return False
        return True

    return [candidate for candidate in TAXON_MAP.values() if _matches(candidate)]
| 31.947682
| 120
| 0.607782
| 4,980
| 48,241
| 5.618072
| 0.048996
| 0.085782
| 0.039317
| 0.09293
| 0.848488
| 0.817035
| 0.786475
| 0.760347
| 0.731432
| 0.706913
| 0
| 0.007231
| 0.271823
| 48,241
| 1,509
| 121
| 31.968854
| 0.789228
| 0.001596
| 0
| 0.788043
| 0
| 0
| 0.23921
| 0.042746
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004755
| false
| 0
| 0.006114
| 0.001359
| 0.018342
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
64116fa7a7a9777485b86dc168dca0480e7b54bd
| 12,514
|
py
|
Python
|
app/blueprints/api/presence_summary/views.py
|
mdazharulhoque7/azureFlaskPG
|
4fc3a67f2a38eb4fa8eb3484f332b96b576d3f60
|
[
"MIT"
] | null | null | null |
app/blueprints/api/presence_summary/views.py
|
mdazharulhoque7/azureFlaskPG
|
4fc3a67f2a38eb4fa8eb3484f332b96b576d3f60
|
[
"MIT"
] | null | null | null |
app/blueprints/api/presence_summary/views.py
|
mdazharulhoque7/azureFlaskPG
|
4fc3a67f2a38eb4fa8eb3484f332b96b576d3f60
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, jsonify
# from .models import PresenceLogSummary
# from blueprints.api.access_ponit.models import AssessPoint
blueprint = Blueprint('api_presence_summary_blueprint', __name__,)
@blueprint.route('/', methods=['GET'])
def presence_summary():
    """Placeholder endpoint for the presence-summary API.

    The original aggregation logic (per-zone access-point grouping, TSA and
    security-line transit-time statistics over 5/15/60-minute windows, stale
    PresenceLogSummary row cleanup) was entirely commented out; ~200 lines of
    dead commented code have been removed — it remains recoverable from
    version control.  Until it is reinstated the endpoint only reports
    success.
    """
    # TODO(review): restore or permanently retire the summary computation;
    # see git history for the previous implementation.
    return jsonify({'message': 'success'})
| 59.590476
| 187
| 0.501518
| 1,313
| 12,514
| 4.36329
| 0.091394
| 0.080642
| 0.103683
| 0.038401
| 0.842381
| 0.806598
| 0.800663
| 0.776575
| 0.751789
| 0.698726
| 0
| 0.034388
| 0.391162
| 12,514
| 210
| 188
| 59.590476
| 0.717548
| 0.888045
| 0
| 0
| 0
| 0
| 0.039967
| 0.024979
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0.2
| 0.6
| 0.6
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
|
0
| 10
|
6419c4b01be97752aa723c48bc19ae0065b4f0b4
| 122
|
py
|
Python
|
timewarpy/tests/test_datasets.py
|
celmore25/TimeWarPY
|
09a18120d868eca8167d66e66f9475ee58079817
|
[
"BSD-3-Clause"
] | 1
|
2021-12-10T00:21:21.000Z
|
2021-12-10T00:21:21.000Z
|
timewarpy/tests/test_datasets.py
|
celmore25/TimeWarPY
|
09a18120d868eca8167d66e66f9475ee58079817
|
[
"BSD-3-Clause"
] | null | null | null |
timewarpy/tests/test_datasets.py
|
celmore25/TimeWarPY
|
09a18120d868eca8167d66e66f9475ee58079817
|
[
"BSD-3-Clause"
] | null | null | null |
from timewarpy import datasets
def test_load_energy_data():
    """The bundled energy dataset loads with the expected (rows, columns) shape."""
    frame = datasets.load_energy_data()
    assert frame.shape == (19735, 29)
| 20.333333
| 59
| 0.762295
| 17
| 122
| 5.176471
| 0.764706
| 0.227273
| 0.318182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 0.139344
| 122
| 5
| 60
| 24.4
| 0.771429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
641d2097aa0caafa416d02fad016f21cfaef1a9a
| 1,443
|
py
|
Python
|
bench/test_hpack.py
|
steamraven/hpack
|
c76d07a6b07a3473bde21b972353be3863a9b68f
|
[
"MIT"
] | 55
|
2015-07-07T15:33:23.000Z
|
2022-01-23T18:30:21.000Z
|
bench/test_hpack.py
|
steamraven/hpack
|
c76d07a6b07a3473bde21b972353be3863a9b68f
|
[
"MIT"
] | 69
|
2015-06-29T17:25:39.000Z
|
2021-11-20T18:22:12.000Z
|
bench/test_hpack.py
|
steamraven/hpack
|
c76d07a6b07a3473bde21b972353be3863a9b68f
|
[
"MIT"
] | 28
|
2015-11-12T13:35:53.000Z
|
2021-04-29T12:05:59.000Z
|
from hpack.hpack import (
encode_integer,
decode_integer
)
class TestHpackEncodingIntegersBenchmarks:
    """Benchmark HPACK integer encoding for small/large values and prefixes."""

    def test_encode_small_integer_large_prefix(self, benchmark):
        # 120 fits within a 7-bit prefix without continuation bytes.
        benchmark(encode_integer, integer=120, prefix_bits=7)

    def test_encode_small_integer_small_prefix(self, benchmark):
        # A 1-bit prefix forces continuation bytes even for 120.
        benchmark(encode_integer, integer=120, prefix_bits=1)

    def test_encode_large_integer_large_prefix(self, benchmark):
        # 120000 always needs multi-byte encoding.
        benchmark(encode_integer, integer=120000, prefix_bits=7)

    def test_encode_large_integer_small_prefix(self, benchmark):
        benchmark(encode_integer, integer=120000, prefix_bits=1)
class TestHpackDecodingIntegersBenchmarks:
    """Benchmark HPACK integer decoding; inputs come from encode_integer itself."""

    def test_decode_small_integer_large_prefix(self, benchmark):
        encoded = bytes(encode_integer(integer=120, prefix_bits=7))
        benchmark(decode_integer, data=encoded, prefix_bits=7)

    def test_decode_small_integer_small_prefix(self, benchmark):
        encoded = bytes(encode_integer(integer=120, prefix_bits=1))
        benchmark(decode_integer, data=encoded, prefix_bits=1)

    def test_decode_large_integer_large_prefix(self, benchmark):
        encoded = bytes(encode_integer(integer=120000, prefix_bits=7))
        benchmark(decode_integer, data=encoded, prefix_bits=7)

    def test_decode_large_integer_small_prefix(self, benchmark):
        encoded = bytes(encode_integer(integer=120000, prefix_bits=1))
        benchmark(decode_integer, data=encoded, prefix_bits=1)
| 39
| 67
| 0.765073
| 184
| 1,443
| 5.641304
| 0.125
| 0.115607
| 0.146435
| 0.084778
| 0.878613
| 0.828516
| 0.764933
| 0.761079
| 0.761079
| 0.714836
| 0
| 0.03928
| 0.153153
| 1,443
| 36
| 68
| 40.083333
| 0.810147
| 0
| 0
| 0.153846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.307692
| false
| 0
| 0.038462
| 0
| 0.423077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ff3e384dafe96ef3ea1a91a1a05a9ae9d93d84c0
| 13,823
|
py
|
Python
|
src/attention.py
|
xunan0812/MultiSentiNet
|
1d7ec5d9bcf7bfea9f0b3a654053733dcb944696
|
[
"MIT"
] | 16
|
2018-01-09T11:46:44.000Z
|
2022-03-02T06:50:19.000Z
|
src/attention.py
|
anovier/MultiSentiNet
|
1d7ec5d9bcf7bfea9f0b3a654053733dcb944696
|
[
"MIT"
] | 4
|
2019-05-05T14:27:25.000Z
|
2021-12-16T03:49:32.000Z
|
src/attention.py
|
anovier/MultiSentiNet
|
1d7ec5d9bcf7bfea9f0b3a654053733dcb944696
|
[
"MIT"
] | 8
|
2018-09-11T08:45:51.000Z
|
2022-02-13T06:54:40.000Z
|
import numpy
from keras import backend as K
from keras.engine.topology import Layer, InputSpec
from keras import initializations,regularizers,constraints
from keras.optimizers import SGD, RMSprop, Adagrad
class AttentionWithContext(Layer):
    """
    Attention operation, with a context/query vector, for temporal data.
    Supports Masking.
    Follows the work of Yang et al. [https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf]
    "Hierarchical Attention Networks for Document Classification"
    by using a context vector to assist the attention
    # Input shape
        3D tensor with shape: `(samples, steps, features)`.
    # Output shape
        2D tensor with shape: `(samples, features)`.
    :param kwargs:
    Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.
    The dimensions are inferred based on the output shape of the RNN.
    Example:
        model.add(LSTM(64, return_sequences=True))
        model.add(AttentionWithContext())
    """

    def __init__(self,
                 W_regularizer=None, u_regularizer=None, b_regularizer=None,
                 W_constraint=None, u_constraint=None, b_constraint=None,
                 bias=True, **kwargs):
        # The mask is consumed inside call(); compute_mask() stops propagation.
        self.supports_masking = True
        self.init = initializations.get('glorot_uniform')

        self.W_regularizer = regularizers.get(W_regularizer)
        self.u_regularizer = regularizers.get(u_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.u_constraint = constraints.get(u_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        super(AttentionWithContext, self).__init__(**kwargs)

    def build(self, input_shape):
        # Expects RNN output: (samples, steps, features).
        assert len(input_shape) == 3

        # W: (features, features) projection applied to every timestep.
        self.W = self.add_weight((input_shape[-1], input_shape[-1],),
                                 initializer=self.init,
                                 name='{}_W'.format(self.name),
                                 regularizer=self.W_regularizer,
                                 constraint=self.W_constraint)
        if self.bias:
            # b: (features,) bias added after the projection.
            self.b = self.add_weight((input_shape[-1],),
                                     initializer='zero',
                                     name='{}_b'.format(self.name),
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)
        # u: (features,) learned context vector scored against each timestep.
        self.u = self.add_weight((input_shape[-1],),
                                 initializer=self.init,
                                 name='{}_u'.format(self.name),
                                 regularizer=self.u_regularizer,
                                 constraint=self.u_constraint)

        super(AttentionWithContext, self).build(input_shape)

    def compute_mask(self, input, input_mask=None):
        # do not pass the mask to the next layers
        return None

    def call(self, x, mask=None):
        uit = K.dot(x, self.W)
        if self.bias:
            uit += self.b
        uit = K.tanh(uit)
        # ait: (samples, steps) unnormalized attention scores.
        ait = K.dot(uit, self.u)
        a = K.exp(ait)
        # apply mask after the exp. will be re-normalized next
        if mask is not None:
            # Cast the mask to floatX to avoid float64 upcasting in theano
            a *= K.cast(mask, K.floatx())
        # in some cases especially in the early stages of training the sum may be almost zero
        # a /= K.cast(K.sum(a, axis=1, keepdims=True), K.floatx())
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        a = K.expand_dims(a)
        weighted_input = x * a
        # Attention-weighted sum over timesteps -> (samples, features).
        return K.sum(weighted_input, axis=1)

    def get_output_shape_for(self, input_shape):
        # Old (Keras 1.x) name for compute_output_shape.
        return input_shape[0], input_shape[-1]
class Attention(Layer):
    def __init__(self,
                 W_regularizer=None, b_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, **kwargs):
        """
        Keras Layer that implements an Attention mechanism for temporal data.
        Supports Masking.
        Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]
        # Input shape
            3D tensor with shape: `(samples, steps, features)`.
        # Output shape
            2D tensor with shape: `(samples, features)`.
        :param kwargs:
        Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.
        The dimensions are inferred based on the output shape of the RNN.
        Example:
            model.add(LSTM(64, return_sequences=True))
            model.add(Attention())
        """
        # The mask is consumed inside call(); compute_mask() stops propagation.
        self.supports_masking = True
        self.init = initializations.get('glorot_uniform')

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        super(Attention, self).__init__(**kwargs)

    def build(self, input_shape):
        # Expects RNN output: (samples, steps, features).
        assert len(input_shape) == 3

        # W: (features,) — a single scoring vector, no context vector here.
        self.W = self.add_weight((input_shape[-1],),
                                 initializer=self.init,
                                 name='{}_W'.format(self.name),
                                 regularizer=self.W_regularizer,
                                 constraint=self.W_constraint)
        if self.bias:
            # b: (steps,) — one bias per timestep, added to the score vector.
            self.b = self.add_weight((input_shape[1],),
                                     initializer='zero',
                                     name='{}_b'.format(self.name),
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)
        else:
            self.b = None

        self.built = True

    def compute_mask(self, input, input_mask=None):
        # do not pass the mask to the next layers
        return None

    def call(self, x, mask=None):
        # eij: (samples, steps) unnormalized attention scores.
        eij = K.dot(x, self.W)
        if self.bias:
            eij += self.b
        eij = K.tanh(eij)
        a = K.exp(eij)
        # apply mask after the exp. will be re-normalized next
        if mask is not None:
            # Cast the mask to floatX to avoid float64 upcasting in theano
            a *= K.cast(mask, K.floatx())
        # in some cases especially in the early stages of training the sum may be almost zero
        # a /= K.cast(K.sum(a, axis=1, keepdims=True), K.floatx())
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        a = K.expand_dims(a)
        weighted_input = x * a
        # Attention-weighted sum over timesteps -> (samples, features).
        return K.sum(weighted_input, axis=1)

    def get_output_shape_for(self, input_shape):
        # Old (Keras 1.x) name for compute_output_shape.
        return input_shape[0], input_shape[-1]
class Attention_input1(Layer):
    """Context-vector attention over input x, conditioned on a second tensor t.

    call() expects ``input = [x, t]``; the attention weights are computed from
    a joint projection of both tensors and applied to x only.
    NOTE(review): unlike AttentionWithContext, call() ignores the mask —
    confirm whether masking was intentionally dropped here.
    """

    def __init__(self,
                 W_regularizer=None, u_regularizer=None, b_regularizer=None,
                 W_constraint=None, u_constraint=None, b_constraint=None,
                 Wt_regularizer=None,
                 Wt_constraint=None,
                 bias=True, **kwargs):
        self.supports_masking = True
        self.init = initializations.get('glorot_uniform')

        self.W_regularizer = regularizers.get(W_regularizer)
        self.Wt_regularizer = regularizers.get(Wt_regularizer)
        self.u_regularizer = regularizers.get(u_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.Wt_constraint = constraints.get(Wt_constraint)
        self.u_constraint = constraints.get(u_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        super(Attention_input1, self).__init__(**kwargs)

    def build(self, input_shape):
        # input_shape is a list: [shape of x, shape of t].
        shape1 = input_shape[0]
        shape2 = input_shape[1]
        assert len(shape1) == 3
        # W projects x; Wt projects t.  Their outputs are summed in call(),
        # so both projections must yield the same trailing dimension —
        # assumes shape1[-1] == shape2[-1] (TODO confirm with callers).
        self.W = self.add_weight((shape1[-1], shape1[-1],),
                                 initializer=self.init,
                                 name='{}_W'.format(self.name),
                                 regularizer=self.W_regularizer,
                                 constraint=self.W_constraint)
        self.Wt = self.add_weight((shape2[-1],shape2[-1], ),
                                  initializer=self.init,
                                  name='{}_Wt'.format(self.name),
                                  regularizer=self.Wt_regularizer,
                                  constraint=self.Wt_constraint)
        if self.bias:
            self.b = self.add_weight((shape1[-1],),
                                     initializer='zero',
                                     name='{}_b'.format(self.name),
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)
        # u: learned context vector used to score each timestep.
        self.u = self.add_weight((shape1[-1],),
                                 initializer=self.init,
                                 name='{}_u'.format(self.name),
                                 regularizer=self.u_regularizer,
                                 constraint=self.u_constraint)

        super(Attention_input1, self).build(input_shape)

    def compute_mask(self, input, input_mask=None):
        # do not pass the mask to the next layers
        return None

    def call(self, input, mask=None):
        x = input[0]
        t = input[1]
        uit_x = K.dot(x, self.W)
        uit_t = K.dot(t, self.Wt)
        # Joint pre-activation built from both inputs.
        uit = uit_x + uit_t
        if self.bias:
            uit += self.b
        uit = K.tanh(uit)
        ait = K.dot(uit, self.u)
        a = K.exp(ait)
        # Renormalize; epsilon guards against an all-(near-)zero sum.
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        a = K.expand_dims(a)
        # Weights are applied to x only; t contributes to scoring alone.
        weighted_input = x * a
        return K.sum(weighted_input, axis=1)

    def get_output_shape_for(self, input_shape):
        # Output shape follows the first input: (samples, features).
        shape1 = input_shape[0]
        return shape1[0], shape1[-1]
class Attention_input2(Layer):
    """Context-vector attention over x, conditioned on two extra tensors o and s.

    call() expects ``input = [x, o, s]``; attention weights come from a joint
    projection of all three tensors and are applied to x only.
    NOTE(review): call() ignores the mask (compare AttentionWithContext) —
    confirm whether masking was intentionally dropped here.
    """

    def __init__(self,
                 W_regularizer=None, u_regularizer=None, b_regularizer=None,
                 W_constraint=None, u_constraint=None, b_constraint=None,
                 Wo_regularizer=None,
                 Wo_constraint=None,
                 Ws_regularizer=None,
                 Ws_constraint=None,
                 bias=True, **kwargs):
        self.supports_masking = True
        self.init = initializations.get('glorot_uniform')

        self.W_regularizer = regularizers.get(W_regularizer)
        self.Wo_regularizer = regularizers.get(Wo_regularizer)
        self.Ws_regularizer = regularizers.get(Ws_regularizer)
        self.u_regularizer = regularizers.get(u_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.Wo_constraint = constraints.get(Wo_constraint)
        self.Ws_constraint = constraints.get(Ws_constraint)
        self.u_constraint = constraints.get(u_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        super(Attention_input2, self).__init__(**kwargs)

    def build(self, input_shape):
        # input_shape is a list: [shape of x, shape of o, shape of s].
        shape1 = input_shape[0]
        shape2 = input_shape[1]
        # BUGFIX: was `input_shape[1]`.  call() projects the *third* input
        # (input[2]) with Ws, so Ws must be sized from the third shape; the
        # original only worked when o and s happened to share a last dim.
        shape3 = input_shape[2]
        assert len(shape1) == 3
        self.W = self.add_weight((shape1[-1], shape1[-1],),
                                 initializer=self.init,
                                 name='{}_W'.format(self.name),
                                 regularizer=self.W_regularizer,
                                 constraint=self.W_constraint)
        self.Wo = self.add_weight((shape2[-1], shape2[-1],),
                                  initializer=self.init,
                                  name='{}_Wo'.format(self.name),
                                  regularizer=self.Wo_regularizer,
                                  constraint=self.Wo_constraint)
        self.Ws = self.add_weight((shape3[-1], shape3[-1],),
                                  initializer=self.init,
                                  name='{}_Ws'.format(self.name),
                                  regularizer=self.Ws_regularizer,
                                  constraint=self.Ws_constraint)
        if self.bias:
            self.b = self.add_weight((shape1[-1],),
                                     initializer='zero',
                                     name='{}_b'.format(self.name),
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)
        # u: learned context vector used to score each timestep.
        self.u = self.add_weight((shape1[-1],),
                                 initializer=self.init,
                                 name='{}_u'.format(self.name),
                                 regularizer=self.u_regularizer,
                                 constraint=self.u_constraint)

        super(Attention_input2, self).build(input_shape)

    def compute_mask(self, input, input_mask=None):
        # do not pass the mask to the next layers
        return None

    def call(self, input, mask=None):
        x = input[0]
        o = input[1]
        s = input[2]
        uit_x = K.dot(x, self.W)
        uit_o = K.dot(o, self.Wo)
        uit_s = K.dot(s, self.Ws)
        # Joint pre-activation from all three inputs; the projected outputs
        # must share the same trailing dimension to be summable.
        uit = uit_x + uit_o + uit_s
        if self.bias:
            uit += self.b
        uit = K.tanh(uit)
        ait = K.dot(uit, self.u)
        a = K.exp(ait)
        # Renormalize; epsilon guards against an all-(near-)zero sum.
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        a = K.expand_dims(a)
        # Weights are applied to x only; o and s contribute to scoring alone.
        weighted_input = x * a
        return K.sum(weighted_input, axis=1)

    def get_output_shape_for(self, input_shape):
        # Output shape follows the first input: (samples, features).
        shape1 = input_shape[0]
        return shape1[0], shape1[-1]
| 36.959893
| 93
| 0.547132
| 1,589
| 13,823
| 4.594714
| 0.113908
| 0.065197
| 0.049856
| 0.047939
| 0.843172
| 0.827969
| 0.827969
| 0.82564
| 0.806602
| 0.794001
| 0
| 0.012286
| 0.352311
| 13,823
| 373
| 94
| 37.058981
| 0.803194
| 0.13615
| 0
| 0.736626
| 0
| 0
| 0.011221
| 0
| 0
| 0
| 0
| 0
| 0.016461
| 1
| 0.082305
| false
| 0
| 0.020576
| 0.024691
| 0.168724
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4426d87647400769a1c0070ef36aae0b92eb89c4
| 4,438
|
py
|
Python
|
djstripe/migrations/0017_auto_20180110_0553.py
|
ComFreight/cmft-stripe-integration
|
85a2e14dcd6fffd24e999b1f383dd7eb006606e0
|
[
"MIT"
] | null | null | null |
djstripe/migrations/0017_auto_20180110_0553.py
|
ComFreight/cmft-stripe-integration
|
85a2e14dcd6fffd24e999b1f383dd7eb006606e0
|
[
"MIT"
] | null | null | null |
djstripe/migrations/0017_auto_20180110_0553.py
|
ComFreight/cmft-stripe-integration
|
85a2e14dcd6fffd24e999b1f383dd7eb006606e0
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.1 on 2018-01-10 03:53
from django.db import migrations
class Migration(migrations.Migration):
    """Prefix the audit-timestamp fields of every dj-stripe model.

    Every listed model renames ``created`` -> ``djstripe_created`` and
    ``modified`` -> ``djstripe_updated``.
    """

    dependencies = [
        ('djstripe', '0016_auto_20180109_2057'),
    ]

    # Same 28 RenameField operations as before, generated instead of spelled
    # out: per model, 'created' first then 'modified', in the model order
    # below.  (Both inner iterables are literals so the comprehension is
    # valid in class scope.)
    operations = [
        migrations.RenameField(
            model_name=renamed_model,
            old_name=old_field,
            new_name=new_field,
        )
        for renamed_model in (
            'account',
            'card',
            'charge',
            'coupon',
            'customer',
            'dispute',
            'event',
            'invoice',
            'invoiceitem',
            'payout',
            'plan',
            'source',
            'subscription',
            'transfer',
        )
        for old_field, new_field in (
            ('created', 'djstripe_created'),
            ('modified', 'djstripe_updated'),
        )
    ]
| 28.818182
| 48
| 0.521857
| 364
| 4,438
| 6.046703
| 0.129121
| 0.267151
| 0.330759
| 0.381645
| 0.934121
| 0.934121
| 0.934121
| 0.815538
| 0.815538
| 0.815538
| 0
| 0.011115
| 0.371564
| 4,438
| 153
| 49
| 29.006536
| 0.778057
| 0.01014
| 0
| 0.952381
| 1
| 0
| 0.201093
| 0.005238
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.006803
| 0
| 0.027211
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
445013288f73d7f191fc869387e65409058c74f5
| 2,923
|
py
|
Python
|
bank/src/deposit_app/permissions.py
|
yuramorozov01/bank_system
|
8d0cad692a89b913adb2df9a2a03d1793938a911
|
[
"Apache-2.0"
] | null | null | null |
bank/src/deposit_app/permissions.py
|
yuramorozov01/bank_system
|
8d0cad692a89b913adb2df9a2a03d1793938a911
|
[
"Apache-2.0"
] | null | null | null |
bank/src/deposit_app/permissions.py
|
yuramorozov01/bank_system
|
8d0cad692a89b913adb2df9a2a03d1793938a911
|
[
"Apache-2.0"
] | null | null | null |
from rest_framework import permissions
class CanAddDepositType(permissions.BasePermission):
    '''Permission to check if current user has permission to add deposit types.'''
    message = 'You don\'t have permissions to add a new deposit type.'

    def has_permission(self, request, view):
        # An absent/anonymous-falsy user short-circuits before the perm lookup.
        user = request.user
        return bool(user and user.has_perm('deposit_app.add_deposittype'))
class CanChangeDepositType(permissions.BasePermission):
    '''Permission to check if current user has permission to change deposit types.'''
    message = 'You don\'t have permissions to change the deposit type.'

    def has_permission(self, request, view):
        # Falsy user short-circuits before the perm lookup.
        user = request.user
        return bool(user and user.has_perm('deposit_app.change_deposittype'))
class CanDeleteDepositType(permissions.BasePermission):
    '''Permission to check if current user has permission to delete deposit types.'''
    message = 'You don\'t have permissions to delete the deposit type.'

    def has_permission(self, request, view):
        # Falsy user short-circuits before the perm lookup.
        user = request.user
        return bool(user and user.has_perm('deposit_app.delete_deposittype'))
class CanViewDepositType(permissions.BasePermission):
'''Permission to check if current user has permission to view deposit types.'''
message = 'You don\'t have permissions to view the deposit type.'
def has_permission(self, request, view):
return bool(request.user and request.user.has_perm('deposit_app.view_deposittype'))
class CanAddDepositContract(permissions.BasePermission):
'''Permission to check if current user has permission to add deposit contracts.'''
message = 'You don\'t have permissions to add a new deposit contract.'
def has_permission(self, request, view):
return bool(request.user and request.user.has_perm('deposit_app.add_depositcontract'))
class CanChangeDepositContract(permissions.BasePermission):
'''Permission to check if current user has permission to change deposit contracts.'''
message = 'You don\'t have permissions to change the deposit contract.'
def has_permission(self, request, view):
return bool(request.user and request.user.has_perm('deposit_app.change_depositcontract'))
class CanDeleteDepositContract(permissions.BasePermission):
'''Permission to check if current user has permission to delete deposit contracts.'''
message = 'You don\'t have permissions to delete the deposit contract.'
def has_permission(self, request, view):
return bool(request.user and request.user.has_perm('deposit_app.delete_depositcontract'))
class CanViewDepositContract(permissions.BasePermission):
'''Permission to check if current user has permission to view deposit contracts.'''
message = 'You don\'t have permissions to view the deposit contract.'
def has_permission(self, request, view):
return bool(request.user and request.user.has_perm('deposit_app.view_depositcontract'))
| 39.5
| 97
| 0.754362
| 375
| 2,923
| 5.792
| 0.130667
| 0.088398
| 0.128913
| 0.13628
| 0.841621
| 0.841621
| 0.841621
| 0.841621
| 0.841621
| 0.815838
| 0
| 0
| 0.158057
| 2,923
| 73
| 98
| 40.041096
| 0.882568
| 0.209716
| 0
| 0.242424
| 0
| 0
| 0.136624
| 0.108418
| 0
| 0
| 0
| 0
| 0
| 1
| 0.242424
| false
| 0
| 0.030303
| 0.242424
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 9
|
4716ebca124223889a0fb994b57ae2d8c7b4bc6a
| 16,946
|
py
|
Python
|
esrl/optimizers.py
|
JamiesonWarner/High-Dim-ES-RL
|
5037fa12c1300884d88eb5cde46999e6cc783674
|
[
"MIT"
] | null | null | null |
esrl/optimizers.py
|
JamiesonWarner/High-Dim-ES-RL
|
5037fa12c1300884d88eb5cde46999e6cc783674
|
[
"MIT"
] | null | null | null |
esrl/optimizers.py
|
JamiesonWarner/High-Dim-ES-RL
|
5037fa12c1300884d88eb5cde46999e6cc783674
|
[
"MIT"
] | null | null | null |
'''This module contains the following:
LMMAES
An ES for problems in dimensions >> 100.
MAES
An ES for problems in dimensions > 100.
ES
A stripped down version of the LMMAES implementation.
Featuring no CMA or approximation. ES is reasonable to use
in extremely high dimension.
'''
import numpy as np
from multiprocessing.dummy import Pool
from .base import BaseOptimizer
class LMMAES(BaseOptimizer):
    '''LM-MA-ES for black box optimization.

    A limited memory(/time) version of CMA-ES. Useful for badly conditioned
    functions with high dimensional real parameter spaces.

    Reference:
        https://arxiv.org/pdf/1705.06693.pdf
    '''

    def __init__(self, y0, sigma, f, function_budget=10000, function_target=None,
                 rng=None, threads=1, lmbd=None):
        '''Initialization of the LMMAES.

        Args:
            y0 (numpy.ndarray):
                Initial candidate solution. A numpy array of dimension n.
                Optimum should not be more distant than 3*step_size.
            sigma (float):
                Global step size or mutation strength.
            f (function):
                Fitness function, taking a candidate as input.
            function_budget (int, optional):
                Maximum number of function evaluations. Defaults to 10000.
                If function_budget and function_target are not specified the
                algorithm does not terminate automatically.
            function_target (numeric, optional):
                Target function value f(y*). If function_budget and function_target
                are not specified the algorithm does not terminate automatically.
            rng (class instance, optional):
                Random number generator similar to numpy's np.random.RandomState().
                Requires at least a method similar to np.randn.
                Defaults to a fresh np.random.RandomState() per instance.
            threads (int, optional):
                The number of threads to use to evaluate candidates.
            lmbd (int, optional):
                Number of evolution paths, the rank of the covariance
                matrix approximation. The value is tied to the number of
                selected candidates by self.mu = self.lmbd//2, as well as
                equal to the number of candidates self.m.
                Setting this manually might offset some constants.
        '''
        super().__init__()
        self.function_evals = 0
        # set if required
        self.function_budget = function_budget
        self.function_target = function_target
        # set random number generator; created here (not as a default
        # argument) so instances do not share one mutable RNG
        self.rng = rng if rng is not None else np.random.RandomState()
        # initialize pool
        self.pool = Pool(threads)
        # 1: given
        self.n = len(y0)
        if lmbd is not None:
            self.lmbd = lmbd
        else:
            self.lmbd = int(4 + np.floor(3 * np.log(self.n)))
        # otherwise tuning constants break - use standard CMA-ES instead :)
        assert self.lmbd < self.n
        self.mu = self.lmbd // 2
        # logarithmically decreasing recombination weights, normalized to sum 1
        self.w = np.array([np.log(self.mu + 0.5) - np.log(i + 1)
                           for i in range(self.mu)])
        self.w /= np.sum(self.w)
        self.mu_w = 1 / np.sum(np.square(self.w))
        self.m = self.lmbd
        self.c_sigma = (2 * self.lmbd) / self.n
        # per-direction learning rates for the low-rank matrix update
        self.c_d = np.zeros((self.m,))
        self.c_c = np.zeros((self.m,))
        for i in range(self.m):
            self.c_d[i] = 1 / (np.power(1.5, i) * self.n)
            self.c_c[i] = self.lmbd / (np.power(4.0, i) * self.n)
        # 2: initialize
        self.t = 0
        self.y = y0
        self.f = f
        self.sigma = sigma
        self.p_sigma = np.zeros((self.n,))
        # direction vectors replacing the full covariance matrix
        self.M = np.zeros((self.m, self.n))
        # useful values
        self.c_sigma_update = np.sqrt(self.mu_w * self.c_sigma * (2 - self.c_sigma))
        self.c_c_update = np.sqrt(self.mu_w * self.c_c * (2 - self.c_c))
        self.fd = np.zeros((self.lmbd,))
        # deviation from the paper
        # damping constant
        self.d_sigma = 2
        # ~ expected length of normally distributed vector
        self.chi = np.sqrt(self.n) * (1 - (1 / (4 * self.n)) -
                                      (1 / (21 * self.n * self.n)))

    def step(self):
        '''Optimization step of the LMMAES.

        Returns:
            Tuple of (function_evals, False, False). If terminated a
            Tuple of (function_evals, y, a_flag). a_flag is a letter
            specifying the termination criterion. Either 'B' or 'T'.
        '''
        # sample offspring, vectorized version
        self.z = self.rng.randn(self.lmbd, self.n)
        self.d = np.copy(self.z)
        for j in range(min(self.t, self.m)):
            self.d = ((1 - self.c_d[j]) * self.d) + (self.c_d[j] *
                      np.outer(np.dot(self.d, self.M[j, :]), self.M[j, :]))
        # evaluate offspring and check stopping criteria
        self.x = [(self.y + self.sigma * self.d[i, :])
                  for i in range(self.lmbd)]
        self.fd = self.pool.map(self.f, self.x)
        self.function_evals += self.lmbd
        if self.reachedFunctionBudget(self.function_budget, self.function_evals):
            # if budget is reached return parent
            return self.function_evals, self.y, 'B'
        if self.function_target is not None:
            if self.reachedFunctionTarget(self.function_target, np.mean(self.fd)):
                # if function target is reached return population expected value
                return self.function_evals, self.y, 'T'
        # sort by fitness
        self.order = np.argsort(self.fd)
        # update mean
        for i in range(self.mu):
            self.y += self.sigma * self.w[i] * self.d[self.order[i], :]
        # compute weighted mean
        self.wz = 0
        for i in range(self.mu):
            self.wz += self.w[i] * self.z[self.order[i], :]
        # update evolution path
        self.p_sigma *= 1 - self.c_sigma
        self.p_sigma += self.c_sigma_update * self.wz
        # update direction vectors
        for i in range(self.m):
            self.M[i, :] = ((1 - self.c_c[i]) * self.M[i, :]) + \
                           (self.c_c_update[i] * self.wz)
        # update step size
        self.sigma *= np.exp((self.c_sigma / self.d_sigma) *
                             ((np.square(np.linalg.norm(self.p_sigma)) / self.n) - 1))
        # generation counter
        self.t += 1
        return self.function_evals, False, False
class MAES(BaseOptimizer):
    '''MA-ES for black box optimization.

    Reference:
        https://arxiv.org/pdf/1705.06693.pdf
    '''

    def __init__(self, y0, sigma, f, function_budget=10000, function_target=None,
                 rng=None, threads=1, lmbd=None):
        '''Initialization of the MAES.

        Args:
            y0 (numpy.ndarray):
                Initial candidate solution. A numpy array of dimension n.
                Optimum should not be more distant than 3*step_size.
            sigma (float):
                Global step size or mutation strength.
            f (function):
                Fitness function, taking a candidate as input.
            function_budget (int, optional):
                Maximum number of function evaluations. Defaults to 10000.
                If function_budget and function_target are not specified the
                algorithm does not terminate automatically.
            function_target (numeric, optional):
                Target function value f(y*). If function_budget and function_target
                are not specified the algorithm does not terminate automatically.
            rng (class instance, optional):
                Random number generator similar to numpy's np.random.RandomState().
                Requires at least a method similar to np.randn.
                Defaults to a fresh np.random.RandomState() per instance.
            threads (int, optional):
                The number of threads to use to evaluate candidates.
            lmbd (int, optional):
                Number of evolution paths, the rank of the covariance
                matrix approximation. The value is tied to the number of
                selected candidates by self.mu = self.lmbd//2, as well as
                equal to the number of candidates self.m.
                Setting this manually might offset some constants.
        '''
        # initializes self.save_to and self.buffer_length and
        # if required self.log, self.log_iterator
        super().__init__()
        self.function_evals = 0
        # set if required
        self.function_budget = function_budget
        self.function_target = function_target
        # set random number generator; created here (not as a default
        # argument) so instances do not share one mutable RNG
        self.rng = rng if rng is not None else np.random.RandomState()
        # initialize pool
        self.pool = Pool(threads)
        # 1: given
        self.n = len(y0)
        if lmbd is not None:
            self.lmbd = lmbd
        else:
            self.lmbd = int(4 + np.floor(3 * np.log(self.n)))
        # otherwise tuning constants break - use standard CMA-ES instead :)
        assert self.lmbd < self.n
        self.mu = self.lmbd // 2
        # logarithmically decreasing recombination weights, normalized to sum 1
        self.w = np.array([np.log(self.mu + 0.5) - np.log(i + 1)
                           for i in range(self.mu)])
        self.w /= np.sum(self.w)
        self.mu_w = 1 / np.sum(np.square(self.w))
        # standard CMA-ES learning-rate constants
        self.c_sigma = (self.mu_w + 2) / (self.n + self.mu_w + 5)
        self.c_1 = 2 / (np.power((self.n + 1.3), 2) + self.mu_w)
        self.c_mu = min(1 - self.c_1, 2 * (self.mu_w - 2 + (1 / self.mu_w)
                        ) / (np.power(self.n + 2, 2) + self.mu_w))
        # 2: initialize
        self.t = 0
        self.y = y0
        self.f = f
        self.sigma = sigma
        self.p_sigma = np.zeros((self.n,))
        # full n-by-n transformation matrix (the memory cost LM-MA-ES avoids)
        self.M = np.identity(self.n)
        # useful values
        self.c_sigma_update = np.sqrt(self.mu_w * self.c_sigma * (2 - self.c_sigma))
        self.fd = np.zeros((self.lmbd,))
        # deviation from the paper
        # damping constant
        self.d_sigma = 2
        # ~ expected length of normally distributed vector
        self.chi = np.sqrt(self.n) * (1 - (1 / (4 * self.n)) -
                                      (1 / (21 * self.n * self.n)))

    def step(self):
        '''Optimization step of the MAES.

        Returns:
            Tuple of (function_evals, False, False). If terminated a
            Tuple of (function_evals, y, a_flag). a_flag is a letter
            specifying the termination criterion. Either 'B' or 'T'.
        '''
        # sample offspring, vectorized version
        self.z = self.rng.randn(self.n, self.lmbd)
        self.d = np.matmul(self.M, self.z)
        # evaluate offspring and check stopping criteria
        self.x = [(self.y + self.sigma * self.d[:, i])
                  for i in range(self.lmbd)]
        self.fd = self.pool.map(self.f, self.x)
        self.function_evals += self.lmbd
        if self.reachedFunctionBudget(self.function_budget, self.function_evals):
            # if budget is reached return parent
            return self.function_evals, self.y, 'B'
        if self.function_target is not None:
            if self.reachedFunctionTarget(self.function_target, np.mean(self.fd)):
                # if function target is reached, return population expected value
                return self.function_evals, self.y, 'T'
        # sort by fitness
        self.order = np.argsort(self.fd)
        # update mean
        for i in range(self.mu):
            self.y += self.sigma * self.w[i] * self.d[:, self.order[i]]
        # pre-compute
        self.d_sigma_M = np.dot(self.M, self.p_sigma)
        # compute weighted mean
        self.wz = 0
        temp2 = np.zeros((self.n, self.n))
        for i in range(self.mu):
            temp = self.w[i] * self.z[:, self.order[i]]
            self.wz += temp
            temp2 += np.outer(self.d[:, self.order[i]], temp)
        # update evolution path
        self.p_sigma *= 1 - self.c_sigma
        self.p_sigma += self.c_sigma_update * self.wz
        # update matrix
        self.M *= (1 - (self.c_1 / 2) - (self.c_mu / 2))
        self.M += (np.outer((self.c_1 / 2) * self.d_sigma_M,
                            self.p_sigma)) + ((self.c_mu / 2) * temp2)
        # update step size
        self.sigma *= np.exp((self.c_sigma / self.d_sigma) *
                             ((np.square(np.linalg.norm(self.p_sigma)) / self.n) - 1))
        # generation counter
        self.t += 1
        return self.function_evals, False, False
class ES(BaseOptimizer):
    '''ES for black box optimization.

    A stripped down version of the LMMAES implementation, featuring no CMA
    or approximation. Reasonable to use in extremely high dimension.
    '''

    def __init__(self, y0, sigma, f, function_budget=10000, function_target=None,
                 rng=None, threads=1):
        '''Initialization of the ES.

        Args:
            y0 (numpy.ndarray):
                Initial candidate solution. A numpy array of dimension n.
                Optimum should not be more distant than 3*step_size.
            sigma (float):
                Global step size or mutation strength.
            f (function):
                Fitness function, taking a candidate as input.
            function_budget (int, optional):
                Maximum number of function evaluations. Defaults to 10000.
                If function_budget and function_target are not specified the
                algorithm does not terminate automatically.
            function_target (numeric, optional):
                Target function value f(y*). If function_budget and function_target
                are not specified the algorithm does not terminate automatically.
            rng (class instance, optional):
                Random number generator similar to numpy's np.random.RandomState().
                Requires at least a method similar to np.randn.
                Defaults to a fresh np.random.RandomState() per instance.
            threads (int, optional):
                The number of threads to use to evaluate candidates.
        '''
        super().__init__()
        self.function_evals = 0
        # set if required
        self.function_budget = function_budget
        self.function_target = function_target
        # set random number generator; created here (not as a default
        # argument) so instances do not share one mutable RNG
        self.rng = rng if rng is not None else np.random.RandomState()
        # initialize pool
        self.pool = Pool(threads)
        # 1: given
        self.n = len(y0)
        self.lmbd = int(4 + np.floor(3 * np.log(self.n)))
        # otherwise tuning constants break - use standard CMA-ES instead :)
        assert self.lmbd < self.n
        self.mu = self.lmbd // 2
        # logarithmically decreasing recombination weights, normalized to sum 1
        self.w = np.array([np.log(self.mu + 0.5) - np.log(i + 1)
                           for i in range(self.mu)])
        self.w /= np.sum(self.w)
        self.mu_w = 1 / np.sum(np.square(self.w))
        self.c_sigma = (2 * self.lmbd) / self.n
        # 2: initialize
        self.t = 0
        self.y = y0
        self.f = f
        self.sigma = sigma
        self.p_sigma = np.zeros((self.n,))
        # useful values
        self.c_sigma_update = np.sqrt(self.mu_w * self.c_sigma * (2 - self.c_sigma))
        self.fd = np.zeros((self.lmbd,))
        # deviation from the paper
        # damping constant
        self.d_sigma = 2
        # ~ expected length of normally distributed vector
        self.chi = np.sqrt(self.n) * (1 - (1 / (4 * self.n)) -
                                      (1 / (21 * self.n * self.n)))

    def step(self):
        '''Optimization step of the ES.

        Returns:
            Tuple of (function_evals, False, False). If terminated a
            Tuple of (function_evals, y, a_flag). a_flag is a letter
            specifying the termination criterion. Either 'B' or 'T'.
        '''
        # sample offspring, vectorized version
        self.z = self.rng.randn(self.lmbd, self.n)
        # evaluate offspring and check stopping criteria
        self.x = [(self.y + self.sigma * self.z[i, :])
                  for i in range(self.lmbd)]
        self.fd = self.pool.map(self.f, self.x)
        self.function_evals += self.lmbd
        if self.reachedFunctionBudget(self.function_budget, self.function_evals):
            # if budget is reached return parent
            return self.function_evals, self.y, 'B'
        if self.function_target is not None:
            if self.reachedFunctionTarget(self.function_target, np.mean(self.fd)):
                # if function target is reached, return population expected value
                return self.function_evals, self.y, 'T'
        # sort by fitness
        self.order = np.argsort(self.fd)
        # update mean
        for i in range(self.mu):
            self.y += self.sigma * self.w[i] * self.z[self.order[i], :]
        # compute weighted mean
        self.wz = 0
        for i in range(self.mu):
            self.wz += self.w[i] * self.z[self.order[i], :]
        # update evolution path
        self.p_sigma *= 1 - self.c_sigma
        self.p_sigma += self.c_sigma_update * self.wz
        # update step size
        self.sigma *= np.exp((self.c_sigma / self.d_sigma) *
                             ((np.square(np.linalg.norm(self.p_sigma)) / self.n) - 1))
        # generation counter
        self.t += 1
        return self.function_evals, False, False
| 35.084886
| 106
| 0.566682
| 2,256
| 16,946
| 4.172872
| 0.115248
| 0.021776
| 0.022307
| 0.016359
| 0.899405
| 0.882197
| 0.869344
| 0.858721
| 0.850329
| 0.850329
| 0
| 0.015545
| 0.328101
| 16,946
| 482
| 107
| 35.157676
| 0.811259
| 0.431193
| 0
| 0.783069
| 0
| 0
| 0.00064
| 0
| 0
| 0
| 0
| 0
| 0.015873
| 1
| 0.031746
| false
| 0
| 0.015873
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5b3ed676b6e61e9634e66548333786246b134d68
| 1,887
|
py
|
Python
|
cloud-function/signedUrls.py
|
dolbyio-samples/blog-gcp-media-automations
|
7e0dc62fc43c6ff4d6cf18f220d7a1fba19491c8
|
[
"CC0-1.0"
] | null | null | null |
cloud-function/signedUrls.py
|
dolbyio-samples/blog-gcp-media-automations
|
7e0dc62fc43c6ff4d6cf18f220d7a1fba19491c8
|
[
"CC0-1.0"
] | null | null | null |
cloud-function/signedUrls.py
|
dolbyio-samples/blog-gcp-media-automations
|
7e0dc62fc43c6ff4d6cf18f220d7a1fba19491c8
|
[
"CC0-1.0"
] | null | null | null |
# pip install google-cloud-storage
import datetime
import os
from google.cloud import storage
def generate_download_signed_url_v4(bucket_name, blob_name):
    """Build and print a V4-signed GET URL for one blob.

    The URL stays valid for 15 minutes and lets any holder download
    ``blob_name`` from ``bucket_name`` without further credentials.
    """
    # If you don't have the authentication file set to an environment variable
    # See: https://cloud.google.com/docs/authentication/getting-started for more information
    # os.environ["GOOGLE_APPLICATION_CREDENTIALS"]="<KEY_FILE>.json"
    client = storage.Client()
    target_blob = client.bucket(bucket_name).blob(blob_name)
    url = target_blob.generate_signed_url(
        version="v4",
        # This URL is valid for 15 minutes
        expiration=datetime.timedelta(minutes=15),
        # Allow GET requests using this URL.
        method="GET",
    )
    print("Generated GET signed URL:")
    print(url)
    print("You can use this URL with any user agent, for example:")
    print("curl '{}'".format(url))
    return url
def generate_upload_signed_url_v4(bucket_name, blob_name):
    """Build and print a V4-signed PUT URL for one blob.

    The URL stays valid for 15 minutes and lets any holder upload an
    ``application/octet-stream`` body to ``blob_name`` in ``bucket_name``.
    """
    # If you don't have the authentication file set to an environment variable
    # See: https://cloud.google.com/docs/authentication/getting-started for more information
    # os.environ["GOOGLE_APPLICATION_CREDENTIALS"]="<KEY_FILE>.json"
    client = storage.Client()
    target_blob = client.bucket(bucket_name).blob(blob_name)
    url = target_blob.generate_signed_url(
        version="v4",
        # This URL is valid for 15 minutes
        expiration=datetime.timedelta(minutes=15),
        # Allow PUT requests using this URL.
        method="PUT",
        content_type="application/octet-stream",
    )
    print("Generated PUT signed URL:")
    print(url)
    print("You can use this URL with any user agent, for example:")
    print(
        "curl -X PUT -H 'Content-Type: application/octet-stream' "
        "--upload-file my-file '{}'".format(url)
    )
    return url
| 33.105263
| 92
| 0.686804
| 251
| 1,887
| 5.039841
| 0.318725
| 0.042688
| 0.044269
| 0.026877
| 0.828459
| 0.735178
| 0.735178
| 0.735178
| 0.735178
| 0.735178
| 0
| 0.008043
| 0.209327
| 1,887
| 56
| 93
| 33.696429
| 0.839812
| 0.325384
| 0
| 0.514286
| 1
| 0
| 0.224425
| 0.038858
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.085714
| 0
| 0.2
| 0.228571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5b8a1aef65fbefaf1db792a7f90ccd9a924998c2
| 7,367
|
py
|
Python
|
tests/integration/test_obtained_bibtex_from_raw_input.py
|
iluxonchik/rfc-bibtex-deprecated
|
8199cb7c6e8d38c734e4cfbca12b5900a5409571
|
[
"MIT"
] | 41
|
2018-01-04T06:18:49.000Z
|
2022-02-15T07:04:21.000Z
|
tests/integration/test_obtained_bibtex_from_raw_input.py
|
iluxonchik/rfc-bibtex-deprecated
|
8199cb7c6e8d38c734e4cfbca12b5900a5409571
|
[
"MIT"
] | 21
|
2018-04-02T10:57:17.000Z
|
2020-10-16T09:50:35.000Z
|
tests/integration/test_obtained_bibtex_from_raw_input.py
|
iluxonchik/rfc-bibtex-deprecated
|
8199cb7c6e8d38c734e4cfbca12b5900a5409571
|
[
"MIT"
] | 4
|
2018-09-19T12:41:18.000Z
|
2021-01-17T13:18:26.000Z
|
"""
Integration tests related to reading raw list of RFCs providied in a text file (one per line)
and directly as command-line args.
"""
import vcr
from .base import BaseRFCBibTexIntegrationTestCase
from rfc_bibtex.rfc_bibtex import RFCBibtex
"""
NOTE: the tests are written with the migrations that will be done in consideration.
For example, the URLs from which the RFC will be obtained will be changed,
with this, there is also a change in the returned content. For this reason,
tests were adapted to work in both of them. This is why some assertions might
seem like incomplete. Later, the tests should and will be refactored.
"""
class ObtainedBibtexFromRawRFCInputTestCase(BaseRFCBibTexIntegrationTestCase):
    TLS_RFCS_FILE = BaseRFCBibTexIntegrationTestCase.TEST_RESOURCES_PATH + "tls_rfcs.txt"
    TLS_RFCS_INVALID_IDS_FILE = BaseRFCBibTexIntegrationTestCase.TEST_RESOURCES_PATH + "tls_rfcs_invalid_ids.txt"
    TLS_RFCS_NON_EXISTING_IDS_FILE = BaseRFCBibTexIntegrationTestCase.TEST_RESOURCES_PATH + "tls_rfcs_non_existing_ids.txt"

    # Titles shared by every scenario's expected output.
    _TLS12_TITLE = "The Transport Layer Security (TLS) Protocol Version 1.2"
    _TLS13_TITLE = "The Transport Layer Security (TLS) Protocol Version 1.3"

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def _assert_expected_entries(self, entries, rfc5246_id="RFC5246",
                                 draft_fragment="{draft-ietf-tls-tls13-21}"):
        """Assert the three BibTeX entries every scenario is expected to yield.

        Args:
            entries: list of BibTeX entry strings produced by RFCBibtex.
            rfc5246_id: the RFC 5246 identifier as it should appear in the
                first entry (case varies per scenario).
            draft_fragment: the draft identifier fragment expected in the
                second entry.
        """
        self.assertEqual(len(entries), 3)
        self.assertIn(rfc5246_id, entries[0])
        self.assertIn(self._TLS12_TITLE, entries[0])
        self.assertIn("2008", entries[0])
        self.assertIn("RFC Editor", entries[0])
        self.assertIn(draft_fragment, entries[1])
        self.assertIn(self._TLS13_TITLE, entries[1])
        self.assertIn("RFC8446", entries[2])
        self.assertIn(self._TLS13_TITLE, entries[2])
        self.assertIn("2018", entries[2])
        self.assertIn("RFC Editor", entries[2])

    @vcr.use_cassette(path='tests/integration/resources/fixtures/vcr_cassettes/synopsis.yaml', record_mode='new_episodes')
    def test_reading_rfcs_from_command_line_returns_expected_latex(self):
        rfc_bibtex = RFCBibtex(['rfc5246', 'draft-ietf-tls-tls13-21', 'RFC8446'])
        entries = list(rfc_bibtex.bibtex_entries)
        # Command-line input preserves the caller's lowercase RFC id.
        self._assert_expected_entries(entries, rfc5246_id="rfc5246")

    @vcr.use_cassette(path='tests/integration/resources/fixtures/vcr_cassettes/synopsis.yaml', record_mode='new_episodes')
    def test_reading_rfcs_from_file_returns_expected_latex(self):
        rfc_bibtex = RFCBibtex(in_file_names=[self.TLS_RFCS_FILE])
        entries = list(rfc_bibtex.bibtex_entries)
        self._assert_expected_entries(entries)

    @vcr.use_cassette(path='tests/integration/resources/fixtures/vcr_cassettes/synopsis.yaml', record_mode='new_episodes')
    def test_reading_rfcs_with_invalid_ids_from_file_returns_expected_latex(self):
        rfc_bibtex = RFCBibtex(in_file_names=[self.TLS_RFCS_INVALID_IDS_FILE])
        entries = list(rfc_bibtex.bibtex_entries)
        self._assert_expected_entries(entries)

    @vcr.use_cassette(path='tests/integration/resources/fixtures/vcr_cassettes/synopsis.yaml', record_mode='new_episodes')
    def test_reading_rfcs_with_non_existing_ids_from_file_returns_expected_latex(self):
        rfc_bibtex = RFCBibtex(in_file_names=[self.TLS_RFCS_NON_EXISTING_IDS_FILE])
        entries = list(rfc_bibtex.bibtex_entries)
        self._assert_expected_entries(entries)

    @vcr.use_cassette(path='tests/integration/resources/fixtures/vcr_cassettes/synopsis.yaml', record_mode='new_episodes')
    def test_reading_rfcs_from_command_line_with_invalid_ids_returns_expected_latex(self):
        """
        Test that invalid RFC/draft IDs don't break the program.
        """
        rfc_bibtex = RFCBibtex(['rFc5246', 'TheDocumentery', 'DraFt-ietf-Tls-tLs13-21', 'RFC8446', 'TheChronic'])
        entries = list(rfc_bibtex.bibtex_entries)
        # Mixed-case input is preserved in the produced entries.
        self._assert_expected_entries(entries, rfc5246_id="rFc5246",
                                      draft_fragment="{DraFt-ietf-Tls-tLs13-21,")

    @vcr.use_cassette(path='tests/integration/resources/fixtures/vcr_cassettes/synopsis.yaml', record_mode='new_episodes')
    def test_reading_rfcs_from_command_line_with_non_existing_rfcs_returns_expected_latex(self):
        """
        Test that non-existing RFC/draft IDs don't break the program.
        """
        rfc_bibtex = RFCBibtex(['RFC5246', 'RFC9999', 'draft-ietf-tls-tls13-21', 'draft-the-doctors-advocate', 'RFC8446', 'RFC0101'])
        entries = list(rfc_bibtex.bibtex_entries)
        self._assert_expected_entries(entries)
| 51.880282
| 133
| 0.699742
| 970
| 7,367
| 5.164948
| 0.136082
| 0.143713
| 0.057485
| 0.095808
| 0.846307
| 0.835529
| 0.806587
| 0.786228
| 0.761078
| 0.761078
| 0
| 0.043822
| 0.176055
| 7,367
| 141
| 134
| 52.248227
| 0.781549
| 0.033528
| 0
| 0.762376
| 0
| 0
| 0.313938
| 0.102541
| 0
| 0
| 0
| 0
| 0.653465
| 1
| 0.079208
| false
| 0.019802
| 0.029703
| 0
| 0.148515
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5bfb7923858a4ca33f18cc10a57ed71d652bc07a
| 118
|
py
|
Python
|
det3d/version.py
|
PatrickChoDev/CSA-3D
|
34aa87194d653a93f16834d485738255f55112f0
|
[
"Apache-2.0"
] | null | null | null |
det3d/version.py
|
PatrickChoDev/CSA-3D
|
34aa87194d653a93f16834d485738255f55112f0
|
[
"Apache-2.0"
] | null | null | null |
det3d/version.py
|
PatrickChoDev/CSA-3D
|
34aa87194d653a93f16834d485738255f55112f0
|
[
"Apache-2.0"
] | null | null | null |
# GENERATED VERSION FILE
# TIME: Sat Jul 3 23:57:03 2021
__version__ = '1.0.0rc1+d492845'
short_version = '1.0.0rc1'
| 23.6
| 32
| 0.711864
| 21
| 118
| 3.761905
| 0.761905
| 0.202532
| 0.227848
| 0.329114
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0.152542
| 118
| 4
| 33
| 29.5
| 0.54
| 0.449153
| 0
| 0
| 1
| 0
| 0.387097
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f38bc1c10273f193615692fa486a6f10e9d962e0
| 44
|
py
|
Python
|
disaggregator/build/pandas/pandas/version.py
|
pjkundert/wikienergy
|
ac3a13780bccb001c81d6f8ee27d3f5706cfa77e
|
[
"MIT"
] | 29
|
2015-01-08T19:20:37.000Z
|
2021-04-20T08:25:56.000Z
|
disaggregator/build/pandas/pandas/version.py
|
afcarl/wikienergy
|
ac3a13780bccb001c81d6f8ee27d3f5706cfa77e
|
[
"MIT"
] | null | null | null |
disaggregator/build/pandas/pandas/version.py
|
afcarl/wikienergy
|
ac3a13780bccb001c81d6f8ee27d3f5706cfa77e
|
[
"MIT"
] | 17
|
2015-02-01T18:12:04.000Z
|
2020-06-15T14:13:04.000Z
|
# Package version constants.
version = '0.15.2'
# No pre-release/build suffix to strip, so the short form is identical.
short_version = version
| 14.666667
| 24
| 0.636364
| 9
| 44
| 3
| 0.555556
| 0.592593
| 0.740741
| 0.814815
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 0.136364
| 44
| 2
| 25
| 22
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
45f74dbb8b411df38df3c015116ed65e298d96cc
| 74,626
|
py
|
Python
|
tests/test_entities_service.py
|
CMoncur/launchkey-python
|
7bed012c183e5011fbeddd9353ed70fde899f932
|
[
"MIT"
] | null | null | null |
tests/test_entities_service.py
|
CMoncur/launchkey-python
|
7bed012c183e5011fbeddd9353ed70fde899f932
|
[
"MIT"
] | null | null | null |
tests/test_entities_service.py
|
CMoncur/launchkey-python
|
7bed012c183e5011fbeddd9353ed70fde899f932
|
[
"MIT"
] | null | null | null |
import unittest
from mock import MagicMock, patch
from ddt import data, unpack, ddt
from formencode import Invalid
from datetime import time
from launchkey.entities.service import AuthorizationResponse, \
AuthResponseType, AuthResponseReason, GeoFence, TimeFence, \
AuthMethodType, AuthPolicy, AuthorizationRequest, AuthMethod
from launchkey.exceptions import UnexpectedDeviceResponse
from launchkey.transports.jose_auth import JOSETransport
@ddt
class TestAuthorizationResponse(unittest.TestCase):
def setUp(self):
self.data = {
"auth": "auth data",
"auth_jwe": "auth jwe data",
"service_user_hash": "vf8fg663aauTkVUFCiR0Er6kctIN9d6958hkzznVHF9",
"user_push_id": "399e1d6c-f651-5b82-9dff-d5d63f16c849",
"org_user_hash": "SlwSGZz0M9kPtZUL6mzAGjdYcmdUS1jHccRKOJ9rTMO",
"public_key_id": "56:66:9d:72:f8:c3:e0:0b:3d:52:f4:81:36:f1:cc:74"
}
self.transport = MagicMock(spec=JOSETransport)
self.json_loads_patch = patch('launchkey.entities.service.loads').start()
self.json_loads_patch.return_value = {
"auth_request": "62e09ff8-f9a9-11e8-bbe2-0242ac130008",
"type": "AUTHORIZED",
"reason": "APPROVED",
"denial_reason": "32",
"service_pins": ["1", "2", "3"],
"device_id": "31e5b804-f9a7-11e8-97ef-0242ac130008"
}
self.addCleanup(patch.stopall)
def test_authorization_response_jwe_response_data(self):
self.data = {
"auth": "auth data",
"auth_jwe": "auth jwe data",
"service_user_hash": "Service User Hash",
"user_push_id": "User Push ID",
"org_user_hash": "Org User Hash",
"public_key_id": "56:66:9d:72:f8:c3:e0:0b:3d:52:f4:81:36:f1:cc:74"
}
self.json_loads_patch.return_value = {
"auth_request": "62e09ff8-f9a9-11e8-bbe2-0242ac130008",
"type": "AUTHORIZED",
"reason": "APPROVED",
"denial_reason": "32",
"service_pins": ["1", "2", "3"],
"device_id": "31e5b804-f9a7-11e8-97ef-0242ac130008"
}
response = AuthorizationResponse(self.data, self.transport)
self.json_loads_patch.assert_called_with(self.transport.decrypt_response.return_value)
self.assertEqual(response.authorization_request_id, "62e09ff8-f9a9-11e8-bbe2-0242ac130008")
self.assertEqual(response.authorized, True)
self.assertEqual(response.device_id, "31e5b804-f9a7-11e8-97ef-0242ac130008")
self.assertEqual(response.service_pins, ["1", "2", "3"])
self.assertEqual(response.service_user_hash, "Service User Hash")
self.assertEqual(response.organization_user_hash, "Org User Hash")
self.assertEqual(response.user_push_id, "User Push ID")
self.assertEqual(response.type.value, "AUTHORIZED")
self.assertEqual(response.reason.value, "APPROVED")
self.assertEqual(response.denial_reason, "32")
def test_authorization_response_response_jwe_authorized_true(self):
    """An AUTHORIZED/APPROVED JWE payload yields authorized == True."""
    payload = self.json_loads_patch.return_value
    payload['type'] = "AUTHORIZED"
    payload['reason'] = "APPROVED"
    self.assertEqual(
        AuthorizationResponse(self.data, self.transport).authorized,
        True)
@data(
    ("DENIED", "DISAPPROVED"),
    ("DENIED", "FRAUDULENT"),
    ("FAILED", "BUSY_LOCAL"),
    ("FAILED", "PERMISSION"),
    ("FAILED", "AUTHENTICATION"),
    ("FAILED", "CONFIGURATION"),
    ("FAILED", "SENSOR"),
    ("OTHER", "OTHER"),
    ("TESTING", "TESTING")
)
@unpack
def test_authorization_response_response_jwe_authorized_false(self,
                                                              resp_type,
                                                              resp_reason):
    """Every non-AUTHORIZED type/reason pair leaves authorized == False."""
    self.json_loads_patch.return_value['type'] = resp_type
    self.json_loads_patch.return_value['reason'] = resp_reason
    response = AuthorizationResponse(self.data, self.transport)
    self.assertEqual(response.authorized, False)
@data(
    ("AUTHORIZED", "APPROVED"),
    ("DENIED", "DISAPPROVED"),
    ("DENIED", "FRAUDULENT"),
    ("FAILED", "BUSY_LOCAL"),
    ("FAILED", "PERMISSION"),
    ("FAILED", "AUTHENTICATION"),
    ("FAILED", "CONFIGURATION"),
    ("FAILED", "SENSOR"),
    ("OTHER", "OTHER")
)
@unpack
def test_authorization_response_response_jwe_response_context(self,
                                                              resp_type,
                                                              resp_reason):
    """Known type/reason strings map to the matching enum members."""
    self.json_loads_patch.return_value['type'] = resp_type
    self.json_loads_patch.return_value['reason'] = resp_reason
    response = AuthorizationResponse(self.data, self.transport)
    self.assertEqual(response.type, AuthResponseType(resp_type))
    self.assertEqual(response.reason, AuthResponseReason(resp_reason))
@data(
    "APPROVED",
    "DISAPPROVED",
    "FRAUDULENT",
    "BUSY_LOCAL",
    "PERMISSION",
    "AUTHENTICATION",
    "CONFIGURATION",
    "SENSOR",
    "OTHER"
)
def test_authorization_response_response_jwe_response_context_unknown_type(
        self,
        resp_reason):
    """An unrecognized type falls back to AuthResponseType.OTHER while the
    reason still maps normally."""
    self.json_loads_patch.return_value['type'] = "UNKNOWN"
    self.json_loads_patch.return_value['reason'] = resp_reason
    response = AuthorizationResponse(self.data, self.transport)
    self.assertEqual(response.type, AuthResponseType.OTHER)
    self.assertEqual(response.reason, AuthResponseReason(resp_reason))
@data("AUTHORIZED", "DENIED", "FAILED")
def test_authorization_response_response_jwe_response_context_unknown_reas(
self,
resp_type):
self.json_loads_patch.return_value['type'] = resp_type
self.json_loads_patch.return_value['reason'] = "UNKNOWN"
response = AuthorizationResponse(self.data, self.transport)
self.assertEqual(response.type, AuthResponseType(resp_type))
self.assertEqual(response.reason, AuthResponseReason.OTHER)
@data("AUTHORIZED", "DENIED", "FAILED")
def test_authorization_response_response_jwe_response_context_fraud(
self,
resp_type):
self.json_loads_patch.return_value['type'] = resp_type
self.json_loads_patch.return_value['reason'] = "FRAUDULENT"
response = AuthorizationResponse(self.data, self.transport)
self.assertTrue(response.fraud, True)
@data(
    "APPROVED",
    "DISAPPROVED",
    "BUSY_LOCAL",
    "PERMISSION",
    "AUTHENTICATION",
    "CONFIGURATION",
    "SENSOR",
    "OTHER"
)
def test_authorization_response_response_jwe_response_context_not_fraud(
        self,
        resp_reason):
    """Any reason other than FRAUDULENT leaves the fraud flag falsy."""
    self.json_loads_patch.return_value['type'] = "UNKNOWN"
    self.json_loads_patch.return_value['reason'] = resp_reason
    response = AuthorizationResponse(self.data, self.transport)
    self.assertFalse(response.fraud)
@data(Invalid, TypeError, ValueError)
def test_authorization_response_response_jwe_unexpected_device_response(
        self, exc):
    """JWE decryption failures surface as UnexpectedDeviceResponse."""
    self.transport.decrypt_response.side_effect = exc
    self.assertRaises(
        UnexpectedDeviceResponse,
        AuthorizationResponse,
        self.data,
        self.transport)
def test_authorization_response_rsa_response_data(self):
    """A legacy RSA package (no auth_jwe key) decrypts via the RSA path
    and carries no policy context fields."""
    self.data = {
        "auth": "auth data",
        "service_user_hash": "Service User Hash",
        "user_push_id": "User Push ID",
        "org_user_hash": "Org User Hash",
        "public_key_id": "56:66:9d:72:f8:c3:e0:0b:3d:52:f4:81:36:f1:cc:74"
    }
    self.json_loads_patch.return_value = {
        "auth_request": "62e09ff8-f9a9-11e8-bbe2-0242ac130008",
        "response": True,
        "service_pins": ["1", "2", "3"],
        "device_id": "31e5b804-f9a7-11e8-97ef-0242ac130008"
    }
    parsed = AuthorizationResponse(self.data, self.transport)
    # Without auth_jwe, the RSA decryption output is what gets parsed.
    self.json_loads_patch.assert_called_with(
        self.transport.decrypt_rsa_response.return_value)
    for attr, expected in (
            ("authorization_request_id",
             "62e09ff8-f9a9-11e8-bbe2-0242ac130008"),
            ("authorized", True),
            ("device_id", "31e5b804-f9a7-11e8-97ef-0242ac130008"),
            ("service_pins", ["1", "2", "3"]),
            ("service_user_hash", "Service User Hash"),
            ("organization_user_hash", "Org User Hash"),
            ("user_push_id", "User Push ID")):
        self.assertEqual(getattr(parsed, attr), expected)
    # RSA responses predate the type/reason/fraud context.
    for attr in ("type", "reason", "denial_reason", "fraud"):
        self.assertIsNone(getattr(parsed, attr))
def test_authorization_response_response_rsa_authorized_true(self):
    """With the JWE key removed, a True RSA response is authorized."""
    del self.data['auth_jwe']
    self.json_loads_patch.return_value['response'] = True
    self.assertEqual(
        AuthorizationResponse(self.data, self.transport).authorized,
        True)
def test_authorization_response_response_rsa_authorized_false(self):
    """With the JWE key removed, a False RSA response is not authorized."""
    del self.data['auth_jwe']
    self.json_loads_patch.return_value['response'] = False
    self.assertEqual(
        AuthorizationResponse(self.data, self.transport).authorized,
        False)
@data(Invalid, TypeError, ValueError)
def test_authorization_response_response_rsa_unexpected_device_response(
        self, exc):
    """RSA decryption failures surface as UnexpectedDeviceResponse."""
    del self.data['auth_jwe']
    self.transport.decrypt_rsa_response.side_effect = exc
    self.assertRaises(
        UnexpectedDeviceResponse,
        AuthorizationResponse,
        self.data,
        self.transport)
@ddt
class TestAuthorizationResponseAuthPolicy(unittest.TestCase):
    """Tests covering the auth_policy parsed out of a device's decrypted
    authorization package, plus AuthPolicy's repr formatting."""

    def setUp(self):
        # Outer (encrypted) response package as delivered by the API.
        self.data = {
            "auth": "auth data",
            "auth_jwe": "auth jwe data",
            "service_user_hash": "vf8fg663aauTkVUFCiR0Er6kctIN9d6958hkzznVHF9",
            "user_push_id": "399e1d6c-f651-5b82-9dff-d5d63f16c849",
            "org_user_hash": "SlwSGZz0M9kPtZUL6mzAGjdYcmdUS1jHccRKOJ9rTMO",
            "public_key_id": "56:66:9d:72:f8:c3:e0:0b:3d:52:f4:81:36:f1:cc:74"
        }
        self.transport = MagicMock(spec=JOSETransport)
        self.transport.decrypt_response.return_value = "{}"
        # Patch the JSON parser so each test can shape the decrypted
        # payload by mutating json_loads_patch.return_value.
        self.json_loads_patch = patch(
            "launchkey.entities.service.loads").start()
        self.addCleanup(patch.stopall)
        self.json_loads_patch.return_value = {
            "auth_request": "62e09ff8-f9a9-11e8-bbe2-0242ac130008",
            "type": "AUTHORIZED",
            "reason": "APPROVED",
            "denial_reason": "32",
            "service_pins": ["1", "2", "3"],
            "device_id": "31e5b804-f9a7-11e8-97ef-0242ac130008",
            "auth_policy": {
                "requirement": None,
                "geofences": [
                    {"latitude": 36.083548, "longitude": -115.157517, "radius": 150, "name": "work"}
                ]
            },
            "auth_methods": [
                {"method": "wearables", "set": False, "active": False, "allowed": True, "supported": True, "user_required": None, "passed": None, "error": None },
                {"method": "geofencing", "set": None, "active": True, "allowed": True, "supported": True, "user_required": None, "passed": None, "error": None },
                {"method": "locations", "set": False, "active": False, "allowed": True, "supported": True, "user_required": None, "passed": None, "error": None },
                {"method": "pin_code", "set": True, "active": True, "allowed": True, "supported": True, "user_required": False, "passed": None, "error": None },
                {"method": "circle_code", "set": True, "active": True, "allowed": True, "supported": True, "user_required": False, "passed": None, "error": None },
                {"method": "face", "set": False, "active": False, "allowed": True, "supported": True, "user_required": None, "passed": None, "error": None },
                {"method": "fingerprint", "set": False, "active": False, "allowed": True, "supported": True, "user_required": None, "passed": None, "error": None }
            ]
        }

    def test_missing_auth_policy(self):
        """Without an auth_policy key the parsed policy is None."""
        del self.json_loads_patch.return_value['auth_policy']
        response = AuthorizationResponse(self.data, self.transport)
        self.assertIsNone(response.auth_policy)

    def test_geofence_auth_policy(self):
        """Geofence dicts are converted into GeoFence objects."""
        self.json_loads_patch.return_value['auth_policy']['geofences'] = [
            {"latitude": 36.083548, "longitude": -115.157517, "radius": 150,
             "name": "work"},
            {"latitude": 40.55, "longitude": -90.12, "radius": 100,
             "name": "home"}
        ]
        response = AuthorizationResponse(self.data, self.transport)
        self.assertEqual(
            response.auth_policy.geofences,
            [
                GeoFence(latitude=36.083548, longitude=-115.157517,
                         radius=150.0, name="work"),
                GeoFence(latitude=40.55, longitude=-90.12, radius=100.0,
                         name="home")
            ]
        )

    def test_empty_geofence_auth_policy(self):
        """An empty geofence list (and no requirement) yields no policy."""
        self.json_loads_patch.return_value['auth_policy']['geofences'] = []
        response = AuthorizationResponse(self.data, self.transport)
        self.assertIsNone(response.auth_policy)

    def test_missing_geofence_auth_policy(self):
        """A missing geofence key (and no requirement) yields no policy."""
        del self.json_loads_patch.return_value['auth_policy']['geofences']
        response = AuthorizationResponse(self.data, self.transport)
        self.assertIsNone(response.auth_policy)

    def test_invalid_requirement(self):
        """An unrecognized requirement ignores both types and amount."""
        self.json_loads_patch.return_value['auth_policy']['requirement'] = 'invalid'
        self.json_loads_patch.return_value['auth_policy']['types'] = ['knowledge', 'inherence']
        self.json_loads_patch.return_value['auth_policy']['amount'] = 2
        response = AuthorizationResponse(self.data, self.transport)
        self.assertEqual(
            response.auth_policy.minimum_requirements,
            []
        )
        self.assertEqual(
            response.auth_policy.minimum_amount,
            0
        )

    def test_types_requirement(self):
        """A 'types' requirement populates minimum_requirements only."""
        self.json_loads_patch.return_value['auth_policy']['requirement'] = 'types'
        self.json_loads_patch.return_value['auth_policy']['types'] = ['knowledge', 'inherence']
        response = AuthorizationResponse(self.data, self.transport)
        self.assertEqual(
            response.auth_policy.minimum_requirements,
            ['knowledge', 'inherence']
        )
        self.assertEqual(
            response.auth_policy.minimum_amount,
            0
        )

    def test_types_requirement_with_amount_included(self):
        """Under a 'types' requirement an included amount is ignored."""
        self.json_loads_patch.return_value['auth_policy']['requirement'] = 'types'
        self.json_loads_patch.return_value['auth_policy']['types'] = ['inherence']
        self.json_loads_patch.return_value['auth_policy']['amount'] = 3
        response = AuthorizationResponse(self.data, self.transport)
        self.assertEqual(
            response.auth_policy.minimum_requirements,
            ['inherence']
        )
        self.assertEqual(
            response.auth_policy.minimum_amount,
            0
        )

    @patch('launchkey.entities.service.warnings')
    def test_types_requirement_invalid_type(self, warnings_patch):
        """Unknown factor types are dropped and a warning is emitted."""
        self.json_loads_patch.return_value['auth_policy']['requirement'] = 'types'
        self.json_loads_patch.return_value['auth_policy']['types'] = ['knowledge', 'invalid']
        response = AuthorizationResponse(self.data, self.transport)
        self.assertEqual(
            response.auth_policy.minimum_requirements,
            ['knowledge']
        )
        warnings_patch.warn.assert_called_once()

    def test_amount_requirement(self):
        """An 'amount' requirement populates minimum_amount only."""
        self.json_loads_patch.return_value['auth_policy']['requirement'] = 'amount'
        self.json_loads_patch.return_value['auth_policy']['amount'] = 3
        response = AuthorizationResponse(self.data, self.transport)
        self.assertEqual(
            response.auth_policy.minimum_amount,
            3
        )
        self.assertEqual(
            response.auth_policy.minimum_requirements,
            []
        )

    def test_amount_requirement_with_types_included(self):
        """Under an 'amount' requirement an included types list is ignored."""
        self.json_loads_patch.return_value['auth_policy']['requirement'] = 'amount'
        self.json_loads_patch.return_value['auth_policy']['amount'] = 3
        self.json_loads_patch.return_value['auth_policy']['types'] = ['knowledge', 'invalid']
        response = AuthorizationResponse(self.data, self.transport)
        self.assertEqual(
            response.auth_policy.minimum_amount,
            3
        )
        self.assertEqual(
            response.auth_policy.minimum_requirements,
            []
        )

    def test_auth_policy_repr_default(self):
        """repr of a default AuthPolicy shows empty requirements/fences."""
        auth_policy = AuthPolicy()
        self.assertEqual(
            str(auth_policy),
            "AuthPolicy <minimum_requirements=[], minimum_amount=0, "
            "geofences=[]>"
        )

    def test_auth_policy_min_amount_repr(self):
        """repr reflects the any= minimum amount."""
        auth_policy = AuthPolicy(any=3)
        self.assertEqual(
            str(auth_policy),
            "AuthPolicy <minimum_requirements=[], "
            "minimum_amount=3, geofences=[]>"
        )

    def test_auth_policy_min_requirements_repr(self):
        """repr lists the enabled factor requirements in order."""
        auth_policy = AuthPolicy(knowledge=True, inherence=True, possession=True)
        self.assertEqual(
            str(auth_policy),
            "AuthPolicy <minimum_requirements="
            "['knowledge', 'inherence', 'possession'], "
            "minimum_amount=0, geofences=[]>"
        )

    def test_auth_policy_repr_geofences(self):
        """repr renders added geofences, coercing coordinates to floats."""
        auth_policy = AuthPolicy()
        auth_policy.add_geofence(1, 2, 3)
        auth_policy.add_geofence(4.1, 5.2, 6.3, name='test')
        self.assertEqual(
            str(auth_policy),
            'AuthPolicy <minimum_requirements=[], minimum_amount=0, '
            'geofences=[GeoFence <name="None", latitude=1.0, longitude=2.0, '
            'radius=3.0>, GeoFence <name="test", latitude=4.1, longitude=5.2, '
            'radius=6.3>]>'
        )
@ddt
class TestAuthorizationResponseAuthMethodInsight(unittest.TestCase):
def setUp(self):
    """Build the outer encrypted package and a mock transport; each test
    supplies its own decrypted JSON via decrypt_response.return_value."""
    self.data = {
        "auth": "auth data",
        "auth_jwe": "auth jwe data",
        "service_user_hash": "vf8fg663aauTkVUFCiR0Er6kctIN9d6958hkzznVHF9",
        "user_push_id": "399e1d6c-f651-5b82-9dff-d5d63f16c849",
        "org_user_hash": "SlwSGZz0M9kPtZUL6mzAGjdYcmdUS1jHccRKOJ9rTMO",
        "public_key_id": "56:66:9d:72:f8:c3:e0:0b:3d:52:f4:81:36:f1:cc:74"
    }
    self.transport = MagicMock(spec=JOSETransport)
    # Default decrypted payload (empty JSON object); tests override this.
    self.transport.decrypt_response.return_value = "{}"
def test_missing_auth_methods(self):
    """A payload with no auth_methods key parses with auth_methods None."""
    self.transport.decrypt_response.return_value = '{"auth_request": "62e09ff8-f9a9-11e8-bbe2-0242ac130008", "type": "AUTHORIZED", "reason": "APPROVED", "denial_reason": "32", "service_pins": ["1", "2", "3"], "device_id": "31e5b804-f9a7-11e8-97ef-0242ac130008", "auth_policy": {"requirement": "types", "types": ["possession"], "geofences": [] } }'
    response = AuthorizationResponse(self.data, self.transport)
    self.assertIsNone(response.auth_methods)
def test_unknown_auth_method(self):
    """An unrecognized method name ("something_new") still parses, mapping
    to AuthMethodType.OTHER with its flags preserved."""
    self.transport.decrypt_response.return_value = '{"auth_request": "62e09ff8-f9a9-11e8-bbe2-0242ac130008", "type": "AUTHORIZED", "reason": "APPROVED", "denial_reason": "32", "service_pins": ["1", "2", "3"], "device_id": "31e5b804-f9a7-11e8-97ef-0242ac130008", "auth_policy": {"requirement": null, "geofences": [ ] }, "auth_methods": [{"method": "wearables", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "geofencing", "set": null, "active": true, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "locations", "set": true, "active": true, "allowed": true, "supported": true, "user_required": true, "passed": true, "error": false }, {"method": "pin_code", "set": true, "active": true, "allowed": true, "supported": true, "user_required": true, "passed": true, "error": false }, {"method": "circle_code", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "face", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "fingerprint", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "something_new", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null } ] }'
    response = AuthorizationResponse(self.data, self.transport)
    # The unknown method is the eighth (index 7) entry in the payload.
    unknown = response.auth_methods[7]
    self.assertEqual(unknown.method, AuthMethodType.OTHER)
    self.assertFalse(unknown.set)
    self.assertFalse(unknown.active)
    self.assertTrue(unknown.allowed)
    self.assertTrue(unknown.supported)
    self.assertIsNone(unknown.user_required)
    self.assertIsNone(unknown.passed)
    self.assertIsNone(unknown.error)
def test_1_pin_code_and_locations_success(self):
    """Scenario 1: both required methods (locations, pin_code) passed, so
    the request is AUTHORIZED/APPROVED and no policy is present."""
    self.transport.decrypt_response.return_value = '{"auth_request": "62e09ff8-f9a9-11e8-bbe2-0242ac130008", "type": "AUTHORIZED", "reason": "APPROVED", "denial_reason": "32", "service_pins": ["1", "2", "3"], "device_id": "31e5b804-f9a7-11e8-97ef-0242ac130008", "auth_policy": {"requirement": null, "geofences": [ ] }, "auth_methods": [{"method": "wearables", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "geofencing", "set": null, "active": true, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "locations", "set": true, "active": true, "allowed": true, "supported": true, "user_required": true, "passed": true, "error": false }, {"method": "pin_code", "set": true, "active": true, "allowed": true, "supported": true, "user_required": true, "passed": true, "error": false }, {"method": "circle_code", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "face", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "fingerprint", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null } ] }'
    response = AuthorizationResponse(self.data, self.transport)
    self.assertEqual(response.type, AuthResponseType.AUTHORIZED)
    self.assertEqual(response.reason, AuthResponseReason.APPROVED)
    # locations is the third (index 2) method entry.
    locations = response.auth_methods[2]
    self.assertEqual(locations.method, AuthMethodType.LOCATIONS)
    self.assertTrue(locations.set)
    self.assertTrue(locations.active)
    self.assertTrue(locations.allowed)
    self.assertTrue(locations.supported)
    self.assertTrue(locations.user_required)
    self.assertTrue(locations.passed)
    self.assertFalse(locations.error)
    # pin_code is the fourth (index 3) method entry.
    pin_code = response.auth_methods[3]
    self.assertEqual(pin_code.method, AuthMethodType.PIN_CODE)
    self.assertTrue(pin_code.set)
    self.assertTrue(pin_code.active)
    self.assertTrue(pin_code.allowed)
    self.assertTrue(pin_code.supported)
    self.assertTrue(pin_code.user_required)
    self.assertTrue(pin_code.passed)
    self.assertFalse(pin_code.error)
    # A null requirement with no geofences produces no auth policy.
    self.assertIsNone(response.auth_policy)
def test_2_location_failure_unchecked_pincode(self):
    """Scenario 2: locations failed, so pin_code was never evaluated
    (passed/error stay None) and the request FAILED on AUTHENTICATION."""
    self.transport.decrypt_response.return_value = '{"auth_request": "62e09ff8-f9a9-11e8-bbe2-0242ac130008", "type": "FAILED", "reason": "AUTHENTICATION", "denial_reason": "32", "service_pins": ["1", "2", "3"], "device_id": "31e5b804-f9a7-11e8-97ef-0242ac130008", "auth_policy": {"requirement": null, "geofences": [ ] }, "auth_methods": [{"method": "wearables", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "geofencing", "set": null, "active": true, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "locations", "set": true, "active": true, "allowed": true, "supported": true, "user_required": true, "passed": false, "error": false }, {"method": "pin_code", "set": true, "active": true, "allowed": true, "supported": true, "user_required": true, "passed": null, "error": null }, {"method": "circle_code", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "face", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "fingerprint", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null } ] }'
    response = AuthorizationResponse(self.data, self.transport)
    self.assertEqual(response.type, AuthResponseType.FAILED)
    self.assertEqual(response.reason, AuthResponseReason.AUTHENTICATION)
    locations = response.auth_methods[2]
    self.assertEqual(locations.method, AuthMethodType.LOCATIONS)
    self.assertTrue(locations.set)
    self.assertTrue(locations.active)
    self.assertTrue(locations.allowed)
    self.assertTrue(locations.supported)
    self.assertTrue(locations.user_required)
    self.assertFalse(locations.passed)
    self.assertFalse(locations.error)
    # pin_code was required but never reached: passed/error remain None.
    pin_code = response.auth_methods[3]
    self.assertEqual(pin_code.method, AuthMethodType.PIN_CODE)
    self.assertTrue(pin_code.set)
    self.assertTrue(pin_code.active)
    self.assertTrue(pin_code.allowed)
    self.assertTrue(pin_code.supported)
    self.assertTrue(pin_code.user_required)
    self.assertIsNone(pin_code.passed)
    self.assertIsNone(pin_code.error)
    self.assertIsNone(response.auth_policy)
def test_3_possession_failure_unchecked_circle_code(self):
    """Scenario 3: a possession-types policy failed, leaving circle_code
    unchecked; the policy itself is surfaced on the response."""
    self.transport.decrypt_response.return_value = '{"auth_request": "62e09ff8-f9a9-11e8-bbe2-0242ac130008", "type": "FAILED", "reason": "POLICY", "denial_reason": "32", "service_pins": ["1", "2", "3"], "device_id": "31e5b804-f9a7-11e8-97ef-0242ac130008", "auth_policy": {"requirement": "types", "types": ["possession"], "geofences": [ ] }, "auth_methods": [{"method": "wearables", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "geofencing", "set": null, "active": true, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "locations", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "pin_code", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "circle_code", "set": true, "active": true, "allowed": true, "supported": true, "user_required": false, "passed": null, "error": null }, {"method": "face", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "fingerprint", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null } ] }'
    response = AuthorizationResponse(self.data, self.transport)
    self.assertEqual(response.type, AuthResponseType.FAILED)
    self.assertEqual(response.reason, AuthResponseReason.POLICY)
    # circle_code is the fifth (index 4) method entry.
    circle_code = response.auth_methods[4]
    self.assertEqual(circle_code.method, AuthMethodType.CIRCLE_CODE)
    self.assertTrue(circle_code.set)
    self.assertTrue(circle_code.active)
    self.assertTrue(circle_code.allowed)
    self.assertTrue(circle_code.supported)
    self.assertFalse(circle_code.user_required)
    self.assertIsNone(circle_code.passed)
    self.assertIsNone(circle_code.error)
    # The types-based policy is parsed into minimum_requirements.
    self.assertEqual(
        response.auth_policy.geofences,
        []
    )
    self.assertEqual(
        response.auth_policy.minimum_amount,
        0
    )
    self.assertEqual(
        response.auth_policy.minimum_requirements,
        ['possession']
    )
def test_4_amount_failure_unchecked_fingerprint(self):
    """Scenario 4: an amount=2 policy failed before fingerprint was
    checked; the amount policy is surfaced on the response."""
    self.transport.decrypt_response.return_value = '{"auth_request": "62e09ff8-f9a9-11e8-bbe2-0242ac130008", "type": "FAILED", "reason": "POLICY", "denial_reason": "32", "service_pins": ["1", "2", "3"], "device_id": "31e5b804-f9a7-11e8-97ef-0242ac130008", "auth_policy": {"requirement": "amount", "amount": 2, "geofences": [ ] }, "auth_methods": [{"method": "wearables", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "geofencing", "set": null, "active": true, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "locations", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "pin_code", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "circle_code", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "face", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "fingerprint", "set": true, "active": true, "allowed": true, "supported": true, "user_required": true, "passed": null, "error": null } ] }'
    response = AuthorizationResponse(self.data, self.transport)
    self.assertEqual(response.type, AuthResponseType.FAILED)
    self.assertEqual(response.reason, AuthResponseReason.POLICY)
    # fingerprint is the last (index 6) method entry.
    fingerprint = response.auth_methods[6]
    self.assertEqual(fingerprint.method, AuthMethodType.FINGERPRINT)
    self.assertTrue(fingerprint.set)
    self.assertTrue(fingerprint.active)
    self.assertTrue(fingerprint.allowed)
    self.assertTrue(fingerprint.supported)
    self.assertTrue(fingerprint.user_required)
    self.assertIsNone(fingerprint.passed)
    self.assertIsNone(fingerprint.error)
    self.assertEqual(
        response.auth_policy.geofences,
        []
    )
    self.assertEqual(
        response.auth_policy.minimum_amount,
        2
    )
    self.assertEqual(
        response.auth_policy.minimum_requirements,
        []
    )
def test_5_amount_success_failed_wearable_sensor_unchecked_fingerprint(self):
    """Scenario 5: a wearable sensor error (error=true, passed=null) causes
    a SENSOR failure before fingerprint is checked."""
    self.transport.decrypt_response.return_value = '{"auth_request": "62e09ff8-f9a9-11e8-bbe2-0242ac130008", "type": "FAILED", "reason": "SENSOR", "denial_reason": "32", "service_pins": ["1", "2", "3"], "device_id": "31e5b804-f9a7-11e8-97ef-0242ac130008", "auth_policy": {"requirement": "amount", "amount": 2, "geofences": [ ] }, "auth_methods": [{"method": "wearables", "set": true, "active": true, "allowed": true, "supported": true, "user_required": true, "passed": null, "error": true }, {"method": "geofencing", "set": null, "active": true, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "locations", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "pin_code", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "circle_code", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "face", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "fingerprint", "set": true, "active": true, "allowed": true, "supported": true, "user_required": true, "passed": null, "error": null } ] }'
    response = AuthorizationResponse(self.data, self.transport)
    self.assertEqual(response.type, AuthResponseType.FAILED)
    self.assertEqual(response.reason, AuthResponseReason.SENSOR)
    # wearables is the first (index 0) method entry; it errored out.
    wearable = response.auth_methods[0]
    self.assertEqual(wearable.method, AuthMethodType.WEARABLES)
    self.assertTrue(wearable.set)
    self.assertTrue(wearable.active)
    self.assertTrue(wearable.allowed)
    self.assertTrue(wearable.supported)
    self.assertTrue(wearable.user_required)
    self.assertIsNone(wearable.passed)
    self.assertTrue(wearable.error)
    # fingerprint was never reached after the sensor error.
    fingerprint = response.auth_methods[6]
    self.assertEqual(fingerprint.method, AuthMethodType.FINGERPRINT)
    self.assertTrue(fingerprint.set)
    self.assertTrue(fingerprint.active)
    self.assertTrue(fingerprint.allowed)
    self.assertTrue(fingerprint.supported)
    self.assertTrue(fingerprint.user_required)
    self.assertIsNone(fingerprint.passed)
    self.assertIsNone(fingerprint.error)
    self.assertEqual(
        response.auth_policy.geofences,
        []
    )
    self.assertEqual(
        response.auth_policy.minimum_amount,
        2
    )
    self.assertEqual(
        response.auth_policy.minimum_requirements,
        []
    )
def test_6_required_amount_2_failed_wearable_unchecked_location_unchecked_fingerprint(self):
    """Scenario 6: under an amount=2 policy the wearable check failed, so
    neither locations nor fingerprint was ever evaluated."""
    self.transport.decrypt_response.return_value = '{"auth_request": "62e09ff8-f9a9-11e8-bbe2-0242ac130008", "type": "FAILED", "reason": "AUTHENTICATION", "denial_reason": "32", "service_pins": ["1", "2", "3"], "device_id": "31e5b804-f9a7-11e8-97ef-0242ac130008", "auth_policy": {"requirement": "amount", "amount": 2, "geofences": [ ] }, "auth_methods": [{"method": "wearables", "set": true, "active": true, "allowed": true, "supported": true, "user_required": false, "passed": false, "error": false }, {"method": "geofencing", "set": null, "active": true, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "locations", "set": true, "active": true, "allowed": true, "supported": true, "user_required": false, "passed": null, "error": null }, {"method": "pin_code", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "circle_code", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "face", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "fingerprint", "set": true, "active": true, "allowed": true, "supported": true, "user_required": true, "passed": null, "error": null } ] }'
    response = AuthorizationResponse(self.data, self.transport)
    self.assertEqual(response.type, AuthResponseType.FAILED)
    self.assertEqual(response.reason, AuthResponseReason.AUTHENTICATION)
    # wearables failed outright (passed=false, error=false).
    wearable = response.auth_methods[0]
    self.assertEqual(wearable.method, AuthMethodType.WEARABLES)
    self.assertTrue(wearable.set)
    self.assertTrue(wearable.active)
    self.assertTrue(wearable.allowed)
    self.assertTrue(wearable.supported)
    self.assertFalse(wearable.user_required)
    self.assertFalse(wearable.passed)
    self.assertFalse(wearable.error)
    # locations was set but never checked.
    locations = response.auth_methods[2]
    self.assertEqual(locations.method, AuthMethodType.LOCATIONS)
    self.assertTrue(locations.set)
    self.assertTrue(locations.active)
    self.assertTrue(locations.allowed)
    self.assertTrue(locations.supported)
    self.assertFalse(locations.user_required)
    self.assertIsNone(locations.passed)
    self.assertIsNone(locations.error)
    # fingerprint was also never reached.
    fingerprint = response.auth_methods[6]
    self.assertEqual(fingerprint.method, AuthMethodType.FINGERPRINT)
    self.assertTrue(fingerprint.set)
    self.assertTrue(fingerprint.active)
    self.assertTrue(fingerprint.allowed)
    self.assertTrue(fingerprint.supported)
    self.assertTrue(fingerprint.user_required)
    self.assertIsNone(fingerprint.passed)
    self.assertIsNone(fingerprint.error)
    self.assertEqual(
        response.auth_policy.geofences,
        []
    )
    self.assertEqual(
        response.auth_policy.minimum_amount,
        2
    )
    self.assertEqual(
        response.auth_policy.minimum_requirements,
        []
    )
def test_7_required_amount_2_successful_fingerprint_successful_locations_unchecked_wearable(self):
    """Scenario 7: an amount=2 policy was satisfied by locations and
    fingerprint, so the optional wearable was never evaluated."""
    self.transport.decrypt_response.return_value = '{"auth_request": "62e09ff8-f9a9-11e8-bbe2-0242ac130008", "type": "AUTHORIZED", "reason": "APPROVED", "denial_reason": "32", "service_pins": ["1", "2", "3"], "device_id": "31e5b804-f9a7-11e8-97ef-0242ac130008", "auth_policy": {"requirement": "amount", "amount": 2, "geofences": [ ] }, "auth_methods": [{"method": "wearables", "set": true, "active": true, "allowed": true, "supported": true, "user_required": false, "passed": null, "error": null }, {"method": "geofencing", "set": null, "active": true, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "locations", "set": true, "active": true, "allowed": true, "supported": true, "user_required": true, "passed": true, "error": false }, {"method": "pin_code", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "circle_code", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "face", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "fingerprint", "set": true, "active": true, "allowed": true, "supported": true, "user_required": true, "passed": true, "error": false } ] }'
    response = AuthorizationResponse(self.data, self.transport)
    self.assertEqual(response.type, AuthResponseType.AUTHORIZED)
    self.assertEqual(response.reason, AuthResponseReason.APPROVED)
    # wearables was set but not required and never checked.
    wearable = response.auth_methods[0]
    self.assertEqual(wearable.method, AuthMethodType.WEARABLES)
    self.assertTrue(wearable.set)
    self.assertTrue(wearable.active)
    self.assertTrue(wearable.allowed)
    self.assertTrue(wearable.supported)
    self.assertFalse(wearable.user_required)
    self.assertIsNone(wearable.passed)
    self.assertIsNone(wearable.error)
    # locations passed.
    locations = response.auth_methods[2]
    self.assertEqual(locations.method, AuthMethodType.LOCATIONS)
    self.assertTrue(locations.set)
    self.assertTrue(locations.active)
    self.assertTrue(locations.allowed)
    self.assertTrue(locations.supported)
    self.assertTrue(locations.user_required)
    self.assertTrue(locations.passed)
    self.assertFalse(locations.error)
    # fingerprint also passed, satisfying the amount=2 requirement.
    fingerprint = response.auth_methods[6]
    self.assertEqual(fingerprint.method, AuthMethodType.FINGERPRINT)
    self.assertTrue(fingerprint.set)
    self.assertTrue(fingerprint.active)
    self.assertTrue(fingerprint.allowed)
    self.assertTrue(fingerprint.supported)
    self.assertTrue(fingerprint.user_required)
    self.assertTrue(fingerprint.passed)
    self.assertFalse(fingerprint.error)
    self.assertEqual(
        response.auth_policy.geofences,
        []
    )
    self.assertEqual(
        response.auth_policy.minimum_amount,
        2
    )
def test_8_required_amount_3_passed_geofence_failed_amount_skipped_face_skipped_pin(self):
    """Amount policy of 3 where the user skipped face and PIN: FAILED/POLICY.

    The canned device payload sets ``requirement: amount, amount: 3`` with
    one geofence.  Only passive geofencing was available; face and pin_code
    were set/active/user_required but never attempted, so their ``passed``
    and ``error`` flags remain ``None``.
    """
    # Canned encrypted-response payload the mocked transport will "decrypt".
    self.transport.decrypt_response.return_value = '{"auth_request": "62e09ff8-f9a9-11e8-bbe2-0242ac130008", "type": "FAILED", "reason": "POLICY", "denial_reason": "32", "service_pins": ["1", "2", "3"], "device_id": "31e5b804-f9a7-11e8-97ef-0242ac130008", "auth_policy": {"requirement": "amount", "amount": 3, "geofences": [{"latitude": 36.083548, "longitude": -115.157517, "radius": 150, "name": "work"} ] }, "auth_methods": [{"method": "wearables", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "geofencing", "set": null, "active": true, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "locations", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "pin_code", "set": true, "active": true, "allowed": true, "supported": true, "user_required": true, "passed": null, "error": null }, {"method": "circle_code", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "face", "set": true, "active": true, "allowed": true, "supported": true, "user_required": true, "passed": null, "error": null }, {"method": "fingerprint", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null } ] }'
    response = AuthorizationResponse(self.data, self.transport)
    self.assertEqual(response.type, AuthResponseType.FAILED)
    self.assertEqual(response.reason, AuthResponseReason.POLICY)
    # geofencing (index 1): device-side method, so set/user_required are None.
    geofencing = response.auth_methods[1]
    self.assertEqual(geofencing.method, AuthMethodType.GEOFENCING)
    self.assertIsNone(geofencing.set)
    self.assertTrue(geofencing.active)
    self.assertTrue(geofencing.allowed)
    self.assertTrue(geofencing.supported)
    self.assertIsNone(geofencing.user_required)
    self.assertIsNone(geofencing.passed)
    self.assertIsNone(geofencing.error)
    # pin_code (index 3): required but skipped -> passed/error stay None.
    pin_code = response.auth_methods[3]
    self.assertEqual(pin_code.method, AuthMethodType.PIN_CODE)
    self.assertTrue(pin_code.set)
    self.assertTrue(pin_code.active)
    self.assertTrue(pin_code.allowed)
    self.assertTrue(pin_code.supported)
    self.assertTrue(pin_code.user_required)
    self.assertIsNone(pin_code.passed)
    self.assertIsNone(pin_code.error)
    # face (index 5): required but skipped -> passed/error stay None.
    face = response.auth_methods[5]
    self.assertEqual(face.method, AuthMethodType.FACE)
    self.assertTrue(face.set)
    self.assertTrue(face.active)
    self.assertTrue(face.allowed)
    self.assertTrue(face.supported)
    self.assertTrue(face.user_required)
    self.assertIsNone(face.passed)
    self.assertIsNone(face.error)
    # Policy round-trips: one geofence, amount floor of 3, no type requirements.
    self.assertEqual(
        response.auth_policy.geofences,
        [
            GeoFence(latitude=36.083548, longitude=-115.157517,
                     radius=150.0, name="work")
        ]
    )
    self.assertEqual(
        response.auth_policy.minimum_amount,
        3
    )
    self.assertEqual(
        response.auth_policy.minimum_requirements,
        []
    )
def test_9_required_amount_2_failed_geofence_unchecked_face_unchecked_pin(self):
    """Amount policy of 2 where the geofence check failed: FAILED/AUTHENTICATION.

    geofencing reports ``passed: false`` (no sensor error) while face and
    pin_code were required but never attempted, leaving their
    ``passed``/``error`` flags as ``None``.
    """
    # Canned encrypted-response payload the mocked transport will "decrypt".
    self.transport.decrypt_response.return_value = '{"auth_request": "62e09ff8-f9a9-11e8-bbe2-0242ac130008", "type": "FAILED", "reason": "AUTHENTICATION", "denial_reason": "32", "service_pins": ["1", "2", "3"], "device_id": "31e5b804-f9a7-11e8-97ef-0242ac130008", "auth_policy": {"requirement": "amount", "amount": 2, "geofences": [{"latitude": 36.083548, "longitude": -115.157517, "radius": 150, "name": "work"} ] }, "auth_methods": [{"method": "wearables", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "geofencing", "set": null, "active": true, "allowed": true, "supported": true, "user_required": null, "passed": false, "error": false }, {"method": "locations", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "pin_code", "set": true, "active": true, "allowed": true, "supported": true, "user_required": true, "passed": null, "error": null }, {"method": "circle_code", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "face", "set": true, "active": true, "allowed": true, "supported": true, "user_required": true, "passed": null, "error": null }, {"method": "fingerprint", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null } ] }'
    response = AuthorizationResponse(self.data, self.transport)
    self.assertEqual(response.type, AuthResponseType.FAILED)
    self.assertEqual(response.reason, AuthResponseReason.AUTHENTICATION)
    # geofencing (index 1): checked and failed -> passed False, error False.
    geofencing = response.auth_methods[1]
    self.assertEqual(geofencing.method, AuthMethodType.GEOFENCING)
    self.assertIsNone(geofencing.set)
    self.assertTrue(geofencing.active)
    self.assertTrue(geofencing.allowed)
    self.assertTrue(geofencing.supported)
    self.assertIsNone(geofencing.user_required)
    self.assertFalse(geofencing.passed)
    self.assertFalse(geofencing.error)
    # pin_code (index 3): required but unchecked -> passed/error None.
    pin_code = response.auth_methods[3]
    self.assertEqual(pin_code.method, AuthMethodType.PIN_CODE)
    self.assertTrue(pin_code.set)
    self.assertTrue(pin_code.active)
    self.assertTrue(pin_code.allowed)
    self.assertTrue(pin_code.supported)
    self.assertTrue(pin_code.user_required)
    self.assertIsNone(pin_code.passed)
    self.assertIsNone(pin_code.error)
    # face (index 5): required but unchecked -> passed/error None.
    face = response.auth_methods[5]
    self.assertEqual(face.method, AuthMethodType.FACE)
    self.assertTrue(face.set)
    self.assertTrue(face.active)
    self.assertTrue(face.allowed)
    self.assertTrue(face.supported)
    self.assertTrue(face.user_required)
    self.assertIsNone(face.passed)
    self.assertIsNone(face.error)
    # Policy round-trips: one geofence, amount floor of 2, no type requirements.
    self.assertEqual(
        response.auth_policy.geofences,
        [
            GeoFence(latitude=36.083548, longitude=-115.157517,
                     radius=150.0, name="work")
        ]
    )
    self.assertEqual(
        response.auth_policy.minimum_amount,
        2
    )
    self.assertEqual(
        response.auth_policy.minimum_requirements,
        []
    )
def test_10_location_failure_unchecked_fingerprint_passed_geofence(self):
    """No explicit requirement; locations failed: FAILED/AUTHENTICATION.

    geofencing passed, locations was checked and failed, and fingerprint
    (also required) was never reached, so its ``passed``/``error`` stay
    ``None``.  ``requirement`` is ``null`` in the payload, so
    ``minimum_amount`` falls back to 0.
    """
    # Canned encrypted-response payload the mocked transport will "decrypt".
    self.transport.decrypt_response.return_value = '{"auth_request": "62e09ff8-f9a9-11e8-bbe2-0242ac130008", "type": "FAILED", "reason": "AUTHENTICATION", "denial_reason": "32", "service_pins": ["1", "2", "3"], "device_id": "31e5b804-f9a7-11e8-97ef-0242ac130008", "auth_policy": {"requirement": null, "geofences": [{"latitude": 36.083548, "longitude": -115.157517, "radius": 150, "name": "work"} ] }, "auth_methods": [{"method": "wearables", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "geofencing", "set": null, "active": true, "allowed": true, "supported": true, "user_required": null, "passed": true, "error": false }, {"method": "locations", "set": true, "active": true, "allowed": true, "supported": true, "user_required": true, "passed": false, "error": false }, {"method": "pin_code", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "circle_code", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "face", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "fingerprint", "set": true, "active": true, "allowed": true, "supported": true, "user_required": true, "passed": null, "error": null } ] }'
    response = AuthorizationResponse(self.data, self.transport)
    self.assertEqual(response.type, AuthResponseType.FAILED)
    self.assertEqual(response.reason, AuthResponseReason.AUTHENTICATION)
    # geofencing (index 1): checked and passed.
    geofencing = response.auth_methods[1]
    self.assertEqual(geofencing.method, AuthMethodType.GEOFENCING)
    self.assertIsNone(geofencing.set)
    self.assertTrue(geofencing.active)
    self.assertTrue(geofencing.allowed)
    self.assertTrue(geofencing.supported)
    self.assertIsNone(geofencing.user_required)
    self.assertTrue(geofencing.passed)
    self.assertFalse(geofencing.error)
    # locations (index 2): checked and failed -> the failing method.
    locations = response.auth_methods[2]
    self.assertEqual(locations.method, AuthMethodType.LOCATIONS)
    self.assertTrue(locations.set)
    self.assertTrue(locations.active)
    self.assertTrue(locations.allowed)
    self.assertTrue(locations.supported)
    self.assertTrue(locations.user_required)
    self.assertFalse(locations.passed)
    self.assertFalse(locations.error)
    # fingerprint (index 6): required but never reached -> passed/error None.
    fingerprint = response.auth_methods[6]
    self.assertEqual(fingerprint.method, AuthMethodType.FINGERPRINT)
    self.assertTrue(fingerprint.set)
    self.assertTrue(fingerprint.active)
    self.assertTrue(fingerprint.allowed)
    self.assertTrue(fingerprint.supported)
    self.assertTrue(fingerprint.user_required)
    self.assertIsNone(fingerprint.passed)
    self.assertIsNone(fingerprint.error)
    # Policy round-trips: null requirement -> amount 0, no type requirements.
    self.assertEqual(
        response.auth_policy.geofences,
        [
            GeoFence(latitude=36.083548, longitude=-115.157517,
                     radius=150.0, name="work")
        ]
    )
    self.assertEqual(
        response.auth_policy.minimum_amount,
        0
    )
    self.assertEqual(
        response.auth_policy.minimum_requirements,
        []
    )
def test_11_required_possession_failure_unchecked_pin_unchecked_circle_code(self):
    """Type requirement of "possession" not met: FAILED/POLICY.

    pin_code and circle_code are set/active but not user-required, and
    neither was attempted, so ``passed``/``error`` remain ``None``.  The
    policy carries no amount floor but a ``possession`` type requirement.
    """
    # Canned encrypted-response payload the mocked transport will "decrypt".
    self.transport.decrypt_response.return_value = '{"auth_request": "62e09ff8-f9a9-11e8-bbe2-0242ac130008", "type": "FAILED", "reason": "POLICY", "denial_reason": "32", "service_pins": ["1", "2", "3"], "device_id": "31e5b804-f9a7-11e8-97ef-0242ac130008", "auth_policy": {"requirement": "types", "types": ["possession"], "geofences": [] }, "auth_methods": [{"method": "wearables", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "geofencing", "set": null, "active": true, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "locations", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "pin_code", "set": true, "active": true, "allowed": true, "supported": true, "user_required": false, "passed": null, "error": null }, {"method": "circle_code", "set": true, "active": true, "allowed": true, "supported": true, "user_required": false, "passed": null, "error": null }, {"method": "face", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "fingerprint", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null } ] }'
    response = AuthorizationResponse(self.data, self.transport)
    self.assertEqual(response.type, AuthResponseType.FAILED)
    self.assertEqual(response.reason, AuthResponseReason.POLICY)
    # pin_code (index 3): configured but not required and never attempted.
    pin_code = response.auth_methods[3]
    self.assertEqual(pin_code.method, AuthMethodType.PIN_CODE)
    self.assertTrue(pin_code.set)
    self.assertTrue(pin_code.active)
    self.assertTrue(pin_code.allowed)
    self.assertTrue(pin_code.supported)
    self.assertFalse(pin_code.user_required)
    self.assertIsNone(pin_code.passed)
    self.assertIsNone(pin_code.error)
    # circle_code (index 4): same shape as pin_code above.
    circle_code = response.auth_methods[4]
    self.assertEqual(circle_code.method, AuthMethodType.CIRCLE_CODE)
    self.assertTrue(circle_code.set)
    self.assertTrue(circle_code.active)
    self.assertTrue(circle_code.allowed)
    self.assertTrue(circle_code.supported)
    self.assertFalse(circle_code.user_required)
    self.assertIsNone(circle_code.passed)
    self.assertIsNone(circle_code.error)
    # Policy round-trips: no geofences, no amount, possession type required.
    self.assertEqual(
        response.auth_policy.geofences,
        []
    )
    self.assertEqual(
        response.auth_policy.minimum_amount,
        0
    )
    self.assertEqual(
        response.auth_policy.minimum_requirements,
        ['possession']
    )
def test_12_required_amount_1_failed_wearable_sensor_unchecked_locations(self):
    """Amount policy of 1 aborted by a wearable sensor error: FAILED/SENSOR.

    The wearables method reports ``error: true`` with ``passed`` still
    ``None``; locations was available but never checked.
    """
    # Canned encrypted-response payload the mocked transport will "decrypt".
    self.transport.decrypt_response.return_value = '{"auth_request": "62e09ff8-f9a9-11e8-bbe2-0242ac130008", "type": "FAILED", "reason": "SENSOR", "denial_reason": "32", "service_pins": ["1", "2", "3"], "device_id": "31e5b804-f9a7-11e8-97ef-0242ac130008", "auth_policy": {"requirement": "amount", "amount": 1, "geofences": [] }, "auth_methods": [{"method": "wearables", "set": true, "active": true, "allowed": true, "supported": true, "user_required": false, "passed": null, "error": true }, {"method": "geofencing", "set": null, "active": true, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "locations", "set": true, "active": true, "allowed": true, "supported": true, "user_required": false, "passed": null, "error": null }, {"method": "pin_code", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "circle_code", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "face", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null }, {"method": "fingerprint", "set": false, "active": false, "allowed": true, "supported": true, "user_required": null, "passed": null, "error": null } ] }'
    response = AuthorizationResponse(self.data, self.transport)
    self.assertEqual(response.type, AuthResponseType.FAILED)
    self.assertEqual(response.reason, AuthResponseReason.SENSOR)
    # wearables (index 0): sensor errored out -> error True, passed None.
    wearable = response.auth_methods[0]
    self.assertEqual(wearable.method, AuthMethodType.WEARABLES)
    self.assertTrue(wearable.set)
    self.assertTrue(wearable.active)
    self.assertTrue(wearable.allowed)
    self.assertTrue(wearable.supported)
    self.assertFalse(wearable.user_required)
    self.assertIsNone(wearable.passed)
    self.assertTrue(wearable.error)
    # locations (index 2): available but never checked -> passed/error None.
    locations = response.auth_methods[2]
    self.assertEqual(locations.method, AuthMethodType.LOCATIONS)
    self.assertTrue(locations.set)
    self.assertTrue(locations.active)
    self.assertTrue(locations.allowed)
    self.assertTrue(locations.supported)
    self.assertFalse(locations.user_required)
    self.assertIsNone(locations.passed)
    self.assertIsNone(locations.error)
    # Policy round-trips: no geofences, amount floor of 1, no type requirements.
    self.assertEqual(
        response.auth_policy.geofences,
        []
    )
    self.assertEqual(
        response.auth_policy.minimum_amount,
        1
    )
    self.assertEqual(
        response.auth_policy.minimum_requirements,
        []
    )
class TestGeoFence(unittest.TestCase):
    """Equality, inequality, and repr behaviour of ``GeoFence``.

    Every comparison test builds two fences from a common baseline via
    :meth:`_fence`, varying exactly one field at a time.
    """

    @staticmethod
    def _fence(latitude=1, longitude=2, radius=3, name='name'):
        # Baseline fixture; override a single field per test.
        return GeoFence(latitude, longitude, radius, name=name)

    def test_equals_same_geofence(self):
        self.assertTrue(self._fence() == self._fence())

    def test_different_lat(self):
        self.assertFalse(self._fence() == self._fence(latitude=2))

    def test_different_long(self):
        self.assertFalse(self._fence() == self._fence(longitude=3))

    def test_different_radius(self):
        self.assertFalse(self._fence() == self._fence(radius=4))

    def test_different_name(self):
        self.assertFalse(self._fence() == self._fence(name='name2'))

    def test_not_equal(self):
        # The != operator must mirror ==.
        self.assertTrue(self._fence() != self._fence(latitude=2))

    def test_different_type(self):
        # A GeoFence never equals a fence of another kind.
        other = TimeFence(
            "Name",
            time(hour=1, minute=2),
            time(hour=3, minute=4),
            monday=True,
            tuesday=True,
            wednesday=True,
            thursday=True,
            friday=True,
            saturday=True,
            sunday=True
        )
        self.assertFalse(self._fence() == other)

    def test_repr(self):
        self.assertEqual(
            str(self._fence(name='My Name')),
            'GeoFence <name="My Name", latitude=1.0, '
            'longitude=2.0, radius=3.0>'
        )
class TestTimeFence(unittest.TestCase):
    """Equality, inequality, and repr behaviour of ``TimeFence``.

    All fixtures come from :meth:`_fence`, which yields the canonical
    "Name"/01:02-03:04/all-days fence; each test overrides one field.
    """

    @staticmethod
    def _fence(name="Name", start=None, end=None, **day_overrides):
        # Baseline: every weekday enabled; callers flip individual days.
        days = dict(monday=True, tuesday=True, wednesday=True,
                    thursday=True, friday=True, saturday=True, sunday=True)
        days.update(day_overrides)
        return TimeFence(
            name,
            start if start is not None else time(hour=1, minute=2),
            end if end is not None else time(hour=3, minute=4),
            **days
        )

    def test_equals_same_timefence(self):
        self.assertTrue(self._fence() == self._fence())

    def test_different_name(self):
        self.assertFalse(self._fence() == self._fence(name="Name2"))

    def test_different_start_time(self):
        self.assertFalse(
            self._fence() == self._fence(start=time(hour=1, minute=5)))

    def test_different_end_time(self):
        self.assertFalse(
            self._fence() == self._fence(end=time(hour=3, minute=5)))

    def test_different_monday(self):
        self.assertFalse(self._fence() == self._fence(monday=False))

    def test_different_tuesday(self):
        self.assertFalse(self._fence() == self._fence(tuesday=False))

    def test_different_wednesday(self):
        self.assertFalse(self._fence() == self._fence(wednesday=False))

    def test_different_thursday(self):
        self.assertFalse(self._fence() == self._fence(thursday=False))

    def test_different_friday(self):
        self.assertFalse(self._fence() == self._fence(friday=False))

    def test_different_saturday(self):
        self.assertFalse(self._fence() == self._fence(saturday=False))

    def test_different_sunday(self):
        self.assertFalse(self._fence() == self._fence(sunday=False))

    def test_not_equal(self):
        # The != operator must mirror ==.
        self.assertTrue(self._fence() != self._fence(monday=False))

    def test_different_type(self):
        # A TimeFence never equals a fence of another kind.
        self.assertFalse(self._fence() == GeoFence(1, 2, 3, "Name"))

    def test_repr(self):
        self.assertEqual(
            str(self._fence(name="My Name")),
            'TimeFence <name="My Name", start_time="01:02:00", '
            'end_time="03:04:00", monday=True, tuesday=True, wednesday=True, '
            'thursday=True, friday=True, saturday=True, sunday=True>'
        )
class TestAuthorizationRequest(unittest.TestCase):
    """repr formatting of ``AuthorizationRequest``."""

    def test_repr(self):
        request = AuthorizationRequest('auth', 'package')
        expected = ('AuthorizationRequest <auth_request="auth", '
                    'push_package="package">')
        self.assertEqual(str(request), expected)
class TestAuthMethod(unittest.TestCase):
    """repr and equality behaviour of ``AuthMethod``.

    :meth:`_method` builds the all-True fingerprint baseline; each
    inequality test flips exactly one constructor argument.
    """

    @staticmethod
    def _method(method=AuthMethodType.FINGERPRINT, set_=True, active=True,
                allowed=True, supported=True, user_required=True,
                passed=True, error=True):
        # Positional order matches the AuthMethod constructor:
        # method, set, active, allowed, supported, user_required,
        # passed, error.
        return AuthMethod(method, set_, active, allowed, supported,
                          user_required, passed, error)

    def test_repr(self):
        auth_method = self._method(allowed=False, supported=False,
                                   error=False)
        self.assertEqual(
            str(auth_method),
            'AuthMethod <method=FINGERPRINT, set=True, active=True, '
            'allowed=False, supported=False, user_required=True, passed=True, '
            'error=False>'
        )

    def test_eq_match(self):
        self.assertEqual(self._method(), self._method())

    def test_eq_different_method(self):
        self.assertNotEqual(self._method(),
                            self._method(method=AuthMethodType.FACE))

    def test_eq_different_set(self):
        self.assertNotEqual(self._method(), self._method(set_=False))

    def test_eq_different_active(self):
        self.assertNotEqual(self._method(), self._method(active=False))

    def test_eq_different_allowed(self):
        self.assertNotEqual(self._method(), self._method(allowed=False))

    def test_eq_different_supported(self):
        self.assertNotEqual(self._method(), self._method(supported=False))

    def test_eq_different_user_required(self):
        self.assertNotEqual(self._method(),
                            self._method(user_required=False))

    def test_eq_different_user_passed(self):
        self.assertNotEqual(self._method(), self._method(passed=False))

    def test_eq_different_user_error(self):
        self.assertNotEqual(self._method(), self._method(error=False))

    def test_eq_different_object(self):
        # Comparing against an unrelated type must not raise, only differ.
        self.assertNotEqual(self._method(), True)

    def test_not_equal(self):
        # The != operator must mirror ==.
        self.assertTrue(self._method() != self._method(set_=False))
| 46.438083
| 1,518
| 0.598103
| 7,494
| 74,626
| 5.803042
| 0.03336
| 0.043782
| 0.04553
| 0.054636
| 0.919012
| 0.913194
| 0.90356
| 0.888636
| 0.875437
| 0.868056
| 0
| 0.032026
| 0.266529
| 74,626
| 1,606
| 1,519
| 46.466999
| 0.762478
| 0
| 0
| 0.746516
| 0
| 0.013271
| 0.304532
| 0.031986
| 0
| 0
| 0
| 0
| 0.246848
| 1
| 0.052422
| false
| 0.034506
| 0.005309
| 0
| 0.062376
| 0.042468
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1b18a0a25fc7da3157e6dfc4b436a13dc71c7c39
| 8,134
|
py
|
Python
|
env/track.py
|
cobu93/gym-racetrack
|
321d0c1eac388cf55370f045a84daa3e396b0351
|
[
"MIT"
] | null | null | null |
env/track.py
|
cobu93/gym-racetrack
|
321d0c1eac388cf55370f045a84daa3e396b0351
|
[
"MIT"
] | null | null | null |
env/track.py
|
cobu93/gym-racetrack
|
321d0c1eac388cf55370f045a84daa3e396b0351
|
[
"MIT"
] | 1
|
2022-03-21T06:41:35.000Z
|
2022-03-21T06:41:35.000Z
|
import numpy as np

# Deterministic synthetic track grid: 100 rows x 50 columns.
# Columns 11..44 form the drivable corridor: row 0 is the start line
# (cell value 4); every later corridor cell is track (2) with a 10%
# chance of an obstacle (3).  Everything outside the corridor is wall (1).
np.random.seed(0)
test_track = []
for row in range(100):
    cells = []
    for column in range(50):
        if 10 < column < 45:
            if row >= 1:
                # Same np.random.choice call order as before, so the
                # seeded sequence of obstacle placements is unchanged.
                cells.append(np.random.choice([2, 3], p=[0.9, 0.1]))
            else:
                cells.append(4)
        else:
            cells.append(1)
    test_track.append(cells)
track_1 = [[1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4],
[1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4],
[1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4],
[1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1],
[2, 2, 2, 2, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1],
[2, 2, 2, 2, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1],
[2, 2, 2, 2, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1],
[2, 2, 2, 2, 2, 2, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1],
[2, 2, 2, 2, 2, 2, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 2, 2, 2, 2, 2, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 2, 2, 2, 2, 2, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 2, 2, 2, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 2, 2, 2, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 2, 2, 2, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1]]
track_2 = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 2, 2, 2, 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, 2, 4],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 2, 2, 2, 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, 2, 4],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, 2, 4],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, 2, 4],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
track_3 = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
| 67.22314
| 108
| 0.331694
| 2,530
| 8,134
| 1.063241
| 0.011858
| 0.858736
| 1.158736
| 1.379926
| 0.936803
| 0.920446
| 0.920446
| 0.920446
| 0.920446
| 0.916729
| 0
| 0.459689
| 0.335136
| 8,134
| 120
| 109
| 67.783333
| 0.037722
| 0
| 0
| 0.679245
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.009434
| 0
| 0.009434
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
1b4d3d964fbd150f9b54b97efd009b5188aaf893
| 8,181
|
py
|
Python
|
utils/data.py
|
Roozbeh-Bazargani/CPSC-533R-project
|
453f093b23d2363f09c61079d1d4fbd878abf3be
|
[
"MIT"
] | null | null | null |
utils/data.py
|
Roozbeh-Bazargani/CPSC-533R-project
|
453f093b23d2363f09c61079d1d4fbd878abf3be
|
[
"MIT"
] | null | null | null |
utils/data.py
|
Roozbeh-Bazargani/CPSC-533R-project
|
453f093b23d2363f09c61079d1d4fbd878abf3be
|
[
"MIT"
] | null | null | null |
import torch
import numpy as np
from torch.utils.data import Dataset
import pickle
from matplotlib import pyplot as plt
#0 left hip
#1 left knee
#2 left foot
#3 right hip
#4 right knee
#5 right foot
#6 middle hip
#7 neck
#8 nose
#9 head
#10 left shoulder
#11 left elbow
#12 left wrist
#13 right shoulder
#14 right elbow
#15 right wrist
#SUBJECT 8 WILL BE VALIDATION !!!!!!!!! [1, 5, 6, 7, 8]
class H36MDataset_pair(Dataset):
    """Human3.6M dataset yielding pairs of 2D-pose samples `offset` frames apart.

    Loads a pickled dict with keys 'subjects', 'poses_2d_pred' and
    'confidences' (the latter two keyed by camera id), filters it to the
    requested subjects, and optionally root-centers and L2-normalizes the
    2D poses.  ``__getitem__`` returns ``{'0': sample_t, '1': sample_{t+offset}}``.
    """

    def __init__(self, fname, normalize_2d=True, subjects=None, offset=5):
        """Load and preprocess the pickled Human3.6M data.

        fname: path to the pickle file.
        normalize_2d: root-center each pose on joint 6 (middle hip) and
            L2-normalize the flattened 32-vector.
        subjects: iterable of subject ids to keep; defaults to [1, 5, 6, 7]
            (subject 8 is reserved for validation).
        offset: frame distance between the two samples of a pair.
        """
        # Fix for the original mutable default argument `subjects=[1, 5, 6, 7]`.
        if subjects is None:
            subjects = [1, 5, 6, 7]
        # Close the file handle deterministically instead of leaking it.
        with open(fname, "rb") as pickle_off:
            self.data = pickle.load(pickle_off)
        self.offset = offset
        # Boolean mask selecting only frames that belong to the requested subjects.
        selection_array = np.zeros(len(self.data['subjects']), dtype=bool)
        for s in subjects:
            selection_array = np.logical_or(selection_array, (np.array(self.data['subjects']) == s))
        self.data['subjects'] = list(np.array(self.data['subjects'])[selection_array])
        cams = ['54138969', '55011271', '58860488', '60457274']
        for cam in cams:
            self.data['poses_2d_pred'][cam] = self.data['poses_2d_pred'][cam][selection_array]
            self.data['confidences'][cam] = self.data['confidences'][cam][selection_array]
            if normalize_2d:
                # Root-center: subtract joint 6 (middle hip) from every joint of
                # the (N, 2, 16) pose, then flatten back to (N, 32)...
                self.data['poses_2d_pred'][cam] = (self.data['poses_2d_pred'][cam].reshape(-1, 2, 16) -
                                                   self.data['poses_2d_pred'][cam].reshape(-1, 2, 16)[:, :, [6]]).reshape(-1, 32)
                # ...and L2-normalize each per-frame 32-vector.
                self.data['poses_2d_pred'][cam] /= np.linalg.norm(self.data['poses_2d_pred'][cam], ord=2, axis=1, keepdims=True)

    def __len__(self):
        # The last `offset` frames have no partner frame, so they are excluded.
        return self.data['poses_2d_pred']['54138969'].shape[0] - self.offset

    def _make_sample(self, idx, cams):
        """Build one sample dict (poses, confidences, subject id) for frame `idx`."""
        sample = dict()
        for c_idx, cam in enumerate(cams):
            # NOTE(review): tensors are moved to the GPU unconditionally,
            # matching the original behaviour; callers need CUDA available.
            sample['cam' + str(c_idx)] = torch.Tensor(
                self.data['poses_2d_pred'][cam][idx].astype('float32')).cuda()
        sample['confidences'] = dict()
        for cam in cams:
            sample['confidences'][cam] = torch.Tensor(
                self.data['confidences'][cam][idx].astype('float32')).cuda()
        sample['subjects'] = self.data['subjects'][idx]
        return sample

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        # Only the first camera is used for pair training (as in the original).
        cams = ['54138969']
        return {'0': self._make_sample(idx, cams),
                '1': self._make_sample(idx + self.offset, cams)}
class H36MDataset(Dataset):
    """Human3.6M dataset yielding one multi-camera 2D-pose sample per frame.

    Loads a pickled dict with keys 'subjects', 'poses_2d_pred' and
    'confidences' (the latter two keyed by camera id), filters it to the
    requested subjects, and optionally root-centers and L2-normalizes the
    2D poses.  ``__getitem__`` returns a dict with one 'camN' tensor per
    camera, per-camera confidences, and the frame's subject id.
    """

    def __init__(self, fname, normalize_2d=True, subjects=None):
        """Load and preprocess the pickled Human3.6M data.

        fname: path to the pickle file.
        normalize_2d: root-center each pose on joint 6 (middle hip) and
            L2-normalize the flattened 32-vector.
        subjects: iterable of subject ids to keep; defaults to [1, 5, 6, 7, 8].
        """
        # Fix for the original mutable default argument `subjects=[1, 5, 6, 7, 8]`.
        if subjects is None:
            subjects = [1, 5, 6, 7, 8]
        # Close the file handle deterministically instead of leaking it.
        with open(fname, "rb") as pickle_off:
            self.data = pickle.load(pickle_off)
        # Boolean mask selecting only frames that belong to the requested subjects.
        selection_array = np.zeros(len(self.data['subjects']), dtype=bool)
        for s in subjects:
            selection_array = np.logical_or(selection_array, (np.array(self.data['subjects']) == s))
        self.data['subjects'] = list(np.array(self.data['subjects'])[selection_array])
        cams = ['54138969', '55011271', '58860488', '60457274']
        for cam in cams:
            self.data['poses_2d_pred'][cam] = self.data['poses_2d_pred'][cam][selection_array]
            self.data['confidences'][cam] = self.data['confidences'][cam][selection_array]
            if normalize_2d:
                # (Debug print statements from the original were removed.)
                # Root-center: subtract joint 6 (middle hip) from every joint of
                # the (N, 2, 16) pose, then flatten back to (N, 32)...
                self.data['poses_2d_pred'][cam] = (self.data['poses_2d_pred'][cam].reshape(-1, 2, 16) -
                                                   self.data['poses_2d_pred'][cam].reshape(-1, 2, 16)[:, :, [6]]).reshape(-1, 32)
                # ...and L2-normalize each per-frame 32-vector.
                self.data['poses_2d_pred'][cam] /= np.linalg.norm(self.data['poses_2d_pred'][cam], ord=2, axis=1, keepdims=True)

    def __len__(self):
        # Every camera holds the same number of frames; use the first one.
        return self.data['poses_2d_pred']['54138969'].shape[0]

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        sample = dict()
        cams = ['54138969', '55011271', '58860488', '60457274']
        for c_idx, cam in enumerate(cams):
            # NOTE(review): tensors are moved to the GPU unconditionally,
            # matching the original behaviour; callers need CUDA available.
            sample['cam' + str(c_idx)] = torch.Tensor(
                self.data['poses_2d_pred'][cam][idx].astype('float32')).cuda()
        sample['confidences'] = dict()
        for cam in cams:
            sample['confidences'][cam] = torch.Tensor(
                self.data['confidences'][cam][idx].astype('float32')).cuda()
        sample['subjects'] = self.data['subjects'][idx]
        return sample
| 44.950549
| 164
| 0.539054
| 1,093
| 8,181
| 3.894785
| 0.120769
| 0.12591
| 0.143528
| 0.16561
| 0.894057
| 0.889594
| 0.870096
| 0.870096
| 0.859056
| 0.859056
| 0
| 0.079097
| 0.269038
| 8,181
| 181
| 165
| 45.198895
| 0.632776
| 0.051705
| 0
| 0.703704
| 0
| 0
| 0.150789
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.061728
| 0.024691
| 0.209877
| 0.049383
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1b84f7f3c2ef560b5084c77a055242c84bd790a7
| 2,191
|
py
|
Python
|
src/core_utils/expections.py
|
MoveBigRocks/core-utils
|
c167f80059e87db45564bf5e78f10d7e4ce18f21
|
[
"MIT"
] | null | null | null |
src/core_utils/expections.py
|
MoveBigRocks/core-utils
|
c167f80059e87db45564bf5e78f10d7e4ce18f21
|
[
"MIT"
] | null | null | null |
src/core_utils/expections.py
|
MoveBigRocks/core-utils
|
c167f80059e87db45564bf5e78f10d7e4ce18f21
|
[
"MIT"
] | null | null | null |
__pyarmor__(__name__, __file__, b'\x50\x59\x41\x52\x4d\x4f\x52\x00\x00\x03\x09\x00\x61\x0d\x0d\x0a\x08\x2d\xa0\x01\x00\x00\x00\x00\x01\x00\x00\x00\x40\x00\x00\x00\xda\x01\x00\x00\x00\x00\x00\x10\x3e\xc2\x01\x31\x96\x57\x25\x75\xe3\x57\xfd\x56\xd6\xc4\x44\x11\x00\x00\x00\x00\x00\x00\x00\x00\xca\x0e\x84\x70\x2e\xb2\x3c\x36\xb1\xeb\x20\xb2\x71\xd9\xc8\x4f\xe7\x65\x11\xde\xbf\xe6\x66\x6e\x53\x1f\x57\x23\x1d\x43\x1c\x41\xfb\x97\x30\x2b\xb7\xeb\x70\x78\xe5\x2b\x2a\x3d\xe2\xae\xdf\x73\xd8\x4a\xfc\xdc\x30\xf9\xb1\x1d\xcf\x9a\xfd\xb9\x08\x9c\x76\x13\xd4\xac\xb8\xb2\x8a\xff\x6d\x1f\x78\xe5\x04\x38\x86\xb9\x32\xae\x78\x09\xbf\xf9\x02\x20\x1c\xce\xee\xce\x4a\x23\xff\xa1\xce\x2a\x89\x7b\x0b\xc5\x24\x25\xc2\x6c\x3a\xad\x2f\x9e\xb6\xfe\x05\xb8\xef\x28\x89\x46\xd2\xf4\x1f\xf5\xd9\xa4\x05\xe4\x22\xab\x6d\xc0\x0d\x8d\xe7\x19\x87\xef\x51\x21\xd4\x6c\x42\x7d\x41\x7a\xbb\x3f\x56\x4b\xe0\xf4\x86\x3b\x07\x0c\xff\xb3\xbe\x69\xd3\xe9\x5f\xf9\x84\xaf\x33\x8a\x0f\x7f\x6c\x0b\xb0\x82\x3a\x67\xae\xbd\xd6\x0a\x59\xae\x32\x7f\x43\xc5\xe0\x9f\x19\x87\xb9\xbe\x89\xda\x08\x83\x04\x3d\xe4\x88\xa7\xf9\xe6\x3c\xa9\xaf\x18\x3d\x44\x5d\x09\x1d\xb2\x46\xd4\x50\x44\xae\x8c\x45\xa6\x76\x74\xcc\xbb\xd2\xb3\x6d\xb8\x42\x25\x63\xb5\x5b\x6d\x59\xbb\xeb\xab\x2a\xae\xa7\xeb\x41\x1e\x65\x74\x9d\xbc\xf0\x1b\x07\x99\x9f\x17\x06\x40\xab\xb6\xf7\x28\x9a\xb8\x0d\x11\xfa\x18\x0c\x2d\xcb\xf8\xd6\xce\xff\x26\xd6\x52\xee\x8d\x56\x7f\xc2\x25\x4f\xf5\x8f\x1c\xf3\x7b\x15\x5e\x48\xef\xf2\xb7\xb8\xbd\x56\x24\xd7\x85\xc6\xbd\x3c\xae\xf2\x9a\x56\xdf\x41\x0a\xe0\xe8\x55\x8c\xdf\x50\xc7\x8f\x7a\x64\x11\x38\xf7\x2f\x2d\xba\xe5\x1a\x21\x22\x8d\x12\x68\x33\xbe\x11\x58\x4d\xe2\x91\x84\x2b\x8e\x8d\xea\x69\xab\xb2\xc1\xe3\x75\x56\xff\xe4\xc6\x04\x53\xc3\x59\xae\x4c\xa7\x4f\xe9\xb4\xfd\x15\xb4\xfd\x81\x4e\xc7\xf9\x4e\x40\xcb\x0e\x87\xb4\xb6\xe3\x10\x87\x15\x4d\x14\x81\xb3\x31\xb6\x03\x2d\xa1\x12\xe7\x93\x1c\x97\xdc\xa2\x82\x6b\xe3\x1d\x50\x9c\x68\x98\xc4\x0a\x74\x06\xde\x32\x83\x6d\xd3\x6d\xa3\x7f\x1d\xa5\xc4\xe4\x1e\x12\x7a\x60\x9f\xb7\x
29\xc5\xb4\xda\xc6\x27\xfb\x9a\xf8\x3f\x30\x13\xf9\xa7\x67\x3d\x91\x96\xad\x20\x6a\x2e\x17\x20\x55\x3c\xd7\x0e\x29\x4a\x20\xb7\x2b\x1b\x14\x1f\xa6\xe0\x13\x11\x32\xa9\x9e\x9d\x04\x1f\xd6', 2)
| 2,191
| 2,191
| 0.749886
| 543
| 2,191
| 3.003683
| 0.40884
| 0.069896
| 0.071735
| 0.05886
| 0.034948
| 0.014715
| 0.014715
| 0
| 0
| 0
| 0
| 0.309872
| 0.001369
| 2,191
| 1
| 2,191
| 2,191
| 0.435558
| 0
| 0
| 0
| 0
| 1
| 0.981752
| 0.981752
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
1bb908858ac8809239e8f6cde9fba30c6f408e44
| 2,661
|
py
|
Python
|
mautrix/errors/__init__.py
|
tulir/mautrix-appservice-python
|
d180603445bb0bc465a7b2ff918c4ac28a5dbfc2
|
[
"MIT"
] | 1
|
2018-08-24T13:33:30.000Z
|
2018-08-24T13:33:30.000Z
|
mautrix/errors/__init__.py
|
tulir/mautrix-appservice-python
|
d180603445bb0bc465a7b2ff918c4ac28a5dbfc2
|
[
"MIT"
] | 4
|
2018-07-10T11:43:46.000Z
|
2018-09-03T22:08:02.000Z
|
mautrix/errors/__init__.py
|
tulir/mautrix-appservice-python
|
d180603445bb0bc465a7b2ff918c4ac28a5dbfc2
|
[
"MIT"
] | 2
|
2018-07-03T04:07:08.000Z
|
2018-09-10T03:13:59.000Z
|
from .base import IntentError, MatrixConnectionError, MatrixError, MatrixResponseError
from .crypto import (
CryptoError,
DecryptedPayloadError,
DecryptionError,
DeviceValidationError,
DuplicateMessageIndex,
EncryptionError,
MatchingSessionDecryptionError,
MismatchingRoomError,
SessionNotFound,
SessionShareError,
VerificationError,
)
from .request import (
MatrixBadContent,
MatrixBadRequest,
MatrixInvalidToken,
MatrixRequestError,
MatrixStandardRequestError,
MatrixUnknownRequestError,
MBadJSON,
MBadState,
MCaptchaInvalid,
MCaptchaNeeded,
MExclusive,
MForbidden,
MGuestAccessForbidden,
MIncompatibleRoomVersion,
MInvalidParam,
MInvalidRoomState,
MInvalidUsername,
MLimitExceeded,
MMissingParam,
MMissingToken,
MNotFound,
MNotJSON,
MRoomInUse,
MTooLarge,
MUnauthorized,
MUnknown,
MUnknownToken,
MUnrecognized,
MUnsupportedRoomVersion,
MUserDeactivated,
MUserInUse,
make_request_error,
standard_error,
)
from .well_known import (
WellKnownError,
WellKnownInvalidVersionsResponse,
WellKnownMissingHomeserver,
WellKnownNotJSON,
WellKnownNotURL,
WellKnownUnexpectedStatus,
WellKnownUnsupportedScheme,
)
__all__ = [
"IntentError",
"MatrixConnectionError",
"MatrixError",
"MatrixResponseError",
"CryptoError",
"DecryptedPayloadError",
"DecryptionError",
"DeviceValidationError",
"DuplicateMessageIndex",
"EncryptionError",
"MatchingSessionDecryptionError",
"MismatchingRoomError",
"SessionNotFound",
"SessionShareError",
"VerificationError",
"MatrixBadContent",
"MatrixBadRequest",
"MatrixInvalidToken",
"MatrixRequestError",
"MatrixStandardRequestError",
"MatrixUnknownRequestError",
"MBadJSON",
"MBadState",
"MCaptchaInvalid",
"MCaptchaNeeded",
"MExclusive",
"MForbidden",
"MGuestAccessForbidden",
"MIncompatibleRoomVersion",
"MInvalidParam",
"MInvalidRoomState",
"MInvalidUsername",
"MLimitExceeded",
"MMissingParam",
"MMissingToken",
"MNotFound",
"MNotJSON",
"MRoomInUse",
"MTooLarge",
"MUnauthorized",
"MUnknown",
"MUnknownToken",
"MUnrecognized",
"MUnsupportedRoomVersion",
"MUserDeactivated",
"MUserInUse",
"make_request_error",
"standard_error",
"WellKnownError",
"WellKnownInvalidVersionsResponse",
"WellKnownMissingHomeserver",
"WellKnownNotJSON",
"WellKnownNotURL",
"WellKnownUnexpectedStatus",
"WellKnownUnsupportedScheme",
]
| 22.74359
| 86
| 0.703495
| 130
| 2,661
| 14.315385
| 0.492308
| 0.03439
| 0.046212
| 0.066631
| 0.896292
| 0.896292
| 0.896292
| 0.73079
| 0.73079
| 0.73079
| 0
| 0
| 0.20932
| 2,661
| 116
| 87
| 22.939655
| 0.884506
| 0
| 0
| 0
| 0
| 0
| 0.337843
| 0.128523
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.034783
| 0
| 0.034783
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
942a3605fd252e679b3a9a0af377c06a3833cc1d
| 1,566
|
py
|
Python
|
test/statements/if1.py
|
kylebarron/MagicPython
|
da6fa0793e2c85d3bf7709ff1d4f65ccf468db11
|
[
"MIT"
] | 1,482
|
2015-10-16T21:59:32.000Z
|
2022-03-30T11:44:40.000Z
|
test/statements/if1.py
|
kylebarron/MagicPython
|
da6fa0793e2c85d3bf7709ff1d4f65ccf468db11
|
[
"MIT"
] | 226
|
2015-10-15T15:53:44.000Z
|
2022-03-25T03:08:27.000Z
|
test/statements/if1.py
|
kylebarron/MagicPython
|
da6fa0793e2c85d3bf7709ff1d4f65ccf468db11
|
[
"MIT"
] | 129
|
2015-10-20T02:41:49.000Z
|
2022-03-22T01:44:36.000Z
|
if (a if b else c):
1
elif b or c and d:
2
else:
3
if : keyword.control.flow.python, source.python
: source.python
( : punctuation.parenthesis.begin.python, source.python
a : source.python
: source.python
if : keyword.control.flow.python, source.python
: source.python
b : source.python
: source.python
else : keyword.control.flow.python, source.python
: source.python
c : source.python
) : punctuation.parenthesis.end.python, source.python
: : punctuation.separator.colon.python, source.python
: source.python
1 : constant.numeric.dec.python, source.python
elif : keyword.control.flow.python, source.python
: source.python
b : source.python
: source.python
or : keyword.operator.logical.python, source.python
: source.python
c : source.python
: source.python
and : keyword.operator.logical.python, source.python
: source.python
d : source.python
: : punctuation.separator.colon.python, source.python
: source.python
2 : constant.numeric.dec.python, source.python
else : keyword.control.flow.python, source.python
: : punctuation.separator.colon.python, source.python
: source.python
3 : constant.numeric.dec.python, source.python
| 35.590909
| 67
| 0.557471
| 160
| 1,566
| 5.45625
| 0.175
| 0.467354
| 0.57732
| 0.357388
| 0.828179
| 0.828179
| 0.725086
| 0.725086
| 0.562428
| 0.502864
| 0
| 0.005935
| 0.354406
| 1,566
| 43
| 68
| 36.418605
| 0.857567
| 0
| 0
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
848a9ebffee8bc1081c71646a7446183489b1d56
| 136
|
py
|
Python
|
x_rebirth_station_calculator/web/wsgi/run_server.py
|
Phipsz/XRebirthStationCalculator
|
ac31c2f5816be34a7df2d7c4eb4bd5e01f7ff835
|
[
"MIT"
] | 1
|
2016-04-17T11:00:22.000Z
|
2016-04-17T11:00:22.000Z
|
x_rebirth_station_calculator/web/wsgi/run_server.py
|
Phipsz/XRebirthStationCalculator
|
ac31c2f5816be34a7df2d7c4eb4bd5e01f7ff835
|
[
"MIT"
] | null | null | null |
x_rebirth_station_calculator/web/wsgi/run_server.py
|
Phipsz/XRebirthStationCalculator
|
ac31c2f5816be34a7df2d7c4eb4bd5e01f7ff835
|
[
"MIT"
] | null | null | null |
import x_rebirth_station_calculator.web.http_control

# Script entry point: start the station calculator's HTTP control server
# when this module is executed directly (not when it is imported).
if __name__ == '__main__':
    x_rebirth_station_calculator.web.http_control.run()
| 34
| 55
| 0.830882
| 19
| 136
| 5.105263
| 0.631579
| 0.164948
| 0.309278
| 0.515464
| 0.804124
| 0.804124
| 0.804124
| 0
| 0
| 0
| 0
| 0
| 0.080882
| 136
| 3
| 56
| 45.333333
| 0.776
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 9
|
84bdb11907fda0a7aeb514eebf7751ac541b28e0
| 1,359
|
py
|
Python
|
library/test_reset.py
|
ianmiell/shutit-openshift-cluster
|
09e5b9c63dfa085721f92199b9a7ecd136cf9cfa
|
[
"MIT"
] | 23
|
2016-11-04T13:50:17.000Z
|
2020-01-09T14:06:47.000Z
|
library/test_reset.py
|
ianmiell/shutit-openshift-cluster
|
09e5b9c63dfa085721f92199b9a7ecd136cf9cfa
|
[
"MIT"
] | 12
|
2017-02-12T13:25:06.000Z
|
2019-01-23T09:19:33.000Z
|
library/test_reset.py
|
ianmiell/shutit-openshift-cluster
|
09e5b9c63dfa085721f92199b9a7ecd136cf9cfa
|
[
"MIT"
] | 7
|
2017-02-12T12:27:48.000Z
|
2018-12-11T00:56:55.000Z
|
import time
def do_reset(test_config_module, shutit_sessions, chef_deploy_method):
    """Reset every machine in the test cluster and re-run the chef deploy.

    test_config_module: module exposing a `machines` mapping of machine names.
    shutit_sessions: mapping of machine name -> shutit session used to run
        shell commands on that machine.
    chef_deploy_method: 'solo' runs chef-solo ad-hoc on each machine;
        anything else schedules chef-client runs via crontab.
    """
    # Stop cron on every machine so scheduled chef runs don't interfere
    # with the reset.
    # NOTE(review): 'systemd stop crond' looks like it should be
    # 'systemctl stop crond' -- confirm against the target hosts.
    for machine in test_config_module.machines.keys():
        shutit_session = shutit_sessions[machine]
        shutit_session.send('systemd stop crond')
    # Give any in-flight chef runs time to finish before resetting.
    time.sleep(300)
    for machine in test_config_module.machines.keys():
        shutit_session = shutit_sessions[machine]
        if chef_deploy_method == 'solo':
            # adhoc reset on it
            shutit_session.send(r'''chef-solo --environment ocp-cluster-environment -o 'recipe[cookbook-openshift3::adhoc_reset]' -c ~/chef-solo-example/solo.rb >> /tmp/chef.adhoc_reset.log.`date "+%H%M%S"` 2>&1 || true''')
            # re-run chef to install everything
            shutit_session.send(r'''chef-solo --environment ocp-cluster-environment -o 'recipe[cookbook-openshift3]' -c ~/chef-solo-example/solo.rb >> /tmp/chef.adhoc_reset_post.log.`date "+%H%M%S"` 2>&1 || true''')
        else:
            # adhoc reset on it
            shutit_session.send(r'''chef-client -o 'recipe[cookbook-openshift3::adhoc_reset_node]' > /tmp/chef.adhoc_reset.log.`date "+\%H\%M\%S"` 2>&1' | crontab''')
            # re-run chef to install everything
            shutit_session.send(r'''chef-client > /tmp/chef.adhoc_reset_post.log.`date "+\%H\%M\%S"` 2>&1' | crontab''')
    # switch on cronjobs
    # NOTE(review): same systemd/systemctl concern as above.
    for machine in test_config_module.machines.keys():
        shutit_session = shutit_sessions[machine]
        shutit_session.send('systemd start crond')
| 56.625
| 214
| 0.727741
| 207
| 1,359
| 4.608696
| 0.304348
| 0.122642
| 0.106918
| 0.075472
| 0.850105
| 0.850105
| 0.790356
| 0.790356
| 0.767296
| 0.714885
| 0
| 0.011589
| 0.111111
| 1,359
| 23
| 215
| 59.086957
| 0.778146
| 0.089772
| 0
| 0.352941
| 0
| 0.235294
| 0.49147
| 0.285946
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.058824
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
84e63f3fc082b9003d7034a0f3f99f00363084e9
| 58
|
py
|
Python
|
package/__init__.py
|
PavelFryblik/DeepLearningStreamlit
|
0dd52e8af25d0149e25814752f4813796bad5f87
|
[
"MIT"
] | null | null | null |
package/__init__.py
|
PavelFryblik/DeepLearningStreamlit
|
0dd52e8af25d0149e25814752f4813796bad5f87
|
[
"MIT"
] | null | null | null |
package/__init__.py
|
PavelFryblik/DeepLearningStreamlit
|
0dd52e8af25d0149e25814752f4813796bad5f87
|
[
"MIT"
] | null | null | null |
from package import components
from package import agents
| 19.333333
| 30
| 0.862069
| 8
| 58
| 6.25
| 0.625
| 0.44
| 0.68
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 58
| 2
| 31
| 29
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ca0df50659e708de166acd464f9da03cdef91e13
| 10,841
|
py
|
Python
|
Colloquiums/2020-2021/Revision_colloquium_1/Exercise_2_tests.py
|
Szymon-Budziak/ASD_exercises_solutions
|
36ccbdae03a6c7e4ad141a2b7b01bef9353574ee
|
[
"MIT"
] | 7
|
2021-12-28T23:38:42.000Z
|
2022-03-29T16:36:16.000Z
|
Colloquiums/2020-2021/Revision_colloquium_1/Exercise_2_tests.py
|
Szymon-Budziak/ASD_exercises_solutions
|
36ccbdae03a6c7e4ad141a2b7b01bef9353574ee
|
[
"MIT"
] | null | null | null |
Colloquiums/2020-2021/Revision_colloquium_1/Exercise_2_tests.py
|
Szymon-Budziak/ASD_exercises_solutions
|
36ccbdae03a6c7e4ad141a2b7b01bef9353574ee
|
[
"MIT"
] | 4
|
2021-06-29T20:21:52.000Z
|
2022-03-12T10:04:17.000Z
|
M1 = [[0, 1, 1, 0],
[1, 0, 1, 0],
[1, 1, 0, 1],
[0, 0, 1, 0]]
M2 = [[0, 1, 1, 0, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 0, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 0]]
M3 = [[0, 1, 1],
[1, 0, 1],
[1, 1, 0]]
M4 = [[0, 1, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 1, 0, 0],
[1, 0, 0, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0]]
M5 = [[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 0, 1],
[0, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 1, 1, 1, 0]]
# 0 1 2 3 4 5 6 7 8 9
M6 = [[0, 1, 1, 0, 0, 0, 0, 0, 0, 0], # 0
[1, 0, 1, 0, 0, 0, 0, 0, 0, 0], # 1
[1, 1, 0, 1, 0, 0, 0, 0, 0, 0], # 2
[0, 0, 1, 0, 1, 0, 0, 1, 0, 0], # 3
[0, 0, 0, 1, 0, 1, 1, 0, 0, 0], # 4
[0, 0, 0, 0, 1, 0, 1, 0, 0, 0], # 5
[0, 0, 0, 0, 1, 1, 0, 0, 0, 0], # 6
[0, 0, 0, 1, 0, 0, 0, 0, 1, 1], # 7
[0, 0, 0, 0, 0, 0, 0, 1, 0, 1], # 8
[0, 0, 0, 0, 0, 0, 0, 1, 1, 0]] # 9
# M7: adjacency matrix of the complete graph on n vertices --
# every off-diagonal entry is 1, the diagonal (no self-loops) is 0.
n = 250
M7 = [[0 if row == col else 1 for col in range(n)] for row in range(n)]
# M8: dense 50x50 0/1 matrix with irregular structure.
# NOTE(review): M8[0][0] == 1, i.e. a self-loop on vertex 0 — presumably an
# intentional "irregular graph" negative case (expected result is None below);
# confirm against the specification of the function under test.
M8 = [[1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0,
       0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1],
      [0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0,
       1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1],
      [1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0,
       0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1],
      [1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1,
       0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0],
      [0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1,
       1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
      [0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1,
       0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0],
      [0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1,
       0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1],
      [0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1,
       0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0],
      [1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0,
       1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0],
      [1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1,
       1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1],
      [0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
      [0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1,
       0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0],
      [0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1,
       0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1],
      [1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1,
       1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1],
      [1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1,
       1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0],
      [0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1,
       0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0],
      [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0,
       1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0],
      [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0,
       0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0],
      [0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1,
       1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0],
      [1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1,
       0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0],
      [1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0,
       0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1],
      [0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0,
       0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
      [0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1],
      [1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0,
       0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0],
      [1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0,
       0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0],
      [0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0,
       1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1],
      [1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0,
       1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1],
      [1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0,
       1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1],
      [0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0,
       0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1],
      [1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1],
      [1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1,
       1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1],
      [1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0,
       0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1],
      [1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1,
       1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0],
      [1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1,
       0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1],
      [1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0],
      [1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0,
       0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0],
      [0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0,
       0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0],
      [0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0,
       1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0],
      [0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1,
       0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1],
      [1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0,
       1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1],
      [0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1,
       1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1],
      [0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1,
       0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1],
      [1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1,
       1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0],
      [0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0,
       0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1],
      [0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1,
       1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1],
      [0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0,
       1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0],
      [0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0,
       1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0],
      [0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1,
       0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0],
      [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0,
       1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0],
      [1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0,
       1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1]]
# Test cases: (adjacency matrix, expected result of the function under test).
# M1 and M2 are defined earlier in the file.
# NOTE(review): judging from the fixtures (M4 -> 0 and M5 -> 6 are the tree
# centers, M6 -> 3 joins the three triangles), the expected value looks like
# the index of a distinguished central vertex, with None when no such vertex
# exists — confirm against the tested function's specification.
TESTS = [(M1, 2),
         (M2, 2),
         (M3, None),
         (M4, 0),
         (M5, 6),
         (M6, 3),
         (M7, None),
         (M8, None)
         ]
def runtests(f):
    """Run ``f`` against every (matrix, expected) pair in the module-level TESTS.

    For each case, prints the input (when small enough), the obtained and
    expected results, and a per-case verdict; finishes with a summary.
    ``f`` receives a single adjacency matrix and its return value is compared
    against the recorded expected result.
    """
    problems_count = 0
    # Unpack each case instead of indexing x[0]/x[1]; enumerate from 1 so the
    # printed test number matches the original `i + 1` output.  (The original
    # inner `for i in range(len(x[0]))` shadowed the enumerate index `i`.)
    for test_no, (matrix, expected) in enumerate(TESTS, start=1):
        print()
        print(f"--- (test #{test_no}) ----------------------")
        print("Liczba wierzcholkow:", len(matrix))
        print("Dane wejściowe:")
        if len(matrix) < 15:
            # Small matrices are printed row by row; big ones are skipped.
            for row in matrix:
                print(row)
        else:
            print("<< za duze do wypisania >>")
        result = f(matrix)
        print("Uzyskany wynik: ", result)
        print("Oczekiwany wynik: ", expected)
        if result == expected:
            print("OK!")
        else:
            print("PROBLEM!")
            problems_count += 1
    # Summary after all cases.
    print()
    print("------------------------------------")
    print("------------------------------------")
    if problems_count > 0:
        print(f"Jest {problems_count} problemów!")
    else:
        print("Wszystko OK!")
| 56.170984
| 121
| 0.300249
| 2,958
| 10,841
| 1.099053
| 0.018932
| 0.507536
| 0.451246
| 0.34697
| 0.871424
| 0.871424
| 0.863734
| 0.863734
| 0.863734
| 0.860043
| 0
| 0.438121
| 0.397011
| 10,841
| 192
| 122
| 56.463542
| 0.059201
| 0.003597
| 0
| 0.061111
| 0
| 0
| 0.02491
| 0.00887
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005556
| false
| 0
| 0
| 0
| 0.005556
| 0.083333
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
ca3f3fe438e72750d68d8ca2c6dd76fa2230b5b7
| 142
|
py
|
Python
|
w08/e02.py
|
Luccifer/PythonCoruseraHSE
|
653d6a24325789342f0d033717ba548dc6e90483
|
[
"Unlicense"
] | 1
|
2020-01-12T12:55:07.000Z
|
2020-01-12T12:55:07.000Z
|
w08/e02.py
|
Luccifer/PythonCourseraHSE
|
653d6a24325789342f0d033717ba548dc6e90483
|
[
"Unlicense"
] | null | null | null |
w08/e02.py
|
Luccifer/PythonCourseraHSE
|
653d6a24325789342f0d033717ba548dc6e90483
|
[
"Unlicense"
] | null | null | null |
# Count the number of *distinct* whitespace-separated words read from stdin.
# (str.split() with no argument already strips surrounding whitespace, which
# is why the commented-out map(str.strip, ...) variant below is redundant.)
import sys
print(len(set(sys.stdin.read().split())))
# print(len(set(map(str.strip, sys.stdin.read().split()))))
| 20.285714
| 59
| 0.683099
| 23
| 142
| 4.217391
| 0.652174
| 0.164948
| 0.226804
| 0.350515
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091549
| 142
| 6
| 60
| 23.666667
| 0.751938
| 0.577465
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 7
|
b6d92c3979f5ee7f748476e691946a89d8ca6d91
| 12,265
|
py
|
Python
|
tests/pipert/core/test_pipeline_manager.py
|
Elon-Abulafia/PipeRT
|
fba46d67a6dd546a9d70c3d854c3b7d3910f82ba
|
[
"MIT"
] | null | null | null |
tests/pipert/core/test_pipeline_manager.py
|
Elon-Abulafia/PipeRT
|
fba46d67a6dd546a9d70c3d854c3b7d3910f82ba
|
[
"MIT"
] | null | null | null |
tests/pipert/core/test_pipeline_manager.py
|
Elon-Abulafia/PipeRT
|
fba46d67a6dd546a9d70c3d854c3b7d3910f82ba
|
[
"MIT"
] | null | null | null |
from unittest.mock import MagicMock
import pytest
from tests.pipert.core.utils.dummy_routine_with_queue import DummyRoutineWithQueue
from tests.pipert.core.utils.dummy_routine import DummyRoutine
from tests.pipert.core.utils.dummy_component import DummyComponent
from pipert.core.pipeline_manager import PipelineManager
def return_routine_class_object_by_name(name):
    """Map a routine type name to its dummy routine class, or None if unknown.

    Used as the side_effect of the mocked routine-class lookup on
    PipelineManager in the fixtures below.
    """
    routine_classes = {
        "DummyRoutineWithQueue": DummyRoutineWithQueue,
        "DummyRoutine": DummyRoutine,
    }
    return routine_classes.get(name)
@pytest.fixture(scope="function")
def pipeline_manager():
    """A PipelineManager whose class lookups are stubbed with dummy types."""
    manager = PipelineManager()
    manager._get_routine_class_object_by_type_name = MagicMock(
        side_effect=return_routine_class_object_by_name
    )
    manager._get_component_class_object_by_type_name = MagicMock(
        return_value=DummyComponent
    )
    return manager


@pytest.fixture(scope="function")
def pipeline_manager_with_component(pipeline_manager):
    """Manager pre-populated with a single component named 'comp'."""
    result = pipeline_manager.create_component(component_name="comp")
    assert result["Succeeded"], result["Message"]
    return pipeline_manager


@pytest.fixture(scope="function")
def pipeline_manager_with_component_and_queue(pipeline_manager_with_component):
    """Manager whose component 'comp' already holds queue 'queue1'."""
    result = pipeline_manager_with_component.create_queue_to_component(
        component_name="comp", queue_name="queue1"
    )
    assert result["Succeeded"], result["Message"]
    return pipeline_manager_with_component


@pytest.fixture(scope="function")
def pipeline_manager_with_component_and_queue_and_routine(
    pipeline_manager_with_component_and_queue,
):
    """Manager with 'comp', 'queue1' and routine 'routine1' attached."""
    result = pipeline_manager_with_component_and_queue.add_routine_to_component(
        component_name="comp",
        routine_type_name="DummyRoutineWithQueue",
        queue="queue1",
        name="routine1",
    )
    assert result["Succeeded"], result["Message"]
    return pipeline_manager_with_component_and_queue
# NOTE: names must be unique per kind — a queue, component or routine cannot
# be added twice under the same name.
def test_create_component(pipeline_manager):
    """Creating a new component succeeds and registers it by name."""
    result = pipeline_manager.create_component(component_name="comp")
    assert result["Succeeded"], result["Message"]
    assert "comp" in pipeline_manager.components


def test_create_component_with_same_name(pipeline_manager_with_component):
    """A duplicate component name is rejected."""
    result = pipeline_manager_with_component.create_component(component_name="comp")
    assert not result["Succeeded"], result["Message"]


def test_remove_component(pipeline_manager_with_component):
    """Removing an existing component succeeds and unregisters it."""
    result = pipeline_manager_with_component.remove_component(component_name="comp")
    assert result["Succeeded"], result["Message"]
    assert "comp" not in pipeline_manager_with_component.components


def test_add_queue(pipeline_manager_with_component):
    """A queue can be added to an existing component."""
    result = pipeline_manager_with_component.create_queue_to_component(
        component_name="comp", queue_name="queue1"
    )
    assert result["Succeeded"], result["Message"]
    assert "queue1" in pipeline_manager_with_component.components["comp"].queues


def test_add_queue_with_same_name(pipeline_manager_with_component_and_queue):
    """A duplicate queue name within a component is rejected."""
    result = pipeline_manager_with_component_and_queue.create_queue_to_component(
        component_name="comp", queue_name="queue1"
    )
    assert not result["Succeeded"], result["Message"]


def test_remove_queue(pipeline_manager_with_component_and_queue):
    """An unused queue can be removed from its component."""
    result = pipeline_manager_with_component_and_queue.remove_queue_from_component(
        component_name="comp", queue_name="queue1"
    )
    assert result["Succeeded"], result["Message"]


def test_remove_queue_that_is_used_by_routine(
    pipeline_manager_with_component_and_queue_and_routine,
):
    """A queue bound to a routine is only removable once the routine is gone."""
    manager = pipeline_manager_with_component_and_queue_and_routine
    result = manager.remove_queue_from_component(
        component_name="comp", queue_name="queue1"
    )
    assert not result["Succeeded"], result["Message"]
    result = manager.remove_routine_from_component(
        component_name="comp", routine_name="routine1"
    )
    assert result["Succeeded"], result["Message"]
    result = manager.remove_queue_from_component(
        component_name="comp", queue_name="queue1"
    )
    assert result["Succeeded"], result["Message"]
def test_create_routine(pipeline_manager_with_component_and_queue):
    """A routine can be attached to a component's queue."""
    manager = pipeline_manager_with_component_and_queue
    result = manager.add_routine_to_component(
        component_name="comp",
        routine_type_name="DummyRoutineWithQueue",
        queue="queue1",
        name="capture_frame",
    )
    assert result["Succeeded"], result["Message"]
    assert len(manager.components["comp"]._routines) == 1


def test_create_routine_with_same_name(
    pipeline_manager_with_component_and_queue_and_routine,
):
    """A duplicate routine name within a component is rejected."""
    manager = pipeline_manager_with_component_and_queue_and_routine
    result = manager.add_routine_to_component(
        component_name="comp",
        routine_type_name="DummyRoutineWithQueue",
        queue="queue1",
        name="routine1",
    )
    assert not result["Succeeded"], result["Message"]


def test_remove_routine(pipeline_manager_with_component_and_queue_and_routine):
    """Removing an existing routine succeeds and detaches it."""
    manager = pipeline_manager_with_component_and_queue_and_routine
    result = manager.remove_routine_from_component(
        component_name="comp", routine_name="routine1"
    )
    assert result["Succeeded"], result["Message"]
    assert len(manager.components["comp"]._routines) == 0


def test_remove_routine_does_not_exist(
    pipeline_manager_with_component_and_queue_and_routine,
):
    """Removing an unknown routine name is rejected."""
    manager = pipeline_manager_with_component_and_queue_and_routine
    result = manager.remove_routine_from_component(
        component_name="comp", routine_name="not_exist"
    )
    assert not result["Succeeded"], result["Message"]
def test_run_and_stop_component(
    pipeline_manager_with_component_and_queue_and_routine,
):
    """run_component clears the component's stop event; stop_component sets it."""
    manager = pipeline_manager_with_component_and_queue_and_routine
    component = manager.components["comp"]
    # A freshly created component starts in the stopped state.
    assert component.stop_event.is_set()
    result = manager.run_component(component_name="comp")
    assert result["Succeeded"], result["Message"]
    assert not component.stop_event.is_set()
    result = manager.stop_component(component_name="comp")
    assert result["Succeeded"], result["Message"]
    assert component.stop_event.is_set()
def test_create_components_using_structure(pipeline_manager):
    """setup_components accepts a well-formed structure (non-list response)."""
    response = pipeline_manager.setup_components(
        {
            "components": {
                "comp1": {
                    "queues": [
                        "que1",
                    ],
                    "execution_mode": "process",
                    "routines": {
                        "rout1": {
                            "queue": "que1",
                            "routine_type_name": "DummyRoutineWithQueue"
                        },
                        "rout2": {
                            "routine_type_name": "DummyRoutine"
                        }
                    }
                },
                "comp2": {
                    "component_type_name": "DummyComponent",
                    "queues": [
                        "que1"
                    ],
                    "routines": {
                        "rout1": {
                            "routine_type_name": "DummyRoutine"
                        }
                    }
                }
            }
        })
    # A list response carries per-error dicts with a "Message" key; anything
    # else signals success.
    assert type(response) is not list, '\n'.join([res["Message"] for res in response])


def test_create_components_using_bad_structures(pipeline_manager):
    """setup_components rejects malformed structures with a list of errors."""
    # Case 1: misspelled key "routiness" instead of "routines".
    response = pipeline_manager.setup_components(
        {
            "components": {
                "comp1": {
                    "queues": [
                        "que1",
                    ],
                    "routiness": {
                        "rout1": {
                            "queue": "que1",
                            "routine_type_name": "DummyRoutineWithQueue"
                        },
                        "rout2": {
                            "routine_type_name": "DummyRoutine"
                        }
                    }
                }
            }
        })
    assert type(response) is list, '\n'.join([res["Message"] for res in response])
    # Case 2: no "queues" key although rout1 references queue "que1".
    response = pipeline_manager.setup_components(
        {
            "components": {
                "comp1": {
                    "routines": {
                        "rout1": {
                            "queue": "que1",
                            "routine_type_name": "DummyRoutineWithQueue"
                        },
                        "rout2": {
                            "routine_type_name": "DummyRoutine"
                        }
                    }
                }
            }
        })
    assert type(response) is list, '\n'.join([res["Message"] for res in response])
    # Case 3: misspelled execution mode "proces" (valid one above is "process").
    response = pipeline_manager.setup_components(
        {
            "components": {
                "comp1": {
                    "queues": [
                        "que1",
                    ],
                    "execution_mode": "proces",
                    "routines": {
                        "rout1": {
                            "queue": "que1",
                            "routine_type_name": "DummyRoutineWithQueue"
                        },
                        "rout2": {
                            "routine_type_name": "DummyRoutine"
                        }
                    }
                }
            }
        })
    assert type(response) is list, '\n'.join([res["Message"] for res in response])
def test_change_component_execution_mode_method(pipeline_manager_with_component):
    """Switching execution mode swaps the component's runner creator."""
    manager = pipeline_manager_with_component
    result = manager.change_component_execution_mode(
        component_name="comp", execution_mode="thread"
    )
    assert result["Succeeded"], result["Message"]
    runner_after_first_change = manager.components["comp"].runner_creator
    result = manager.change_component_execution_mode(
        component_name="comp", execution_mode="process"
    )
    assert result["Succeeded"], result["Message"]
    runner_after_second_change = manager.components["comp"].runner_creator
    assert runner_after_first_change != runner_after_second_change


def test_change_component_execution_mode_method_with_wrong_mode(
    pipeline_manager_with_component,
):
    """An unknown execution mode is rejected."""
    result = pipeline_manager_with_component.change_component_execution_mode(
        component_name="comp", execution_mode="nothing"
    )
    assert not result["Succeeded"], result["Message"]


def test_create_component_with_shared_memory(pipeline_manager):
    """use_shared_memory=True is reflected on the created component."""
    result = pipeline_manager.create_component(
        component_name="comp", use_shared_memory=True
    )
    assert result["Succeeded"], result["Message"]
    assert pipeline_manager.components["comp"].use_memory


def test_create_component_with_metrics_collector_splunk(pipeline_manager):
    """metrics_collector='splunk' installs a SplunkCollector instance."""
    result = pipeline_manager.create_component(
        component_name="comp", metrics_collector="splunk"
    )
    assert result["Succeeded"], result["Message"]
    from pipert.contrib.metrics_collectors.splunk_collector import SplunkCollector
    assert isinstance(
        pipeline_manager.components["comp"].metrics_collector, SplunkCollector
    )


def test_create_component_with_metrics_collector_prometheus(pipeline_manager):
    """metrics_collector='prometheus' installs a PrometheusCollector instance."""
    result = pipeline_manager.create_component(
        component_name="comp", metrics_collector="prometheus"
    )
    assert result["Succeeded"], result["Message"]
    from pipert.contrib.metrics_collectors.prometheus_collector import PrometheusCollector
    assert isinstance(
        pipeline_manager.components["comp"].metrics_collector, PrometheusCollector
    )
| 42.293103
| 120
| 0.663188
| 1,218
| 12,265
| 6.238916
| 0.096059
| 0.148046
| 0.120016
| 0.176865
| 0.856955
| 0.842216
| 0.80629
| 0.754047
| 0.702856
| 0.660087
| 0
| 0.004128
| 0.24949
| 12,265
| 290
| 121
| 42.293103
| 0.821401
| 0.004403
| 0
| 0.556962
| 0
| 0
| 0.118264
| 0.013759
| 0
| 0
| 0
| 0
| 0.168776
| 1
| 0.101266
| false
| 0
| 0.033755
| 0
| 0.164557
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b6dbe07c77c0a97d49d666e837b74ee4de577371
| 4,407
|
py
|
Python
|
tests/func/test_strict_decorator.py
|
Arthuchaut/introspector
|
4597095b53246157def05c9f513f46a6c88c27d6
|
[
"MIT"
] | null | null | null |
tests/func/test_strict_decorator.py
|
Arthuchaut/introspector
|
4597095b53246157def05c9f513f46a6c88c27d6
|
[
"MIT"
] | 15
|
2021-12-11T23:36:21.000Z
|
2021-12-18T12:38:47.000Z
|
tests/func/test_strict_decorator.py
|
Arthuchaut/introspector
|
4597095b53246157def05c9f513f46a6c88c27d6
|
[
"MIT"
] | null | null | null |
from typing import Any, Callable
import pytest
from src.introspector.strict import Strict
from src.introspector import strict
class TestStrictClassDecorator:
    """Exercise the Strict() class-based decorator on variously-typed methods."""

    class Fake:
        # Arbitrary user-defined type to test non-builtin annotations.
        ...

    @pytest.mark.parametrize(
        'func_name, args, kwargs, expected_ret, throwable',
        [
            ('_lambda_func_1', (2, 'a', [1, 2, 3]), {}, 3.14, None),
            ('_lambda_func_4', (2, 'a'), {'d': [1, 2]}, 3.14, None),
            ('_lambda_func_5', (Fake(), 3.14), {'d': [1, 2]}, 3.14, None),
            ('_lambda_func_6', (2, 4, [1, 2, 3]), {}, 3.14, None),
            ('_lambda_func_1', (2, 4, [1, 2, 3]), {}, 3.14, TypeError),
            ('_lambda_func_2', (2, 'a', [1, 2, 3]), {}, None, TypeError),
            ('_lambda_func_3', (2, 'a'), {}, None, TypeError),
        ],
    )
    def test_decorator(
        self,
        func_name: str,
        args: Any,
        kwargs: Any,
        expected_ret: Any,
        throwable: TypeError | None,
    ) -> None:
        """Call the named decorated method and expect a value or a raise."""
        func: Callable[[Any], Any] = getattr(self, func_name)
        if throwable:
            with pytest.raises(throwable):
                func(*args, **kwargs)
        else:
            ret: Any = func(*args, **kwargs)
            assert ret == expected_ret

    # NOTE(review): `c: str = None` is left as-is throughout — the parametrize
    # rows show Strict raises TypeError based on these annotations at runtime,
    # so widening them to `str | None` would change what the tests exercise.
    @Strict()
    def _lambda_func_1(
        self,
        a: int,
        b: float | str,
        d: list[int],
        c: str = None,
    ) -> float:
        # Fully annotated; passes when arguments match.
        return 3.14

    @Strict()
    def _lambda_func_2(
        self,
        a: int,
        b: float | str,
        d: list[int],
        c: str = None,
    ) -> float:
        # Declares -> float but returns a str (expected to raise TypeError).
        return 'not a float'

    @Strict()
    def _lambda_func_3(
        self,
        a: int,
        b,
    ) -> float:
        # Parameter `b` has no annotation (expected to raise TypeError).
        return 3.14

    @Strict()
    def _lambda_func_4(
        self,
        a: int,
        b: float | str,
        *,
        d: list[int],
        c: str = None,
    ) -> float:
        # Keyword-only parameters after `*`.
        return 3.14

    @Strict()
    def _lambda_func_5(
        self,
        a: Fake,
        b: int | float,
        *,
        d: list[int],
        c: str = None,
    ) -> float:
        # Uses the nested Fake type as an annotation.
        return 3.14

    @Strict(ignore=['b'])
    def _lambda_func_6(
        self,
        a: int,
        b: float | str,
        d: list[int],
        c: str = None,
    ) -> float:
        # `b` is excluded from checking via ignore=['b'] (int 4 is accepted).
        return 3.14
class TestStrictFunctionDecorator:
    """Same cases as TestStrictClassDecorator, via the `strict` function form."""

    class Fake:
        # Arbitrary user-defined type to test non-builtin annotations.
        ...

    @pytest.mark.parametrize(
        'func_name, args, kwargs, expected_ret, throwable',
        [
            ('_lambda_func_1', (2, 'a', [1, 2, 3]), {}, 3.14, None),
            ('_lambda_func_4', (2, 'a'), {'d': [1, 2]}, 3.14, None),
            ('_lambda_func_5', (Fake(), 3.14), {'d': [1, 2]}, 3.14, None),
            ('_lambda_func_6', (2, 4, [1, 2, 3]), {}, 3.14, None),
            ('_lambda_func_1', (2, 4, [1, 2, 3]), {}, 3.14, TypeError),
            ('_lambda_func_2', (2, 'a', [1, 2, 3]), {}, None, TypeError),
            ('_lambda_func_3', (2, 'a'), {}, None, TypeError),
        ],
    )
    def test_decorator(
        self,
        func_name: str,
        args: Any,
        kwargs: Any,
        expected_ret: Any,
        throwable: TypeError | None,
    ) -> None:
        """Call the named decorated method and expect a value or a raise."""
        func: Callable[[Any], Any] = getattr(self, func_name)
        if throwable:
            with pytest.raises(throwable):
                func(*args, **kwargs)
        else:
            ret: Any = func(*args, **kwargs)
            assert ret == expected_ret

    # `strict` is used both bare (@strict) and called (@strict(ignore=...)).
    # NOTE(review): `c: str = None` is left as-is — see TestStrictClassDecorator.
    @strict
    def _lambda_func_1(
        self,
        a: int,
        b: float | str,
        d: list[int],
        c: str = None,
    ) -> float:
        # Fully annotated; passes when arguments match.
        return 3.14

    @strict
    def _lambda_func_2(
        self,
        a: int,
        b: float | str,
        d: list[int],
        c: str = None,
    ) -> float:
        # Declares -> float but returns a str (expected to raise TypeError).
        return 'not a float'

    @strict
    def _lambda_func_3(
        self,
        a: int,
        b,
    ) -> float:
        # Parameter `b` has no annotation (expected to raise TypeError).
        return 3.14

    @strict
    def _lambda_func_4(
        self,
        a: int,
        b: float | str,
        *,
        d: list[int],
        c: str = None,
    ) -> float:
        # Keyword-only parameters after `*`.
        return 3.14

    @strict
    def _lambda_func_5(
        self,
        a: Fake,
        b: int | float,
        *,
        d: list[int],
        c: str = None,
    ) -> float:
        # Uses the nested Fake type as an annotation.
        return 3.14

    @strict(ignore=['b'])
    def _lambda_func_6(
        self,
        a: int,
        b: float | str,
        d: list[int],
        c: str = None,
    ) -> float:
        # `b` is excluded from checking via ignore=['b'] (int 4 is accepted).
        return 3.14
| 22.834197
| 74
| 0.448378
| 524
| 4,407
| 3.59542
| 0.101145
| 0.138004
| 0.019108
| 0.100849
| 0.910828
| 0.910828
| 0.910828
| 0.910828
| 0.910828
| 0.910828
| 0
| 0.052513
| 0.395053
| 4,407
| 192
| 75
| 22.953125
| 0.654164
| 0
| 0
| 0.930233
| 0
| 0
| 0.074427
| 0
| 0
| 0
| 0
| 0
| 0.011628
| 1
| 0.081395
| false
| 0
| 0.023256
| 0.069767
| 0.197674
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8e4a64bf27b1696bb9c195d3773fd7355e7f6501
| 115
|
py
|
Python
|
plugins/url_expander/komand_url_expander/actions/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 46
|
2019-06-05T20:47:58.000Z
|
2022-03-29T10:18:01.000Z
|
plugins/url_expander/komand_url_expander/actions/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 386
|
2019-06-07T20:20:39.000Z
|
2022-03-30T17:35:01.000Z
|
plugins/url_expander/komand_url_expander/actions/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 43
|
2019-07-09T14:13:58.000Z
|
2022-03-28T12:04:46.000Z
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
from .expand.action import Expand
from .expand_all.action import ExpandAll
| 28.75
| 40
| 0.8
| 18
| 115
| 5.055556
| 0.722222
| 0.21978
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147826
| 115
| 3
| 41
| 38.333333
| 0.928571
| 0.321739
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f3f058f9b5c1124194e8d150af5344a0dadc647a
| 2,586
|
py
|
Python
|
Problems/Largest product in a series.py
|
Ideflop/Project-Euler
|
e1fce2d6b5dbb7a6b7eea14b8fe997c85cf7c3da
|
[
"MIT"
] | null | null | null |
Problems/Largest product in a series.py
|
Ideflop/Project-Euler
|
e1fce2d6b5dbb7a6b7eea14b8fe997c85cf7c3da
|
[
"MIT"
] | null | null | null |
Problems/Largest product in a series.py
|
Ideflop/Project-Euler
|
e1fce2d6b5dbb7a6b7eea14b8fe997c85cf7c3da
|
[
"MIT"
] | null | null | null |
"""
The four adjacent digits in the 1000-digit number that have the greatest product are 9 × 9 × 8 × 9 = 5832.
73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450
Find the thirteen adjacent digits in the 1000-digit number that have the greatest product. What is the value of this product?
"""
import math
def largest_product():
digit = "7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450"
lst = [ int(i) for i in digit ]
i = 0
product = 0
while i+13 != len(lst)-1:
if math.prod(lst[i:i+13]) > product:
product = math.prod(lst[i:i+13])
i += 1
return product
if __name__ == '__main__':
print(largest_product())
| 63.073171
| 1,014
| 0.910673
| 114
| 2,586
| 20.596491
| 0.561404
| 0.003833
| 0.013629
| 0.016184
| 0.063884
| 0.063884
| 0.051107
| 0.051107
| 0.051107
| 0.051107
| 0
| 0.837883
| 0.064965
| 2,586
| 41
| 1,015
| 63.073171
| 0.131927
| 0.484919
| 0
| 0
| 0
| 0
| 0.760755
| 0.754717
| 0
| 1
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.076923
| 0
| 0.230769
| 0.076923
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6d5a65b90988bb6a8abda2b1d9c85eb859ef587a
| 20,343
|
py
|
Python
|
test/test_is_character_at_index.py
|
scop/pymarkdown
|
562ba8f7857d99ba09e86e42de5a37ec6d9b2c30
|
[
"MIT"
] | 20
|
2021-01-14T17:39:09.000Z
|
2022-03-14T08:35:22.000Z
|
test/test_is_character_at_index.py
|
scop/pymarkdown
|
562ba8f7857d99ba09e86e42de5a37ec6d9b2c30
|
[
"MIT"
] | 304
|
2020-08-15T23:24:00.000Z
|
2022-03-31T23:34:03.000Z
|
test/test_is_character_at_index.py
|
scop/pymarkdown
|
562ba8f7857d99ba09e86e42de5a37ec6d9b2c30
|
[
"MIT"
] | 3
|
2021-08-11T10:26:26.000Z
|
2021-11-02T20:41:27.000Z
|
"""
Tests for the various is_character* functions.
"""
from pymarkdown.parser_helper import ParserHelper
def test_is_character_at_index_whitespace_with_empty_string():
    """An empty string has no character at index 0, so the check is False."""
    expected = False
    actual = ParserHelper.is_character_at_index_whitespace("", 0)
    assert expected == actual


def test_is_character_at_index_whitespace_with_low_index():
    """An index below 0 is out of range, so the check is False."""
    expected = False
    actual = ParserHelper.is_character_at_index_whitespace("this is a test", -1)
    assert expected == actual


def test_is_character_at_index_whitespace_with_high_index():
    """An index equal to the string length is out of range, so False."""
    text = "this is a test"
    expected = False
    actual = ParserHelper.is_character_at_index_whitespace(text, len(text))
    assert expected == actual


def test_is_character_at_index_whitespace_with_whitespace():
    """A space at the index yields True."""
    expected = True
    actual = ParserHelper.is_character_at_index_whitespace(" ", 0)
    assert expected == actual


def test_is_character_at_index_whitespace_with_whitespace_at_end():
    """A trailing space at the last valid index yields True."""
    text = "this is a test "
    expected = True
    actual = ParserHelper.is_character_at_index_whitespace(text, len(text) - 1)
    assert expected == actual


def test_is_character_at_index_whitespace_without_whitespace():
    """A non-whitespace character at the index yields False."""
    expected = False
    actual = ParserHelper.is_character_at_index_whitespace("a", 0)
    assert expected == actual
def test_is_character_at_index_not_whitespace_with_empty_string():
"""
Make sure that an empty string is handled properly.
"""
# Arrange
input_string = ""
start_index = 0
expected_output = False
# Act
actual_output = ParserHelper.is_character_at_index_not_whitespace(
input_string, start_index
)
# Assert
assert expected_output == actual_output
def test_is_character_at_index_not_whitespace_with_low_index():
    """A negative index is out of range, so report False."""
    data = "this is a test"
    expected = False
    assert ParserHelper.is_character_at_index_not_whitespace(data, -1) == expected
def test_is_character_at_index_not_whitespace_with_high_index():
    """An index equal to the length is out of range, so report False."""
    data = "this is a test"
    expected = False
    assert ParserHelper.is_character_at_index_not_whitespace(data, len(data)) == expected
def test_is_character_at_index_not_whitespace_with_whitespace():
    """A space at the probed index means 'not whitespace' is False."""
    data = " "
    expected = False
    assert ParserHelper.is_character_at_index_not_whitespace(data, 0) == expected
def test_is_character_at_index_not_whitespace_with_whitespace_at_end():
    """A trailing space probed at the final index means False."""
    data = "this is a test "
    expected = False
    assert ParserHelper.is_character_at_index_not_whitespace(data, len(data) - 1) == expected
def test_is_character_at_index_not_whitespace_without_whitespace():
    """A non-space character at the probed index means True."""
    data = "a"
    expected = True
    assert ParserHelper.is_character_at_index_not_whitespace(data, 0) == expected
def test_is_character_at_index_one_of_with_empty_string():
    """An empty string can never contain one of the candidates."""
    data = ""
    candidates = "abc"
    expected = False
    assert ParserHelper.is_character_at_index_one_of(data, 0, candidates) == expected
def test_is_character_at_index_one_of_with_low_index():
    """A negative index is out of range, so report False."""
    data = "this is a test"
    candidates = "abc"
    expected = False
    assert ParserHelper.is_character_at_index_one_of(data, -1, candidates) == expected
def test_is_character_at_index_one_of_with_high_index():
    """An index equal to the length is out of range, so report False."""
    data = "this is a test"
    candidates = "abc"
    expected = False
    assert ParserHelper.is_character_at_index_one_of(data, len(data), candidates) == expected
def test_is_character_at_index_one_of_with_whitespace():
    """The first candidate character at the probed index reports True."""
    data = "a"
    candidates = "abc"
    expected = True
    assert ParserHelper.is_character_at_index_one_of(data, 0, candidates) == expected
def test_is_character_at_index_one_of_with_whitespace2():
    """The last candidate character at the probed index also reports True."""
    data = "c"
    candidates = "abc"
    expected = True
    assert ParserHelper.is_character_at_index_one_of(data, 0, candidates) == expected
def test_is_character_at_index_one_of_with_character_at_end():
    """A candidate character at the final index reports True."""
    data = "this is a test!"
    candidates = "abc!"
    expected = True
    assert ParserHelper.is_character_at_index_one_of(data, len(data) - 1, candidates) == expected
def test_is_character_at_index_one_of_without_whitespace():
    """A non-candidate character at the probed index reports False."""
    data = "this is a test"
    candidates = "abc"
    expected = False
    assert ParserHelper.is_character_at_index_one_of(data, 0, candidates) == expected
def test_is_character_at_index_not_one_of_with_empty_string():
    """An empty string has no character to compare, so report False."""
    data = ""
    candidates = "abc"
    expected = False
    assert ParserHelper.is_character_at_index_not_one_of(data, 0, candidates) == expected
def test_is_character_at_index_not_one_of_with_low_index():
    """A negative index is out of range, so report False."""
    data = "this is a test"
    candidates = "abc"
    expected = False
    assert ParserHelper.is_character_at_index_not_one_of(data, -1, candidates) == expected
def test_is_character_at_index_not_one_of_with_high_index():
    """An index equal to the length is out of range, so report False."""
    data = "this is a test"
    candidates = "abc"
    expected = False
    assert ParserHelper.is_character_at_index_not_one_of(data, len(data), candidates) == expected
def test_is_character_at_index_not_one_of_with_whitespace():
    """A candidate character at the probed index means 'not one of' is False."""
    data = "a"
    candidates = "abc"
    expected = False
    assert ParserHelper.is_character_at_index_not_one_of(data, 0, candidates) == expected
def test_is_character_at_index_not_one_of_with_whitespace2():
    """Another candidate character at the probed index also means False."""
    data = "c"
    candidates = "abc"
    expected = False
    assert ParserHelper.is_character_at_index_not_one_of(data, 0, candidates) == expected
def test_is_character_at_index_not_one_of_with_character_at_end():
    """A candidate character at the final index means 'not one of' is False."""
    data = "this is a test!"
    candidates = "abc!"
    expected = False
    assert ParserHelper.is_character_at_index_not_one_of(data, len(data) - 1, candidates) == expected
def test_is_character_at_index_not_one_of_without_whitespace():
    """A non-candidate character at the probed index means True."""
    data = "this is a test"
    candidates = "abc"
    expected = True
    assert ParserHelper.is_character_at_index_not_one_of(data, 0, candidates) == expected
def test_is_character_at_index_with_empty_string():
    """An empty string can never match the wanted character."""
    data = ""
    wanted = "a"
    expected = False
    assert ParserHelper.is_character_at_index(data, 0, wanted) == expected
def test_is_character_at_index_with_low_index():
    """A negative index is out of range, so report False."""
    data = "this is a test"
    wanted = "a"
    expected = False
    assert ParserHelper.is_character_at_index(data, -1, wanted) == expected
def test_is_character_at_index_with_high_index():
    """An index equal to the length is out of range, so report False."""
    data = "this is a test"
    wanted = "a"
    expected = False
    assert ParserHelper.is_character_at_index(data, len(data), wanted) == expected
def test_is_character_at_index_with_whitespace():
    """The wanted character at the probed index reports True."""
    data = "a"
    wanted = "a"
    expected = True
    assert ParserHelper.is_character_at_index(data, 0, wanted) == expected
def test_is_character_at_index_with_character_at_end():
    """The wanted character at the final index reports True."""
    data = "this is a test!"
    wanted = "!"
    expected = True
    assert ParserHelper.is_character_at_index(data, len(data) - 1, wanted) == expected
def test_is_character_at_index_without_whitespace():
    """A different character at the probed index reports False."""
    data = "this is a test"
    wanted = "b"
    expected = False
    assert ParserHelper.is_character_at_index(data, 0, wanted) == expected
def test_is_character_at_index_not_with_empty_string():
    """An empty string has no character to compare, so report False."""
    data = ""
    wanted = "a"
    expected = False
    assert ParserHelper.is_character_at_index_not(data, 0, wanted) == expected
def test_is_character_at_index_not_with_low_index():
    """A negative index is out of range, so report False."""
    data = "this is a test"
    wanted = "a"
    expected = False
    assert ParserHelper.is_character_at_index_not(data, -1, wanted) == expected
def test_is_character_at_index_not_with_high_index():
    """An index equal to the length is out of range, so report False."""
    data = "this is a test"
    wanted = "a"
    expected = False
    assert ParserHelper.is_character_at_index_not(data, len(data), wanted) == expected
def test_is_character_at_index_not_with_whitespace():
    """The excluded character present at the probed index means False."""
    data = "a"
    wanted = "a"
    expected = False
    assert ParserHelper.is_character_at_index_not(data, 0, wanted) == expected
def test_is_character_at_index_not_with_character_at_end():
    """The excluded character at the final index means False."""
    data = "this is a test!"
    wanted = "!"
    expected = False
    assert ParserHelper.is_character_at_index_not(data, len(data) - 1, wanted) == expected
def test_is_character_at_index_not_without_whitespace():
    """A different character at the probed index means True."""
    data = "this is a test"
    wanted = "b"
    expected = True
    assert ParserHelper.is_character_at_index_not(data, 0, wanted) == expected
def test_are_characters_at_index_with_empty_string():
    """An empty string can never contain the sequence."""
    data = ""
    sequence = "abc"
    expected = False
    assert ParserHelper.are_characters_at_index(data, 0, sequence) == expected
def test_are_characters_at_index_with_low_index():
    """A negative index is out of range, so report False."""
    data = "this is a test"
    sequence = "abc"
    expected = False
    assert ParserHelper.are_characters_at_index(data, -1, sequence) == expected
def test_are_characters_at_index_with_high_index():
    """A start one past the last viable position must report False."""
    data = "this is a abc"
    sequence = "abc"
    probe = len(data) - len(sequence) + 1
    expected = False
    assert ParserHelper.are_characters_at_index(data, probe, sequence) == expected
def test_are_characters_at_index_with_present():
    """The whole string matching the sequence reports True."""
    data = "abc"
    sequence = "abc"
    expected = True
    assert ParserHelper.are_characters_at_index(data, 0, sequence) == expected
def test_are_characters_at_index_with_character_at_end():
    """The sequence sitting exactly at the end of the string reports True."""
    data = "this is a abc"
    sequence = "abc"
    expected = True
    assert ParserHelper.are_characters_at_index(data, len(data) - len(sequence), sequence) == expected
def test_are_characters_at_index_with_not_present():
    """
    Make sure that a string without the sequence at the index is handled properly.
    """
    # Arrange
    input_string = "this is a test"
    start_index = 6
    sequence_to_look_for = "abc"
    expected_output = False

    # Act
    actual_output = ParserHelper.are_characters_at_index(
        input_string, start_index, sequence_to_look_for
    )

    # Assert
    assert expected_output == actual_output


# BUG FIX: the function was originally named without the ``test_`` prefix, so
# pytest never collected or executed it.  The old name is kept as an alias in
# case anything references it directly.
are_characters_at_index_with_not_present = test_are_characters_at_index_with_not_present
| 23.302405
| 104
| 0.695178
| 2,626
| 20,343
| 5.000381
| 0.023991
| 0.085447
| 0.075242
| 0.104181
| 0.993222
| 0.991394
| 0.990328
| 0.990328
| 0.988348
| 0.988348
| 0
| 0.002527
| 0.241262
| 20,343
| 872
| 105
| 23.329128
| 0.848202
| 0.199725
| 0
| 0.758442
| 0
| 0
| 0.029391
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 1
| 0.114286
| false
| 0
| 0.002597
| 0
| 0.116883
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
edc3e61f080f2b13337bf42d180640ddcf74678a
| 45
|
py
|
Python
|
API/xml_ops/change_xml_label.py
|
FirminSun/object_detection
|
763318e4775cccaeeecd1d1ad76f53e1e883a346
|
[
"Apache-2.0"
] | null | null | null |
API/xml_ops/change_xml_label.py
|
FirminSun/object_detection
|
763318e4775cccaeeecd1d1ad76f53e1e883a346
|
[
"Apache-2.0"
] | 1
|
2018-11-07T07:44:46.000Z
|
2018-11-07T07:44:46.000Z
|
API/xml_ops/change_xml_label.py
|
FirminSun/object_detection
|
763318e4775cccaeeecd1d1ad76f53e1e883a346
|
[
"Apache-2.0"
] | null | null | null |
from xml_ops.xml_manager import Xml_manager
| 15
| 43
| 0.866667
| 8
| 45
| 4.5
| 0.625
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 45
| 2
| 44
| 22.5
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
6129ff014fb7456cdaf2340a9be7a9cf87937e03
| 2,925
|
py
|
Python
|
2/bulls_and_cows.py
|
IronCore864/leetcode
|
a62a4cdde9814ae48997176debcaad537f7ad01f
|
[
"Apache-2.0"
] | 4
|
2018-03-07T02:56:03.000Z
|
2021-06-15T05:43:31.000Z
|
2/bulls_and_cows.py
|
IronCore864/leetcode
|
a62a4cdde9814ae48997176debcaad537f7ad01f
|
[
"Apache-2.0"
] | null | null | null |
2/bulls_and_cows.py
|
IronCore864/leetcode
|
a62a4cdde9814ae48997176debcaad537f7ad01f
|
[
"Apache-2.0"
] | 1
|
2021-09-02T12:05:15.000Z
|
2021-09-02T12:05:15.000Z
|
from collections import defaultdict
class Solution(object):
    def getHint(self, secret, guess):
        """Return the Bulls and Cows hint string ``'<bulls>A<cows>B'``.

        A bull is a digit matching in value and position; a cow matches in
        value only, counted against the multiset of unmatched secret digits.

        :type secret: str
        :type guess: str
        :rtype: str
        """
        secret_chars = list(secret)
        guess_chars = list(guess)
        length = len(secret_chars)
        bulls = 0
        leftovers = defaultdict(int)
        # First pass: count exact-position matches and tally the remaining
        # secret digits; matched slots are blanked out with '#'.
        for pos in range(length):
            if secret_chars[pos] == guess_chars[pos]:
                bulls += 1
                secret_chars[pos] = '#'
                guess_chars[pos] = '#'
            else:
                leftovers[secret_chars[pos]] += 1
        # Second pass: every unmatched guess digit still available in the
        # leftover tally is a cow.
        cows = 0
        for pos in range(length):
            digit = guess_chars[pos]
            if digit != '#' and leftovers[digit] > 0:
                cows += 1
                leftovers[digit] -= 1
        return '{}A{}B'.format(bulls, cows)
# Quick manual smoke checks: expected output is 3A0B, 1A1B, 1A3B.
s = Solution()
print(s.getHint('1122', '1222'))
print(s.getHint('1123', '0111'))
print(s.getHint('1807', '7810'))
# Stress check with a ~1000-digit secret/guess pair to exercise performance
# on large inputs (the algorithm is O(n) over the digit strings).
print(s.getHint(
"8840944760773057036885302122292704679897163524131624777823684353255123714878897263086677640540678473192511316928453370516124272398181478140522967525150841884685499655186556485881393779411205324771834553675803889734515702308665784356588619442945467550345464459307924999820012993716363599579972750097599878712108712825997144267518061995908229613625675610935342081672966937277140599608810031380073239540523273976263026387235198974328150232400948476489441989429815858422811892586495951411575422430877989624535503954685355089589346492764699904585364346068051477462676744192453229072647813532924907216718015975388882975794618096721948730554048742792830713742721143667178843581532442933735571894867334955754568103204924790159002725260697179804664158645094232654676758847975218713410360109907194814453213284213120980296601597076231782947908654931666338525102101269685594514576599064927044576649915583906217690446724529108978607214746005377164494782194502710544975328922565886464561240548495790872940408965084",
"5590097080152160437365065672427532142673987479072725671083523162200061330054098514281551303533951469696781010077042066021290217298522580555735975994692360820390644796220447783091978598602339631821607385249129773246413773237160646635743760680825274379884239302673629952792964629068411392799311918867191613881843605671831694185805706575041102717844636567970376358374114806224443398281245104561500770048361674465028534796670279105284694860851413904048749729323299517337006624355974392672485130473835342440257957009587395863280744690190325138519398286554949147610421486321389284247000109055303947010031653070733222059162917567421802148621236574640852825655752928196479272289430660224123048236270087181554337487670286087914147009378110955369160683026861409816703195584075780003357440101963898186520740473488783435890059370245151771242749628206945902085335080156115951373890868942529258557516809048007476329537314904624386853467912895874563739274338361280256187230063069475912371368026705037238035686170988"))
| 63.586957
| 1,008
| 0.817436
| 110
| 2,925
| 21.736364
| 0.4
| 0.012547
| 0.021748
| 0.005855
| 0.011711
| 0.011711
| 0
| 0
| 0
| 0
| 0
| 0.8015
| 0.133675
| 2,925
| 45
| 1,009
| 65
| 0.142068
| 0.015727
| 0
| 0.068966
| 0
| 0
| 0.716097
| 0.704473
| 0
| 1
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.034483
| 0
| 0.137931
| 0.137931
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b6306e43a7a4cbda5d152dfb491c32c2c832aea5
| 11,508
|
py
|
Python
|
tests/test_processor.py
|
JiscSD/rdss-preservica-adaptor
|
cc8e7fc91e852cfdceb4074f3a59dfe9b08b4c44
|
[
"Apache-2.0"
] | 1
|
2019-02-19T14:26:59.000Z
|
2019-02-19T14:26:59.000Z
|
tests/test_processor.py
|
JiscSD/rdss-preservica-adaptor
|
cc8e7fc91e852cfdceb4074f3a59dfe9b08b4c44
|
[
"Apache-2.0"
] | 5
|
2021-03-31T19:00:36.000Z
|
2021-12-13T19:51:12.000Z
|
tests/test_processor.py
|
JiscSD/rdss-preservica-adaptor
|
cc8e7fc91e852cfdceb4074f3a59dfe9b08b4c44
|
[
"Apache-2.0"
] | null | null | null |
import base64
import boto3
import hashlib
import json
import moto
import pytest
from preservicaservice.processor import (
RecordProcessor,
)
from preservicaservice.config import (
Config,
)
def _get_records(client, stream_name):
    """Drain and return all records currently on *stream_name*.

    Assumes the (moto-mocked) stream has a single shard and reads it from
    TRIM_HORIZON, so everything ever written is returned.
    """
    description = client.describe_stream(StreamName=stream_name)
    shard_id = description['StreamDescription']['Shards'][0]['ShardId']
    iterator = client.get_shard_iterator(
        StreamName=stream_name,
        ShardId=shard_id,
        ShardIteratorType='TRIM_HORIZON',
    )['ShardIterator']
    return client.get_records(ShardIterator=iterator, Limit=1000)['Records']
@moto.mock_kinesis
def test_record_with_invalid_json_sends_message_to_error_stream():
    """A record whose payload is not valid JSON must be routed to the error
    stream carrying a GENERR007 'Malformed JSON' header."""
    client = boto3.client('kinesis', 'eu-west-1')
    client.create_stream(StreamName='error-stream', ShardCount=1)
    client.create_stream(StreamName='invalid-stream', ShardCount=1)
    config = Config(
        environment='test',
        preservica_base_url='https://test_preservica_url',
        input_stream_name='input-stream',
        invalid_stream_name='invalid-stream',
        error_stream_name='error-stream',
        adaptor_aws_region='eu-west-1',
        organisation_buckets={},
    )
    processor = RecordProcessor(config=config)

    class FakeRecord():
        # b'{' is deliberately truncated JSON.
        data = base64.b64encode(b'{')

    processor.process_records([FakeRecord()], None)

    records = _get_records(client, 'error-stream')
    assert len(records) == 1
    message = json.loads(records[0]['Data'].decode('utf-8'))
    assert message['messageHeader']['errorCode'] == 'GENERR007'
    assert 'Malformed JSON' in message['messageHeader']['errorDescription']
    # FIX: the original set literal listed 'errorDescription' twice; sets
    # de-duplicate, so the repeated element was dead text.
    assert set(message['messageHeader'].keys()) == {
        'errorCode', 'errorDescription', 'messageHistory', 'messageType',
    }
@moto.mock_kinesis
def test_record_with_invalid_rdss_message_sends_message_to_invalid_stream():
    """Valid JSON that fails RDSS header validation must be routed to the
    invalid stream carrying a GENERR004 header."""
    client = boto3.client('kinesis', 'eu-west-1')
    client.create_stream(StreamName='error-stream', ShardCount=1)
    client.create_stream(StreamName='invalid-stream', ShardCount=1)
    config = Config(
        environment='test',
        preservica_base_url='https://test_preservica_url',
        input_stream_name='input-stream',
        invalid_stream_name='invalid-stream',
        error_stream_name='error-stream',
        adaptor_aws_region='eu-west-1',
        organisation_buckets={},
    )
    processor = RecordProcessor(config=config)

    class FakeRecord():
        # Well-formed JSON, but the empty header fails schema validation.
        data = base64.b64encode(b'{"messageHeader":{},"messageBody":{}}')

    processor.process_records([FakeRecord()], None)

    records = _get_records(client, 'invalid-stream')
    assert len(records) == 1
    message = json.loads(records[0]['Data'].decode('utf-8'))
    assert message['messageHeader']['errorCode'] == 'GENERR004'
    assert 'Invalid, missing or corrupt headers' in message['messageHeader']['errorDescription']
    # FIX: duplicate 'errorDescription' removed from the expected-key set
    # (sets de-duplicate, so the repeat was dead text).
    assert set(message['messageHeader'].keys()) == {
        'errorCode', 'errorDescription', 'messageHistory', 'messageType',
    }
# TODO Remove to re-enable checksum and fsize validation
@pytest.mark.skip(reason='checksum validation disabled to allow processing of prod willow messages')
@moto.mock_s3
@moto.mock_kinesis
def test_record_with_invalid_checksum_sends_message_to_invalid_stream():
    """A MetadataCreate message whose declared checksum does not match the S3
    object content must be routed to the invalid stream (APPERRMET004)."""
    s3 = boto3.resource('s3', region_name='us-east-1')
    s3.create_bucket(Bucket='the-download-bucket')
    s3.Object('the-download-bucket', 'the-download-key').put(Body=b'Some contents')

    kinesis = boto3.client('kinesis', 'eu-west-1')
    kinesis.create_stream(StreamName='error-stream', ShardCount=1)
    kinesis.create_stream(StreamName='invalid-stream', ShardCount=1)
    config = Config(
        environment='test',
        preservica_base_url='https://test_preservica_url',
        input_stream_name='input-stream',
        invalid_stream_name='invalid-stream',
        error_stream_name='error-stream',
        adaptor_aws_region='eu-west-1',
        organisation_buckets={
            '98765': 's3://the-upload-bucket/',
        },
    )
    processor = RecordProcessor(config=config)

    payload = {
        'messageHeader': {
            'messageType': 'MetadataCreate',
            'messageId': 'the-message-id',
        },
        'messageBody': {
            'objectUuid': 'the-id',
            'objectOrganisationRole': [{
                'organisation': {
                    'organisationJiscId': 98765,
                },
                'role': 'some-role-id',
            }],
            'objectFile': [{
                # "fileUuid": "a3290140-18e1-506e-abec-61e31791e749",
                'fileStorageLocation': 's3://the-download-bucket/the-download-key',
                'fileStoragePlatform': {
                    'storagePlatformType': 1,
                },
                'fileName': 'the file name',
                'fileChecksum': [{
                    'checksumType': 1,
                    'checksumValue': 'definitely-not-the-checksum',
                }],
            }],
        },
    }

    class FakeRecord():
        data = base64.b64encode(json.dumps(payload).encode('utf-8'))

    processor.process_records([FakeRecord()], None)

    records = _get_records(kinesis, 'invalid-stream')
    message = json.loads(records[0]['Data'].decode('utf-8'))
    assert message['messageHeader']['errorCode'] == 'APPERRMET004'
    assert 'A file did not match its checksum' in message['messageHeader']['errorDescription']
@moto.mock_s3
@moto.mock_kinesis
def test_record_with_valid_checksum_does_not_send_message_to_invalid_stream():
    """A MetadataCreate message whose declared MD5 matches the S3 object
    content must not produce anything on the invalid stream."""
    s3 = boto3.resource('s3', region_name='us-east-1')
    s3.create_bucket(Bucket='the-download-bucket')
    s3.Object('the-download-bucket', 'the-download-key').put(Body=b'Some contents')

    # Compute the real MD5 of the stored object so the message validates.
    digest = hashlib.md5()
    digest.update(b'Some contents')

    kinesis = boto3.client('kinesis', 'eu-west-1')
    kinesis.create_stream(StreamName='error-stream', ShardCount=1)
    kinesis.create_stream(StreamName='invalid-stream', ShardCount=1)
    config = Config(
        environment='test',
        preservica_base_url='https://test_preservica_url',
        input_stream_name='input-stream',
        invalid_stream_name='invalid-stream',
        error_stream_name='error-stream',
        adaptor_aws_region='eu-west-1',
        organisation_buckets={
            '98765': 's3://the-upload-bucket/',
        },
    )
    processor = RecordProcessor(config=config)

    payload = {
        'messageHeader': {
            'messageType': 'MetadataCreate',
            'messageId': 'the-message-id',
        },
        'messageBody': {
            'objectUuid': 'the-id',
            'objectOrganisationRole': [{
                'organisation': {
                    'organisationJiscId': 98765,
                },
                'role': 'some-role-id',
            }],
            'objectFile': [{
                # "fileUuid": "a3290140-18e1-506e-abec-61e31791e749",
                'fileStorageLocation': 's3://the-download-bucket/the-download-key',
                'fileStoragePlatform': {
                    'storagePlatformType': 1,
                },
                'fileName': 'the file name',
                'fileChecksum': [{
                    'checksumType': 1,
                    'checksumValue': digest.hexdigest(),
                }],
            }],
        },
    }

    class FakeRecord():
        data = base64.b64encode(json.dumps(payload).encode('utf-8'))

    processor.process_records([FakeRecord()], None)

    records = _get_records(kinesis, 'invalid-stream')
    assert len(records) == 0
@moto.mock_kinesis
def test_record_unable_to_download_sends_messages_to_error_stream():
    """When the referenced resource cannot be downloaded the record must be
    routed to the error stream carrying a GENERR011 'Resource not found'
    header, with the original message body echoed back."""
    client = boto3.client('kinesis', 'eu-west-1')
    client.create_stream(StreamName='error-stream', ShardCount=1)
    client.create_stream(StreamName='invalid-stream', ShardCount=1)
    config = Config(
        environment='test',
        preservica_base_url='https://test_preservica_url',
        input_stream_name='input-stream',
        invalid_stream_name='invalid-stream',
        error_stream_name='error-stream',
        adaptor_aws_region='eu-west-1',
        organisation_buckets={
            44: 's3://some-bucket/',
        },
    )
    processor = RecordProcessor(config=config)

    with open('tests/fixtures/create.json', 'rb') as fixture_file:
        fixture = fixture_file.read()

    class FakeRecord():
        data = base64.b64encode(fixture)

    processor.process_records([FakeRecord()], None)

    records = _get_records(client, 'error-stream')
    assert len(records) == 1
    message = json.loads(records[0]['Data'].decode('utf-8'))
    assert message['messageHeader']['errorCode'] == 'GENERR011'
    assert 'Resource not found' in message['messageHeader']['errorDescription']
    # FIX: duplicate 'errorDescription' removed from the expected-key set
    # (sets de-duplicate, so the repeat was dead text).
    assert set(message['messageHeader'].keys()) == {
        'errorCode', 'errorDescription',
        'messageClass', 'messageId', 'messageHistory', 'messageType',
    }
    assert set(message['messageBody'].keys()) == {
        'objectFile', 'objectUuid', 'objectOrganisationRole', 'objectTitle',
    }
@moto.mock_s3
@moto.mock_kinesis
def test_record_samvera_prod_processes():
    """A captured production Samvera message must process without emitting
    anything on the error stream."""
    s3 = boto3.resource('s3', region_name='us-east-1')
    s3.create_bucket(Bucket='some-bucket')

    kinesis = boto3.client('kinesis', 'eu-west-1')
    kinesis.create_stream(StreamName='error-stream', ShardCount=1)
    kinesis.create_stream(StreamName='invalid-stream', ShardCount=1)
    config = Config(
        environment='test',
        preservica_base_url='https://test_preservica_url',
        input_stream_name='input-stream',
        invalid_stream_name='invalid-stream',
        error_stream_name='error-stream',
        adaptor_aws_region='eu-west-1',
        organisation_buckets={
            747: 's3://some-bucket/',
        },
    )
    processor = RecordProcessor(config=config)

    with open('tests/fixtures/create_samvera_0.0.1-SNAPSHOT.json', 'rb') as fixture_file:
        fixture = fixture_file.read()

    class FakeRecord():
        data = base64.b64encode(fixture)

    processor.process_records([FakeRecord()], None)

    assert len(_get_records(kinesis, 'error-stream')) == 0
@moto.mock_kinesis
def test_record_figshare_prod_processes():
    """A captured production Figshare message must process without emitting
    anything on the error stream."""
    kinesis = boto3.client('kinesis', 'eu-west-1')
    kinesis.create_stream(StreamName='error-stream', ShardCount=1)
    kinesis.create_stream(StreamName='invalid-stream', ShardCount=1)
    config = Config(
        environment='test',
        preservica_base_url='https://test_preservica_url',
        input_stream_name='input-stream',
        invalid_stream_name='invalid-stream',
        error_stream_name='error-stream',
        adaptor_aws_region='eu-west-1',
        organisation_buckets={
            89: 's3://some-bucket/',
        },
    )
    processor = RecordProcessor(config=config)

    with open('tests/fixtures/create_figshare_1.json', 'rb') as fixture_file:
        fixture = fixture_file.read()

    class FakeRecord():
        data = base64.b64encode(fixture)

    processor.process_records([FakeRecord()], None)

    assert len(_get_records(kinesis, 'error-stream')) == 0
| 35.73913
| 100
| 0.63017
| 1,165
| 11,508
| 6.027468
| 0.154506
| 0.042296
| 0.013956
| 0.037881
| 0.833808
| 0.830105
| 0.830105
| 0.830105
| 0.818001
| 0.812589
| 0
| 0.023599
| 0.234098
| 11,508
| 321
| 101
| 35.850467
| 0.773088
| 0.01373
| 0
| 0.702206
| 0
| 0
| 0.255597
| 0.032611
| 0
| 0
| 0
| 0.003115
| 0.066176
| 1
| 0.029412
| false
| 0
| 0.029412
| 0
| 0.113971
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b6a72cd4d3809327045665c87ea384e533f8c72a
| 22,252
|
py
|
Python
|
tests/test_processor.py
|
cnwarden/mico2
|
017c52ef4626649d162904f247f21eec768fafc3
|
[
"MIT"
] | null | null | null |
tests/test_processor.py
|
cnwarden/mico2
|
017c52ef4626649d162904f247f21eec768fafc3
|
[
"MIT"
] | null | null | null |
tests/test_processor.py
|
cnwarden/mico2
|
017c52ef4626649d162904f247f21eec768fafc3
|
[
"MIT"
] | null | null | null |
# coding:utf-8
from unittest import TestCase
from processor.common_processor import XueQiuProcessor
class TestProcessor(TestCase):
def setUp(self):
text = u'丁一熊先生是投资界骨灰级的老人了,无论在上海香港的职业圈子,还是在闽发桃仙的草根论坛,丁先生的声音从来都很受关注。投资长期的赢家,都需要一定的人生境界和情怀,当然得有格局,还要幽默,如此才能游走于刀剑江湖。之前整理过丁先生两篇文章,一篇是在一月初,讲的是“水到渠成的下跌”;另外一篇是讲资源股的大周期底部机会。一个月过去,基本都应验了,当然还只是短期,长期还得看,但也一定会赢。<br> 下面这些文字,是丁先生最近谈楼市的言论,我做了一些整理并加个小标题,这样看起来更整齐一些。稍微有点长,慢慢看,看完耽误了打炮你不会后悔的。---------------<br> <strong> 1、买房必须长周期</strong><br> 大盘会突破历史高点。所以,低迷时候要买入。 <br> 其实这20年房子,当时价格都是比你当时工资收入高很多的,<br> 一个发展增速这么快的国家,还是人才汇聚的都市,房价如果低迷就是机会<br> 很多人总是聪明,认为自己知道高低,其实你是不知道的<br> 因为聪明人,都想一击而中,一下就发大财,快速致富,<br> 房子你赌涨,杠杆赌在预期时间暴富,基本会被洗得干干净净<br> 股市也一样<br> <strong>买房子,是牺牲现在消费 去换取未来收益,必须是长周期</strong><br><strong> 意思是,长期,房产是增值的,但它决不是短期谋利的工具</strong><br><strong> 房子短炒,赌快速,没有好下场,我见太多玩房产玩死的,</strong><br> 一个个择时,择高低,拆解资金,呼啸而来呼啸而去基本最后全部被打垮<br> 不认可房子,认为房价如此之高必有崩溃,或者,生活为之付出,不值得。<br> <br> <strong> 2、房子其实是一种世俗生活</strong><br> 这种观点也不错。真的。 <br> 房子其实是一种世俗生活。你坚持另一种生活方式或者理念,也是一片精彩。<br> 《瓦尔登湖》,是梭罗独居瓦尔登湖畔的记录,描绘了他两年多时间里的所见、所闻和所思。<br> 大至四季交替造成的景色变化,小到两只蚂蚁的争斗,无不栩栩如生<br> 梭罗的时代,正是美国工业化迈向世界第一国的时代,无论环境的破坏,社会充满野心勃勃的财富观,年轻人的急功近利,贫富剧烈分化。。。<br> 都是梭罗,不愿意看到的,崇尚简朴生活,热爱大自然的风光。。。<br> 这边书,影响了一百年,发达国家年轻人的心<br> 历史上和这本书,齐名的,是黑塞的《悉达多》,意境深邃 <br> 所以,年轻,要多走走<br> 成家,责任下,适当规划家庭财富,但依然保持对世界的好奇和阅览<br><br> <strong> 3、房子购买是区域化的</strong><br> 炒房必死<br> 房子低迷,很多人不买,一涨,乱象横生<br> 股市低迷,很多人不买,一涨,乱象横生<br> 聪明人就是以为自己聪明<br> 凡是想快速,一把整赢人生的,都是聪明人,但赛程下来又有几个成器的?!<br> 关于房子,这是个很沉重的话题<br> 但它,是,一个财富快速分化的资产配置,你逃不掉。<br> <strong>中国至少有30个城市,是发展快速,人才涌入,教育医疗完善,商业发达的大都市。这也是,中国历史上,第一次规模性,迈入都市化和现代化进程</strong><br><strong> 在这一批城市,买房,长期持有,会赢</strong><br> 如果你是三线以下的,一定要去省城准备一套,为你未来或者孩子做准备<br> 如果你是省城,可以配置2套或以上,以准备你的孩子留在北京上海工 作和购房,你到时好做调配<br> 如果你就在北京,上海,那购房,就是必须去奋斗的,一定要买一套 <br> 房子购买,是区域化,以你生活和未来生活为原则<br> 这样,你更好估值和判断,包括租售管理<br> 这是总体战略<br> 房产投资,长期一定是大大增值的<br> 全世界货币增发 摆在那里,只要城市不沉沦<br> 这是,世界大都市发展的,普遍经济规律。<br> <br> 这个世界,上辈的财富积累,最后多以房产形式留存给家族<br> 纽约,伦敦等等市区的房子,很多都是财团委托的家族资产,一般都是这样<br> 因为新技术他们赶不上,一代商业风云有新一代的世界之王和弄潮儿<br> 老财富只有是通过房产去避免烟消云散,这是基本的防御 <br> 但房子,也是会跌宕起伏,估值这东西,和情绪化非常关联<br> 你们没炒房,我10年前写过一份调查,以上海温州为样板,追踪资金和炒客<br> 这几年,还有一个报告,贵州和鄂尔多斯的困局报告<br> 炒房很惨的<br> <br> 房子,你买了,它有可能跌一半或者再跌一半,为什么不呢?!<br> 
那什么时候是最低呢?!<br> 如果股市你判断不出来最低,那房子价格也同样是如此。<br> 你不知道的!也没人知道。<br> 那,该采取怎样的策略来应对呢?!<br> <br> <strong>4、买房的基本哲学</strong><br> 我的投资观念是,你在中国,你是中国老百姓,<br> 那,房子和股票,就是一个很重要的资产配置<br> 这是我给大学教师培训的理财课,四个立论<br> 第一个立论,货币增发 是大趋势,把货币换成资产,<br> 资产增值是财富加速器,也是个人财富的最大分化器<br> 但必须要坚持长期和长期眼光,而不要着眼短期<br> 短期是未知的,也不重要的<br> 第二个立论,股票,房子有低迷期,在低迷期买入是你的运气<br> 其实,你是根本不知道股票和房子具体走势的<br> 走势是未知的,其实,也是不重要的<br> 第三个立论,现金流,才是决定我们买入和持有的基石<br> <strong>现金流的保障大于估值趋势判断 </strong><br><strong> 估值和趋势,有个大的轮廓就行</strong><br> 中国房子,大城市长时间会高估,人才和高收入的刚需,当然房价也会突然崩溃<br> 中国股市,熊市7年一般会有大牛市,所以3000点以下都是低点,当然跌到几百点也可以<br> 高估和崩溃,2000点和几百点<br> 看起来差之千里,但,都不重要<br> 你研究历史进程,就知道,世界多么诡异<br> 很多东西我们不知道的 <br> 认识无知,我们其实是对价格走势,基本无知的,认清这一点,非常重要 <br> 第四个,也是我的人生哲学:<br> 这世界,不是如你所愿,人生不如意十有八九,很多东西超过我们想象<br> “我们的每一笔投资,可能就是最坏的投资”,你要有这个心理准备和防范<br> 那我们怎么<br> 从长期投资看,它是明智的吗?<br> 我们有足够的每月每年现金流来保障吗?<br> 如果越来越低,我们可以再买吗?<br> 这就是,买入资产的思考。<br> 不要去想,房价是高还是低,股市点位判断<br> 你是否是长期眼光去配置它,你是否有持续钱可以支撑它(哪怕它下跌巨大)<br> 如果更低,有持续的钱买入,不是更好吗?!<br> 很多人,是因为只有一笔赌资,没有收入来源<br> 又想马上发大财<br> 可想他的状况是多么艰难<br> 必须,完成人生致命一击<br> 必须,绝对正确<br> 人生,历史,恰恰是变化多端的<br> 投资,是细水长流<br><br> <strong> 5、炒房比炒股惨</strong><br> 房子和股票真的很简单,<br> 让无数人完蛋就在于,他们以为知道房价股价的高低,然后用高杠杆来短期发大财<br> 虽然我们是大学老师,但我们是普通百姓,也是普通人<br> 长期投资,才是出路。增加自己的收入,投资才从容<br> 期望值越低,越有意想不到的收获<br> 所以。现金收入,才是你迈向快速财富的第一基石。<br> 所谓富人更富,强者更强,其实<br> 金融白领,医生,大学教师,商业高管。。。<br> 中国的中产阶级,工资收入族,才是这样,脚踏实地<br> 买房,防御 后,再长期持股<br> 不要去想一夜暴富<br> 不要去玩不正规的金融产品<br> 不要沉迷赌波动浪费青春<br> 这是我对,知识分子,说的。<br> <strong>有个炒房的大咖,风风雨雨</strong><strong>10几年,2013年终于亏完了,很多很多钱,过眼云烟</strong><br><strong> 他给我说,还是股市好,一直走牛,而且跌了放在那里,几年又涨上去了</strong><br><strong> 我很吃惊,我说你们居然是这么看股市的</strong><br><strong> 他说,他记得90年代股市才几百点,现在2000多点,多牛呀,这是2013年</strong><br><strong> 呵呵,炒房客,从97年到现在,7轮了,基本是全军覆没,比炒股惨</strong><br> <strong> 那种剧烈融资,资金链的灰色,房子别说跌,一旦不涨,价格停顿就会死一批炒家</strong><br><br> <strong> 6、投资是逆势的关键是你有持续收入</strong><br> 股市,房子。这些故事一轮一轮在上演<br> 这20年,每次有人问我,可不可以买房<br> 我说,房子是长期看涨,特别是中国的中心城市,它长期是有活力的<br> 关键是,你能否支付它的按揭,有没有足够的安全保障<br> 比如,按揭款每月1万,你家庭2口子总收入不到1万,老人生病,小孩还刚出生,你是支持不了的<br> 除非,你有更好的工作,更好的收入<br> 买房子,最重要,是,你的月收入能够支撑,<br> 
这是衡量和保障你不会在房子价格乱象中崩溃的基础<br> 如果供贷有点点吃力是正常的,只要没大的后患(健康),<br> 你就,应该,去想办法赚更多钱,提升更好职位来加强保障和改善家庭财务<br> 知识分子是有办法的,你是社会主流,要多学,要勤奋,做有责任的家长和高级人才<br> 我第一套房还贷900,工资1800,很有压力,后来工资提高了,压力就小了<br> 如果房子不涨,还下跌,你根本不用慌张,你每月还按揭就是,你想多了也没用<br> 如果房子大跌,你也是卖不掉的,真的,都是聪明人。你卖给谁。你只有继续每月还按揭就是,你想多了也没用<br> 我成都买的房子有些5,6年都没涨,有天突然就涨了,一下涨很多,<br> <strong> 房子其实是快速涨1-2年然后压制停顿或者跌一下(送面积就是跌),未来哪天还有可能大跌</strong><br> <strong> 大跌都不要怕</strong><br><strong> 你只要有还贷能力,而不是买房,想到的是房子马上就涨,如此,你买房就清晰了</strong><br> 我们70年代出生的人,买房,也是陆陆续续,当又有积蓄,又有按揭支付能力的时候,就又买一套<br> 当很多房子自身又带来租金收益的时候,你的防御 就更强了<br> 我是基本知道,如果有人请我当高管,我一年收入有多少,我家庭房子负债要多少合适<br> 我们是中老年可以保守,年轻人对自己可以高估,前途无量。你看一下自己年龄和前途,就可以配置<br> <strong>中年人,房子有了,就可以进军股市,用股市来配置,用长期眼光来投资</strong><br> 年轻人的股市配置,国家应该有,更好的企业投资蓝筹股指的政策来为年轻人保障<br> 我们公司每年有200万用于年轻人的未来养老金,但,如果投资3000点的蓝筹股指,30年后收益是好过社保 ,真的<br> 总之,你有积蓄和每月按揭支付能力,而周围的人都不看好房子<br> 就是最好的机会<br> 股市也一样<br> 那些聪明人,都是,涨起来,一窝蜂去买<br> 或者问,可以买吗?!<br> 买了如果突然跌了<br> 他们就完了<br> 又一窝蜂亏损出局<br> <strong> 投资,是逆势的。关键是你有持续收入</strong><br><strong> 最关键,是你,有才干,大幅度提高自己收入和社会地位</strong><br><strong> 你就从容了。无论怎样投资</strong><br> 这就是资产快车<br><br> <strong> 7、 炒房灭富人,炒股灭穷人</strong><br> 聪明人,就是,认为自己能够判断高低点<br> 如果你非常有把握房子涨,你准备买几套?!拆借多少资金?!<br> 如果一套房子一个月转手赚100万<br> 你心情怎样,砰砰跳,对不<br> 有能力的聪明人巴不得拆借资金囤积10套,100套<br> 如果不涨了,资金链条就会吃掉你<br> 如果跌,一片一片消灭<br> 我见很多<br> <strong>炒房灭富人,炒股灭穷人,期货 灭聪明人</strong><br> 如果房产这次投机成功,那,下一轮又怎样?!<br> 价格的东西,政策的变化,都是扑朔迷离<br> 房价炒作,杠杆比股市高多了,上瘾,死的更快<br> 论坛很多人说,我炒房几十倍收益怎样,炒股还一年80倍呢,几万炒到几千万, 论坛是伟大的吧台<br> 但我们是普通老百姓,不能这样<br> 股市也是这样<br> 很多人有把握,知道最低点<br> 那你准备杠杆多少倍来赌反转<br> “越把握,越死”,这是我20年股市的经验<br> <strong> 现在买股,是因为低迷,长期看好</strong><br><strong> 你买就是,你判断不出最低点</strong><br><strong> 我前年1900点买指数,就是觉得可以买,但愿明年还可以买</strong><br><strong> 我今年买有色资源,就是觉得可以买,但愿明年还可以买</strong><br> <strong>大智若愚,志存高远,是一个知识分子的心境</strong><br> 对现实,你的奋斗,你的环境可以预期严酷点,生活没有一帆风顺<br> 但对未来却要乐观,人类史,多少坎坷也走过来了<br> 那些炒房的,甚至高利贷筹集资金买房,来赌快速涨,好脱手,<br> 如果脱不了手就死了,<br> 我无话可说,你反正就是个死人<br> 房子,如果你没有,那,一定要买<br> 这个没什么要说的,虽然难,但,努力吧<br><br> <strong> 8、房价大跌怎么办</strong><br> <strong>中国都市发展,你配置好几套房子来投资,你赢的可能很大</strong><br> 
<strong>但如果大跌,跌90%怎么办?!</strong><br><strong> 事实上,跌90%,你也失业了,因为肯定经济有大问题</strong> <br> 这是,我对大学老师上课,提出的问题<br> “你如果失业了,怎么办?!”<br> “你有什么本事和价值,你个人对社会有什么用“,这是我对大学老师说的<br> 他们都吓晕了。真的。回到本质,你有什么用,你凭什么本事赚钱 <br> 财富课<br> 就是,金融的本质是什么,大的方向战略配置,如果意外的对冲(这个以后讲,丁总一定不是笨蛋,傻买房子和股票)<br> 还有就是,怎样去增加个人收入,规划人生(你看,积极一点的人生观是有用的)<br> 所以,我会直接问,你有什么本事,你有什么价值<br> 如果,你失去资产,你怎么存活<br> 这需要你去思考,其实财富也只是一部分,人生意义,责任,快乐,担当,是一体的<br> <strong> 遇到学识的丁总,大学老师都是崩溃的,商业高管都是崩溃的</strong><br><strong> 只要淘股吧的,瞧不起丁总,毕竟一年80倍,而且丁总不会打板</strong><br> 房子和股票,是我们比较好的配置,也基本是唯一的,规范的,自己做主投资的配置<br> 这是中国人,目前的处境 <br> 房子,你有能力你买,股票,低迷阶段你买,然后一直持有<br> 再留有结余,把生活过好一点。<br> 最坏,经济状况,资产,全部没有了<br> 也没关系,你有情趣,你有底蕴,你有学识,你可以生活下去 <br> 我去过中东,大马士革呆了半年,曾经观察过叙利亚和伊拉克<br> 真的,一个战争灾难,动荡的祖国,是多么的艰难,什么资产,安详的活下去都不错<br> 我庆幸。我们中国,虽然有很多问题,但真的幸运 <br><br> <strong> 9、房子永远是量力而行</strong><br> 第一,不了解深圳,也不知道这个天价是怎么出来的。如果这个9万是普通房,那第一次买房的年轻人也买不起,换房还可以,高位移交,风险很高 <br> 第二,凡是疯狂上涨一窝蜂的东西,最好回避,这种突飞猛进,一定会有干预和处罚,所以炒房必死<br> 第三,还是有很多二线好城市没有启动。房子是量力而行,不是每个人必须有。一个高级医生和企业高管,收入和一个低学历小青年是不同的,所以,考虑也是不一样的。人家可以2套3套。 <br> 房子和股票,应该买,先房子和股票,<br> 因为房子投资,资金使用比股票好。按揭杠杆而且30年分摊很安全压力小,关键是中国都市化和中国人的房子偏爱,30个城市的房子本来就该高溢价<br> 这是3年前的话,去翻帖子。房子是基本防御 ,我建议有条件的可以买到被限购为止(政府不让你买就不要买)<br> 我说这活,是房子已经涨了10几年,我还说的。当时很多跟帖是嘲笑的,觉得大崩溃在即<br> 这2天,是有人问房子,我才再一次重复观点,不是我鼓吹要你买房子。房子永远是量力而行<br> <strong>我买房子的理念,不是它疯涨中,恰恰是想反,你要问自己,它跌一半你能够抗它10年20年吗?!</strong><br> 我买的每一套房,都是这个原则。我不会去赌博短期,也不会孤注一掷把血汗钱拿来全靠买这个房快速赚千万,来改变命运<br> 我说现在3000点是低点,你买没问题的,如果再跌,你还是买。大盘会越过8000高点,我知道你们会嘲笑,有什么关系<br> 聪明人觉得这个国家都完蛋了<br> 但我们老百姓,也只有在中国,我觉得资产配置就是这个逻辑<br> 股市房市有风险,风险就是,我们总是一窝蜂去赌,用高杠杆去赌快速赢,太想一夜暴富<br> <strong> 投资就是。逆势,这需要你独立思考。</strong><br><strong> 投资就是。现金流的保障,长期持有并持续买入好资产,不畏惧崩盘的深渊</strong><br> 我的,很多观点,是和大众相反的,我是思考和经历过的<br> <br> <strong> 10、平衡配置,要随时想到和预防</strong><br> 嗯。这次被身边很多朋友批评了。我说房子的话在网上流传。<br> 在一个房子焦躁而且乱涨阶段,不应该去说房子。<br> 我接受批评。真心的。<br> 第一,中国房子其实我是不懂得,它的高价真的有害。<br> 第二,大部分人有房的,实际上一直没做好,房子下跌周期的准备。<br> 量力而行,留有余地,准备艰难,是一个长期投资者的成熟心境,<br> 但很多买房的,做不到这点,如此,现在无论多风光,都将在下一个周期失去财富而艰难<br> 一定要记住。经济是周期的。<br> 防范经济周期和房子的剧烈波动(主要是下波动)<br> 就在于平衡配置。比如,多一点股市的长期配置(现在股市正好低),<br> 
留一些短期理财产品,多一个和职业无关的爱好收入渠道<br> 减少房产负债杠杆,保障现金流在困难时不枯竭<br> 这都是,要随时,想到和预防。<br> 因为我们不是富人,不可能全球配置,那是扯淡(企业可以,出境旅游业 最有优势对冲)<br> 高收入者,总想移民,你考虑的只是静态,因为你的优势还是在国内,你去外面,大多会萎缩一文不值<br> 现在都市房产高估,有利于卖房作为小孩留学教育费用,这也是目前资产变现的功利之一<br><br><a href="http://xueqiu.com/S/SH000001" target="_blank">$上证指数(SH000001)$</a> <br><a href="http://xueqiu.com/n/小小辛巴" target="_blank">@小小辛巴</a><a href="http://xueqiu.com/n/东博老股民" target="_blank">@东博老股民</a><a href="http://xueqiu.com/n/唐朝" target="_blank">@唐朝</a><a href="http://xueqiu.com/n/唐史主任司马迁" target="_blank">@唐史主任司马迁</a><a href="http://xueqiu.com/n/安久套海通" target="_blank">@安久套海通</a><a href="http://xueqiu.com/n/西峯" target="_blank">@西峯</a> <a href="http://xueqiu.com/n/释老毛" target="_blank">@释老毛</a><a href="http://xueqiu.com/n/云蒙" target="_blank">@云蒙</a><a href="http://xueqiu.com/n/天道骑牛" target="_blank">@天道骑牛</a><a href="http://xueqiu.com/n/英科睿之鹰" target="_blank">@英科睿之鹰</a><a href="http://xueqiu.com/n/今日话题" target="_blank">@今日话题</a>'
self.test_doc = {
'raw': {
'id': 10000,
'text': text
}
}
self.processor = XueQiuProcessor()
def testProcess(self):
    """Smoke test: the full fixture document built in setUp is processed without raising."""
    self.processor.process(self.test_doc)
def testProcessInstrument(self):
    """A document containing only bare $name(code)$ instrument markers is processed without raising."""
    doc = {
        'raw': {
            'id': 10000,
            'text': u'$上证指数(SH000001)$ $下证指数(SH000002)$',
        }
    }
    self.processor.process(doc)
def testProcessDoc3(self):
test_doc = {
'raw': {
'user_id': 1,
'user':{
'screen_name':'test'
},
'id': 10000,
'text': u'<a href="http://xueqiu.com/S/SH000001" target="_blank">$上证指数(SH000001)$</a> $ <a href="http://xueqiu.com/S/02899" target="_blank">$紫金矿业(02899)$</a> <a href="http://xueqiu.com/S/SH600680" target="_blank">$上海普天(SH600680)$</a> $)$ <a href="http://xueqiu.com/S/SZ002751" target="_blank">$易尚展示(SZ002751)$</a> <a href="http://xueqiu.com/S/SZ000100" target="_blank">$TCL集团(SZ000100)$</a> <a href="http://xueqiu.com/S/SH601318" target="_blank">$中国平安(SH601318)$</a> <a href="http://xueqiu.com/S/SH600030" target="_blank">$中信证券(SH600030)$</a> <a href="http://xueqiu.com/S/SZ300104" target="_blank">$乐视网(SZ300104)$</a> <a href="http://xueqiu.com/S/SZ000001" target="_blank">$平安银行(SZ000001)$</a> <a href="http://xueqiu.com/n/不明真相的群众" target="_blank">@不明真相的群众</a> <a href="http://xueqiu.com/n/zangyn" target="_blank">@zangyn</a> <a href="http://xueqiu.com/n/小小辛巴" target="_blank">@小小辛巴</a> <a href="http://xueqiu.com/n/西点老A" target="_blank">@西点老A</a> <a href="http://xueqiu.com/n/释老毛" target="_blank">@释老毛</a> <a href="http://xueqiu.com/n/梁宏" target="_blank">@梁宏</a> <a href="http://xueqiu.com/n/心灯永续William" target="_blank">@心灯永续William</a> <a href="http://xueqiu.com/n/跟我走吧14" target="_blank">@跟我走吧14</a> <a href="http://xueqiu.com/n/火德星君" target="_blank">@火德星君</a> <a href="http://xueqiu.com/n/暴力猴" target="_blank">@暴力猴</a> <a href="http://xueqiu.com/n/上海电力敢死队" target="_blank">@上海电力敢死队</a> <a href="http://xueqiu.com/n/那一水的鱼" target="_blank">@那一水的鱼</a> <a href="http://xueqiu.com/n/虎鼎" target="_blank">@虎鼎</a> 我真的不希望这是真的 我也不知道企业买房干什么去 我只知道我附近的入住率肯定低于40%<a href="http://xueqiu.com/n/狂龙十八段" target="_blank">@狂龙十八段</a> <br><img class="ke_img" src="//xqimg.imedao.com/1534773392d18cd3f67dbb01.jpg!custom.jpg" />'
}
}
self.processor.process(test_doc)
def testSpace(self):
    """A plain-text post containing a bare URL is processed without raising."""
    doc = {
        'raw': {
            'user_id': 1,
            'user': {'screen_name': 'test'},
            'id': 10000,
            'text': u'alphago http://www.pq.bb.com',
        }
    }
    self.processor.process(doc)
def testNLPTask(self):
    """Placeholder for instrument extraction from plain announcement text.

    Sample input that does NOT use the xueqiu $name(code)$ markup, only
    bare name(code) pairs:

    宜安科技(300328)、百花村(600721)、泛海控股(000046)、宏达新材(002211)、南山控股(002314)、深基地B(200053)<br><br>复牌一览表:山水文化(600234)、澳柯玛(600336)、*ST兴业(600603)、金圆股份(000546)、宝鹰股份(002047)、泰和新材(002254)

    Does not use the xueqiu format to point out instruments; no assertions
    yet — kept as a reminder to support this format.
    """
    pass
| 364.786885
| 18,917
| 0.703353
| 3,424
| 22,252
| 4.556659
| 0.263435
| 0.720933
| 0.876042
| 0.913729
| 0.564735
| 0.557429
| 0.536213
| 0.492373
| 0.205615
| 0.126266
| 0
| 0.017188
| 0.040446
| 22,252
| 61
| 18,918
| 364.786885
| 0.713516
| 0.010426
| 0
| 0.416667
| 0
| 0.041667
| 0.946099
| 0.854987
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0.020833
| 0.041667
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fce8485aca2e663806054fc5b71edcd6c544a52b
| 5,306
|
py
|
Python
|
test.py
|
smith558/CA-Group-Project
|
3056ce993ec63232e552f198a9d4d0a6ebb2f7e8
|
[
"MIT"
] | 6
|
2020-03-06T19:10:08.000Z
|
2020-07-29T09:49:04.000Z
|
test.py
|
smith558/CA-Group-Project
|
3056ce993ec63232e552f198a9d4d0a6ebb2f7e8
|
[
"MIT"
] | null | null | null |
test.py
|
smith558/CA-Group-Project
|
3056ce993ec63232e552f198a9d4d0a6ebb2f7e8
|
[
"MIT"
] | null | null | null |
import unittest
from ac_math import RL_CIRCUIT
from ac_math import RC_CIRCUIT
from ac_math import RLC_CIRCUIT
class TestRl_circuit(unittest.TestCase):
    """Exercises every RL_CIRCUIT calculation against known expected values."""

    def test_resistor_voltage(self):
        self.assertEqual(RL_CIRCUIT().get_resistor_voltage(3, 50), 150)

    def test_supply_voltage(self):
        self.assertEqual(int(RL_CIRCUIT().get_supply_voltage(200, 50)), 206)

    def test_inductor_voltage(self):
        self.assertEqual(RL_CIRCUIT().get_inductor_voltage(3, 30), 90)

    def test_pahse_angle(self):
        # NOTE(review): "pahse" typo kept so the test id stays stable for CI.
        angle = round(RL_CIRCUIT().get_phase_angle(float(50), 29), 15)
        self.assertEqual(angle, 0.52558379355161)

    def test_inductive_reactance(self):
        reactance = RL_CIRCUIT().get_inductive_reactance(100, 3)
        self.assertEqual(reactance, 1884.9555921538758)

    def test_circuit_current(self):
        self.assertEqual(RL_CIRCUIT().get_circuit_current(40, float(100)), 2.5)

    def test_supply_frequency(self):
        self.assertEqual(int(RL_CIRCUIT().get_supply_frequency(3, 40)), 2)

    def test_circuit_resistance(self):
        resistance = RL_CIRCUIT().get_circuit_resistance(100, 2)
        self.assertEqual(resistance, 99.9799979995999)

    def test_circuit_impedence(self):
        self.assertEqual(int(RL_CIRCUIT().get_circuit_impedence(19, 26)), 32)
class TestRC_circuit(unittest.TestCase):
    """Exercises every RC_CIRCUIT calculation against known expected values."""

    def test_resistor_voltage(self):
        voltage = RC_CIRCUIT().get_resistor_voltage(float(1.2), 48)
        self.assertEqual(voltage, 57.599999999999994)

    def test_supply_voltage(self):
        self.assertEqual(int(RC_CIRCUIT().get_supply_voltage(200, 50)), 206)

    def test_capacitive_voltage(self):
        self.assertEqual(RC_CIRCUIT().get_capacitive_voltage(3, 30), 90)

    def test_pahse_angle(self):
        # NOTE(review): "pahse" typo kept so the test id stays stable for CI.
        angle = RC_CIRCUIT().get_phase_angle(float(10), float(15))
        self.assertEqual(angle, 0.982793723247329)

    def test_capacitive_reactance(self):
        reactance = RC_CIRCUIT().get_capacitive_reactance(100, float(0.001))
        self.assertEqual(reactance, 1.5915494309189535)

    def test_circuit_current(self):
        self.assertEqual(RC_CIRCUIT().get_circuit_current(40, float(100)), 2.5)

    def test_supply_frequency(self):
        frequency = int(RC_CIRCUIT().get_supply_frequency(float(0.001), 40))
        self.assertEqual(frequency, 3)

    def test_circuit_resistance(self):
        resistance = RC_CIRCUIT().get_circuit_resistance(100, 2)
        self.assertEqual(resistance, 99.9799979995999)

    def test_circuit_impedence(self):
        self.assertEqual(int(RC_CIRCUIT().get_circuit_impedence(19, 26)), 32)
class TestRLC_circuit(unittest.TestCase):
    """Exercises every RLC_CIRCUIT calculation against known expected values."""

    def test_resistor_voltage(self):
        self.assertEqual(RLC_CIRCUIT().get_resistor_voltage(3, 50), 150)

    def test_supply_voltage(self):
        self.assertEqual(int(RLC_CIRCUIT().get_supply_voltage(200, 50, 68)), 200)

    def test_inductor_voltage(self):
        self.assertEqual(RLC_CIRCUIT().get_inductor_voltage(3, 30), 90)

    def test_pahse_angle(self):
        # NOTE(review): "pahse" typo kept so the test id stays stable for CI.
        angle = round(RLC_CIRCUIT().get_phase_angle(float(50), 29, 40))
        self.assertEqual(angle, 1.0)

    def test_inductive_reactance(self):
        reactance = RLC_CIRCUIT().get_inductive_reactance(100, 3)
        self.assertEqual(reactance, 1884.9555921538758)

    def test_circuit_current(self):
        self.assertEqual(RLC_CIRCUIT().get_circuit_current(40, float(100)), 2.5)

    def test_supply_frequency_2(self):
        self.assertEqual(int(RLC_CIRCUIT().get_supply_frequency_2(3, 40)), 2)

    def test_circuit_resistance(self):
        resistance = RLC_CIRCUIT().get_circuit_resistance(float(100), 22, 40)
        self.assertEqual(resistance, 91.6515138991168)

    def test_circuit_impedence(self):
        impedence = int(RLC_CIRCUIT().get_circuit_impedence(float(19), 26, 29))
        self.assertEqual(impedence, 38)

    def test_capacitive_reactance(self):
        reactance = RLC_CIRCUIT().get_capacitive_reactance(100, float(0.001))
        self.assertEqual(reactance, 1.5915494309189535)

    def test_capacitive_voltage(self):
        self.assertEqual(RLC_CIRCUIT().get_capacitive_voltage(3, 30), 90)

    def test_supply_frequency(self):
        frequency = float(RLC_CIRCUIT().get_supply_frequency(float(0.1), float(40)))
        self.assertEqual(frequency, 0.039788735772973836)
# Allow running this test module directly (python test.py) as well as via a runner.
if __name__ == '__main__':
    unittest.main()
| 32.157576
| 72
| 0.663023
| 664
| 5,306
| 5.043675
| 0.115964
| 0.062705
| 0.188116
| 0.107495
| 0.884145
| 0.845327
| 0.758734
| 0.736339
| 0.664079
| 0.569424
| 0
| 0.088271
| 0.233509
| 5,306
| 164
| 73
| 32.353659
| 0.735186
| 0
| 0
| 0.767442
| 0
| 0
| 0.001508
| 0
| 0
| 0
| 0
| 0
| 0.232558
| 1
| 0.232558
| false
| 0
| 0.031008
| 0
| 0.286822
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1e44f22d8de09e94a348c02ef000a447114a86ef
| 23,807
|
py
|
Python
|
testcases/generated/apigateway_test.py
|
Tanc009/jdcloud-cli
|
4e11de77c68501f44e7026c0ad1c24e5d043197e
|
[
"Apache-2.0"
] | 95
|
2018-06-05T10:49:32.000Z
|
2019-12-31T11:07:36.000Z
|
testcases/generated/apigateway_test.py
|
Tanc009/jdcloud-cli
|
4e11de77c68501f44e7026c0ad1c24e5d043197e
|
[
"Apache-2.0"
] | 22
|
2018-06-05T10:58:59.000Z
|
2020-07-31T12:13:19.000Z
|
testcases/generated/apigateway_test.py
|
Tanc009/jdcloud-cli
|
4e11de77c68501f44e7026c0ad1c24e5d043197e
|
[
"Apache-2.0"
] | 21
|
2018-06-04T12:50:27.000Z
|
2020-11-05T10:55:28.000Z
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
import unittest
import os
import json
class ApigatewayTest(unittest.TestCase):
def test_query_access_auths(self):
cmd = """python ../../main.py apigateway query-access-auths """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_create_access_auth(self):
cmd = """python ../../main.py apigateway create-access-auth """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_check_auth_exist(self):
cmd = """python ../../main.py apigateway check-auth-exist --access-key 'xxx' --auth-user-type 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_access_auth(self):
cmd = """python ../../main.py apigateway query-access-auth --access-auth-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_update_access_auth(self):
cmd = """python ../../main.py apigateway update-access-auth --access-auth-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_delete_access_auth(self):
cmd = """python ../../main.py apigateway delete-access-auth --access-auth-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_bind_group_auth(self):
cmd = """python ../../main.py apigateway query-bind-group-auth --access-auth-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_bind_group_auth(self):
cmd = """python ../../main.py apigateway bind-group-auth --access-auth-id 'xxx' --deployment-ids 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_auth_group_list(self):
cmd = """python ../../main.py apigateway query-auth-group-list --auth-user-type 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_authorized_api_group_list(self):
cmd = """python ../../main.py apigateway authorized-api-group-list """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_access_keys(self):
cmd = """python ../../main.py apigateway query-access-keys """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_create_access_key(self):
cmd = """python ../../main.py apigateway create-access-key """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_check_key_exist(self):
cmd = """python ../../main.py apigateway check-key-exist --access-key 'xxx' --access-key-type 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_access_key(self):
cmd = """python ../../main.py apigateway query-access-key --access-key-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_update_access_key(self):
cmd = """python ../../main.py apigateway update-access-key --access-key-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_delete_access_key(self):
cmd = """python ../../main.py apigateway delete-access-key --access-key-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_bind_group_key(self):
cmd = """python ../../main.py apigateway query-bind-group-key --access-key-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_bind_group_key(self):
cmd = """python ../../main.py apigateway bind-group-key --access-key-id 'xxx' --deployment-ids 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_key_group_list(self):
cmd = """python ../../main.py apigateway query-key-group-list --access-key-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_apis(self):
cmd = """python ../../main.py apigateway query-apis --api-group-id 'xxx' --revision 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_create_apis(self):
cmd = """python ../../main.py apigateway create-apis --api-group-id 'xxx' --revision 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_check_api_name_exist(self):
cmd = """python ../../main.py apigateway check-api-name-exist --api-group-id 'xxx' --revision 'xxx' --api-name 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_api(self):
cmd = """python ../../main.py apigateway query-api --api-group-id 'xxx' --revision 'xxx' --api-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_update_api(self):
cmd = """python ../../main.py apigateway update-api --api-group-id 'xxx' --revision 'xxx' --api-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_delete_api(self):
cmd = """python ../../main.py apigateway delete-api --api-group-id 'xxx' --revision 'xxx' --api-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_update_api_by_name(self):
cmd = """python ../../main.py apigateway update-api-by-name --api-group-id 'xxx' --revision 'xxx' --api-name 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_delete_api_by_name(self):
cmd = """python ../../main.py apigateway delete-api-by-name --api-group-id 'xxx' --revision 'xxx' --api-name 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_api_groups(self):
cmd = """python ../../main.py apigateway describe-api-groups """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_create_api_group(self):
cmd = """python ../../main.py apigateway create-api-group --group-name 'xxx' --auth-type 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_check_group_name_exist(self):
cmd = """python ../../main.py apigateway check-group-name-exist --group-name 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_api_group(self):
cmd = """python ../../main.py apigateway describe-api-group --api-group-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_modify_api_group_attribute(self):
cmd = """python ../../main.py apigateway modify-api-group-attribute --api-group-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_delete_api_group(self):
cmd = """python ../../main.py apigateway delete-api-group --api-group-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_is_deploy_api_groups(self):
cmd = """python ../../main.py apigateway describe-is-deploy-api-groups """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_create_backend_config(self):
cmd = """python ../../main.py apigateway create-backend-config --api-group-id 'xxx' --environment 'xxx' --backend-service-type 'xxx' --sort '5'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_backend_configs(self):
cmd = """python ../../main.py apigateway describe-backend-configs --api-group-id 'xxx' --environment 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_backend_config(self):
cmd = """python ../../main.py apigateway describe-backend-config --api-group-id 'xxx' --backend-config-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_update_backend_config(self):
cmd = """python ../../main.py apigateway update-backend-config --api-group-id 'xxx' --backend-config-id 'xxx' --environment 'xxx' --backend-service-type 'xxx' --sort '5'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_delete_backend_config(self):
cmd = """python ../../main.py apigateway delete-backend-config --api-group-id 'xxx' --backend-config-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_deployments(self):
cmd = """python ../../main.py apigateway describe-deployments --api-group-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_deploy(self):
cmd = """python ../../main.py apigateway deploy --api-group-id 'xxx' --revision 'xxx' --environment 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_deployment(self):
cmd = """python ../../main.py apigateway describe-deployment --api-group-id 'xxx' --deployment-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_offline(self):
cmd = """python ../../main.py apigateway offline --api-group-id 'xxx' --deployment-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_batch_offline(self):
cmd = """python ../../main.py apigateway batch-offline --api-group-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_keys(self):
cmd = """python ../../main.py apigateway query-keys """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def _run_cli_expect_dict(self, command):
    """Run *command* in a shell, echo its stdout, and assert it parses to a JSON object."""
    with os.popen(command) as pipe:
        output = pipe.read()
    print(output)
    self.assertIsInstance(json.loads(output), dict)

def test_create_key(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway create-key """)

def test_reset_key(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway reset-key --key-id 'xxx'""")

def test_update_key(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway update-key --key-id 'xxx'""")

def test_query_key_info(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway query-key-info --key-id 'xxx'""")

def test_query_rate_limit_policies(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway query-rate-limit-policies """)

def test_create_rate_limit_policy(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway create-rate-limit-policy """)

def test_check_policy_name(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway check-policy-name --policy-name 'xxx'""")

def test_query_rate_limit_policy(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway query-rate-limit-policy --policy-id 'xxx'""")

def test_update_rate_limit_policy(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway update-rate-limit-policy --policy-id 'xxx'""")

def test_delete_rate_limit_policy(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway delete-rate-limit-policy --policy-id 'xxx'""")

def test_query_bind_group_policy(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway query-bind-group-policy --policy-id 'xxx'""")

def test_bind_group_policy(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway bind-group-policy --policy-id 'xxx' --deployment-ids 'xxx'""")

def test_query_policy_group_list(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway query-policy-group-list --policy-id 'xxx'""")

def test_describe_revisions(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway describe-revisions --api-group-id 'xxx'""")

def test_create_revision(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway create-revision --api-group-id 'xxx'""")

def test_get_revision_ids(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway get-revision-ids --api-group-id 'xxx'""")

def test_check_revision_exist(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway check-revision-exist --api-group-id 'xxx' --revision 'xxx'""")

def test_query_revision(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway query-revision --api-group-id 'xxx' --revision-id 'xxx'""")

def test_modify_revision(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway modify-revision --api-group-id 'xxx' --revision-id 'xxx'""")

def test_delete_revision(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway delete-revision --api-group-id 'xxx' --revision-id 'xxx'""")

def test_query_subscription_keys(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway query-subscription-keys """)

def test_create_subscription_key(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway create-subscription-key """)

def test_query_subscription_key(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway query-subscription-key --subscription-key-id 'xxx'""")

def test_update_subscription_key(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway update-subscription-key --subscription-key-id 'xxx'""")

def test_delete_subscription_key(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway delete-subscription-key --subscription-key-id 'xxx'""")

def test_query_uc_access_keys(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway query-uc-access-keys """)

def test_check_pin(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway check-pin --pin 'xxx'""")

def test_query_user_domains(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway query-user-domains --api-group-id 'xxx'""")

def test_create_user_domain(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway create-user-domain --domain 'xxx' --api-group-id 'xxx'""")

def test_delete_user_domain(self):
    self._run_cli_expect_dict("""python ../../main.py apigateway delete-user-domain --domain-ids 'xxx'""")
| 33.961484
| 181
| 0.593313
| 2,919
| 23,807
| 4.76259
| 0.050702
| 0.037764
| 0.070134
| 0.091713
| 0.918573
| 0.915839
| 0.907064
| 0.854481
| 0.752122
| 0.686448
| 0
| 0.000629
| 0.265762
| 23,807
| 700
| 182
| 34.01
| 0.79468
| 0.026631
| 0
| 0.708885
| 0
| 0.060491
| 0.251609
| 0.027683
| 0
| 0
| 0
| 0
| 0.141777
| 1
| 0.141777
| false
| 0
| 0.005671
| 0
| 0.149338
| 0.141777
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1e8cc1027afeaaa604f07dff4c091f24ff8fd66f
| 12,595
|
py
|
Python
|
labfile2module.py
|
vikasinder/VS-Module-Demo
|
e57d215e2a24c275a356f408acafeed31a85603a
|
[
"MIT"
] | null | null | null |
labfile2module.py
|
vikasinder/VS-Module-Demo
|
e57d215e2a24c275a356f408acafeed31a85603a
|
[
"MIT"
] | null | null | null |
labfile2module.py
|
vikasinder/VS-Module-Demo
|
e57d215e2a24c275a356f408acafeed31a85603a
|
[
"MIT"
] | null | null | null |
# Interactive geometry calculator used by the lab exercise pages.
class Area_parameters_volume:
    """Interactive calculator for areas, perimeters and volumes.

    Each public method prints a small menu, reads the user's choice and the
    required dimensions from stdin (re-prompting on invalid entries), and
    returns the computed value — or the goodbye string when the user enters 0.
    """

    # Class-level default of the result slot.  NOTE(review): the methods
    # declare ``global calculate``, so results are actually written to a
    # *module-level* global, not this attribute; kept for backward
    # compatibility with callers that read it.
    calculate = 0

    def option(self, value):
        """Dispatch: 1 -> area, 2 -> perimeter, anything else -> volume."""
        if value == 1:
            return self.area()
        elif value == 2:
            return self.perimeter()
        else:
            return self.volume()

    def _read_positive(self, prompt):
        """Prompt until the user enters a non-negative number; return a float.

        Mirrors the original validation: a negative value raises TypeError
        internally so both bad-sign and bad-type entries re-prompt with the
        original error messages.
        """
        while True:
            try:
                number = float(input(prompt))
                if number < 0:
                    raise TypeError
                return number
            except TypeError as error:
                print(f"\nInvalid Entry: {error}. Please provide a positive number.\n")
            except ValueError as error:
                print(f"\nInvalid Entry: {error}. Please enter an integer or float value.\n")

    def _menu(self, prompt, handlers):
        """Run one menu loop: read a choice, run its handler, store the result.

        ``handlers`` maps menu numbers to zero-argument callables computing
        the requested quantity; 0 always exits.

        Bug fix: the original printed ``f"Invalid choice: {area}..."`` inside
        the ``except ValueError`` handler, but when ``int()`` raised, the menu
        variable was still unassigned, so the first non-integer entry crashed
        with NameError.  We now read the raw text first and echo that instead.
        """
        global calculate  # results are published module-globally (see class note)
        while True:
            raw = input(prompt)
            try:
                choice = int(raw)
            except ValueError:
                print(f"\nInvalid choice: {raw} Please enter either 1 - 2 - 3 \n")
                continue
            if choice == 0:
                return "Thanks For Using This Program"
            handler = handlers.get(choice)
            if handler is None:
                # Any integer outside the menu range: same message as original.
                print("\nYou entered wrong Input. Please Try again.\n")
                continue
            calculate = handler()
            return calculate

    def area(self):
        """Menu-driven area of a square, rectangle or circle."""
        print("\n ****Calculate Area **** \n")
        return self._menu(
            "Choose what you want to calculate :\n\n Enter 1. Area of Square \n Enter 2. Area Of rectangle \n Enter 3. Area of circle \n Enter 0. To Exit : ",
            {
                1: lambda: self._read_positive("\nEnter Side Of square : ") ** 2,
                2: lambda: (self._read_positive("\nEnter Length Of Rectangle : ")
                            * self._read_positive("\nEnter width Of Rectangle : ")),
                3: lambda: self._read_positive("\nEnter Radius Of circle : ") ** 2 * 3.14,
            },
        )

    def perimeter(self):
        """Menu-driven perimeter of a square, rectangle or circle."""
        print("\n **** Calculate Perimeter **** \n")
        return self._menu(
            "Choose what you want to calculate :\n\n Enter 1. Perimeter of Square \n Enter 2. Perimeter Of rectangle \n Enter 3. Perimeter of circle \n Enter 0. To Exit : ",
            {
                1: lambda: self._read_positive("\nEnter Side Of square : ") * 4,
                2: lambda: 2 * (self._read_positive("\nEnter Length Of Rectangle : ")
                                + self._read_positive("\nEnter width Of Rectangle : ")),
                3: lambda: self._read_positive("\nEnter Radius Of circle : ") * 2 * 3.14,
            },
        )

    def volume(self):
        """Menu-driven volume of a cube, cylinder or sphere."""
        print("\n **** Calculater Volume **** \n")  # banner kept verbatim (incl. original spelling)
        return self._menu(
            "Choose what you want to calculate :\n\n Enter 1. Volume of Cube \n Enter 2. Volume Of Cylinder \n Enter 3. Volume of Sphere \n Enter 0. To Exit : ",
            {
                1: lambda: self._read_positive("\nEnter Side Of cube : ") ** 3,
                2: lambda: (3.14
                            * self._read_positive("\nEnter Radius Of Cylinder : ") ** 2
                            * self._read_positive("\nEnter height Of Cylinder : ")),
                3: lambda: (4 / 3) * 3.14 * self._read_positive("\nEnter Radius Of Sphere : ") ** 3,
            },
        )
| 47.889734
| 183
| 0.424295
| 1,150
| 12,595
| 4.644348
| 0.108696
| 0.023591
| 0.055046
| 0.06759
| 0.856581
| 0.813705
| 0.810897
| 0.785808
| 0.770081
| 0.747613
| 0
| 0.011674
| 0.51711
| 12,595
| 262
| 184
| 48.072519
| 0.866491
| 0.146963
| 0
| 0.761905
| 0
| 0.014286
| 0.226887
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019048
| false
| 0
| 0
| 0
| 0.1
| 0.128571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
94e70e1ca4536cb533887ce0f1b2470fd4d66807
| 56,601
|
py
|
Python
|
drawPlot.py
|
svarthafnyra/CNN_Visualizations
|
a17615932519e67c7b7ec4ebaf030047dfd6d1e2
|
[
"MIT"
] | 17
|
2019-08-13T06:07:13.000Z
|
2021-03-02T22:14:21.000Z
|
drawPlot.py
|
svarthafnyra/CAMP-Project
|
a17615932519e67c7b7ec4ebaf030047dfd6d1e2
|
[
"MIT"
] | null | null | null |
drawPlot.py
|
svarthafnyra/CAMP-Project
|
a17615932519e67c7b7ec4ebaf030047dfd6d1e2
|
[
"MIT"
] | 3
|
2019-12-16T09:08:10.000Z
|
2020-02-19T10:43:25.000Z
|
from visualization import runExplain,runGGradCam,runGradCam,runVanillaBP,runsmoothGrad,runGBackProp
from misc_functions import get_params
from matplotlib import pyplot as plt
# for grad cam these are the outputs:
#original_image,gray,color,result,adversarial,gray2,color2,result2
# for explain:
#original_img,heat, mask,cam,adversarialpic,heat2, cam2
# for GBP:
#original_image, colorgrads,graygrads,possal, negsal, adversarial,colorgrads2,graygrads2,possal2,negsal2
# for GGCam:
# original_image, guidedgrad, grayguidedgrad, adversarial, guidedgrad2, grayguidedgrad2
# for smooth_grad:
#original_image,colorgrads,graygrads,adversarial,colorgrads2,graygrads2
# vanilla BP:
# original_image,vanilbp,grayvanilbp,adversarial,vanilbp2,grayvanilbp2
# photo index, network, visualization
def _show(ax, image, title=None):
    """Render *image* on *ax*, optionally titling the panel."""
    ax.imshow(image)
    if title is not None:
        ax.set_title(title)


def _bars(ax, indices, values, labels, title):
    """Render the prediction bar chart that accompanies every image row."""
    ax.bar(indices, values, align='center', alpha=0.5)
    ax.set_title(title)
    ax.set_xticks(indices)
    ax.set_xticklabels(labels, rotation=45, ha="right")


def _export(fig, rows, prefix, img_name, label, choose_network, training, structure):
    """Resize, pack and save *fig* under the project's comparison naming scheme."""
    fig.set_size_inches(32, 9 * rows)
    fig.tight_layout()
    fig.savefig(prefix + '_' + img_name + '_' + label + ' (' +
                choose_network + '_' + training + '_' + structure + ' )', dpi=100)


def compareAttacks(vizmethod,
                   choose_network,
                   image_index,
                   training='',
                   structure=''):
    """Run every adversarial attack with one visualization method and save a grid.

    Row 1 shows the clean image plus its visualization and prediction bars;
    each subsequent row shows one attack's adversarial image, visualization
    and predictions.  The figure is written to ``Comparing/AttackComp_...``.

    Bug fix: the original 'GGradCam' branch never incremented ``j``, so every
    attack drew into row 1 and overwrote the previous one; it now advances
    like the other branches.  All titles and filenames are preserved verbatim
    (including the original 'Orignial'/'Grasycale' spellings).
    """
    isTrained = True
    _, _, _, img_name, _ = get_params(image_index, choose_network, isTrained, training, structure)
    attacks = ['FGSM', 'DeepFool', 'PGD', 'SalMap', 'LBFGS', 'RPGD']  # , 'Boundary']#,'SinglePixel']
    rows = 1 + len(attacks)
    fig = plt.figure()
    fig.suptitle('Comparing Attacks:' + img_name + ' - ' + vizmethod)

    if vizmethod == 'Explain':
        iters = 50
        j = 1
        for attack in attacks:
            (original_img, heat, mask, cam,
             adversarialpic, heat2, mask2, cam2,
             indices, orig_labs, orig_vals, adver_labs, adver_vals) = runExplain(
                choose_network, isTrained, training, structure,
                image_index, iters, attack_type=attack)
            if j == 1:  # clean-image row drawn once
                _show(fig.add_subplot(rows, 5, 1), original_img, 'Original Image')
                _show(fig.add_subplot(rows, 5, 2), heat, 'Learned Mask Color')
                _show(fig.add_subplot(rows, 5, 3), mask, 'Learned Mask Gray')
                _show(fig.add_subplot(rows, 5, 4), cam, 'Cam Result')
                _bars(fig.add_subplot(rows, 5, 5), indices, orig_vals, orig_labs,
                      'Orignial Image Predictions')
            _show(fig.add_subplot(rows, 5, 5 * j + 1), adversarialpic, attack + ' Attack')
            _show(fig.add_subplot(rows, 5, 5 * j + 2), heat2)  # original sets no title here
            _show(fig.add_subplot(rows, 5, 5 * j + 3), mask2, 'Adversary Mask Gray')
            _show(fig.add_subplot(rows, 5, 5 * j + 4), cam2, 'Adversary Cam Result')
            _bars(fig.add_subplot(rows, 5, 5 * (j + 1)), indices, adver_vals, adver_labs,
                  'Adversary Image Predictions')
            j += 1
        _export(fig, rows, 'Comparing/AttackComp', img_name, vizmethod,
                choose_network, training, structure)

    elif vizmethod == 'GradCam':
        j = 1
        for attack in attacks:
            print(attack)
            (original_image, gray, color, result, adversarial, gray2, color2, result2,
             indices, orig_labs, orig_vals, adver_labs, adver_vals) = runGradCam(
                choose_network, isTrained, training, structure,
                image_index, attack_type=attack)
            if j == 1:
                _show(fig.add_subplot(rows, 5, 1), original_image, 'Original Image')
                _show(fig.add_subplot(rows, 5, 2), gray, 'Cam Grasycale')
                _show(fig.add_subplot(rows, 5, 3), color, 'Cam HeatMap')
                _show(fig.add_subplot(rows, 5, 4), result, 'Cam Result')
                _bars(fig.add_subplot(rows, 5, 5), indices, orig_vals, orig_labs,
                      'Orignial Image Predictions')
            _show(fig.add_subplot(rows, 5, 5 * j + 1), adversarial, attack + ' Attack')
            _show(fig.add_subplot(rows, 5, 5 * j + 2), gray2, 'Adversary Cam Grasycale')
            _show(fig.add_subplot(rows, 5, 5 * j + 3), color2, 'Adversary Cam HeatMap')
            _show(fig.add_subplot(rows, 5, 5 * j + 4), result2, 'Adversary Cam Result')
            _bars(fig.add_subplot(rows, 5, 5 * j + 5), indices, adver_vals, adver_labs,
                  'Adversary Image Predictions')
            j += 1
        _export(fig, rows, 'Comparing/AttackComp', img_name, vizmethod,
                choose_network, training, structure)

    elif vizmethod == 'GBP':
        j = 1
        for attack in attacks:
            (original_image, colorgrads, graygrads, possal, negsal,
             adversarial, colorgrads2, graygrads2, possal2, negsal2,
             indices, orig_labs, orig_vals, adver_labs, adver_vals) = runGBackProp(
                choose_network, isTrained, training, structure,
                image_index, attack_type=attack)
            if j == 1:
                _show(fig.add_subplot(rows, 6, 1), original_image, 'Original Image')
                _show(fig.add_subplot(rows, 6, 2), colorgrads, 'Guided BP Color')
                _show(fig.add_subplot(rows, 6, 3), graygrads, 'Guided BP Gray')
                _show(fig.add_subplot(rows, 6, 4), possal, 'Positive Saliency')
                _show(fig.add_subplot(rows, 6, 5), negsal, 'Negative Saliency')
                _bars(fig.add_subplot(rows, 6, 6), indices, orig_vals, orig_labs,
                      'Orignial Image Predictions')
            _show(fig.add_subplot(rows, 6, 6 * j + 1), adversarial, attack + ' Attack')
            _show(fig.add_subplot(rows, 6, 6 * j + 2), colorgrads2, 'Adversarial Guided BP Color')
            # The next two titles reproduce the original string concatenations,
            # which omitted a separating space.
            _show(fig.add_subplot(rows, 6, 6 * j + 3), graygrads2, 'AdversarialGuided BP Gray')
            _show(fig.add_subplot(rows, 6, 6 * j + 4), possal2, 'Adversarial Positive Saliency')
            _show(fig.add_subplot(rows, 6, 6 * j + 5), negsal2, 'AdversarialNegative Saliency')
            _bars(fig.add_subplot(rows, 6, 6 * j + 6), indices, adver_vals, adver_labs,
                  'Adversary Image Predictions')
            j += 1
        _export(fig, rows, 'Comparing/AttackComp', img_name, vizmethod,
                choose_network, training, structure)

    elif vizmethod == 'GGradCam':
        j = 1
        for attack in attacks:
            (original_image, guidedgrad, grayguidedgrad,
             adversarial, guidedgrad2, grayguidedgrad2,
             indices, orig_labs, orig_vals, adver_labs, adver_vals) = runGGradCam(
                choose_network, isTrained, training, structure,
                image_index, attack_type=attack)
            if j == 1:
                _show(fig.add_subplot(rows, 4, 1), original_image, 'Original Image')
                _show(fig.add_subplot(rows, 4, 2), guidedgrad, 'Guided Grad Cam')
                _show(fig.add_subplot(rows, 4, 3), grayguidedgrad, 'Guided Grad Cam Grasycale')
                _bars(fig.add_subplot(rows, 4, 4), indices, orig_vals, orig_labs,
                      'Orignial Image Predictions')
            _show(fig.add_subplot(rows, 4, 4 * j + 1), adversarial, attack + ' Attack')
            _show(fig.add_subplot(rows, 4, 4 * j + 2), guidedgrad2, 'Adversary Guided Grad Cam')
            _show(fig.add_subplot(rows, 4, 4 * j + 3), grayguidedgrad2,
                  'Adversary Guided Grad Cam Grasycale')
            _bars(fig.add_subplot(rows, 4, 4 * j + 4), indices, adver_vals, adver_labs,
                  'Adversary Image Predictions')
            j += 1  # BUG FIX: original never advanced j here, overwriting row 1
        _export(fig, rows, 'Comparing/AttackComp', img_name, vizmethod,
                choose_network, training, structure)

    elif vizmethod == 'SmoothGrad':
        j = 1
        for attack in attacks:
            (original_image, colorgrads, graygrads,
             adversarial, colorgrads2, graygrads2,
             indices, orig_labs, orig_vals, adver_labs, adver_vals) = runsmoothGrad(
                choose_network, isTrained, training, structure,
                image_index, attack_type=attack)
            if j == 1:
                _show(fig.add_subplot(rows, 4, 1), original_image, 'Original Image')
                _show(fig.add_subplot(rows, 4, 2), colorgrads, 'Smooth BP')
                _show(fig.add_subplot(rows, 4, 3), graygrads, 'Smooth BP Gray')
                _bars(fig.add_subplot(rows, 4, 4), indices, orig_vals, orig_labs,
                      'Orignial Image Predictions')
            _show(fig.add_subplot(rows, 4, 4 * j + 1), adversarial, attack + ' Attack')
            _show(fig.add_subplot(rows, 4, 4 * j + 2), colorgrads2, 'Adversary Smooth BP')
            _show(fig.add_subplot(rows, 4, 4 * j + 3), graygrads2, 'Adversary Smooth BP Gray')
            _bars(fig.add_subplot(rows, 4, 4 * j + 4), indices, adver_vals, adver_labs,
                  'Adversary Image Predictions')
            j += 1
        _export(fig, rows, 'Comparing/AttackComp', img_name, vizmethod,
                choose_network, training, structure)

    elif vizmethod == 'VanillaBP':
        j = 1
        for attack in attacks:
            (original_image, vanilbp, grayvanilbp,
             adversarial, vanilbp2, grayvanilbp2,
             indices, orig_labs, orig_vals, adver_labs, adver_vals) = runVanillaBP(
                choose_network, isTrained, training, structure,
                image_index, attack_type=attack)
            if j == 1:
                _show(fig.add_subplot(rows, 4, 1), original_image, 'Original Image')
                _show(fig.add_subplot(rows, 4, 2), vanilbp, 'Vanilla BackProp')
                _show(fig.add_subplot(rows, 4, 3), grayvanilbp, 'Vanilla BackProp GrayScale')
                _bars(fig.add_subplot(rows, 4, 4), indices, orig_vals, orig_labs,
                      'Orignial Image Predictions')
            _show(fig.add_subplot(rows, 4, j * 4 + 1), adversarial, attack + ' Attack')
            _show(fig.add_subplot(rows, 4, j * 4 + 2), vanilbp2, 'Adversary Vanilla BackProp')
            _show(fig.add_subplot(rows, 4, j * 4 + 3), grayvanilbp2,
                  'Adversary Vanilla BackProp GrayScale')
            _bars(fig.add_subplot(rows, 4, j * 4 + 4), indices, adver_vals, adver_labs,
                  'Adversary Image Predictions')
            j += 1
        _export(fig, rows, 'Comparing/AttackComp', img_name, vizmethod,
                choose_network, training, structure)
# *********************************************************************************************************
# *********************************************************************************************************
def _panel(ax, image, title=None):
    """Render *image* on *ax*, optionally titling the panel."""
    ax.imshow(image)
    if title is not None:
        ax.set_title(title)


def _pred_bar(ax, indices, values, labels, title):
    """Render the prediction bar chart that accompanies every image row."""
    ax.bar(indices, values, align='center', alpha=0.5)
    ax.set_title(title)
    ax.set_xticks(indices)
    ax.set_xticklabels(labels, rotation=45, ha="right")


def compareNetworks(attackmethod, vizmethod, image_index, training='Normal'):
    """Run one attack/visualization pair across every network and save a grid.

    Iterates the built-in network list (with the 'Custom' entry repeated once
    per custom structure), drawing two rows per model: clean image +
    visualization + predictions, then the adversarial counterparts.  The
    figure is written to ``Comparing/NetworkComp_...``.

    Bug fix: the original 'GGradCam' branch placed one panel at subplot index
    ``64*j+6`` — a typo for ``4*j+6`` — which is far outside the grid and
    made matplotlib raise.  All titles and filenames are otherwise preserved
    verbatim (including the original 'Orignial'/'Grasycale' spellings).
    """
    isTrained = True
    i = attackmethod
    _, _, _, img_name, _ = get_params(image_index, 'AlexNet', isTrained)  # dont mind this alexnet here.
    networks = ('AlexNet', 'VGG19', 'ResNet50', 'Custom')
    structures = ('ResNet50', 'VGG19')
    numberofmodels = len(networks) + len(structures) - 1  # 'Custom' is visited once per structure
    rows = 2 * numberofmodels
    fig = plt.figure()
    fig.suptitle('Comparing Networks:' + img_name + ' - ' + attackmethod)
    j = 0
    n = 0
    s = 0
    while n < len(networks) and s < len(structures):
        choose_network = networks[n]
        structure = structures[s]
        if vizmethod == 'Explain':
            iters = 100
            (original_img, heat, mask, cam, adversarialpic, heat2, mask2, cam2,
             indices, orig_labs, orig_vals, adver_labs, adver_vals) = runExplain(
                choose_network, isTrained, training, structure,
                image_index, iters, attack_type=i)
            _panel(fig.add_subplot(rows, 5, 5 * j + 1), original_img,
                   'Original Image. Method: ' + vizmethod)
            _panel(fig.add_subplot(rows, 5, 5 * j + 2), heat, 'Learned Mask Color')
            _panel(fig.add_subplot(rows, 5, 5 * j + 3), mask, 'Learned Mask Gray')
            _panel(fig.add_subplot(rows, 5, 5 * j + 4), cam, 'Cam Result')
            _pred_bar(fig.add_subplot(rows, 5, 5 * j + 5), indices, orig_vals, orig_labs,
                      'Orignial Image Predictions')
            _panel(fig.add_subplot(rows, 5, 5 * j + 6), adversarialpic, 'Adversarial Image')
            _panel(fig.add_subplot(rows, 5, 5 * j + 7), heat2)  # original sets no title here
            _panel(fig.add_subplot(rows, 5, 5 * j + 8), mask2, 'Adversary Mask Gray')
            _panel(fig.add_subplot(rows, 5, 5 * j + 9), cam2, 'Adversary Cam Result')
            _pred_bar(fig.add_subplot(rows, 5, 5 * j + 10), indices, adver_vals, adver_labs,
                      'Adversary Image Predictions')
            j += 2
        elif vizmethod == 'GradCam':
            (original_image, gray, color, result, adversarial, gray2, color2, result2,
             indices, orig_labs, orig_vals, adver_labs, adver_vals) = runGradCam(
                choose_network, isTrained, training, structure,
                image_index, attack_type=i)
            _panel(fig.add_subplot(rows, 5, 5 * j + 1), original_image,
                   'Original Image. Method:' + vizmethod)
            _panel(fig.add_subplot(rows, 5, 5 * j + 2), gray, 'Cam Grasycale')
            _panel(fig.add_subplot(rows, 5, 5 * j + 3), color, 'Cam HeatMap')
            _panel(fig.add_subplot(rows, 5, 5 * j + 4), result, 'Cam Result')
            _pred_bar(fig.add_subplot(rows, 5, 5 * j + 5), indices, orig_vals, orig_labs,
                      'Orignial Image Predictions')
            _panel(fig.add_subplot(rows, 5, 5 * j + 6), adversarial, 'Adversarial Image')
            _panel(fig.add_subplot(rows, 5, 5 * j + 7), gray2, 'Adversary Cam Grasycale')
            _panel(fig.add_subplot(rows, 5, 5 * j + 8), color2, 'Adversary Cam HeatMap')
            _panel(fig.add_subplot(rows, 5, 5 * j + 9), result2, 'Adversary Cam Result')
            _pred_bar(fig.add_subplot(rows, 5, 5 * j + 10), indices, adver_vals, adver_labs,
                      'Adversary Image Predictions')
            j += 2
        elif vizmethod == 'GBP':
            (original_image, colorgrads, graygrads, possal, negsal,
             adversarial, colorgrads2, graygrads2, possal2, negsal2,
             indices, orig_labs, orig_vals, adver_labs, adver_vals) = runGBackProp(
                choose_network, isTrained, training, structure,
                image_index, attack_type=i)
            _panel(fig.add_subplot(rows, 6, 6 * j + 1), original_image,
                   'Original Image. Method:' + vizmethod)
            _panel(fig.add_subplot(rows, 6, 6 * j + 2), colorgrads, 'Guided BP Color')
            _panel(fig.add_subplot(rows, 6, 6 * j + 3), graygrads, 'Guided BP Gray')
            _panel(fig.add_subplot(rows, 6, 6 * j + 4), possal, 'Positive Saliency')
            _panel(fig.add_subplot(rows, 6, 6 * j + 5), negsal, 'Negative Saliency')
            _pred_bar(fig.add_subplot(rows, 6, 6 * j + 6), indices, orig_vals, orig_labs,
                      'Orignial Image Predictions')
            _panel(fig.add_subplot(rows, 6, 6 * j + 7), adversarial, 'Adversarial Image')
            _panel(fig.add_subplot(rows, 6, 6 * j + 8), colorgrads2, 'Adversarial Guided BP Color')
            # The next two titles reproduce the original string concatenations,
            # which omitted a separating space.
            _panel(fig.add_subplot(rows, 6, 6 * j + 9), graygrads2, 'AdversarialGuided BP Gray')
            _panel(fig.add_subplot(rows, 6, 6 * j + 10), possal2, 'Adversarial Positive Saliency')
            _panel(fig.add_subplot(rows, 6, 6 * j + 11), negsal2, 'AdversarialNegative Saliency')
            _pred_bar(fig.add_subplot(rows, 6, 6 * j + 12), indices, adver_vals, adver_labs,
                      'Adversary Image Predictions')
            j += 2
        elif vizmethod == 'GGradCam':
            (original_image, guidedgrad, grayguidedgrad,
             adversarial, guidedgrad2, grayguidedgrad2,
             indices, orig_labs, orig_vals, adver_labs, adver_vals) = runGGradCam(
                choose_network, isTrained, training, structure,
                image_index, attack_type=i)
            _panel(fig.add_subplot(rows, 4, 4 * j + 1), original_image,
                   'Original Image. Method:' + vizmethod)
            _panel(fig.add_subplot(rows, 4, 4 * j + 2), guidedgrad, 'Guided Grad Cam')
            _panel(fig.add_subplot(rows, 4, 4 * j + 3), grayguidedgrad, 'Guided Grad Cam Grasycale')
            _pred_bar(fig.add_subplot(rows, 4, 4 * j + 4), indices, orig_vals, orig_labs,
                      'Orignial Image Predictions')
            _panel(fig.add_subplot(rows, 4, 4 * j + 5), adversarial, 'Adversarial Image')
            # BUG FIX: original used 64*j+6 (out-of-grid subplot index)
            _panel(fig.add_subplot(rows, 4, 4 * j + 6), guidedgrad2, 'Adversary Guided Grad Cam')
            _panel(fig.add_subplot(rows, 4, 4 * j + 7), grayguidedgrad2,
                   'Adversary Guided Grad Cam Grasycale')
            _pred_bar(fig.add_subplot(rows, 4, 4 * j + 8), indices, adver_vals, adver_labs,
                      'Adversary Image Predictions')
            j += 2
        elif vizmethod == 'SmoothGrad':
            (original_image, colorgrads, graygrads,
             adversarial, colorgrads2, graygrads2,
             indices, orig_labs, orig_vals, adver_labs, adver_vals) = runsmoothGrad(
                choose_network, isTrained, training, structure,
                image_index, attack_type=i)
            _panel(fig.add_subplot(rows, 4, 4 * j + 1), original_image,
                   'Original Image. Method:' + vizmethod)
            _panel(fig.add_subplot(rows, 4, 4 * j + 2), colorgrads, 'Smooth BP')
            _panel(fig.add_subplot(rows, 4, 4 * j + 3), graygrads, 'Smooth BP Gray')
            _pred_bar(fig.add_subplot(rows, 4, 4 * j + 4), indices, orig_vals, orig_labs,
                      'Orignial Image Predictions')
            _panel(fig.add_subplot(rows, 4, 4 * j + 5), adversarial, 'Adversarial Image')
            _panel(fig.add_subplot(rows, 4, 4 * j + 6), colorgrads2, 'Adversary Smooth BP')
            _panel(fig.add_subplot(rows, 4, 4 * j + 7), graygrads2, 'Adversary Smooth BP Gray')
            _pred_bar(fig.add_subplot(rows, 4, 4 * j + 8), indices, adver_vals, adver_labs,
                      'Adversary Image Predictions')
            j += 2
        elif vizmethod == 'VanillaBP':
            (original_image, vanilbp, grayvanilbp,
             adversarial, vanilbp2, grayvanilbp2,
             indices, orig_labs, orig_vals, adver_labs, adver_vals) = runVanillaBP(
                choose_network, isTrained, training, structure,
                image_index, attack_type=i)
            _panel(fig.add_subplot(rows, 4, 4 * j + 1), original_image,
                   'Original Image. Method:' + vizmethod)
            _panel(fig.add_subplot(rows, 4, 4 * j + 2), vanilbp, 'Vanilla BackProp')
            _panel(fig.add_subplot(rows, 4, 4 * j + 3), grayvanilbp, 'Vanilla BackProp GrayScale')
            _pred_bar(fig.add_subplot(rows, 4, 4 * j + 4), indices, orig_vals, orig_labs,
                      'Orignial Image Predictions')
            _panel(fig.add_subplot(rows, 4, 4 * j + 5), adversarial, 'Adversarial Image')
            _panel(fig.add_subplot(rows, 4, 4 * j + 6), vanilbp2, 'Adversary Vanilla BackProp')
            _panel(fig.add_subplot(rows, 4, 4 * j + 7), grayvanilbp2,
                   'Adversary Vanilla BackProp GrayScale')
            _pred_bar(fig.add_subplot(rows, 4, 4 * j + 8), indices, adver_vals, adver_labs,
                      'Adversary Image Predictions')
            j += 2
        # 'Custom' is re-visited once per structure; other networks advance n.
        if choose_network == 'Custom':
            s += 1
        else:
            n += 1
    fig.set_size_inches(32, 9 * rows)
    fig.tight_layout()
    fig.savefig('Comparing/NetworkComp' + '_' +
                img_name + '_' +
                attackmethod + ' (' +
                choose_network + '_' +
                training + '_' +
                structure + ' )', dpi=100)
# *********************************************************************************************************
# *********************************************************************************************************
def compareVisualizations(attackmethod,
                          choose_network,
                          image_index,
                          training='',
                          structure=''):
    """Render one figure comparing several visualization methods on one image.

    For each method in ``vizmethods`` two rows are drawn on a 6-column grid:
    first the original image, its visualization panels and a bar chart of the
    top predictions, then the adversarial image with the same panels.  The
    figure is saved under ``Comparing/``.

    Parameters
    ----------
    attackmethod : adversarial attack identifier, forwarded to the run* helpers.
    choose_network : network name understood by the run* helpers.
    image_index : index of the input image.
    training, structure : extra selectors (relevant for 'Custom' networks);
        default to empty strings.
    """
    isTrained = True
    i = attackmethod
    # Only the image name is used here; the remaining returns are discarded.
    _,_,_,img_name,_ = get_params(image_index,choose_network,isTrained,training, structure)
    # NOTE(review): three methods are active; the commented-out ones would need
    # the extra rows implied by rows = 2*len(vizmethods) if re-enabled.
    vizmethods = ['GradCam','GBP','GGradCam']#,'SmoothGrad','VanillaBP',Explain']
    rows = 2*len(vizmethods)  # each method consumes two rows of the grid
    fig = plt.figure()
    fig.suptitle('Comparing Visualizations:'+img_name+' - '+ attackmethod)
    j = 0  # row-pair cursor: subplot indices are offsets from 6*j
    for vizmethod in vizmethods:
        if vizmethod == 'Explain':
            # Mask-optimization explanation: heatmap, gray mask, CAM overlay.
            iters = 100
            original_img,heat, mask,cam,adversarialpic,heat2, mask2, cam2,\
            indices,orig_labs,orig_vals,adver_labs,adver_vals= runExplain(choose_network,isTrained,
                                                                          training,structure,
                                                                          image_index,iters,
                                                                          attack_type=i)
            ax11 = fig.add_subplot(rows,6, 6*j+1)
            ax11.imshow(original_img)
            ax11.set_title('Original Image. Method: '+ vizmethod)
            ax1 = fig.add_subplot(rows,6, 6*j+2)
            ax1.imshow(heat)
            ax1.set_title('Learned Mask Color')
            ax2 = fig.add_subplot(rows,6, 6*j+3)
            ax2.imshow(mask)
            ax2.set_title('Learned Mask Gray')
            ax3 = fig.add_subplot(rows,6, 6*j+4)
            ax3.imshow(cam)
            ax3.set_title('Cam Result')
            # Position 6*j+5 is intentionally left empty (4 panels + bar chart).
            ax9 = fig.add_subplot(rows,6, 6*j+6)
            ax9.bar(indices, orig_vals, align='center', alpha=0.5)
            ax9.set_title('Orignial Image Predictions')
            ax9.set_xticks(indices)
            ax9.set_xticklabels(orig_labs, rotation=45, ha="right")
            ax12 = fig.add_subplot(rows,6, 6*j+7)
            ax12.imshow(adversarialpic)
            ax12.set_title('Adversarial Image')
            ax5 = fig.add_subplot(rows,6, 6*j+8)
            ax5.imshow(heat2)
            # NOTE(review): ax5 (adversary mask color) has no title, unlike ax1.
            ax6 = fig.add_subplot(rows,6, 6*j+9)
            ax6.imshow(mask2)
            ax6.set_title('Adversary Mask Gray')
            ax7 = fig.add_subplot(rows,6, 6*j+10)
            ax7.imshow(cam2)
            ax7.set_title('Adversary Cam Result')
            ax10 = fig.add_subplot(rows,6, 6*j+12)
            ax10.bar(indices, adver_vals, align='center', alpha=0.5)
            ax10.set_title('Adversary Image Predictions')
            ax10.set_xticks(indices)
            ax10.set_xticklabels(adver_labs, rotation=45, ha="right")
            j += 2
        elif vizmethod == 'GradCam':
            # Grad-CAM: grayscale map, heatmap, and overlay on the input.
            original_image,gray,color,result,adversarial,gray2,color2,result2,\
            indices,orig_labs,orig_vals,adver_labs,adver_vals = runGradCam(choose_network,isTrained,
                                                                           training,structure,
                                                                           image_index,attack_type=i)
            ax0 = fig.add_subplot(rows,6,6*j+1)
            ax0.imshow(original_image)
            ax0.set_title('Original Image. Method:' +vizmethod)
            ax1 = fig.add_subplot(rows,6,6*j+2)
            ax1.imshow(gray)
            ax1.set_title('Cam Grasycale')
            ax2 = fig.add_subplot(rows,6,6*j+3)
            ax2.imshow(color)
            ax2.set_title('Cam HeatMap')
            ax3 = fig.add_subplot(rows,6,6*j+4)
            ax3.imshow(result)
            ax3.set_title('Cam Result')
            ax9 = fig.add_subplot(rows,6,6*j+6)
            ax9.bar(indices,orig_vals,align='center', alpha=0.5)
            ax9.set_title('Orignial Image Predictions')
            ax9.set_xticks(indices)
            ax9.set_xticklabels(orig_labs,rotation = 45,ha="right")
            ax12 = fig.add_subplot(rows,6,6*j+7)
            ax12.imshow(adversarial)
            ax12.set_title('Adversarial Image')
            ax4 = fig.add_subplot(rows,6,6*j+8)
            ax4.imshow(gray2)
            ax4.set_title('Adversary Cam Grasycale')
            ax5 = fig.add_subplot(rows,6,6*j+9)
            ax5.imshow(color2)
            ax5.set_title('Adversary Cam HeatMap')
            ax6 = fig.add_subplot(rows,6,6*j+10)
            ax6.imshow(result2)
            ax6.set_title('Adversary Cam Result')
            ax10 = fig.add_subplot(rows,6,6*j+12)
            ax10.bar(indices,adver_vals,align='center', alpha=0.5)
            ax10.set_title('Adversary Image Predictions')
            ax10.set_xticks(indices)
            ax10.set_xticklabels(adver_labs,rotation = 45,ha="right")
            j +=2
        elif vizmethod == 'GBP':
            # Guided backprop: color/gray gradients plus pos/neg saliency maps.
            original_image, colorgrads,graygrads,possal, negsal, \
            adversarial,colorgrads2,graygrads2,possal2,negsal2,\
            indices,orig_labs,orig_vals,adver_labs,adver_vals = runGBackProp(choose_network,isTrained,
                                                                             training,structure,
                                                                             image_index,attack_type=i)
            ax11 = fig.add_subplot(rows,6,6*j+1)
            ax11.imshow(original_image)
            ax11.set_title('Original Image. Method:' +vizmethod)
            ax1 = fig.add_subplot(rows,6,6*j+2)
            ax1.imshow(colorgrads)
            ax1.set_title('Guided BP Color')
            ax2 = fig.add_subplot(rows,6, 6*j+3)
            ax2.imshow(graygrads)
            ax2.set_title( 'Guided BP Gray')
            ax3 = fig.add_subplot(rows,6, 6*j+4)
            ax3.imshow(possal)
            ax3.set_title('Positive Saliency')
            ax4 = fig.add_subplot(rows,6, 6*j+5)
            ax4.imshow(negsal)
            ax4.set_title('Negative Saliency')
            ax9 = fig.add_subplot(rows,6,6*j+6)
            ax9.bar(indices,orig_vals,align='center', alpha=0.5)
            ax9.set_title('Orignial Image Predictions')
            ax9.set_xticks(indices)
            ax9.set_xticklabels(orig_labs,rotation = 45,ha="right")
            ax12 = fig.add_subplot(rows,6,6*j+7)
            ax12.imshow(adversarial)
            ax12.set_title('Adversarial Image')
            ax5 = fig.add_subplot(rows,6, 6*j+8)
            ax5.imshow(colorgrads2)
            ax5.set_title('Adversarial Guided BP Color')
            ax6 = fig.add_subplot(rows,6, 6*j+9)
            ax6.imshow(graygrads2)
            ax6.set_title('Adversarial'+ 'Guided BP Gray')
            ax7 = fig.add_subplot(rows,6, 6*j+10)
            ax7.imshow(possal2)
            ax7.set_title('Adversarial ''Positive Saliency')
            ax8 = fig.add_subplot(rows,6, 6*j+11)
            ax8.imshow(negsal2)
            ax8.set_title('Adversarial'+'Negative Saliency')
            ax10 = fig.add_subplot(rows,6,6*j+12)
            ax10.bar(indices,adver_vals,align='center', alpha=0.5)
            ax10.set_title('Adversary Image Predictions')
            ax10.set_xticks(indices)
            ax10.set_xticklabels(adver_labs,rotation = 45,ha="right")
            j += 2
        elif vizmethod == 'GGradCam':
            # Guided Grad-CAM: combined guided-backprop x Grad-CAM maps.
            original_image, guidedgrad, grayguidedgrad,\
            adversarial, guidedgrad2, grayguidedgrad2,\
            indices,orig_labs,orig_vals,adver_labs,adver_vals = runGGradCam(choose_network,isTrained,
                                                                            training,structure,
                                                                            image_index,attack_type=i)
            ax0 = fig.add_subplot(rows,6,6*j+1)
            ax0.imshow(original_image)
            ax0.set_title('Original Image. Method:' +vizmethod)
            ax1 = fig.add_subplot(rows,6,6*j+2)
            ax1.imshow(guidedgrad)
            ax1.set_title('Guided Grad Cam')
            ax2 = fig.add_subplot(rows,6,6*j+3)
            ax2.imshow(grayguidedgrad)
            ax2.set_title('Guided Grad Cam Grasycale')
            ax9 = fig.add_subplot(rows,6,6*j+6)
            ax9.bar(indices,orig_vals,align='center', alpha=0.5)
            ax9.set_title('Orignial Image Predictions')
            ax9.set_xticks(indices)
            ax9.set_xticklabels(orig_labs,rotation = 45,ha="right")
            ax12 = fig.add_subplot(rows,6,6*j+7)
            ax12.imshow(adversarial)
            ax12.set_title('Adversarial Image')
            ax3 = fig.add_subplot(rows,6,6*j+8)
            ax3.imshow(guidedgrad2)
            ax3.set_title('Adversary Guided Grad Cam')
            ax4 = fig.add_subplot(rows,6,6*j+9)
            ax4.imshow(grayguidedgrad2)
            ax4.set_title('Adversary Guided Grad Cam Grasycale')
            ax10 = fig.add_subplot(rows,6,6*j+12)
            ax10.bar(indices,adver_vals,align='center', alpha=0.5)
            ax10.set_title('Adversary Image Predictions')
            ax10.set_xticks(indices)
            ax10.set_xticklabels(adver_labs,rotation = 45,ha="right")
            j += 2
        elif vizmethod == 'SmoothGrad':
            # SmoothGrad: gradients averaged over noisy copies of the input.
            original_image,colorgrads,graygrads,\
            adversarial,colorgrads2,graygrads2,\
            indices,orig_labs,orig_vals,adver_labs,adver_vals = runsmoothGrad(choose_network,isTrained,
                                                                              training,structure,
                                                                              image_index,attack_type=i)
            ax0 = fig.add_subplot(rows,6,6*j+1)
            ax0.imshow(original_image)
            ax0.set_title('Original Image. Method:' +vizmethod)
            ax1 = fig.add_subplot(rows,6,6*j+2)
            ax1.imshow(colorgrads)
            ax1.set_title('Smooth BP')
            ax2 = fig.add_subplot(rows,6,6*j+3)
            ax2.imshow(graygrads)
            ax2.set_title('Smooth BP Gray')
            ax9 = fig.add_subplot(rows,6,6*j+6)
            ax9.bar(indices,orig_vals,align='center', alpha=0.5)
            ax9.set_title('Orignial Image Predictions')
            ax9.set_xticks(indices)
            ax9.set_xticklabels(orig_labs,rotation = 45,ha="right")
            ax12 = fig.add_subplot(rows,6,6*j+7)
            ax12.imshow(adversarial)
            ax12.set_title('Adversarial Image')
            ax3 = fig.add_subplot(rows,6,6*j+8)
            ax3.imshow(colorgrads2)
            ax3.set_title('Adversary Smooth BP')
            ax4 = fig.add_subplot(rows,6, 6*j+9)
            ax4.imshow(graygrads2)
            ax4.set_title('Adversary Smooth BP Gray')
            ax10 = fig.add_subplot(rows,6,6*j+12)
            ax10.bar(indices,adver_vals,align='center', alpha=0.5)
            ax10.set_title('Adversary Image Predictions')
            ax10.set_xticks(indices)
            ax10.set_xticklabels(adver_labs,rotation = 45,ha="right")
            j += 2
        elif vizmethod == 'VanillaBP':
            # Vanilla backprop: raw input gradients, color and grayscale.
            original_image,vanilbp,grayvanilbp,\
            adversarial,vanilbp2,grayvanilbp2,\
            indices,orig_labs,orig_vals,adver_labs,adver_vals = runVanillaBP(choose_network,
                                                                             isTrained,training,structure,
                                                                             image_index,attack_type=i)
            ax0 = fig.add_subplot(rows,6,6*j+1)
            ax0.imshow(original_image)
            ax0.set_title('Original Image. Method:' +vizmethod)
            ax1 = fig.add_subplot(rows,6,6*j+2)
            ax1.imshow(vanilbp)
            ax1.set_title('Vanilla BackProp')
            ax2 = fig.add_subplot(rows,6,6*j+3)
            ax2.imshow(grayvanilbp)
            ax2.set_title('Vanilla BackProp GrayScale')
            ax9 = fig.add_subplot(rows,6,6*j+6)
            ax9.bar(indices,orig_vals,align='center', alpha=0.5)
            ax9.set_title('Orignial Image Predictions')
            ax9.set_xticks(indices)
            ax9.set_xticklabels(orig_labs,rotation = 45,ha="right")
            ax12 = fig.add_subplot(rows,6,6*j+7)
            ax12.imshow(adversarial)
            ax12.set_title('Adversarial Image')
            ax3 = fig.add_subplot(rows,6,6*j+8)
            ax3.imshow(vanilbp2)
            ax3.set_title('Adversary Vanilla BackProp')
            ax4 = fig.add_subplot(rows,6,6*j+9)
            ax4.imshow(grayvanilbp2)
            ax4.set_title('Adversary Vanilla BackProp GrayScale')
            ax10 = fig.add_subplot(rows,6,6*j+12)
            ax10.bar(indices,adver_vals,align='center', alpha=0.5)
            ax10.set_title('Adversary Image Predictions')
            ax10.set_xticks(indices)
            ax10.set_xticklabels(adver_labs,rotation = 45,ha="right")
            j += 2
    # Scale the figure with the number of rows, then write it out.
    fig.set_size_inches(32, 9*rows)
    fig.tight_layout()
    fig.savefig('Comparing/VisualizationComp' +'_' +
                img_name +'_' +
                attackmethod + ' (' +
                choose_network +'_' +
                training + '_' +
                structure + ' )', dpi=100)
# *********************************************************************************************************
# *********************************************************************************************************
def compareTraining(attackmethod,
                    vizmethod,
                    structure,
                    image_index):
    """Compare one visualization method across four training setups.

    Draws two grid rows per setup on the 'Custom' network:
    rows 1-2 normal training, rows 3-4 adversarial training,
    rows 5-6 normal training with input noise, rows 7-8 adversarial
    training with input noise.  Each pair shows the original image, its
    visualization panels and top predictions, then the adversarial image
    with the same panels.  The figure is saved under ``Comparing/``.

    Parameters
    ----------
    attackmethod : adversarial attack identifier, forwarded to the run* helpers.
    vizmethod : one of 'Explain', 'GradCam', 'GBP', 'GGradCam',
        'SmoothGrad', 'VanillaBP'.
    structure : custom-network structure selector.
    image_index : index of the input image.
    """
    isTrained = True
    choose_network = 'Custom'
    i = attackmethod
    # Only the image name is needed; the network arg does not matter for that.
    _,_,_,img_name,_ = get_params(image_index,'AlexNet',True) #dont mind this alexnet here.
    numberofmodels = 4
    rows = 2*numberofmodels  # two grid rows per training variant
    fig = plt.figure()
    fig.suptitle('Comparing Networks:'+img_name+' - '+ attackmethod)
    n = 0  # training-variant counter
    j = 0  # row-pair cursor: subplot indices are offsets from ncols*j
    ifNoise = 'No'
    while n<numberofmodels:
        # Pick the training regime (and whether input noise is requested)
        # for this pair of rows.
        if n==0:
            training = 'Normal'
        elif n==1:
            training = 'Adversarial'
        elif n ==2 :
            training = 'Normal'
            ifNoise = 'Yes'
            print('add noise to the image and then give it to the normally trained network')
        elif n ==3 :
            training = 'Adversarial'
            ifNoise = 'Yes'
            # Fixed: this branch feeds the adversarially trained network,
            # but the old message claimed the normally trained one.
            print('add noise to the image and then give it to the adversarially trained network')
        # NOTE(review): ifNoise only appears in titles/prints here; confirm the
        # run* helpers actually apply noise for the n==2 / n==3 cases.
        if vizmethod == 'Explain':
            # Mask-optimization explanation on a 5-column grid.
            iters = 100
            original_img,heat, mask,cam,adversarialpic,heat2, mask2, cam2,\
            indices,orig_labs,orig_vals,adver_labs,adver_vals= runExplain(choose_network,isTrained,
                                                                          training,structure,
                                                                          image_index,iters,
                                                                          attack_type=i)
            ax11 = fig.add_subplot(rows,5, 5*j+1)
            ax11.imshow(original_img)
            ax11.set_title('Original Image. Training: ' + training + 'Noise:' +ifNoise+ vizmethod)
            ax1 = fig.add_subplot(rows,5, 5*j+2)
            ax1.imshow(heat)
            ax1.set_title('Learned Mask Color')
            ax2 = fig.add_subplot(rows,5, 5*j+3)
            ax2.imshow(mask)
            ax2.set_title('Learned Mask Gray')
            ax3 = fig.add_subplot(rows,5, 5*j+4)
            ax3.imshow(cam)
            ax3.set_title('Cam Result')
            ax9 = fig.add_subplot(rows,5, 5*j+5)
            ax9.bar(indices, orig_vals, align='center', alpha=0.5)
            ax9.set_title('Orignial Image Predictions')
            ax9.set_xticks(indices)
            ax9.set_xticklabels(orig_labs, rotation=45, ha="right")
            ax12 = fig.add_subplot(rows,5, 5*j+6)
            ax12.imshow(adversarialpic)
            ax12.set_title('Adversarial Image. Training: ' + training + 'Noise:' +ifNoise)
            ax5 = fig.add_subplot(rows,5, 5*j+7)
            ax5.imshow(heat2)
            ax6 = fig.add_subplot(rows,5, 5*j+8)
            ax6.imshow(mask2)
            ax6.set_title('Adversary Mask Gray')
            ax7 = fig.add_subplot(rows,5, 5*j+9)
            ax7.imshow(cam2)
            ax7.set_title('Adversary Cam Result')
            ax10 = fig.add_subplot(rows,5, 5*j+10)
            ax10.bar(indices, adver_vals, align='center', alpha=0.5)
            ax10.set_title('Adversary Image Predictions')
            ax10.set_xticks(indices)
            ax10.set_xticklabels(adver_labs, rotation=45, ha="right")
            j += 2
        elif vizmethod == 'GradCam':
            # Grad-CAM panels on a 5-column grid.
            original_image,gray,color,result,adversarial,gray2,color2,result2,\
            indices,orig_labs,orig_vals,adver_labs,adver_vals = runGradCam(choose_network,isTrained,
                                                                           training,structure,
                                                                           image_index,attack_type=i)
            ax0 = fig.add_subplot(rows,5,5*j+1)
            ax0.imshow(original_image)
            ax0.set_title('Original Image. Training: ' + training + 'Noise:' +ifNoise +vizmethod)
            ax1 = fig.add_subplot(rows,5,5*j+2)
            ax1.imshow(gray)
            ax1.set_title('Cam Grasycale')
            ax2 = fig.add_subplot(rows,5,5*j+3)
            ax2.imshow(color)
            ax2.set_title('Cam HeatMap')
            ax3 = fig.add_subplot(rows,5,5*j+4)
            ax3.imshow(result)
            ax3.set_title('Cam Result')
            ax9 = fig.add_subplot(rows,5,5*j+5)
            ax9.bar(indices,orig_vals,align='center', alpha=0.5)
            ax9.set_title('Orignial Image Predictions')
            ax9.set_xticks(indices)
            ax9.set_xticklabels(orig_labs,rotation = 45,ha="right")
            ax12 = fig.add_subplot(rows,5,5*j+6)
            ax12.imshow(adversarial)
            ax12.set_title('Adversarial Image. Training: ' + training + 'Noise:' +ifNoise)
            ax4 = fig.add_subplot(rows,5,5*j+7)
            ax4.imshow(gray2)
            ax4.set_title('Adversary Cam Grasycale')
            ax5 = fig.add_subplot(rows,5,5*j+8)
            ax5.imshow(color2)
            ax5.set_title('Adversary Cam HeatMap')
            ax6 = fig.add_subplot(rows,5,5*j+9)
            ax6.imshow(result2)
            ax6.set_title('Adversary Cam Result')
            ax10 = fig.add_subplot(rows,5,5*j+10)
            ax10.bar(indices,adver_vals,align='center', alpha=0.5)
            ax10.set_title('Adversary Image Predictions')
            ax10.set_xticks(indices)
            ax10.set_xticklabels(adver_labs,rotation = 45,ha="right")
            j +=2
        elif vizmethod == 'GBP':
            # Guided backprop panels on a 6-column grid.
            original_image, colorgrads,graygrads,possal, negsal, \
            adversarial,colorgrads2,graygrads2,possal2,negsal2,\
            indices,orig_labs,orig_vals,adver_labs,adver_vals = runGBackProp(choose_network,isTrained,
                                                                             training,structure,
                                                                             image_index,attack_type=i)
            ax11 = fig.add_subplot(rows,6,6*j+1)
            ax11.imshow(original_image)
            ax11.set_title('Original Image. Training: ' + training + 'Noise:' +ifNoise +vizmethod)
            ax1 = fig.add_subplot(rows,6,6*j+2)
            ax1.imshow(colorgrads)
            ax1.set_title('Guided BP Color')
            ax2 = fig.add_subplot(rows,6, 6*j+3)
            ax2.imshow(graygrads)
            ax2.set_title( 'Guided BP Gray')
            ax3 = fig.add_subplot(rows,6, 6*j+4)
            ax3.imshow(possal)
            ax3.set_title('Positive Saliency')
            ax4 = fig.add_subplot(rows,6, 6*j+5)
            ax4.imshow(negsal)
            ax4.set_title('Negative Saliency')
            ax9 = fig.add_subplot(rows,6,6*j+6)
            ax9.bar(indices,orig_vals,align='center', alpha=0.5)
            ax9.set_title('Orignial Image Predictions')
            ax9.set_xticks(indices)
            ax9.set_xticklabels(orig_labs,rotation = 45,ha="right")
            ax12 = fig.add_subplot(rows,6,6*j+7)
            ax12.imshow(adversarial)
            ax12.set_title('Adversarial Image. Training: ' + training + 'Noise:' +ifNoise)
            ax5 = fig.add_subplot(rows,6, 6*j+8)
            ax5.imshow(colorgrads2)
            ax5.set_title('Adversarial Guided BP Color')
            ax6 = fig.add_subplot(rows,6, 6*j+9)
            ax6.imshow(graygrads2)
            ax6.set_title('Adversarial'+ 'Guided BP Gray')
            ax7 = fig.add_subplot(rows,6, 6*j+10)
            ax7.imshow(possal2)
            ax7.set_title('Adversarial ''Positive Saliency')
            ax8 = fig.add_subplot(rows,6, 6*j+11)
            ax8.imshow(negsal2)
            ax8.set_title('Adversarial'+'Negative Saliency')
            ax10 = fig.add_subplot(rows,6,6*j+12)
            ax10.bar(indices,adver_vals,align='center', alpha=0.5)
            ax10.set_title('Adversary Image Predictions')
            ax10.set_xticks(indices)
            ax10.set_xticklabels(adver_labs,rotation = 45,ha="right")
            j += 2
        elif vizmethod == 'GGradCam':
            # Guided Grad-CAM panels on a 4-column grid.
            original_image, guidedgrad, grayguidedgrad,\
            adversarial, guidedgrad2, grayguidedgrad2,\
            indices,orig_labs,orig_vals,adver_labs,adver_vals = runGGradCam(choose_network,isTrained,
                                                                            training,structure,
                                                                            image_index,attack_type=i)
            ax0 = fig.add_subplot(rows,4,4*j+1)
            ax0.imshow(original_image)
            ax0.set_title('Original Image. Training: ' + training + 'Noise:' +ifNoise +vizmethod)
            ax1 = fig.add_subplot(rows,4,4*j+2)
            ax1.imshow(guidedgrad)
            ax1.set_title('Guided Grad Cam')
            ax2 = fig.add_subplot(rows,4,4*j+3)
            ax2.imshow(grayguidedgrad)
            ax2.set_title('Guided Grad Cam Grasycale')
            ax9 = fig.add_subplot(rows,4,4*j+4)
            ax9.bar(indices,orig_vals,align='center', alpha=0.5)
            ax9.set_title('Orignial Image Predictions')
            ax9.set_xticks(indices)
            ax9.set_xticklabels(orig_labs,rotation = 45,ha="right")
            ax12 = fig.add_subplot(rows,4,4*j+5)
            ax12.imshow(adversarial)
            ax12.set_title('Adversarial Image. Training: ' + training + 'Noise:' +ifNoise)
            # Fixed: was 64*j+6, which exceeds the rows*4 grid for j >= 2 and
            # makes add_subplot raise on every variant after the first.
            ax3 = fig.add_subplot(rows,4,4*j+6)
            ax3.imshow(guidedgrad2)
            ax3.set_title('Adversary Guided Grad Cam')
            ax4 = fig.add_subplot(rows,4,4*j+7)
            ax4.imshow(grayguidedgrad2)
            ax4.set_title('Adversary Guided Grad Cam Grasycale')
            ax10 = fig.add_subplot(rows,4,4*j+8)
            ax10.bar(indices,adver_vals,align='center', alpha=0.5)
            ax10.set_title('Adversary Image Predictions')
            ax10.set_xticks(indices)
            ax10.set_xticklabels(adver_labs,rotation = 45,ha="right")
            j += 2
        elif vizmethod == 'SmoothGrad':
            # SmoothGrad panels on a 4-column grid.
            original_image,colorgrads,graygrads,\
            adversarial,colorgrads2,graygrads2,\
            indices,orig_labs,orig_vals,adver_labs,adver_vals = runsmoothGrad(choose_network,isTrained,
                                                                              training,structure,
                                                                              image_index,attack_type=i)
            ax0 = fig.add_subplot(rows,4,4*j+1)
            ax0.imshow(original_image)
            ax0.set_title('Original Image. Training: ' + training + 'Noise:' +ifNoise +vizmethod)
            ax1 = fig.add_subplot(rows,4,4*j+2)
            ax1.imshow(colorgrads)
            ax1.set_title('Smooth BP')
            ax2 = fig.add_subplot(rows,4,4*j+3)
            ax2.imshow(graygrads)
            ax2.set_title('Smooth BP Gray')
            ax9 = fig.add_subplot(rows,4,4*j+4)
            ax9.bar(indices,orig_vals,align='center', alpha=0.5)
            ax9.set_title('Orignial Image Predictions')
            ax9.set_xticks(indices)
            ax9.set_xticklabels(orig_labs,rotation = 45,ha="right")
            ax12 = fig.add_subplot(rows,4,4*j+5)
            ax12.imshow(adversarial)
            ax12.set_title('Adversarial Image. Training: ' + training + 'Noise:' +ifNoise)
            ax3 = fig.add_subplot(rows,4,4*j+6)
            ax3.imshow(colorgrads2)
            ax3.set_title('Adversary Smooth BP')
            ax4 = fig.add_subplot(rows,4, 4*j+7)
            ax4.imshow(graygrads2)
            ax4.set_title('Adversary Smooth BP Gray')
            ax10 = fig.add_subplot(rows,4,4*j+8)
            ax10.bar(indices,adver_vals,align='center', alpha=0.5)
            ax10.set_title('Adversary Image Predictions')
            ax10.set_xticks(indices)
            ax10.set_xticklabels(adver_labs,rotation = 45,ha="right")
            j += 2
        elif vizmethod == 'VanillaBP':
            # Vanilla backprop panels on a 4-column grid.
            original_image,vanilbp,grayvanilbp,\
            adversarial,vanilbp2,grayvanilbp2,\
            indices,orig_labs,orig_vals,adver_labs,adver_vals = runVanillaBP(choose_network,
                                                                             isTrained,training,structure,
                                                                             image_index,attack_type=i)
            ax0 = fig.add_subplot(rows,4,4*j+1)
            ax0.imshow(original_image)
            ax0.set_title('Original Image. Training: ' + training + 'Noise:' +ifNoise +vizmethod)
            ax1 = fig.add_subplot(rows,4,4*j+2)
            ax1.imshow(vanilbp)
            ax1.set_title('Vanilla BackProp')
            ax2 = fig.add_subplot(rows,4,4*j+3)
            ax2.imshow(grayvanilbp)
            ax2.set_title('Vanilla BackProp GrayScale')
            ax9 = fig.add_subplot(rows,4,4*j+4)
            ax9.bar(indices,orig_vals,align='center', alpha=0.5)
            ax9.set_title('Orignial Image Predictions')
            ax9.set_xticks(indices)
            ax9.set_xticklabels(orig_labs,rotation = 45,ha="right")
            ax12 = fig.add_subplot(rows,4,4*j+5)
            ax12.imshow(adversarial)
            ax12.set_title('Adversarial Image. Training: ' + training + 'Noise:' +ifNoise)
            ax3 = fig.add_subplot(rows,4,4*j+6)
            ax3.imshow(vanilbp2)
            ax3.set_title('Adversary Vanilla BackProp')
            ax4 = fig.add_subplot(rows,4,4*j+7)
            ax4.imshow(grayvanilbp2)
            ax4.set_title('Adversary Vanilla BackProp GrayScale')
            ax10 = fig.add_subplot(rows,4,4*j+8)
            ax10.bar(indices,adver_vals,align='center', alpha=0.5)
            ax10.set_title('Adversary Image Predictions')
            ax10.set_xticks(indices)
            ax10.set_xticklabels(adver_labs,rotation = 45,ha="right")
            j += 2
        n += 1
    # Scale the figure with the number of rows, then write it out.
    fig.set_size_inches(32, 9*rows)
    fig.tight_layout()
    fig.savefig('Comparing/TrainingComp' +'_' +
                img_name +'_' +
                attackmethod +'_' +
                vizmethod +'('+
                structure + ' )', dpi=100)
| 47.206839
| 110
| 0.528065
| 6,414
| 56,601
| 4.506704
| 0.032429
| 0.046496
| 0.10074
| 0.131737
| 0.959386
| 0.956999
| 0.953643
| 0.944406
| 0.938075
| 0.93427
| 0
| 0.057494
| 0.35222
| 56,601
| 1,199
| 111
| 47.206839
| 0.730888
| 0.023604
| 0
| 0.932666
| 0
| 0
| 0.111113
| 0.001267
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00364
| false
| 0
| 0.00273
| 0
| 0.006369
| 0.00273
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a2062050b4f91a40bd542a06b6dc5f0e6cad99b8
| 35,071
|
py
|
Python
|
src/bounds.py
|
StanfordAHA/Configuration
|
a5d404433d32b0ac20544d5bafa9422c979afc16
|
[
"BSD-3-Clause"
] | null | null | null |
src/bounds.py
|
StanfordAHA/Configuration
|
a5d404433d32b0ac20544d5bafa9422c979afc16
|
[
"BSD-3-Clause"
] | null | null | null |
src/bounds.py
|
StanfordAHA/Configuration
|
a5d404433d32b0ac20544d5bafa9422c979afc16
|
[
"BSD-3-Clause"
] | null | null | null |
###############################################################################
# file -- bounds.py --
# Top contributors (to current version):
# Nestan Tsiskaridze
# This file is part of the configuration finder for the Stanford AHA project.
# Copyright (c) 2021 by the authors listed in the file AUTHORS
# in the top-level source directory and their institutional affiliations.
# All rights reserved. See the file LICENSE in the top-level source
# directory for licensing information.
#
# Defines optional bound constraints imposed on a configuration.
# These are design specific constraints.
###############################################################################
import smt_switch as ss
import smt_switch.primops as po
from operators import *
#for each type of range read/write: (range_0 + 2) * (range_1 + 2) * …. * (range_i + 2) <= sequence_length for i < dimensionality.
def bound_range_prod_term (u, module_conf, write_ranges, CLK_CYCLES, symbols, solver):
    """For every dimensionality d in 1..6, assert that whenever the
    dimensionality config ``module_conf`` equals d at time 0, the product
    (range_0 + 2) * (range_1 + 2) * ... * (range_{d-1} + 2) fits in the
    schedule length CLK_CYCLES.

    For each d the asserted implication conjoins, in this order:
      * CLK_CYCLES >= product and 1 <= product (the whole product is bounded);
      * for d >= 2, each factor range_i + 2 <= CLK_CYCLES;
      * each proper suffix product of length >= 2 <= CLK_CYCLES;
      * overflow side conditions: uaddo on every range_i + 2 and umulo on
        every partial multiplication, so the bit-vector arithmetic above is
        exact (no wrap-around).

    This rewrite fixes two copy-paste defects of the unrolled original:
    the d == 6 case bounded range_4 + 2 twice and never bounded range_5 + 2,
    and the umulo checks for d == 5 and d == 6 were stale copies of the
    d == 4 set, leaving the actual inner multiplications unchecked.
    ``symbols`` is accepted for signature compatibility but unused here.
    """
    # Never index past the ranges we were actually given.
    max_dim = min(6, len(write_ranges))
    for d in range(1, max_dim + 1):
        sort0 = write_ranges[0].get_sort()
        clk0 = solver.make_term(CLK_CYCLES, sort0)
        # factors[i] is the term (range_i at time 0) + 2.
        factors = [solver.make_term(po.BVAdd,
                                    u.at_time(write_ranges[i], 0),
                                    solver.make_term(2, write_ranges[i].get_sort()))
                   for i in range(d)]

        def suffix_prod(i, factors=factors, d=d):
            # Right-associated product factors[i] * (factors[i+1] * (...)),
            # mirroring the nesting shape of the original unrolled terms.
            p = factors[d - 1]
            for j in range(d - 2, i - 1, -1):
                p = solver.make_term(po.BVMul, factors[j], p)
            return p

        conjuncts = [
            solver.make_term(po.BVUge, clk0, suffix_prod(0)),
            solver.make_term(po.BVUle, solver.make_term(1, sort0), suffix_prod(0)),
        ]
        if d >= 2:
            # Each individual factor must fit in the schedule length.
            for i in range(d):
                conjuncts.append(solver.make_term(
                    po.BVUle, factors[i],
                    solver.make_term(CLK_CYCLES, write_ranges[i].get_sort())))
            # Every proper suffix product (length 2 .. d-1) is bounded too.
            for i in range(d - 2, 0, -1):
                conjuncts.append(solver.make_term(po.BVUle, suffix_prod(i), clk0))
        # No overflow in any range_i + 2 ...
        for i in range(d):
            conjuncts.append(uaddo(u.at_time(write_ranges[i], 0),
                                   solver.make_term(2, write_ranges[i].get_sort()),
                                   solver))
        # ... nor in any of the nested multiplications of the product.
        for i in range(d - 2, -1, -1):
            conjuncts.append(umulo(factors[i], suffix_prod(i + 1), solver))
        # Fold into the right-nested And(a, And(b, ...)) shape of the original.
        body = conjuncts[-1]
        for c in reversed(conjuncts[:-1]):
            body = solver.make_term(po.And, c, body)
        solver.assert_formula(solver.make_term(
            po.Implies,
            solver.make_term(po.Equal, u.at_time(module_conf, 0),
                             solver.make_term(d, module_conf.get_sort())),
            body))
# Set 0 on values (of ranges and strides) indexed beyond dimensionality.
def set_zero_beyond_dimensionality (u, dim, dim_val, confs, symbols, solver):
    """When the dimensionality config ``dim`` equals ``dim_val`` at time 0,
    constrain the config registers listed in ``confs`` from index ``dim_val``
    onward: a bracketed name ``foo[i]`` with i >= dim_val is forced to 0,
    while an unbracketed name is bounded by 2**dim_val - 1.

    The bracket index is parsed with base 0 (``int(..., 0)``), so prefixed
    literals in the name are honoured.
    """
    # Antecedent shared by every assertion: dimensionality == dim_val.
    dim_is_val = solver.make_term(
        po.Equal, u.at_time(dim, 0), solver.make_term(dim_val, dim.get_sort()))
    for name in confs[dim_val:]:
        lbracket = name.find('[')
        if lbracket < 0:
            sym = symbols[name]
            bound = solver.make_term(
                po.BVUle, u.at_time(sym, 0),
                solver.make_term(2**dim_val - 1, sym.get_sort()))
            solver.assert_formula(solver.make_term(po.Implies, dim_is_val, bound))
        elif int(name[lbracket + 1:name.find(']')], 0) >= dim_val:
            sym = symbols[name]
            zero = solver.make_term(
                po.Equal, u.at_time(sym, 0), solver.make_term(0, sym.get_sort()))
            solver.assert_formula(solver.make_term(po.Implies, dim_is_val, zero))
# Add bounds on ranges, strides, starting addresses, dimensionalities (design specific).
def bound_ranges_strides_st_addr_dim (u, add_bounds, agg_set, tb_set, sram_set, valid_in_cycle, CLK_CYCLES, dim_max_val, m, symbols, group_ids, config_names, stride_start_addr_ids, solver):
    """Assert design-specific bounds on starting addresses, ranges, strides
    and dimensionalities of every configuration register.

    Parameters (types come from the surrounding project):
      u                 -- unroller; u.at_time(term, 0) yields the term at cycle 0
      add_bounds        -- must be truthy; callers only invoke this when bounds are enabled
      agg_set, tb_set, sram_set -- select which memory sub-structure's bit-width limits apply
      valid_in_cycle    -- first cycle in which valid is 1 (lower bound for sched addresses)
      CLK_CYCLES        -- schedule length; upper bound for sched addresses and ranges
      dim_max_val       -- maximum dimensionality value (6 in this design)
      m                 -- module model exposing groups_data and write_ids
      symbols           -- map from config name to solver symbol
      group_ids, config_names, stride_start_addr_ids -- naming metadata
      solver            -- smt_switch solver used to build and assert terms
    """
    assert(add_bounds)
    # Schedule starting addresses must lie strictly inside the schedule
    # whenever the group is active (dimensionality > 0).
    for id in group_ids:
        for addr in m.groups_data["starting_addr"][id].keys():
            if "sched" in addr:
                st_addr = symbols[addr]
                dim = list(m.groups_data["dimensionality"][id].values())
                assert len(dim) == 1
                solver.assert_formula(solver.make_term(po.Implies, solver.make_term(po.BVUgt, u.at_time(dim[0], 0), solver.make_term(0, dim[0].get_sort())), solver.make_term(po.BVUlt, u.at_time(st_addr,0), solver.make_term(CLK_CYCLES, st_addr.get_sort()))))
    for conf in config_names:
        if "ranges" in conf:
            # Every range value is bounded by the schedule length.
            module_conf = symbols[conf]
            solver.assert_formula(solver.make_term(po.BVUle, u.at_time(module_conf,0), solver.make_term(CLK_CYCLES, module_conf.get_sort())))
        if "dimensionality" in conf:
            #each dimensionality <= 6
            module_conf = symbols[conf]
            solver.assert_formula(solver.make_term(po.BVUle, u.at_time(module_conf,0), solver.make_term(dim_max_val, module_conf.get_sort())))
            # Zero out range entries indexed at or beyond each possible
            # dimensionality value (id in conf matches the group by substring).
            for id in m.groups_data["ranges"].keys():
                if id in conf:
                    ranges = m.groups_data["ranges"][id]  # NOTE(review): unused local, kept for parity with the strides loop
                    for i in range(0,dim_max_val+1):
                        set_zero_beyond_dimensionality(u, module_conf, i, list(m.groups_data["ranges"][id].keys()), symbols, solver)
            # Same treatment for stride entries.
            for id in m.groups_data["strides"].keys():
                if id in conf:
                    for i in range(0,dim_max_val+1):
                        strides = m.groups_data["strides"][id]  # NOTE(review): unused local
                        set_zero_beyond_dimensionality(u, module_conf, i, list(m.groups_data["strides"][id].keys()), symbols, solver)
            #i >= dimensionality --> range_i = 0
            # Product-of-ranges bound; see bound_range_prod_term.
            for id in m.groups_data["ranges"].keys():
                if id in conf:
                    confs = list(m.groups_data["ranges"][id].values())
                    bound_range_prod_term (u, module_conf, confs, CLK_CYCLES, symbols, solver)
            # A group with dimensionality 0 pins all its starting addresses to 0.
            for id in m.groups_data["starting_addr"].keys():
                if id in conf:
                    for addr in m.groups_data["starting_addr"][id].keys():
                        st_addr = symbols[addr]
                        solver.assert_formula(solver.make_term(po.Implies, solver.make_term(po.Equal, u.at_time(module_conf,0), solver.make_term(0, module_conf.get_sort())), solver.make_term(po.Equal, u.at_time(st_addr,0), solver.make_term(0, st_addr.get_sort()))))
    if agg_set or tb_set or sram_set:
        # Bit-width limits for non-schedule addresses and strides; the mask
        # value depends on which sub-structure (agg/tb/sram) is configured
        # and on whether this group id is on the write side.
        for keyword in stride_start_addr_ids.keys():
            bw_val = None
            if agg_set:
                # stride_start_addr_ids = {
                # "in2buf_autovec_write": ["port_sel_addr", "agg_read_addr_gen_1", "input_addr_gen", "agg_read_addr_gen_0", "input_sched_gen"], #read 2
                # "in2buf_0": ["agg_write_addr_gen_0", "agg_write_sched_gen_0"], #write 3
                # "in2buf_1": ["agg_write_addr_gen_1", "agg_write_sched_gen_1"], #write 3
                bw_val = 2**2-1
                if keyword in m.write_ids:
                    bw_val = 2**4-1#3
            if tb_set:
                # stride_start_addr_ids = {
                ## "buf2out_autovec_read": ["output_sched_gen", "output_addr_gen"], #write 2
                # "buf2out_read_0": ["tb_read_addr_gen_0", "tb_read_sched_gen_0"], #read 3
                # "buf2out_read_1": ["tb_read_sched_gen_1", "tb_read_addr_gen_1"] #read 3
                # }
                bw_val = 2**3-1
                if keyword in m.write_ids:
                    bw_val = 2**1-1# since we separate both for writes and reads we don't need 2
            if sram_set:
                # stride_start_addr_ids = {
                # "in2buf_autovec_write": ["port_sel_addr", "agg_read_addr_gen_1", "input_addr_gen", "agg_read_addr_gen_0", "input_sched_gen"], $write 9
                # "buf2out_autovec_read": ["output_sched_gen", "output_addr_gen"] #read 9
                # }
                bw_val = 2**9-1
            # NOTE(review): `id` is unused in the two loops below, so each
            # bound is asserted once per entry of stride_start_addr_ids[keyword]
            # — redundant but harmless; confirm whether a filter on `id` was intended.
            for id in stride_start_addr_ids[keyword]:
                for addr in m.groups_data["starting_addr"][keyword].keys():
                    if "sched" not in addr:
                        st_addr = symbols[addr]
                        solver.assert_formula(solver.make_term(po.BVUle, u.at_time(st_addr,0), solver.make_term(bw_val, st_addr.get_sort())))
            # if "strides" in conf:
            # for id in group_ids:
            # for id_stride in stride_start_addr_ids[id]:
            # if id_stride in conf:
            # m.groups_data["strides"][id][conf] = module_conf
            # break
            for id in stride_start_addr_ids[keyword]:
                for strd in m.groups_data["strides"][keyword].keys():
                    if "sched" not in strd:
                        st_strd = symbols[strd]
                        solver.assert_formula(solver.make_term(po.BVUle, u.at_time(st_strd,0), solver.make_term(bw_val, st_strd.get_sort())))
    #add a constraint: sched_starting_addr cannot be less than the first cycle the valid = 1
    for id in group_ids:
        dim_module = set(m.groups_data["dimensionality"][id].values())
        assert len(dim_module) == 1
        dim = dim_module.pop()
        for addr in m.groups_data["starting_addr"][id].keys():
            if "sched" in addr:
                st_addr = symbols[addr]
                solver.assert_formula(solver.make_term(po.Implies, solver.make_term(po.BVUgt, u.at_time(dim, 0), solver.make_term(0, dim.get_sort())), solver.make_term(po.BVUge, u.at_time(st_addr,0), solver.make_term(valid_in_cycle, st_addr.get_sort()))))
| 71.427699
| 266
| 0.699524
| 6,050
| 35,071
| 3.779174
| 0.029587
| 0.256735
| 0.35943
| 0.251924
| 0.924291
| 0.91397
| 0.903254
| 0.897262
| 0.890876
| 0.873819
| 0
| 0.026289
| 0.123635
| 35,071
| 490
| 267
| 71.573469
| 0.717521
| 0.065581
| 0
| 0.798544
| 0
| 0
| 0.005958
| 0
| 0
| 0
| 0
| 0
| 0.043689
| 1
| 0.007282
| false
| 0
| 0.007282
| 0
| 0.014563
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
a20aa2c48906a72c37423ee4b6063b363bdaa7ea
| 2,299
|
py
|
Python
|
AdventOfCode2021/Day17/Day17.py
|
MattTitmas/AdventOfCode
|
36be4f6bf973f77ff93b08dc69c977bb11951f27
|
[
"MIT"
] | null | null | null |
AdventOfCode2021/Day17/Day17.py
|
MattTitmas/AdventOfCode
|
36be4f6bf973f77ff93b08dc69c977bb11951f27
|
[
"MIT"
] | null | null | null |
AdventOfCode2021/Day17/Day17.py
|
MattTitmas/AdventOfCode
|
36be4f6bf973f77ff93b08dc69c977bb11951f27
|
[
"MIT"
] | null | null | null |
def part1(path="input.txt"):
    """Solve AoC 2021 Day 17 part 1.

    Reads a target area of the form ``target area: x=a..b, y=c..d`` from
    *path* and returns the greatest height reached by any probe launch
    that eventually lands inside the target.

    Args:
        path: Input file to read (defaults to ``input.txt`` for backward
            compatibility with the original script).

    Returns:
        The maximum y position attained over all hitting launches, or
        ``-inf`` if no launch hits the target.
    """
    # Parse "x=a..b" and "y=c..d" into [a, b] and [c, d]; close the file
    # deterministically (the original leaked the handle).
    with open(path) as f:
        bounds = [[int(n) for n in part.split("=")[1].split("..")]
                  for part in f.read().split(": ")[1].split(", ")]
    minX, maxX = min(bounds[0]), max(bounds[0])
    minY, maxY = min(bounds[1]), max(bounds[1])
    best = float("-inf")
    for x in range(0, maxX + 1):
        # Any initial y above -(minY - 1) overshoots the target on the way
        # back down, so this range bounds the search.
        for y in range(-(minY - 1), minY - 1, -1):
            position = [0, 0]
            velocity = [x, y]
            peak = float("-inf")
            while True:
                position[0] += velocity[0]
                position[1] += velocity[1]
                peak = max(peak, position[1])
                # Drag pulls x velocity toward 0; gravity decrements y.
                velocity[0] += (-1 if velocity[0] > 0 else (1 if velocity[0] < 0 else 0))
                velocity[1] -= 1
                if position[0] > maxX or position[1] < minY:
                    break  # overshot: the probe can never re-enter
                if minX <= position[0] <= maxX and minY <= position[1] <= maxY:
                    best = max(peak, best)
                    break  # hit the target
    return best
def part2(path="input.txt"):
    """Solve AoC 2021 Day 17 part 2.

    Reads a target area of the form ``target area: x=a..b, y=c..d`` from
    *path* and counts the distinct initial velocities whose probe lands
    inside the target area at some step.

    Args:
        path: Input file to read (defaults to ``input.txt`` for backward
            compatibility with the original script).

    Returns:
        The number of initial (x, y) velocities that hit the target.
    """
    # Parse "x=a..b" and "y=c..d" into [a, b] and [c, d]; close the file
    # deterministically (the original leaked the handle).
    with open(path) as f:
        bounds = [[int(n) for n in part.split("=")[1].split("..")]
                  for part in f.read().split(": ")[1].split(", ")]
    minX, maxX = min(bounds[0]), max(bounds[0])
    minY, maxY = min(bounds[1]), max(bounds[1])
    total = 0
    for x in range(0, maxX + 1):
        # Any initial y above -(minY - 1) overshoots the target on the way
        # back down, so this range bounds the search.
        for y in range(-(minY - 1), minY - 1, -1):
            position = [0, 0]
            velocity = [x, y]
            while True:
                position[0] += velocity[0]
                position[1] += velocity[1]
                # Drag pulls x velocity toward 0; gravity decrements y.
                velocity[0] += (-1 if velocity[0] > 0 else (1 if velocity[0] < 0 else 0))
                velocity[1] -= 1
                if position[0] > maxX or position[1] < minY:
                    break  # overshot: the probe can never re-enter
                if minX <= position[0] <= maxX and minY <= position[1] <= maxY:
                    total += 1
                    break  # hit the target; count this velocity once
    return total
print(f"answer to part1: {part1()}")
print(f"answer to part2: {part2()}")
| 37.688525
| 121
| 0.477599
| 305
| 2,299
| 3.6
| 0.163934
| 0.065574
| 0.040073
| 0.051002
| 0.81694
| 0.81694
| 0.81694
| 0.81694
| 0.81694
| 0.81694
| 0
| 0.051922
| 0.354937
| 2,299
| 60
| 122
| 38.316667
| 0.688469
| 0
| 0
| 0.769231
| 0
| 0
| 0.040017
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0
| 0
| 0.076923
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a20b9e47fcfa6e66acf6493381ae8c45334eea94
| 3,244
|
py
|
Python
|
src/data/types/_date.py
|
Freonius/tranquillity
|
bb190b4a8facf643d5018a710100b3ff45d6d640
|
[
"MIT"
] | null | null | null |
src/data/types/_date.py
|
Freonius/tranquillity
|
bb190b4a8facf643d5018a710100b3ff45d6d640
|
[
"MIT"
] | 20
|
2021-12-31T15:28:20.000Z
|
2022-02-15T18:24:16.000Z
|
src/data/types/_date.py
|
Freonius/tranquillity
|
bb190b4a8facf643d5018a710100b3ff45d6d640
|
[
"MIT"
] | null | null | null |
from datetime import date
from typing import Union, Any
from graphene import DateTime as GqlDateTime, NonNull
from sqlalchemy import Column, Date as SqlDate
from ._dtype import DType
from ._nsdtype import NSDType
from ._datetime import _convert
class Date(DType[date]):
    """Nullable date field: wraps ``datetime.date`` for GraphQL/SQLAlchemy use."""

    _t = date
    _format: str = '%Y-%m-%d'

    def iter_value(self) -> Union[str, None]:
        """Render the current value with ``_format``, or None when unset."""
        return None if self.value is None else self.value.strftime(self._format)

    def _ggt(self) -> Any:
        # Graphene type used for this field.
        return GqlDateTime

    def _value_setter(self, val: Union[date, None, str]) -> None:
        # Normalize str/datetime input to a plain date before delegating.
        converted = _convert(val)
        super()._value_setter(None if converted is None else converted.date())

    def __init__(self,
                 value: Union[date, str, None] = None,
                 *,
                 format: Union[str, None] = '%Y-%m-%d',
                 field: Union[str, None] = None,
                 is_id: bool = False,
                 required: bool = True,
                 default: Union[date, str, None] = None,
                 nullable: bool = True, json_field: Union[str, None] = None) -> None:
        def _coerce(v: Union[date, str, None]) -> Union[date, None]:
            # Accepts str/datetime/date/None and yields a date or None.
            converted = _convert(v)
            return converted.date() if converted is not None else None

        value = _coerce(value)
        default = _coerce(default)
        # Explicit None check (an empty format string would be kept as-is).
        self._format = '%Y-%m-%d' if format is None else format
        super().__init__(field, value, is_id, required, default, nullable, json_field)

    def get_sqlalchemy_column(self) -> Column:
        """Build the SQLAlchemy ``Date`` column for this field."""
        return Column(
            self.field, SqlDate,
            default=self._default,
            nullable=self._nullable,
            primary_key=self.is_primary_key,
        )
class NSDate(NSDType[date]):
    """Non-null date field: the NonNull-GraphQL counterpart of ``Date``."""

    _t = date
    _format: str = '%Y-%m-%d'

    def iter_value(self) -> Union[str, None]:
        """Render the current value with ``_format``, or None when unset."""
        return None if self.value is None else self.value.strftime(self._format)

    def _ggt(self) -> Any:
        # Wrap the graphene type in NonNull, forwarding any kwargs.
        return lambda **kwargs: NonNull(GqlDateTime, **kwargs)

    def _value_setter(self, val: Union[date, None, str]) -> None:
        # Normalize str/datetime input to a plain date before delegating.
        converted = _convert(val)
        super()._value_setter(None if converted is None else converted.date())

    def __init__(self,
                 value: Union[date, str, None] = None,
                 *,
                 format: Union[str, None] = '%Y-%m-%d',
                 field: Union[str, None] = None,
                 is_id: bool = False,
                 required: bool = True,
                 default: Union[date, str, None] = None,
                 json_field: Union[str, None] = None) -> None:
        def _coerce(v: Union[date, str, None]) -> Union[date, None]:
            # Accepts str/datetime/date/None and yields a date or None.
            converted = _convert(v)
            return converted.date() if converted is not None else None

        value = _coerce(value)
        default = _coerce(default)
        # Explicit None check (an empty format string would be kept as-is).
        self._format = '%Y-%m-%d' if format is None else format
        super().__init__(field, value, is_id, required, default, json_field)

    def get_sqlalchemy_column(self) -> Column:
        """Build the SQLAlchemy ``Date`` column for this field."""
        # NOTE(review): reads self._nullable although this __init__ takes no
        # nullable argument — presumably set by NSDType; confirm.
        return Column(
            self.field, SqlDate,
            default=self._default,
            nullable=self._nullable,
            primary_key=self.is_primary_key,
        )
| 31.803922
| 86
| 0.547164
| 381
| 3,244
| 4.480315
| 0.141732
| 0.057411
| 0.056239
| 0.037493
| 0.822496
| 0.822496
| 0.822496
| 0.822496
| 0.822496
| 0.822496
| 0
| 0
| 0.344328
| 3,244
| 101
| 87
| 32.118812
| 0.802539
| 0
| 0
| 0.804598
| 0
| 0
| 0.014797
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114943
| false
| 0
| 0.08046
| 0.045977
| 0.356322
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bf63194649eafa56ce2037a17f715ed2843e82fc
| 1,746
|
py
|
Python
|
RandomForestController.py
|
iliaskaras/ML-BreastCancer
|
5e2bae2d6fd5e76d4164f8be58f9e2951d053d3c
|
[
"Apache-2.0"
] | null | null | null |
RandomForestController.py
|
iliaskaras/ML-BreastCancer
|
5e2bae2d6fd5e76d4164f8be58f9e2951d053d3c
|
[
"Apache-2.0"
] | null | null | null |
RandomForestController.py
|
iliaskaras/ML-BreastCancer
|
5e2bae2d6fd5e76d4164f8be58f9e2951d053d3c
|
[
"Apache-2.0"
] | 1
|
2020-08-31T22:20:46.000Z
|
2020-08-31T22:20:46.000Z
|
from RandomForestClf import RandomForestClf

# Cross-validate a random forest for each (max depth, split criterion) pair.
# The loop preserves the original execution order: depths descending, "gini"
# before "entropy" at each depth. (The original's copy-pasted stanzas carried
# comments claiming depths 12 and 8 while the code actually used 15 and 10;
# the single source of truth is now this tuple.)
for max_depth in (20, 15, 10, 5, 2):
    for criterion in ("gini", "entropy"):
        randomForest = RandomForestClf()
        randomForest.randomForestClassifier(max_depth, criterion)
        randomForest.getCrossValidation()
| 23.28
| 68
| 0.705613
| 117
| 1,746
| 10.529915
| 0.153846
| 0.219156
| 0.316558
| 0.49513
| 0.922078
| 0.710227
| 0.710227
| 0.450487
| 0.450487
| 0
| 0
| 0.014277
| 0.07732
| 1,746
| 74
| 69
| 23.594595
| 0.750466
| 0.190149
| 0
| 0.725
| 0
| 0
| 0.03909
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.025
| 0
| 0.025
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
bfa0044ec0927b1da62b05e87a782d8a3df356e8
| 153,855
|
py
|
Python
|
tests/data/cids.py
|
gavin-lin/c3-cli
|
21820fd9efb8ec35ed4dad61b7042a4132f63aae
|
[
"BSD-3-Clause"
] | null | null | null |
tests/data/cids.py
|
gavin-lin/c3-cli
|
21820fd9efb8ec35ed4dad61b7042a4132f63aae
|
[
"BSD-3-Clause"
] | 2
|
2019-11-05T03:54:48.000Z
|
2021-02-08T20:42:25.000Z
|
tests/data/cids.py
|
gavin-lin/c3-cli
|
21820fd9efb8ec35ed4dad61b7042a4132f63aae
|
[
"BSD-3-Clause"
] | 1
|
2021-07-14T02:28:58.000Z
|
2021-07-14T02:28:58.000Z
|
cids_before_shrink = \
{'201702-25401': {'audio_name': 'Lewisburg MROM 0',
'audio_pciid': '8086:a1f0',
'codename': 'Matira 5',
'form_factor': 'Desktop',
'kernel': '4.15.0-1027-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Precision Tower 7820',
'network': 'Intel - 8086:15b9',
'processor': 'Intel(R) Xeon(R) Platinum 8153 CPU @ 2.00GHz',
'video': 'nVidia - 10de:107c',
'wireless': ''},
'201703-25448': {'audio_name': 'Audio is not found',
'audio_pciid': 'Unknown pciid',
'codename': 'Pearl',
'form_factor': 'Workstation (Desktop)',
'kernel': '4.15.0-1027-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Precision Rack 7920',
'network': 'Intel - 8086:1521, Intel - 8086:1521, Intel - '
'8086:1521, Intel - 8086:1521',
'processor': 'Intel(R) Xeon(R) Platinum 8276 CPU @ 2.20GHz',
'video': 'Matrox Electronics Systems Ltd. - 102b:0536',
'wireless': ''},
'201708-25695': {'audio_name': 'Sunrise Point-LP HD Audio',
'audio_pciid': '8086:9d71',
'codename': 'Italia 13',
'form_factor': 'Laptop',
'kernel': '4.15.0-1008-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'XPS 13 9370',
'network': '',
'processor': 'Intel(R) Core(TM) i5-7200U CPU @ 2.50GHz',
'video': 'Intel - 8086:5916',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201712-26015': {'audio_name': 'Sunrise Point-LP HD Audio',
'audio_pciid': '8086:9d71',
'codename': 'Italia 13',
'form_factor': 'Laptop',
'kernel': '4.15.0-1008-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'XPS 13 9370',
'network': '',
'processor': 'Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz',
'video': 'Intel - 8086:5917',
'wireless': 'Intel - 8086:24fd'},
'201712-26038': {'audio_name': 'Sunrise Point-LP HD Audio',
'audio_pciid': '8086:9d71',
'codename': 'Italia 13',
'form_factor': 'Laptop',
'kernel': '4.15.0-1008-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'XPS 13 9370',
'network': '',
'processor': 'Intel(R) Core(TM) i5-8350U CPU @ 1.70GHz',
'video': 'Intel - 8086:5917',
'wireless': 'Intel - 8086:24fd'},
'201806-26285': {'audio_name': 'Audio is not found',
'audio_pciid': 'Unknown pciid',
'codename': 'Chelmsford',
'form_factor': 'Smart NIC',
'kernel': '4.18.0-1001-mellanox',
'location': 'ceqa',
'make': 'Mellanox Technologies',
'model': 'MBF1M332A',
'network': '',
'processor': 'aarch64',
'video': '',
'wireless': ''},
'201807-26295': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Bucky N5 14',
'form_factor': 'Laptop',
'kernel': '4.15.0-1012-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 5480',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i7-8565U CPU @ 1.80GHz',
'video': 'Intel - 8086:3ea0, nVidia - 10de:1d10',
'wireless': 'Intel - 8086:9df0'},
'201807-26296': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Bucky N5 15',
'form_factor': 'Laptop',
'kernel': '4.15.0-1012-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 5580',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i7-8565U CPU @ 1.80GHz',
'video': 'Intel - 8086:3ea0, nVidia - 10de:1d10',
'wireless': 'Intel - 8086:9df0'},
'201807-26297': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Bucky N5 15',
'form_factor': 'Laptop',
'kernel': '4.15.0-1012-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 5580',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i3-8145U CPU @ 2.10GHz',
'video': 'Intel - 8086:3ea0',
'wireless': 'Atheros Communications - 168c:0036'},
'201807-26317': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Kylo Ren 15 MLK',
'form_factor': 'Laptop',
'kernel': '4.15.0-1012-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 7580',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i7-8565U CPU @ 1.80GHz',
'video': 'Intel - 8086:3ea0, nVidia - 10de:1d10',
'wireless': 'Intel - 8086:2526'},
'201807-26324': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Kylo Ren 13 MLK',
'form_factor': 'Laptop',
'kernel': '4.15.0-1012-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 7380',
'network': '',
'processor': 'Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz',
'video': 'Intel - 8086:3ea0',
'wireless': 'Intel - 8086:2526'},
'201807-26342': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Berlinetta P MLK CFL',
'form_factor': 'Laptop',
'kernel': '4.15.0-1019-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Precision 5530',
'network': '',
'processor': 'Intel(R) Core(TM) i7-8850H CPU @ 2.60GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1cbb',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201808-26364': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Punisher WHL',
'form_factor': 'All In One',
'kernel': '4.15.0-1021-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 3480 AIO',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz',
'video': 'Intel - 8086:3ea0, nVidia - 10de:174e',
'wireless': 'Intel - 8086:9df0'},
'201808-26365': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Punisher WHL',
'form_factor': 'All-In-One',
'kernel': '4.15.0-1021-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 3280 AIO',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i3-8145U CPU @ 2.10GHz',
'video': 'Intel - 8086:3ea0',
'wireless': 'Intel - 8086:9df0'},
'201808-26380': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Bucky V5 14',
'form_factor': 'Laptop',
'kernel': '4.15.0-1012-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Vostro 5481',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz',
'video': 'Intel - 8086:3ea0, nVidia - 10de:174d',
'wireless': 'Intel - 8086:9df0'},
'201808-26381': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Bucky V5 14',
'form_factor': 'Laptop',
'kernel': '4.15.0-1012-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Vostro 5481',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz',
'video': 'Intel - 8086:3ea0',
'wireless': 'Intel - 8086:9df0'},
'201808-26383': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Bucky V5 15',
'form_factor': 'Laptop',
'kernel': '4.15.0-1012-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Vostro 5581',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i3-8145U CPU @ 2.10GHz',
'video': 'Intel - 8086:3ea0, nVidia - 10de:174d',
'wireless': 'Atheros Communications - 168c:0036'},
'201808-26386': {'audio_name': 'Raven Ridge HDMI/DP Audio Controller',
'audio_pciid': '1002:15de',
'codename': 'Finn-AMD',
'form_factor': 'Laptop',
'kernel': '4.15.0-1021-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkPad A285',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'AMD Ryzen 5 PRO 2500U w/ Radeon Vega Mobile '
'Gfx',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - 1002:15dd',
'wireless': 'Intel - 8086:2526'},
'201808-26387': {'audio_name': 'Raven Ridge HDMI/DP Audio Controller',
'audio_pciid': '1002:15de',
'codename': 'Windu2-AMD',
'form_factor': 'Laptop',
'kernel': '4.15.0-1021-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkPad A485',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168, '
'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'AMD Ryzen 5 PRO 2500U w/ Radeon Vega Mobile '
'Gfx',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - 1002:15dd',
'wireless': 'Intel - 8086:2526'},
'201808-26397': {'audio_name': 'Raven Ridge HDMI/DP Audio Controller',
'audio_pciid': '1002:15de',
'codename': 'LNA7-DVT1-A1',
'form_factor': 'Laptop',
'kernel': '4.15.0-1026-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 17 3785',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'AMD Ryzen 3 2300U with Radeon Vega Mobile Gfx',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - 1002:15dd',
'wireless': 'Qualcomm Atheros - 168c:0042'},
'201808-26442': {'audio_name': 'Raven Ridge HDMI/DP Audio Controller',
'audio_pciid': '1002:15de',
'codename': 'Loki N3 AMD',
'form_factor': 'Laptop',
'kernel': '4.15.0-1026-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 3585',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'AMD Ryzen 3 2300U with Radeon Vega Mobile Gfx',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - 1002:15dd',
'wireless': 'Atheros Communications - 168c:0036'},
'201808-26443': {'audio_name': 'Raven Ridge HDMI/DP Audio Controller',
'audio_pciid': '1002:15de',
'codename': 'Loki N3 AMD',
'form_factor': 'Laptop',
'kernel': '4.15.0-1026-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 3585',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'AMD Ryzen 5 2500U with Radeon Vega Mobile Gfx',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - 1002:15dd',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201808-26445': {'audio_name': 'Raven Ridge HDMI/DP Audio Controller',
'audio_pciid': '1002:15de',
'codename': 'Loki N3 AMD',
'form_factor': 'Laptop',
'kernel': '4.15.0-1026-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 3785',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'AMD Ryzen 5 2500U with Radeon Vega Mobile Gfx',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - 1002:15dd',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201809-26458': {'audio_name': 'GP107GL High Definition Audio Controller',
'audio_pciid': '10de:0fb9',
'codename': 'Chiron',
'form_factor': 'Laptop',
'kernel': '4.15.0-1025-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkPad P72',
'network': 'Intel - 8086:15bc',
'processor': 'Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1cba',
'wireless': 'Intel - 8086:a370'},
'201809-26462': {'audio_name': 'GP107GL High Definition Audio Controller',
'audio_pciid': '10de:0fb9',
'codename': 'Padme',
'form_factor': 'Laptop',
'kernel': '4.15.0-1025-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkPad P1',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i7-8850H CPU @ 2.60GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1cbb',
'wireless': 'Intel - 8086:a370'},
'201809-26463': {'audio_name': 'GP107GL High Definition Audio Controller',
'audio_pciid': '10de:0fb9',
'codename': 'Padme',
'form_factor': 'Laptop',
'kernel': '4.15.0-1025-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkPad P1',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Xeon(R) E-2176M CPU @ 2.70GHz',
'video': 'Intel - 8086:3e94, nVidia - 10de:1cba',
'wireless': 'Intel - 8086:a370'},
'201809-26472': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'BRECKENRIDGE MLK 15P',
'form_factor': 'Laptop',
'kernel': '4.15.0-1021-oem',
'location': 'oem',
'make': 'Dell',
'model': 'Precision 3530',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Xeon(R) E-2176M CPU @ 2.70GHz',
'video': 'Intel - 8086:3e94, nVidia - 10de:1cbc',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201809-26479': {'audio_name': 'GP107GL High Definition Audio Controller',
'audio_pciid': '10de:0fb9',
'codename': 'Padme',
'form_factor': 'Laptop',
'kernel': '4.15.0-1025-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkPad X1 Extreme',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i5-8400H CPU @ 2.50GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1c8c',
'wireless': 'Intel - 8086:a370'},
'201810-26502': {'audio_name': 'Sunrise Point-LP HD Audio',
'audio_pciid': '8086:9d71',
'codename': 'Pinehills',
'form_factor': 'Laptop',
'kernel': '4.15.0-1021-oem',
'location': 'mainstream',
'make': 'Dell',
'model': 'Latitude 3300',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Pentium(R) CPU 4415U @ 2.30GHz',
'video': 'Intel - 8086:5906',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201810-26503': {'audio_name': 'Sunrise Point-LP HD Audio',
'audio_pciid': '8086:9d71',
'codename': 'Pinehills',
'form_factor': 'Laptop',
'kernel': '4.15.0-1021-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Latitude 3300',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i5-8250U CPU @ 1.60GHz',
'video': 'Intel - 8086:5917',
'wireless': 'Intel - 8086:24fd'},
'201810-26510': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Italia WHL',
'form_factor': 'Laptop',
'kernel': '4.15.0-1027-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'XPS 13 9380',
'network': '',
'processor': 'Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz',
'video': 'Intel - 8086:3ea0',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201810-26512': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Italia WHL',
'form_factor': 'Laptop',
'kernel': '4.15.0-1027-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'XPS 13 9380',
'network': '',
'processor': 'Intel(R) Core(TM) i7-8565U CPU @ 1.80GHz',
'video': 'Intel - 8086:3ea0',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201810-26513': {'audio_name': '200 Series PCH HD Audio',
'audio_pciid': '8086:a2f0',
'codename': 'Z4G4',
'form_factor': 'Desktop',
'kernel': '4.15.0-1024-oem',
'location': 'mainstream',
'make': 'HP',
'model': 'Z4 G4 Workstation',
'network': 'Intel - 8086:15b7, Intel - 8086:1533',
'processor': 'Intel(R) Xeon(R) W-2145 CPU @ 3.70GHz',
'video': 'nVidia - 10de:1bb0',
'wireless': ''},
'201810-26515': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Italia WHL',
'form_factor': 'Laptop',
'kernel': '4.15.0-1027-oem',
'location': 'mainstream',
'make': 'Dell',
'model': 'XPS 13 9380',
'network': '',
'processor': 'Intel(R) Core(TM) i3-8145U CPU @ 2.10GHz',
'video': 'Intel - 8086:3ea0',
'wireless': 'Intel - 8086:24fd'},
'201810-26516': {'audio_name': 'Sunrise Point-LP HD Audio',
'audio_pciid': '8086:9d71',
'codename': 'Loki N3 V3 KBL',
'form_factor': 'Laptop',
'kernel': '4.15.0-1021-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Vostro 3481',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i5-7200U CPU @ 2.50GHz',
'video': 'Intel - 8086:5916',
'wireless': 'Qualcomm Atheros - 168c:0042'},
'201810-26517': {'audio_name': 'Sunrise Point-LP HD Audio',
'audio_pciid': '8086:9d71',
'codename': 'Loki N3 V3 KBL',
'form_factor': 'Laptop',
'kernel': '4.15.0-1021-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Vostro 3481',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i3-7020U CPU @ 2.30GHz',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - '
'1002:6665, Intel - 8086:5921',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201810-26518': {'audio_name': 'Sunrise Point-LP HD Audio',
'audio_pciid': '8086:9d71',
'codename': 'Loki N3 V3 KBL',
'form_factor': 'Laptop',
'kernel': '4.15.0-1021-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 3481',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i7-7500U CPU @ 2.70GHz',
'video': 'Intel - 8086:5916',
'wireless': 'Atheros Communications - 168c:0036'},
'201810-26519': {'audio_name': 'Sunrise Point-LP HD Audio',
'audio_pciid': '8086:9d71',
'codename': 'Loki N3 V3 KBL',
'form_factor': 'Laptop',
'kernel': '4.15.0-1021-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 3481',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i3-7020U CPU @ 2.30GHz',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - '
'1002:6665, Intel - 8086:5921',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201810-26520': {'audio_name': 'Sunrise Point-LP HD Audio',
'audio_pciid': '8086:9d71',
'codename': 'Loki N3 V3 KBL',
'form_factor': 'Laptop',
'kernel': '4.15.0-1021-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 3781',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i5-7200U CPU @ 2.50GHz',
'video': 'Intel - 8086:5916',
'wireless': 'Qualcomm Atheros - 168c:0042'},
'201810-26521': {'audio_name': 'Sunrise Point-LP HD Audio',
'audio_pciid': '8086:9d71',
'codename': 'Loki N3 V3 KBL',
'form_factor': 'Laptop',
'kernel': '4.15.0-1021-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 3781',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i3-7020U CPU @ 2.30GHz',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - '
'1002:6665, Intel - 8086:5921',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201810-26522': {'audio_name': 'Unknown',
'audio_pciid': '8086:3198',
'codename': 'LOKI N3 14',
'form_factor': 'Laptop',
'kernel': '4.15.0-1023-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 3482',
'network': '',
'processor': 'Intel(R) Celeron(R) N4000 CPU @ 1.10GHz',
'video': 'Intel - 8086:3185',
'wireless': 'Intel - 8086:31dc'},
'201810-26523': {'audio_name': 'Unknown',
'audio_pciid': '8086:3198',
'codename': 'LOKI N3 14',
'form_factor': 'Laptop',
'kernel': '4.15.0-1023-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 3482',
'network': '',
'processor': 'Intel(R) Pentium(R) Silver N5000 CPU @ 1.10GHz',
'video': 'Intel - 8086:3184',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201810-26524': {'audio_name': 'Unknown',
'audio_pciid': '8086:3198',
'codename': 'LOKI V3 15',
'form_factor': 'Laptop',
'kernel': '4.15.0-1023-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Vostro 3582',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Celeron(R) N4000 CPU @ 1.10GHz',
'video': 'Intel - 8086:3185',
'wireless': 'Atheros Communications - 168c:0036'},
'201810-26525': {'audio_name': 'Unknown',
'audio_pciid': '8086:3198',
'codename': 'LOKI N3 17',
'form_factor': 'Laptop',
'kernel': '4.15.0-1023-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 3782',
'network': '',
'processor': 'Intel(R) Celeron(R) N4000 CPU @ 1.10GHz',
'video': 'Intel - 8086:3185',
'wireless': 'Atheros Communications - 168c:0036'},
'201810-26526': {'audio_name': 'Unknown',
'audio_pciid': '8086:3198',
'codename': 'LOKI N3 17',
'form_factor': 'Laptop',
'kernel': '4.15.0-1023-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 3782',
'network': '',
'processor': 'Intel(R) Pentium(R) Silver N5000 CPU @ 1.10GHz',
'video': 'Intel - 8086:3184',
'wireless': 'Intel - 8086:31dc'},
'201810-26529': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Loki V3 WHL',
'form_factor': 'Laptop',
'kernel': '4.15.0-1024-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Vostro 3480',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i3-8145U CPU @ 2.10GHz',
'video': 'Intel - 8086:3ea0',
'wireless': 'Intel - 8086:9df0'},
'201810-26530': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Loki N3 WHL',
'form_factor': 'Laptop',
'kernel': '4.15.0-1024-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 3480',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i7-8565U CPU @ 1.80GHz',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - '
'1002:6665, Intel - 8086:3ea0',
'wireless': 'Qualcomm Atheros - 168c:0042'},
'201810-26531': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Loki V3 WHL',
'form_factor': 'Laptop',
'kernel': '4.15.0-1024-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Vostro 3583',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz',
'video': 'Intel - 8086:3ea0',
'wireless': 'Intel - 8086:9df0'},
'201810-26532': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Loki V3 WHL',
'form_factor': 'Laptop',
'kernel': '4.15.0-1024-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Vostro 3580',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i7-8565U CPU @ 1.80GHz',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - '
'1002:6665, Intel - 8086:3ea0',
'wireless': 'Atheros Communications - 168c:0036'},
'201810-26533': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Loki N3 WHL',
'form_factor': 'Laptop',
'kernel': '4.15.0-1024-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 3780',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz',
'video': 'Intel - 8086:3ea0',
'wireless': 'Intel - 8086:9df0'},
'201810-26535': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Italia WHL',
'form_factor': 'Laptop',
'kernel': '4.15.0-1027-oem',
'location': 'mainstream',
'make': 'Dell',
'model': 'XPS 13 9380',
'network': '',
'processor': 'Intel(R) Core(TM) i7-8565U CPU @ 1.80GHz',
'video': 'Intel - 8086:3ea0',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201810-26538': {'audio_name': 'Sunrise Point-LP HD Audio',
'audio_pciid': '8086:9d71',
'codename': 'Loki V3 KBL',
'form_factor': 'Laptop',
'kernel': '4.15.0-1021-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Vostro 3584',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i3-7020U CPU @ 2.30GHz',
'video': 'Intel - 8086:5921',
'wireless': 'Qualcomm Atheros - 168c:0042'},
'201810-26539': {'audio_name': 'Unknown',
'audio_pciid': '8086:3198',
'codename': 'Loki V3 GLK',
'form_factor': 'Laptop',
'kernel': '4.15.0-1023-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Vostro 3582',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Pentium(R) Silver N5000 CPU @ 1.10GHz',
'video': 'Intel - 8086:3184',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201810-26540': {'audio_name': 'GP104 High Definition Audio Controller',
'audio_pciid': '10de:10f0',
'codename': 'ZBook 17 G5',
'form_factor': 'Laptop',
'kernel': '5.0.0-21-generic',
'location': 'mainstream',
'make': 'HP',
'model': 'ZBook 17 G5',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1bbb',
'wireless': 'Intel - 8086:a370'},
'201810-26550': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Loki N3 WHL',
'form_factor': 'Laptop',
'kernel': '4.15.0-1024-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 3583',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i3-8145U CPU @ 2.10GHz',
'video': 'Intel - 8086:3ea0',
'wireless': 'Intel - 8086:9df0'},
'201810-26551': {'audio_name': 'Sunrise Point-LP HD Audio',
'audio_pciid': '8086:9d71',
'codename': 'Loki N3 KBL',
'form_factor': 'Laptop',
'kernel': '4.15.0-1021-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 3584',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i3-7020U CPU @ 2.30GHz',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - '
'1002:6665, Intel - 8086:5921',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201810-26552': {'audio_name': 'Unknown',
'audio_pciid': '8086:3198',
'codename': 'Loki N3 GLK',
'form_factor': 'Laptop',
'kernel': '4.15.0-1023-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 3582',
'network': '',
'processor': 'Intel(R) Pentium(R) Silver N5000 CPU @ 1.10GHz',
'video': 'Intel - 8086:3184',
'wireless': 'Qualcomm Atheros - 168c:0042'},
'201810-26554': {'audio_name': 'Unknown',
'audio_pciid': '8086:3198',
'codename': 'Loki N3 GLK',
'form_factor': 'Laptop',
'kernel': '4.15.0-1023-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 3582',
'network': '',
'processor': 'Intel(R) Celeron(R) N4000 CPU @ 1.10GHz',
'video': 'Intel - 8086:3185',
'wireless': 'Intel - 8086:31dc'},
'201810-26555': {'audio_name': 'Sunrise Point-LP HD Audio',
'audio_pciid': '8086:9d71',
'codename': 'Loki N3 KBL',
'form_factor': 'Laptop',
'kernel': '4.15.0-1021-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 3581',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i5-7200U CPU @ 2.50GHz',
'video': 'Intel - 8086:5916',
'wireless': 'Qualcomm Atheros - 168c:0042'},
'201810-26556': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Loki N3 WHL',
'form_factor': 'Laptop',
'kernel': '4.15.0-1024-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Inspiron 3580',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - '
'1002:6665, Intel - 8086:3ea0',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201810-26595': {'audio_name': 'Unknown',
'audio_pciid': '8086:3198',
'codename': 'Granger6U',
'form_factor': 'Laptop',
'kernel': '4.15.0-1028-oem',
'location': 'ceqa',
'make': 'HP',
'model': '240 G7 Notebook PC',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Celeron(R) N4000 CPU @ 1.10GHz',
'video': 'Intel - 8086:3185',
'wireless': 'Realtek Semiconductor Co., Ltd. - 10ec:d723'},
'201810-26596': {'audio_name': 'Unknown',
'audio_pciid': '8086:3198',
'codename': 'Granger6U',
'form_factor': 'Laptop',
'kernel': '4.15.0-1045-oem',
'location': 'ceqa',
'make': 'HP',
'model': 'HP Laptop 14-ck0025la',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Celeron(R) N4000 CPU @ 1.10GHz',
'video': 'Intel - 8086:3185',
'wireless': 'Realtek Semiconductor Co., Ltd. - 10ec:d723'},
'201810-26598': {'audio_name': 'Unknown',
'audio_pciid': '8086:3198',
'codename': 'Granger6U',
'form_factor': 'Laptop',
'kernel': '4.15.0-1045-oem',
'location': 'ceqa',
'make': 'HP',
'model': 'HP Laptop 14-ck0025la',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Pentium(R) Silver N5000 CPU @ 1.10GHz',
'video': 'Intel - 8086:3184',
'wireless': 'Realtek Semiconductor Co., Ltd. - 10ec:d723'},
'201810-26602': {'audio_name': 'Unknown',
'audio_pciid': '1002:15b3',
'codename': 'Hagrid6U',
'form_factor': 'Laptop',
'kernel': '4.15.0-1028-oem',
'location': 'ceqa',
'make': 'HP',
'model': '245 G7 Notebook PC',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'AMD E2-9000e RADEON R2, 4 COMPUTE CORES 2C+2G',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - 1002:98e4',
'wireless': 'Realtek Semiconductor Co., Ltd. - 10ec:d723'},
'201810-26611': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'BISON MLK MT',
'form_factor': 'Desktop',
'kernel': '4.15.0-1035-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'OptiPlex 7070',
'network': 'Intel - 8086:1533, Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i9-9900 CPU @ 3.10GHz',
'video': 'Intel - 8086:3e98',
'wireless': ''},
'201811-26617': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Gambit SFF CFLR',
'form_factor': 'Desktop',
'kernel': '4.15.0-1029-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 3470',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Celeron(R) G4930 CPU @ 3.20GHz',
'video': 'Intel - 8086:3e93',
'wireless': ''},
'201811-26620': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Kylin MT CFL-R',
'form_factor': 'Desktop',
'kernel': '4.15.0-1029-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'ChengMing 3980',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i3-9100 CPU @ 3.60GHz',
'video': 'Intel - 8086:3e91',
'wireless': ''},
'201811-26621': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Tunnels',
'form_factor': 'Desktop',
'kernel': '4.15.0-1032-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Precision Tower 3430',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i5-9600 CPU @ 3.10GHz',
'video': 'Intel - 8086:3e92',
'wireless': ''},
'201811-26622': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Tunnels',
'form_factor': 'Desktop',
'kernel': '4.15.0-1032-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Precision Tower 3430',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i9-9900 CPU @ 3.10GHz',
'video': 'Intel - 8086:3e98',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201811-26628': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'D9-MFF-COUGAR',
'form_factor': 'Desktop',
'kernel': '4.15.0-1035-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'OptiPlex 3070',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Pentium(R) Gold G5420 CPU @ 3.80GHz',
'video': 'Intel - 8086:3e90',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201811-26630': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Eagle MT CFL-R',
'form_factor': 'Desktop',
'kernel': '4.15.0-1029-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Vostro 3670',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i7-9700 CPU @ 3.00GHz',
'video': 'Intel - 8086:3e98',
'wireless': 'Atheros Communications - 168c:0036'},
'201811-26631': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Eagle SFF CFL-R',
'form_factor': 'Desktop',
'kernel': '4.15.0-1029-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Vostro 3470',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Pentium(R) Gold G5420 CPU @ 3.80GHz',
'video': 'Intel - 8086:3e90',
'wireless': 'Intel - 8086:a370'},
'201811-26633': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Elk MLK',
'form_factor': 'Desktop',
'kernel': '4.15.0-1035-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'OptiPlex 5070',
'network': 'Intel - 8086:15bc',
'processor': 'Intel(R) Core(TM) i5-9600 CPU @ 3.10GHz',
'video': 'Intel - 8086:3e92',
'wireless': 'Qualcomm Atheros - 168c:0042'},
'201811-26634': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Gambit MLK MT CFL-R',
'form_factor': 'Desktop',
'kernel': '4.15.0-1029-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 3670',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i5-9400 CPU @ 2.90GHz',
'video': 'Intel - 8086:3e98',
'wireless': 'Atheros Communications - 168c:0036'},
'201811-26638': {'audio_name': 'Unknown',
'audio_pciid': '6549:2200',
'codename': 'Zuma-P CFL-R',
'form_factor': 'Desktop',
'kernel': '4.15.0-1030-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Precision 3930 Rack',
'network': 'Aquantia Corp. - 1d6a:07b1, Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i5-9600K CPU @ 3.70GHz',
'video': 'Intel - 8086:3e98',
'wireless': ''},
'201811-26647': {'audio_name': 'Sunrise Point-LP HD Audio',
'audio_pciid': '8086:9d71',
'codename': 'Granger14',
'form_factor': 'Laptop',
'kernel': '4.15.0-1030-oem',
'location': 'ceqa',
'make': 'HP',
'model': 'Laptop 14-ck0xxx',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i3-7020U CPU @ 2.30GHz',
'video': 'Intel - 8086:5916',
'wireless': 'Realtek Semiconductor Co., Ltd. - 10ec:d723'},
'201811-26648': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Eagle Light CFL-R',
'form_factor': 'Desktop',
'kernel': '4.15.0-1029-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Vostro 3070',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Pentium(R) Gold G5420 CPU @ 3.80GHz',
'video': 'Intel - 8086:3e93',
'wireless': ''},
'201811-26655': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Bighorn 21.5 AIO MLK',
'form_factor': 'All-In-One',
'kernel': '4.15.0-1034-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'OptiPlex 5260 AIO',
'network': 'Intel - 8086:15bc',
'processor': 'Intel(R) Pentium(R) Gold G5620 CPU @ 4.00GHz',
'video': 'Intel - 8086:3e91',
'wireless': 'Qualcomm Atheros - 168c:0042'},
'201811-26656': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Grizzly 24 AIO MLK',
'form_factor': 'All-In-One',
'kernel': '4.15.0-1034-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'OptiPlex 7460 AIO',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i3-9300 CPU @ 3.70GHz',
'video': 'Intel - 8086:3e91',
'wireless': 'Intel - 8086:a370'},
'201811-26676': {'audio_name': 'Raven Ridge HDMI/DP Audio Controller',
'audio_pciid': '1002:15de',
'codename': 'V530-15ARR',
'form_factor': 'Desktop',
'kernel': '4.15.0-1030-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'V530 Desktop',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'AMD Ryzen 3 PRO 2200G with Radeon Vega '
'Graphics',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - 1002:15dd',
'wireless': ''},
'201811-26677': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'V530-07ICB',
'form_factor': 'Desktop',
'kernel': '4.15.0-1031-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'V530 Desktop',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i5-8400 CPU @ 2.80GHz',
'video': 'Intel - 8086:3e92',
'wireless': 'Realtek Semiconductor Co., Ltd. - 10ec:c821'},
'201811-26678': {'audio_name': 'Raven Ridge HDMI/DP Audio Controller',
'audio_pciid': '1002:15de',
'codename': '705 G4 SFF',
'form_factor': 'Desktop',
'kernel': '4.15.0-1030-oem',
'location': 'ceqa',
'make': 'HP',
'model': 'EliteDesk 705 G4 SFF',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'AMD Ryzen 5 PRO 2400G with Radeon Vega '
'Graphics',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - 1002:15dd',
'wireless': 'Intel - 8086:095a'},
'201811-26679': {'audio_name': 'Raven Ridge HDMI/DP Audio Controller',
'audio_pciid': '1002:15de',
'codename': '705 G4 DM',
'form_factor': 'Desktop',
'kernel': '4.15.0-1030-oem',
'location': 'ceqa',
'make': 'HP',
'model': 'HP EliteDesk 705 G4 DM',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'AMD Ryzen 5 PRO 2400G with Radeon Vega '
'Graphics',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - 1002:15dd',
'wireless': 'Intel - 8086:095a'},
'201811-26680': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': '800 G4 DM',
'form_factor': 'Desktop',
'kernel': '4.15.0-1034-oem',
'location': 'ceqa',
'make': 'HP',
'model': 'EliteDesk 800 G4 DM',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i7-8700T CPU @ 2.40GHz',
'video': 'Intel - 8086:3e92',
'wireless': 'Intel - 8086:095a'},
'201812-26686': {'audio_name': 'TU102 High Definition Audio Controller',
'audio_pciid': '10de:10f7',
'codename': 'Matira 7 CAL',
'form_factor': 'Desktop',
'kernel': '4.15.0-1027-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Precision 7920 Tower',
'network': 'Intel - 8086:15b9, Intel - 8086:1533',
'processor': 'Intel(R) Xeon(R) Gold 6248 CPU @ 2.50GHz',
'video': 'nVidia - 10de:1e30',
'wireless': ''},
'201812-26695': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Vulcan 15',
'form_factor': 'Laptop',
'kernel': '4.15.0-1032-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 5590',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i5-8300H CPU @ 2.30GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1c8c',
'wireless': 'Intel - 8086:a370'},
'201812-26696': {'audio_name': 'Unknown',
'audio_pciid': '10de:10f8',
'codename': 'Vulcan 15B',
'form_factor': 'Laptop',
'kernel': '4.15.0-1032-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'G7 7590',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:2502',
'processor': 'Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1e90',
'wireless': 'Intel - 8086:2526'},
'201812-26697': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Vulcan 17',
'form_factor': 'Laptop',
'kernel': '4.15.0-1032-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'G7 7790',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1c8c',
'wireless': 'Intel - 8086:a370'},
'201812-26698': {'audio_name': 'TU106 High Definition Audio Controller',
'audio_pciid': '10de:10f9',
'codename': 'Vulcan 17',
'form_factor': 'Laptop',
'kernel': '4.15.0-1032-oem',
'location': 'mainstream',
'make': 'Dell',
'model': 'G7 7790',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:2502',
'processor': 'Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz',
'video': 'nVidia - 10de:1f50',
'wireless': 'Intel - 8086:a370'},
'201812-26710': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Bolt',
'form_factor': 'Laptop',
'kernel': '4.15.0-1029-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 5583',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz',
'video': 'Intel - 8086:3ea0, nVidia - 10de:174d',
'wireless': 'Intel - 8086:9df0'},
'201812-26711': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Bolt',
'form_factor': 'Laptop',
'kernel': '4.15.0-1029-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 5583',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i3-8145U CPU @ 2.10GHz',
'video': 'Intel - 8086:3ea0',
'wireless': 'Qualcomm Atheros - 168c:0042'},
'201812-26712': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Bolt',
'form_factor': 'Laptop',
'kernel': '4.15.0-1029-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 5584',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz',
'video': 'Intel - 8086:3ea0',
'wireless': 'Intel - 8086:9df0'},
'201812-26713': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Bolt',
'form_factor': 'Laptop',
'kernel': '4.15.0-1029-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 5584',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i7-8565U CPU @ 1.80GHz',
'video': 'Intel - 8086:3ea0, nVidia - 10de:174d',
'wireless': 'Atheros Communications - 168c:0036'},
'201812-26714': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Bolt L',
'form_factor': 'Laptop',
'kernel': '4.15.0-1029-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Latitude 3400',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i3-8145U CPU @ 2.10GHz',
'video': 'Intel - 8086:3ea0',
'wireless': 'Qualcomm Atheros - 168c:0042'},
'201812-26715': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Bolt L',
'form_factor': 'Laptop',
'kernel': '4.15.0-1029-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Latitude 3400',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i7-8565U CPU @ 1.80GHz',
'video': 'Intel - 8086:3ea0, nVidia - 10de:174d',
'wireless': 'Intel - 8086:9df0'},
'201812-26716': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Bolt L',
'form_factor': 'Laptop',
'kernel': '4.15.0-1029-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Latitude 3500',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz',
'video': 'Intel - 8086:3ea0',
'wireless': 'Qualcomm Atheros - 168c:0042'},
'201812-26717': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Bolt L',
'form_factor': 'Laptop',
'kernel': '4.15.0-1029-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Latitude 3500',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz',
'video': 'Intel - 8086:3ea0, nVidia - 10de:174d',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201812-26719': {'audio_name': 'Sunrise Point-LP HD Audio',
'audio_pciid': '8086:9d71',
'codename': '640 G4',
'form_factor': 'Laptop',
'kernel': '4.15.0-1030-oem',
'location': 'ceqa',
'make': 'HP',
'model': '640 G4 Notebook PC',
'network': 'Intel - 8086:15d7',
'processor': 'Intel(R) Core(TM) i5-8250U CPU @ 1.60GHz',
'video': 'Intel - 8086:5917',
'wireless': 'Intel - 8086:24fd'},
'201812-26720': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Kylo2.0',
'form_factor': 'Laptop',
'kernel': '4.15.0-1030-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkPad L390',
'network': 'Intel - 8086:15be',
'processor': 'Intel(R) Core(TM) i7-8565U CPU @ 1.80GHz',
'video': 'Intel - 8086:3ea0',
'wireless': 'Intel - 8086:9df0'},
'201901-26797': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': '800 G4 SFF',
'form_factor': 'Desktop',
'kernel': '4.15.0-1034-oem',
'location': 'ceqa',
'make': 'HP',
'model': 'EliteDesk 800 G4 SFF',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz',
'video': 'Intel - 8086:3e92',
'wireless': 'Intel - 8086:095a'},
'201901-26824': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'North Bay 13 DVT2',
'form_factor': 'Laptop',
'kernel': '4.15.0-1030-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Latitude 5300',
'network': 'Intel - 8086:15be',
'processor': 'Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz',
'video': 'Intel - 8086:3ea0',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201902-26838': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'WASP N5',
'form_factor': 'Laptop',
'kernel': '4.15.0-1032-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 5390',
'network': '',
'processor': 'Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz',
'video': 'Intel - 8086:3ea0, nVidia - 10de:1d13',
'wireless': 'Intel - 8086:9df0'},
'201902-26839': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'WASP N5',
'form_factor': 'Laptop',
'kernel': '4.15.0-1032-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 5390',
'network': '',
'processor': 'Intel(R) Core(TM) i7-8565U CPU @ 1.80GHz',
'video': 'Intel - 8086:3ea0, nVidia - 10de:1d13',
'wireless': 'Intel - 8086:9df0'},
'201902-26840': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'WASP V5',
'form_factor': 'Laptop',
'kernel': '4.15.0-1032-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Vostro 5390',
'network': '',
'processor': 'Intel(R) Core(TM) i3-8145U CPU @ 2.10GHz',
'video': 'Intel - 8086:3ea0, nVidia - 10de:1d13',
'wireless': 'Intel - 8086:9df0'},
'201902-26842': {'audio_name': 'TU106 High Definition Audio Controller',
'audio_pciid': '10de:10f9',
'codename': 'Vulcan 15B DVT2',
'form_factor': 'Laptop',
'kernel': '4.15.0-1032-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'G7 7590',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:2502',
'processor': 'Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz',
'video': 'nVidia - 10de:1f50',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201902-26843': {'audio_name': 'Unknown',
'audio_pciid': '10de:10f8',
'codename': 'Vulcan 17 DVT2',
'form_factor': 'Laptop',
'kernel': '4.15.0-1032-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'G7 7790',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:2502',
'processor': 'Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1e90',
'wireless': 'Intel - 8086:2526'},
'201902-26851': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'North Bay 14',
'form_factor': 'Laptop',
'kernel': '4.15.0-1030-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Latitude 5400',
'network': 'Intel - 8086:15bd',
'processor': 'Intel(R) Core(TM) i5-8365U CPU @ 1.60GHz',
'video': 'Intel - 8086:3ea0',
'wireless': 'Intel - 8086:9df0'},
'201902-26858': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'NorthBay 15 DVT2',
'form_factor': 'Laptop',
'kernel': '4.15.0-1030-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Latitude 5500',
'network': 'Intel - 8086:15bd',
'processor': 'Intel(R) Core(TM) i7-8665U CPU @ 1.90GHz',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - '
'1002:6987, Intel - 8086:3ea0',
'wireless': 'Intel - 8086:9df0'},
'201902-26859': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'NorthBay 15P DVT2',
'form_factor': 'Laptop',
'kernel': '4.15.0-1030-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Precision 3540',
'network': 'Intel - 8086:15be',
'processor': 'Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - '
'1002:6985, Intel - 8086:3ea0',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201902-26862': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Merion 14',
'form_factor': 'Laptop',
'kernel': '4.15.0-1030-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Latitude 7400',
'network': '',
'processor': 'Intel(R) Core(TM) i5-8365U CPU @ 1.60GHz',
'video': 'Intel - 8086:3ea0',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201902-26863': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Merion 14',
'form_factor': 'Laptop',
'kernel': '4.15.0-1030-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Latitude 7400',
'network': '',
'processor': 'Intel(R) Core(TM) i7-8665U CPU @ 1.90GHz',
'video': 'Intel - 8086:3ea0',
'wireless': 'Intel - 8086:9df0'},
'201902-26864': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Merion 13',
'form_factor': 'Laptop',
'kernel': '4.15.0-1030-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Latitude 7300',
'network': '',
'processor': 'Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz',
'video': 'Intel - 8086:3ea0',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201902-26865': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Northbay 15',
'form_factor': 'Laptop',
'kernel': '4.15.0-1030-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Latitude 5500',
'network': 'Intel - 8086:15be',
'processor': 'Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz',
'video': 'Intel - 8086:3ea0',
'wireless': 'Intel - 8086:9df0'},
'201903-26895': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Brook Hollow 15P',
'form_factor': 'Laptop',
'kernel': '4.15.0-1033-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Precision 3541',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i9-9880H CPU @ 2.30GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1cbd',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201903-26908': {'audio_name': 'Audio is not found',
'audio_pciid': 'Unknown pciid',
'codename': 'Pearl CSL',
'form_factor': 'Workstation (Desktop)',
'kernel': '4.15.0-1027-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Precision 7920 Rack',
'network': 'Intel - 8086:1572, Intel - 8086:1572, Intel - '
'8086:1521, Intel - 8086:1521',
'processor': 'Intel(R) Xeon(R) Gold 5215L CPU @ 2.50GHz',
'video': 'Matrox Electronics Systems Ltd. - 102b:0536',
'wireless': ''},
'201903-26909': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Brook Hollow 14',
'form_factor': 'Laptop',
'kernel': '4.15.0-1033-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Latitude 5401',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i5-9400H CPU @ 2.50GHz',
'video': 'Intel - 8086:3e9b',
'wireless': 'Intel - 8086:a370'},
'201903-26910': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Brook Hollow 14',
'form_factor': 'Laptop',
'kernel': '4.15.0-1033-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Latitude 5401',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i7-9850H CPU @ 2.60GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1d10',
'wireless': 'Intel - 8086:a370'},
'201903-26913': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Brook Hollow 15',
'form_factor': 'Laptop',
'kernel': '4.15.0-1033-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Latitude 5501',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i7-9850H CPU @ 2.60GHz',
'video': 'Intel - 8086:3e9b',
'wireless': 'Intel - 8086:a370'},
'201903-26914': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Brook Hollow 15',
'form_factor': 'Laptop',
'kernel': '4.15.0-1033-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Latitude 5501',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i7-9850H CPU @ 2.60GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1c90',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201903-26915': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Brook Hollow P',
'form_factor': 'Laptop',
'kernel': '4.15.0-1033-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Precision 3541',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Xeon(R) E-2276M CPU @ 2.80GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1cbd',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201903-26920': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Berlinetta P CFL-R',
'form_factor': 'Laptop',
'kernel': '4.15.0-1036-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Precision 5540',
'network': '',
'processor': 'Intel(R) Xeon(R) E-2276M CPU @ 2.80GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1fb8',
'wireless': 'Intel - 8086:2526'},
'201903-26932': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Berlinetta P CFL-R',
'form_factor': 'Laptop',
'kernel': '4.15.0-1036-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Precision 5540',
'network': '',
'processor': 'Intel(R) Core(TM) i9-9980HK CPU @ 2.40GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1fb8',
'wireless': 'Intel - 8086:2526'},
'201904-26937': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Berlinetta P CFL-R',
'form_factor': 'Laptop',
'kernel': '4.15.0-1036-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Precision 5540',
'network': '',
'processor': 'Intel(R) Core(TM) i7-9750H CPU @ 2.60GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1fb9',
'wireless': 'Intel - 8086:2723'},
'201904-26938': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Berlinetta P CFL-R',
'form_factor': 'Laptop',
'kernel': '4.15.0-1036-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Precision 5540',
'network': '',
'processor': 'Intel(R) Core(TM) i5-9400H CPU @ 2.50GHz',
'video': 'Intel - 8086:3e9b',
'wireless': 'Intel - 8086:2723'},
'201904-26940': {'audio_name': 'Unknown',
'audio_pciid': '10de:10fa',
'codename': 'Whitehaven MLK 15',
'form_factor': 'Laptop',
'kernel': '4.15.0-1037-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Precision 7540',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i7-9850H CPU @ 2.60GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1fb9',
'wireless': 'Intel - 8086:2723'},
'201904-26941': {'audio_name': 'TU106 High Definition Audio Controller',
'audio_pciid': '10de:10f9',
'codename': 'Whitehaven MLK 15',
'form_factor': 'Laptop',
'kernel': '4.15.0-1037-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Precision 7540',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i7-9750H CPU @ 2.60GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1f36',
'wireless': 'Intel - 8086:2723'},
'201904-26970': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Drift',
'form_factor': 'Laptop',
'kernel': '4.15.0-1039-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkPad T490',
'network': 'Intel - 8086:15be',
'processor': 'Intel(R) Core(TM) i7-8565U CPU @ 1.80GHz',
'video': 'Intel - 8086:3ea0, nVidia - 10de:1d13',
'wireless': 'Intel - 8086:9df0'},
'201904-26971': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Ironhide-T',
'form_factor': 'Laptop',
'kernel': '4.15.0-1039-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkPad T590',
'network': 'Intel - 8086:15be',
'processor': 'Intel(R) Core(TM) i7-8565U CPU @ 1.80GHz',
'video': 'Intel - 8086:3ea0, nVidia - 10de:1d13',
'wireless': 'Intel - 8086:9df0'},
'201904-26973': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Jazz',
'form_factor': 'Laptop',
'kernel': '4.15.0-1036-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkPad T490s',
'network': 'Intel - 8086:15be',
'processor': 'Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz',
'video': 'Intel - 8086:3ea0',
'wireless': 'Intel - 8086:9df0'},
'201904-26976': {'audio_name': 'Unknown',
'audio_pciid': '10de:10f8',
'codename': 'WhiteHaven MLK 17',
'form_factor': 'Laptop',
'kernel': '4.15.0-1037-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Precision 7740',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Xeon(R) E-2286M CPU @ 2.40GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1eb6',
'wireless': 'Intel - 8086:2723'},
'201904-26981': {'audio_name': 'Sunrise Point-LP HD Audio',
'audio_pciid': '8086:9d71',
'codename': 'Syrah',
'form_factor': 'All-In-One',
'kernel': '4.15.0-1039-oem',
'location': 'ceqa',
'make': 'HP',
'model': 'All-in-One 22-c040la/22-c041la',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168, '
'Realtek Semiconductor Co., Ltd. - 10ec:c821',
'processor': 'Intel(R) Core(TM) i3-8130U CPU @ 2.20GHz',
'video': 'Intel - 8086:5917',
'wireless': ''},
'201904-26983': {'audio_name': 'Sunrise Point-LP HD Audio',
'audio_pciid': '8086:9d71',
'codename': 'Syrah',
'form_factor': 'All-In-One',
'kernel': '4.15.0-1039-oem',
'location': 'ceqa',
'make': 'HP',
'model': 'All-in-One 22-c042la/c043la',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168, '
'Realtek Semiconductor Co., Ltd. - 10ec:c821',
'processor': 'Intel(R) Core(TM) i5-8250U CPU @ 1.60GHz',
'video': 'Intel - 8086:5917',
'wireless': ''},
'201904-26993': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Knockout',
'form_factor': 'Laptop',
'kernel': '5.0.0-1013-oem-osp1',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkPad X1 Carbon 7th Gen',
'network': 'Intel - 8086:15be',
'processor': 'Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz',
'video': 'Intel - 8086:3ea0',
'wireless': 'Intel - 8086:9df0'},
'201904-27012': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'TurtleBay CFLR',
'form_factor': 'Desktop',
'kernel': '4.15.0-1037-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Precision 3630 Tower',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i5-9500 CPU @ 3.00GHz',
'video': 'Intel - 8086:3e92',
'wireless': 'Intel - 8086:2526'},
'201904-27013': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'TurtleBay CFLR',
'form_factor': 'Desktop',
'kernel': '4.15.0-1037-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Precision 3630 Tower',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Xeon(R) E-2224G CPU @ 3.50GHz',
'video': 'Intel - 8086:3e96',
'wireless': 'Qualcomm Atheros - 168c:0042'},
'201904-27018': {'audio_name': 'Unknown',
'audio_pciid': '10de:10f8',
'codename': 'Whitehaven MLK 17',
'form_factor': 'Laptop',
'kernel': '4.15.0-1037-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Precision 7740',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i5-9300H CPU @ 2.40GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1eb5',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201905-27049': {'audio_name': 'Raven/Raven2/Fenghuang HDMI/DP Audio '
'Controller',
'audio_pciid': '1002:15de',
'codename': 'Drift-AMD',
'form_factor': 'Laptop',
'kernel': '4.15.0-1038-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkPad T495',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168, '
'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'AMD Ryzen 7 PRO 3700U w/ Radeon Vega Mobile '
'Gfx',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - 1002:15d8',
'wireless': 'Intel - 8086:2526'},
'201905-27070': {'audio_name': 'Unknown',
'audio_pciid': '1002:15b3',
'codename': 'Loki N3 MLK 15 AMD STR',
'form_factor': 'Laptop',
'kernel': '4.15.0-1039-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Inspiron 3595',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'AMD A6-9225 RADEON R4, 5 COMPUTE CORES 2C+3G',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - 1002:98e4',
'wireless': 'Qualcomm Atheros - 168c:0042'},
'201905-27071': {'audio_name': 'Unknown',
'audio_pciid': '1002:15b3',
'codename': 'Loki N3 MLK 15 AMD STR',
'form_factor': 'Laptop',
'kernel': '4.15.0-1039-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Inspiron 3595',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'AMD A9-9425 RADEON R5, 5 COMPUTE CORES 2C+3G',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - 1002:98e4',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201905-27082': {'audio_name': 'Audio is not found',
'audio_pciid': 'Unknown pciid',
'codename': 'Vostro 5590',
'form_factor': 'Laptop',
'kernel': '',
'location': 'ceqa',
'make': 'Dell',
'model': 'Vostro 5590',
'network': '',
'processor': 'Intel(R) Core(TM) i5-10210U CPU @ 1.60GHz',
'video': '',
'wireless': ''},
'201905-27083': {'audio_name': 'Audio is not found',
'audio_pciid': 'Unknown pciid',
'codename': 'Vostro 5590',
'form_factor': 'Laptop',
'kernel': '',
'location': 'ceqa',
'make': 'Dell',
'model': 'Vostro 5590',
'network': '',
'processor': 'Intel(R) Core(TM) i5-10210U CPU @ 1.60GHz',
'video': '',
'wireless': ''},
'201906-27085': {'audio_name': 'Audio is not found',
'audio_pciid': 'Unknown pciid',
'codename': 'Mantis N5 14 DVT2',
'form_factor': 'Laptop',
'kernel': '',
'location': 'ceqa',
'make': 'Dell',
'model': 'Inspiron 5490',
'network': '',
'processor': 'Intel(R) Core(TM) i5-10210U CPU @ 1.60GHz',
'video': '',
'wireless': ''},
'201906-27086': {'audio_name': 'Unknown',
'audio_pciid': '8086:02c8',
'codename': 'Mantis N5 14 DVT2',
'form_factor': 'Laptop',
'kernel': '5.0.0-1013-oem-osp1',
'location': 'ceqa',
'make': 'Dell',
'model': 'Inspiron 5490',
'network': '',
'processor': 'Intel(R) Core(TM) i7-10510U CPU @ 1.80GHz',
'video': 'Intel - 8086:9b41, nVidia - 10de:1d11',
'wireless': 'Intel - 8086:02f0'},
'201906-27087': {'audio_name': 'Unknown',
'audio_pciid': '8086:02c8',
'codename': 'Mantis N5 15 DVT2',
'form_factor': 'Laptop',
'kernel': '5.0.0-1013-oem-osp1',
'location': 'ceqa',
'make': 'Dell',
'model': 'Inspiron 5590',
'network': '',
'processor': 'Intel(R) Core(TM) i5-10210U CPU @ 1.60GHz',
'video': 'Intel - 8086:9b41',
'wireless': 'Intel - 8086:02f0'},
'201906-27088': {'audio_name': 'Audio is not found',
'audio_pciid': 'Unknown pciid',
'codename': 'Mantis N5 15 DVT2',
'form_factor': 'Laptop',
'kernel': '',
'location': 'ceqa',
'make': 'Dell',
'model': 'Inspiron 5590',
'network': '',
'processor': 'Intel(R) Core(TM) i7-10510U CPU @ 1.80GHz',
'video': '',
'wireless': ''},
'201906-27092': {'audio_name': 'Unknown',
'audio_pciid': '8086:02c8',
'codename': 'Loki V3 MLK 14 DVT2',
'form_factor': 'Laptop',
'kernel': '4.15.0-1042-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Vostro 3490',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i7-10510U CPU @ 1.80GHz',
'video': 'Intel - 8086:9b41',
'wireless': 'Intel - 8086:02f0'},
'201906-27093': {'audio_name': 'Unknown',
'audio_pciid': '8086:02c8',
'codename': 'Loki V3 MLK 14 DVT2',
'form_factor': 'Laptop',
'kernel': '4.15.0-1042-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Vostro 3490',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i7-10510U CPU @ 1.80GHz',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - '
'1002:6665, Intel - 8086:9b41',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201906-27095': {'audio_name': 'Unknown',
'audio_pciid': '8086:02c8',
'codename': 'Loki V3 MLK 15 DVT2',
'form_factor': 'Laptop',
'kernel': '4.15.0-1042-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Vostro 3590',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i7-10510U CPU @ 1.80GHz',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - '
'1002:6665, Intel - 8086:9b41',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201906-27096': {'audio_name': 'Unknown',
'audio_pciid': '8086:02c8',
'codename': 'Loki N3 MLK 14 DVT2',
'form_factor': 'Laptop',
'kernel': '4.15.0-1042-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Inspiron 3490',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i7-10510U CPU @ 1.80GHz',
'video': 'Intel - 8086:9b41',
'wireless': 'Atheros Communications - 168c:0036'},
'201906-27097': {'audio_name': 'Unknown',
'audio_pciid': '8086:02c8',
'codename': 'Loki N3 MLK 14 DVT2',
'form_factor': 'Laptop',
'kernel': '4.15.0-1042-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Inspiron 3490',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i7-10510U CPU @ 1.80GHz',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - '
'1002:6665, Intel - 8086:9b41',
'wireless': 'Qualcomm Atheros - 168c:0042'},
'201906-27098': {'audio_name': 'Unknown',
'audio_pciid': '8086:02c8',
'codename': 'Loki N3 MLK 15 DVT2',
'form_factor': 'Laptop',
'kernel': '4.15.0-1042-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Inspiron 3590',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i7-10510U CPU @ 1.80GHz',
'video': 'Intel - 8086:9b41',
'wireless': 'Intel - 8086:02f0'},
'201906-27099': {'audio_name': 'Unknown',
'audio_pciid': '8086:02c8',
'codename': 'Loki N3 MLK 15 DVT2',
'form_factor': 'Laptop',
'kernel': '4.15.0-1042-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Inspiron 3590',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i5-10210U CPU @ 1.60GHz',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - '
'1002:6665, Intel - 8086:9b41',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201906-27100': {'audio_name': 'Unknown',
'audio_pciid': '8086:02c8',
'codename': 'Loki N3 MLK 17 DVT2',
'form_factor': 'Laptop',
'kernel': '4.15.0-1042-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Inspiron 3790',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i7-10510U CPU @ 1.80GHz',
'video': 'Intel - 8086:9b41',
'wireless': 'Intel - 8086:02f0'},
'201906-27101': {'audio_name': 'Unknown',
'audio_pciid': '8086:02c8',
'codename': 'Loki N3 MLK 17 DVT2',
'form_factor': 'Laptop',
'kernel': '4.15.0-1042-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Inspiron 3790',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i7-10510U CPU @ 1.80GHz',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - '
'1002:6665, Intel - 8086:9b41',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201906-27116': {'audio_name': 'Unknown',
'audio_pciid': '8086:02c8',
'codename': 'WASP N5 CML',
'form_factor': 'Laptop',
'kernel': '5.0.0-1013-oem-osp1',
'location': 'ceqa',
'make': 'Dell',
'model': 'Inspiron 5391',
'network': '',
'processor': 'Intel(R) Core(TM) i7-10510U CPU @ 1.80GHz',
'video': 'Intel - 8086:9b41, nVidia - 10de:1d13',
'wireless': 'Intel - 8086:02f0'},
'201906-27117': {'audio_name': 'Unknown',
'audio_pciid': '8086:02c8',
'codename': 'WASP V5 CML',
'form_factor': 'Laptop',
'kernel': '5.0.0-1013-oem-osp1',
'location': 'ceqa',
'make': 'Dell',
'model': 'Vostro 5391',
'network': '',
'processor': 'Intel(R) Core(TM) i5-10210U CPU @ 1.60GHz',
'video': 'Intel - 8086:9b41, nVidia - 10de:1d13',
'wireless': 'Intel - 8086:02f0'},
'201906-27118': {'audio_name': 'Unknown',
'audio_pciid': '8086:02c8',
'codename': 'WASP V5 CML',
'form_factor': 'Laptop',
'kernel': '5.0.0-1013-oem-osp1',
'location': 'ceqa',
'make': 'Dell',
'model': 'Vostro 5391',
'network': '',
'processor': 'Intel(R) Core(TM) i7-10510U CPU @ 1.80GHz',
'video': 'Intel - 8086:9b41, nVidia - 10de:1d13',
'wireless': 'Intel - 8086:02f0'},
'201906-27126': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Solo2.0',
'form_factor': 'Laptop',
'kernel': '4.15.0-1045-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkPad L490',
'network': 'Intel - 8086:15be',
'processor': 'Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - '
'1002:6900, Intel - 8086:3ea0',
'wireless': 'Realtek Semiconductor Co., Ltd. - net:Unknown'},
'201906-27127': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Ratchet',
'form_factor': 'Laptop',
'kernel': '5.0.0-1015-oem-osp1',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkPad X1 Yoga 4th Gen',
'network': 'Intel - 8086:15bd',
'processor': 'Intel(R) Core(TM) i7-8665U CPU @ 1.90GHz',
'video': 'Intel - 8086:3ea0',
'wireless': 'Intel - 8086:9df0'},
'201906-27140': {'audio_name': 'Unknown',
'audio_pciid': '8086:02c8',
'codename': 'Loki N5 14 MLK CML',
'form_factor': 'Laptop',
'kernel': '4.15.0-1042-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Inspiron 5494',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i3-10110U CPU @ 2.10GHz',
'video': 'Intel - 8086:9b41',
'wireless': 'Intel - 8086:02f0'},
'201906-27143': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'M920s',
'form_factor': 'Desktop',
'kernel': '4.15.0-1039-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkCentre M920 TOWER/SFF/TINY',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Pentium(R) Gold G5420 CPU @ 3.80GHz',
'video': 'Intel - 8086:3e93',
'wireless': ''},
'201906-27144': {'audio_name': 'Baffin HDMI/DP Audio [Radeon RX 550 640SP / '
'RX 560/560X]',
'audio_pciid': '1002:aae0',
'codename': 'M920x',
'form_factor': 'Desktop',
'kernel': '4.15.0-1039-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkCentre M920 TOWER/SFF/TINY',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i5-9600T CPU @ 2.30GHz',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - 1002:67ff',
'wireless': 'Intel - 8086:a370'},
'201906-27146': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'M720q',
'form_factor': 'Desktop',
'kernel': '4.15.0-1039-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkCentre M720 TOWER/SFF/TINY',
'network': 'Intel - 8086:15bc',
'processor': 'Intel(R) Core(TM) i3-9100T CPU @ 3.10GHz',
'video': 'Intel - 8086:3e91',
'wireless': 'Intel - 8086:a370'},
'201906-27168': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Lando2.0',
'form_factor': 'Laptop',
'kernel': '4.15.0-1043-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkPad L590',
'network': 'Intel - 8086:15be',
'processor': 'Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - '
'1002:6900, Intel - 8086:3ea0',
'wireless': 'Intel - 8086:2526'},
'201906-27169': {'audio_name': 'Raven/Raven2/Fenghuang HDMI/DP Audio '
'Controller',
'audio_pciid': '1002:15de',
'codename': 'Sideswipe-AMD',
'form_factor': 'Laptop',
'kernel': '4.15.0-1043-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkPad X395',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'AMD Ryzen 5 PRO 3500U w/ Radeon Vega Mobile '
'Gfx',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - 1002:15d8',
'wireless': 'Intel - 8086:2526'},
'201906-27170': {'audio_name': 'Raven/Raven2/Fenghuang HDMI/DP Audio '
'Controller',
'audio_pciid': '1002:15de',
'codename': 'Jazz-AMD',
'form_factor': 'Laptop',
'kernel': '4.15.0-1043-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkPad T495s',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'AMD Ryzen 7 PRO 3700U w/ Radeon Vega Mobile '
'Gfx',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - 1002:15d8',
'wireless': 'Intel - 8086:2526'},
'201907-27210': {'audio_name': 'GP106 High Definition Audio Controller',
'audio_pciid': '10de:10f1',
'codename': 'P330-Refresh-3.0-1',
'form_factor': 'Desktop',
'kernel': '4.15.0-1045-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'P330 Workstation 2nd Gen',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i9-9900 CPU @ 3.10GHz',
'video': 'nVidia - 10de:1c31',
'wireless': ''},
'201907-27211': {'audio_name': '200 Series PCH HD Audio',
'audio_pciid': '8086:a2f0',
'codename': 'P520c-Refresh-4.0',
'form_factor': 'Desktop',
'kernel': '4.15.0-1045-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkStation P520c',
'network': 'Intel - 8086:15b7',
'processor': 'Intel(R) Xeon(R) W-2133 CPU @ 3.60GHz',
'video': 'nVidia - 10de:1cb1',
'wireless': ''},
'201907-27223': {'audio_name': 'Unknown',
'audio_pciid': '8086:02c8',
'codename': 'Loki N5 15 MLK CML',
'form_factor': 'Laptop',
'kernel': '4.15.0-1042-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Inspiron 5594',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i5-10210U CPU @ 1.60GHz',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - '
'1002:6665, Intel - 8086:9b41',
'wireless': 'Intel - 8086:02f0'},
'201907-27224': {'audio_name': 'Unknown',
'audio_pciid': '8086:02c8',
'codename': 'Loki N5 15 MLK CML',
'form_factor': 'Laptop',
'kernel': '4.15.0-1042-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Inspiron 5594',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i7-10510U CPU @ 1.80GHz',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - '
'1002:6665, Intel - 8086:9b41',
'wireless': 'Intel - 8086:02f0'},
'201908-27285': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'M720s',
'form_factor': 'Desktop',
'kernel': '4.15.0-1045-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkCentre M720 TOWER/SFF/TINY',
'network': 'Intel - 8086:15bc',
'processor': 'Intel(R) Core(TM) i3-9100 CPU @ 3.60GHz',
'video': 'Intel - 8086:3e91',
'wireless': 'Intel - 8086:a370'}}
cids_after_shrink = \
{'201702-25401': {'audio_name': 'Lewisburg MROM 0',
'audio_pciid': '8086:a1f0',
'codename': 'Matira 5',
'form_factor': 'Desktop',
'kernel': '4.15.0-1027-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Precision Tower 7820',
'network': 'Intel - 8086:15b9',
'processor': 'Intel(R) Xeon(R) Platinum 8153 CPU @ 2.00GHz',
'video': 'nVidia - 10de:107c',
'wireless': ''},
'201703-25448': {'audio_name': 'Audio is not found',
'audio_pciid': 'Unknown pciid',
'codename': 'Pearl',
'form_factor': 'Workstation (Desktop)',
'kernel': '4.15.0-1027-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Precision Rack 7920',
'network': 'Intel - 8086:1521, Intel - 8086:1521, Intel - '
'8086:1521, Intel - 8086:1521',
'processor': 'Intel(R) Xeon(R) Platinum 8276 CPU @ 2.20GHz',
'video': 'Matrox Electronics Systems Ltd. - 102b:0536',
'wireless': ''},
'201806-26285': {'audio_name': 'Audio is not found',
'audio_pciid': 'Unknown pciid',
'codename': 'Chelmsford',
'form_factor': 'Smart NIC',
'kernel': '4.18.0-1001-mellanox',
'location': 'ceqa',
'make': 'Mellanox Technologies',
'model': 'MBF1M332A',
'network': '',
'processor': 'aarch64',
'video': '',
'wireless': ''},
'201807-26317': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Kylo Ren 15 MLK',
'form_factor': 'Laptop',
'kernel': '4.15.0-1012-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 7580',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i7-8565U CPU @ 1.80GHz',
'video': 'Intel - 8086:3ea0, nVidia - 10de:1d10',
'wireless': 'Intel - 8086:2526'},
'201808-26364': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Punisher WHL',
'form_factor': 'All In One',
'kernel': '4.15.0-1021-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 3480 AIO',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz',
'video': 'Intel - 8086:3ea0, nVidia - 10de:174e',
'wireless': 'Intel - 8086:9df0'},
'201808-26387': {'audio_name': 'Raven Ridge HDMI/DP Audio Controller',
'audio_pciid': '1002:15de',
'codename': 'Windu2-AMD',
'form_factor': 'Laptop',
'kernel': '4.15.0-1021-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkPad A485',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168, '
'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'AMD Ryzen 5 PRO 2500U w/ Radeon Vega Mobile '
'Gfx',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - 1002:15dd',
'wireless': 'Intel - 8086:2526'},
'201808-26442': {'audio_name': 'Raven Ridge HDMI/DP Audio Controller',
'audio_pciid': '1002:15de',
'codename': 'Loki N3 AMD',
'form_factor': 'Laptop',
'kernel': '4.15.0-1026-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 3585',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'AMD Ryzen 3 2300U with Radeon Vega Mobile Gfx',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - 1002:15dd',
'wireless': 'Atheros Communications - 168c:0036'},
'201808-26445': {'audio_name': 'Raven Ridge HDMI/DP Audio Controller',
'audio_pciid': '1002:15de',
'codename': 'Loki N3 AMD',
'form_factor': 'Laptop',
'kernel': '4.15.0-1026-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 3785',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'AMD Ryzen 5 2500U with Radeon Vega Mobile Gfx',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - 1002:15dd',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201809-26458': {'audio_name': 'GP107GL High Definition Audio Controller',
'audio_pciid': '10de:0fb9',
'codename': 'Chiron',
'form_factor': 'Laptop',
'kernel': '4.15.0-1025-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkPad P72',
'network': 'Intel - 8086:15bc',
'processor': 'Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1cba',
'wireless': 'Intel - 8086:a370'},
'201809-26462': {'audio_name': 'GP107GL High Definition Audio Controller',
'audio_pciid': '10de:0fb9',
'codename': 'Padme',
'form_factor': 'Laptop',
'kernel': '4.15.0-1025-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkPad P1',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i7-8850H CPU @ 2.60GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1cbb',
'wireless': 'Intel - 8086:a370'},
'201809-26463': {'audio_name': 'GP107GL High Definition Audio Controller',
'audio_pciid': '10de:0fb9',
'codename': 'Padme',
'form_factor': 'Laptop',
'kernel': '4.15.0-1025-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkPad P1',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Xeon(R) E-2176M CPU @ 2.70GHz',
'video': 'Intel - 8086:3e94, nVidia - 10de:1cba',
'wireless': 'Intel - 8086:a370'},
'201809-26472': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'BRECKENRIDGE MLK 15P',
'form_factor': 'Laptop',
'kernel': '4.15.0-1021-oem',
'location': 'oem',
'make': 'Dell',
'model': 'Precision 3530',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Xeon(R) E-2176M CPU @ 2.70GHz',
'video': 'Intel - 8086:3e94, nVidia - 10de:1cbc',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201810-26502': {'audio_name': 'Sunrise Point-LP HD Audio',
'audio_pciid': '8086:9d71',
'codename': 'Pinehills',
'form_factor': 'Laptop',
'kernel': '4.15.0-1021-oem',
'location': 'mainstream',
'make': 'Dell',
'model': 'Latitude 3300',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Pentium(R) CPU 4415U @ 2.30GHz',
'video': 'Intel - 8086:5906',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201810-26513': {'audio_name': '200 Series PCH HD Audio',
'audio_pciid': '8086:a2f0',
'codename': 'Z4G4',
'form_factor': 'Desktop',
'kernel': '4.15.0-1024-oem',
'location': 'mainstream',
'make': 'HP',
'model': 'Z4 G4 Workstation',
'network': 'Intel - 8086:15b7, Intel - 8086:1533',
'processor': 'Intel(R) Xeon(R) W-2145 CPU @ 3.70GHz',
'video': 'nVidia - 10de:1bb0',
'wireless': ''},
'201810-26538': {'audio_name': 'Sunrise Point-LP HD Audio',
'audio_pciid': '8086:9d71',
'codename': 'Loki V3 KBL',
'form_factor': 'Laptop',
'kernel': '4.15.0-1021-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Vostro 3584',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i3-7020U CPU @ 2.30GHz',
'video': 'Intel - 8086:5921',
'wireless': 'Qualcomm Atheros - 168c:0042'},
'201810-26540': {'audio_name': 'GP104 High Definition Audio Controller',
'audio_pciid': '10de:10f0',
'codename': 'ZBook 17 G5',
'form_factor': 'Laptop',
'kernel': '5.0.0-21-generic',
'location': 'mainstream',
'make': 'HP',
'model': 'ZBook 17 G5',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1bbb',
'wireless': 'Intel - 8086:a370'},
'201810-26551': {'audio_name': 'Sunrise Point-LP HD Audio',
'audio_pciid': '8086:9d71',
'codename': 'Loki N3 KBL',
'form_factor': 'Laptop',
'kernel': '4.15.0-1021-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 3584',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i3-7020U CPU @ 2.30GHz',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - '
'1002:6665, Intel - 8086:5921',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201810-26554': {'audio_name': 'Unknown',
'audio_pciid': '8086:3198',
'codename': 'Loki N3 GLK',
'form_factor': 'Laptop',
'kernel': '4.15.0-1023-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 3582',
'network': '',
'processor': 'Intel(R) Celeron(R) N4000 CPU @ 1.10GHz',
'video': 'Intel - 8086:3185',
'wireless': 'Intel - 8086:31dc'},
'201810-26556': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Loki N3 WHL',
'form_factor': 'Laptop',
'kernel': '4.15.0-1024-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Inspiron 3580',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - '
'1002:6665, Intel - 8086:3ea0',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201810-26596': {'audio_name': 'Unknown',
'audio_pciid': '8086:3198',
'codename': 'Granger6U',
'form_factor': 'Laptop',
'kernel': '4.15.0-1045-oem',
'location': 'ceqa',
'make': 'HP',
'model': 'HP Laptop 14-ck0025la',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Celeron(R) N4000 CPU @ 1.10GHz',
'video': 'Intel - 8086:3185',
'wireless': 'Realtek Semiconductor Co., Ltd. - 10ec:d723'},
'201810-26598': {'audio_name': 'Unknown',
'audio_pciid': '8086:3198',
'codename': 'Granger6U',
'form_factor': 'Laptop',
'kernel': '4.15.0-1045-oem',
'location': 'ceqa',
'make': 'HP',
'model': 'HP Laptop 14-ck0025la',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Pentium(R) Silver N5000 CPU @ 1.10GHz',
'video': 'Intel - 8086:3184',
'wireless': 'Realtek Semiconductor Co., Ltd. - 10ec:d723'},
'201810-26602': {'audio_name': 'Unknown',
'audio_pciid': '1002:15b3',
'codename': 'Hagrid6U',
'form_factor': 'Laptop',
'kernel': '4.15.0-1028-oem',
'location': 'ceqa',
'make': 'HP',
'model': '245 G7 Notebook PC',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'AMD E2-9000e RADEON R2, 4 COMPUTE CORES 2C+2G',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - 1002:98e4',
'wireless': 'Realtek Semiconductor Co., Ltd. - 10ec:d723'},
'201810-26611': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'BISON MLK MT',
'form_factor': 'Desktop',
'kernel': '4.15.0-1035-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'OptiPlex 7070',
'network': 'Intel - 8086:1533, Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i9-9900 CPU @ 3.10GHz',
'video': 'Intel - 8086:3e98',
'wireless': ''},
'201811-26617': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Gambit SFF CFLR',
'form_factor': 'Desktop',
'kernel': '4.15.0-1029-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Inspiron 3470',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Celeron(R) G4930 CPU @ 3.20GHz',
'video': 'Intel - 8086:3e93',
'wireless': ''},
'201811-26631': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Eagle SFF CFL-R',
'form_factor': 'Desktop',
'kernel': '4.15.0-1029-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Vostro 3470',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Pentium(R) Gold G5420 CPU @ 3.80GHz',
'video': 'Intel - 8086:3e90',
'wireless': 'Intel - 8086:a370'},
'201811-26638': {'audio_name': 'Unknown',
'audio_pciid': '6549:2200',
'codename': 'Zuma-P CFL-R',
'form_factor': 'Desktop',
'kernel': '4.15.0-1030-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Precision 3930 Rack',
'network': 'Aquantia Corp. - 1d6a:07b1, Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i5-9600K CPU @ 3.70GHz',
'video': 'Intel - 8086:3e98',
'wireless': ''},
'201811-26647': {'audio_name': 'Sunrise Point-LP HD Audio',
'audio_pciid': '8086:9d71',
'codename': 'Granger14',
'form_factor': 'Laptop',
'kernel': '4.15.0-1030-oem',
'location': 'ceqa',
'make': 'HP',
'model': 'Laptop 14-ck0xxx',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i3-7020U CPU @ 2.30GHz',
'video': 'Intel - 8086:5916',
'wireless': 'Realtek Semiconductor Co., Ltd. - 10ec:d723'},
'201811-26655': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Bighorn 21.5 AIO MLK',
'form_factor': 'All-In-One',
'kernel': '4.15.0-1034-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'OptiPlex 5260 AIO',
'network': 'Intel - 8086:15bc',
'processor': 'Intel(R) Pentium(R) Gold G5620 CPU @ 4.00GHz',
'video': 'Intel - 8086:3e91',
'wireless': 'Qualcomm Atheros - 168c:0042'},
'201811-26676': {'audio_name': 'Raven Ridge HDMI/DP Audio Controller',
'audio_pciid': '1002:15de',
'codename': 'V530-15ARR',
'form_factor': 'Desktop',
'kernel': '4.15.0-1030-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'V530 Desktop',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'AMD Ryzen 3 PRO 2200G with Radeon Vega '
'Graphics',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - 1002:15dd',
'wireless': ''},
'201811-26677': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'V530-07ICB',
'form_factor': 'Desktop',
'kernel': '4.15.0-1031-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'V530 Desktop',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i5-8400 CPU @ 2.80GHz',
'video': 'Intel - 8086:3e92',
'wireless': 'Realtek Semiconductor Co., Ltd. - 10ec:c821'},
'201811-26679': {'audio_name': 'Raven Ridge HDMI/DP Audio Controller',
'audio_pciid': '1002:15de',
'codename': '705 G4 DM',
'form_factor': 'Desktop',
'kernel': '4.15.0-1030-oem',
'location': 'ceqa',
'make': 'HP',
'model': 'HP EliteDesk 705 G4 DM',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'AMD Ryzen 5 PRO 2400G with Radeon Vega '
'Graphics',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - 1002:15dd',
'wireless': 'Intel - 8086:095a'},
'201812-26686': {'audio_name': 'TU102 High Definition Audio Controller',
'audio_pciid': '10de:10f7',
'codename': 'Matira 7 CAL',
'form_factor': 'Desktop',
'kernel': '4.15.0-1027-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Precision 7920 Tower',
'network': 'Intel - 8086:15b9, Intel - 8086:1533',
'processor': 'Intel(R) Xeon(R) Gold 6248 CPU @ 2.50GHz',
'video': 'nVidia - 10de:1e30',
'wireless': ''},
'201812-26697': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Vulcan 17',
'form_factor': 'Laptop',
'kernel': '4.15.0-1032-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'G7 7790',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1c8c',
'wireless': 'Intel - 8086:a370'},
'201812-26717': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Bolt L',
'form_factor': 'Laptop',
'kernel': '4.15.0-1029-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Latitude 3500',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz',
'video': 'Intel - 8086:3ea0, nVidia - 10de:174d',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201812-26719': {'audio_name': 'Sunrise Point-LP HD Audio',
'audio_pciid': '8086:9d71',
'codename': '640 G4',
'form_factor': 'Laptop',
'kernel': '4.15.0-1030-oem',
'location': 'ceqa',
'make': 'HP',
'model': '640 G4 Notebook PC',
'network': 'Intel - 8086:15d7',
'processor': 'Intel(R) Core(TM) i5-8250U CPU @ 1.60GHz',
'video': 'Intel - 8086:5917',
'wireless': 'Intel - 8086:24fd'},
'201901-26797': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': '800 G4 SFF',
'form_factor': 'Desktop',
'kernel': '4.15.0-1034-oem',
'location': 'ceqa',
'make': 'HP',
'model': 'EliteDesk 800 G4 SFF',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz',
'video': 'Intel - 8086:3e92',
'wireless': 'Intel - 8086:095a'},
'201902-26842': {'audio_name': 'TU106 High Definition Audio Controller',
'audio_pciid': '10de:10f9',
'codename': 'Vulcan 15B DVT2',
'form_factor': 'Laptop',
'kernel': '4.15.0-1032-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'G7 7590',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:2502',
'processor': 'Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz',
'video': 'nVidia - 10de:1f50',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201902-26843': {'audio_name': 'Unknown',
'audio_pciid': '10de:10f8',
'codename': 'Vulcan 17 DVT2',
'form_factor': 'Laptop',
'kernel': '4.15.0-1032-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'G7 7790',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:2502',
'processor': 'Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1e90',
'wireless': 'Intel - 8086:2526'},
'201902-26858': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'NorthBay 15 DVT2',
'form_factor': 'Laptop',
'kernel': '4.15.0-1030-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Latitude 5500',
'network': 'Intel - 8086:15bd',
'processor': 'Intel(R) Core(TM) i7-8665U CPU @ 1.90GHz',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - '
'1002:6987, Intel - 8086:3ea0',
'wireless': 'Intel - 8086:9df0'},
'201902-26859': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'NorthBay 15P DVT2',
'form_factor': 'Laptop',
'kernel': '4.15.0-1030-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Precision 3540',
'network': 'Intel - 8086:15be',
'processor': 'Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - '
'1002:6985, Intel - 8086:3ea0',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201903-26895': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Brook Hollow 15P',
'form_factor': 'Laptop',
'kernel': '4.15.0-1033-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Precision 3541',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i9-9880H CPU @ 2.30GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1cbd',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201903-26908': {'audio_name': 'Audio is not found',
'audio_pciid': 'Unknown pciid',
'codename': 'Pearl CSL',
'form_factor': 'Workstation (Desktop)',
'kernel': '4.15.0-1027-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Precision 7920 Rack',
'network': 'Intel - 8086:1572, Intel - 8086:1572, Intel - '
'8086:1521, Intel - 8086:1521',
'processor': 'Intel(R) Xeon(R) Gold 5215L CPU @ 2.50GHz',
'video': 'Matrox Electronics Systems Ltd. - 102b:0536',
'wireless': ''},
'201903-26910': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Brook Hollow 14',
'form_factor': 'Laptop',
'kernel': '4.15.0-1033-oem',
'location': 'cert-taipei',
'make': 'Dell',
'model': 'Latitude 5401',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i7-9850H CPU @ 2.60GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1d10',
'wireless': 'Intel - 8086:a370'},
'201903-26914': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Brook Hollow 15',
'form_factor': 'Laptop',
'kernel': '4.15.0-1033-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Latitude 5501',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i7-9850H CPU @ 2.60GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1c90',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201903-26920': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Berlinetta P CFL-R',
'form_factor': 'Laptop',
'kernel': '4.15.0-1036-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Precision 5540',
'network': '',
'processor': 'Intel(R) Xeon(R) E-2276M CPU @ 2.80GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1fb8',
'wireless': 'Intel - 8086:2526'},
'201903-26932': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Berlinetta P CFL-R',
'form_factor': 'Laptop',
'kernel': '4.15.0-1036-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Precision 5540',
'network': '',
'processor': 'Intel(R) Core(TM) i9-9980HK CPU @ 2.40GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1fb8',
'wireless': 'Intel - 8086:2526'},
'201904-26938': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'Berlinetta P CFL-R',
'form_factor': 'Laptop',
'kernel': '4.15.0-1036-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Precision 5540',
'network': '',
'processor': 'Intel(R) Core(TM) i5-9400H CPU @ 2.50GHz',
'video': 'Intel - 8086:3e9b',
'wireless': 'Intel - 8086:2723'},
'201904-26940': {'audio_name': 'Unknown',
'audio_pciid': '10de:10fa',
'codename': 'Whitehaven MLK 15',
'form_factor': 'Laptop',
'kernel': '4.15.0-1037-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Precision 7540',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i7-9850H CPU @ 2.60GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1fb9',
'wireless': 'Intel - 8086:2723'},
'201904-26941': {'audio_name': 'TU106 High Definition Audio Controller',
'audio_pciid': '10de:10f9',
'codename': 'Whitehaven MLK 15',
'form_factor': 'Laptop',
'kernel': '4.15.0-1037-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Precision 7540',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i7-9750H CPU @ 2.60GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1f36',
'wireless': 'Intel - 8086:2723'},
'201904-26971': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Ironhide-T',
'form_factor': 'Laptop',
'kernel': '4.15.0-1039-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkPad T590',
'network': 'Intel - 8086:15be',
'processor': 'Intel(R) Core(TM) i7-8565U CPU @ 1.80GHz',
'video': 'Intel - 8086:3ea0, nVidia - 10de:1d13',
'wireless': 'Intel - 8086:9df0'},
'201904-26976': {'audio_name': 'Unknown',
'audio_pciid': '10de:10f8',
'codename': 'WhiteHaven MLK 17',
'form_factor': 'Laptop',
'kernel': '4.15.0-1037-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Precision 7740',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Xeon(R) E-2286M CPU @ 2.40GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1eb6',
'wireless': 'Intel - 8086:2723'},
'201904-26983': {'audio_name': 'Sunrise Point-LP HD Audio',
'audio_pciid': '8086:9d71',
'codename': 'Syrah',
'form_factor': 'All-In-One',
'kernel': '4.15.0-1039-oem',
'location': 'ceqa',
'make': 'HP',
'model': 'All-in-One 22-c042la/c043la',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168, '
'Realtek Semiconductor Co., Ltd. - 10ec:c821',
'processor': 'Intel(R) Core(TM) i5-8250U CPU @ 1.60GHz',
'video': 'Intel - 8086:5917',
'wireless': ''},
'201904-27013': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'TurtleBay CFLR',
'form_factor': 'Desktop',
'kernel': '4.15.0-1037-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Precision 3630 Tower',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Xeon(R) E-2224G CPU @ 3.50GHz',
'video': 'Intel - 8086:3e96',
'wireless': 'Qualcomm Atheros - 168c:0042'},
'201904-27018': {'audio_name': 'Unknown',
'audio_pciid': '10de:10f8',
'codename': 'Whitehaven MLK 17',
'form_factor': 'Laptop',
'kernel': '4.15.0-1037-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Precision 7740',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i5-9300H CPU @ 2.40GHz',
'video': 'Intel - 8086:3e9b, nVidia - 10de:1eb5',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201905-27049': {'audio_name': 'Raven/Raven2/Fenghuang HDMI/DP Audio '
'Controller',
'audio_pciid': '1002:15de',
'codename': 'Drift-AMD',
'form_factor': 'Laptop',
'kernel': '4.15.0-1038-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkPad T495',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168, '
'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'AMD Ryzen 7 PRO 3700U w/ Radeon Vega Mobile '
'Gfx',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - 1002:15d8',
'wireless': 'Intel - 8086:2526'},
'201905-27070': {'audio_name': 'Unknown',
'audio_pciid': '1002:15b3',
'codename': 'Loki N3 MLK 15 AMD STR',
'form_factor': 'Laptop',
'kernel': '4.15.0-1039-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Inspiron 3595',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'AMD A6-9225 RADEON R4, 5 COMPUTE CORES 2C+3G',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - 1002:98e4',
'wireless': 'Qualcomm Atheros - 168c:0042'},
'201905-27071': {'audio_name': 'Unknown',
'audio_pciid': '1002:15b3',
'codename': 'Loki N3 MLK 15 AMD STR',
'form_factor': 'Laptop',
'kernel': '4.15.0-1039-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Inspiron 3595',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'AMD A9-9425 RADEON R5, 5 COMPUTE CORES 2C+3G',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - 1002:98e4',
'wireless': 'Qualcomm Atheros - 168c:003e'},
'201906-27086': {'audio_name': 'Unknown',
'audio_pciid': '8086:02c8',
'codename': 'Mantis N5 14 DVT2',
'form_factor': 'Laptop',
'kernel': '5.0.0-1013-oem-osp1',
'location': 'ceqa',
'make': 'Dell',
'model': 'Inspiron 5490',
'network': '',
'processor': 'Intel(R) Core(TM) i7-10510U CPU @ 1.80GHz',
'video': 'Intel - 8086:9b41, nVidia - 10de:1d11',
'wireless': 'Intel - 8086:02f0'},
'201906-27096': {'audio_name': 'Unknown',
'audio_pciid': '8086:02c8',
'codename': 'Loki N3 MLK 14 DVT2',
'form_factor': 'Laptop',
'kernel': '4.15.0-1042-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Inspiron 3490',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i7-10510U CPU @ 1.80GHz',
'video': 'Intel - 8086:9b41',
'wireless': 'Atheros Communications - 168c:0036'},
'201906-27118': {'audio_name': 'Unknown',
'audio_pciid': '8086:02c8',
'codename': 'WASP V5 CML',
'form_factor': 'Laptop',
'kernel': '5.0.0-1013-oem-osp1',
'location': 'ceqa',
'make': 'Dell',
'model': 'Vostro 5391',
'network': '',
'processor': 'Intel(R) Core(TM) i7-10510U CPU @ 1.80GHz',
'video': 'Intel - 8086:9b41, nVidia - 10de:1d13',
'wireless': 'Intel - 8086:02f0'},
'201906-27126': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Solo2.0',
'form_factor': 'Laptop',
'kernel': '4.15.0-1045-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkPad L490',
'network': 'Intel - 8086:15be',
'processor': 'Intel(R) Core(TM) i5-8265U CPU @ 1.60GHz',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - '
'1002:6900, Intel - 8086:3ea0',
'wireless': 'Realtek Semiconductor Co., Ltd. - net:Unknown'},
'201906-27127': {'audio_name': 'Cannon Point-LP High Definition Audio '
'Controller',
'audio_pciid': '8086:9dc8',
'codename': 'Ratchet',
'form_factor': 'Laptop',
'kernel': '5.0.0-1015-oem-osp1',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkPad X1 Yoga 4th Gen',
'network': 'Intel - 8086:15bd',
'processor': 'Intel(R) Core(TM) i7-8665U CPU @ 1.90GHz',
'video': 'Intel - 8086:3ea0',
'wireless': 'Intel - 8086:9df0'},
'201906-27140': {'audio_name': 'Unknown',
'audio_pciid': '8086:02c8',
'codename': 'Loki N5 14 MLK CML',
'form_factor': 'Laptop',
'kernel': '4.15.0-1042-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Inspiron 5494',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i3-10110U CPU @ 2.10GHz',
'video': 'Intel - 8086:9b41',
'wireless': 'Intel - 8086:02f0'},
'201906-27143': {'audio_name': 'Cannon Lake PCH cAVS',
'audio_pciid': '8086:a348',
'codename': 'M920s',
'form_factor': 'Desktop',
'kernel': '4.15.0-1039-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkCentre M920 TOWER/SFF/TINY',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Pentium(R) Gold G5420 CPU @ 3.80GHz',
'video': 'Intel - 8086:3e93',
'wireless': ''},
'201906-27144': {'audio_name': 'Baffin HDMI/DP Audio [Radeon RX 550 640SP / '
'RX 560/560X]',
'audio_pciid': '1002:aae0',
'codename': 'M920x',
'form_factor': 'Desktop',
'kernel': '4.15.0-1039-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkCentre M920 TOWER/SFF/TINY',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i5-9600T CPU @ 2.30GHz',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - 1002:67ff',
'wireless': 'Intel - 8086:a370'},
'201906-27169': {'audio_name': 'Raven/Raven2/Fenghuang HDMI/DP Audio '
'Controller',
'audio_pciid': '1002:15de',
'codename': 'Sideswipe-AMD',
'form_factor': 'Laptop',
'kernel': '4.15.0-1043-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkPad X395',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'AMD Ryzen 5 PRO 3500U w/ Radeon Vega Mobile '
'Gfx',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - 1002:15d8',
'wireless': 'Intel - 8086:2526'},
'201906-27170': {'audio_name': 'Raven/Raven2/Fenghuang HDMI/DP Audio '
'Controller',
'audio_pciid': '1002:15de',
'codename': 'Jazz-AMD',
'form_factor': 'Laptop',
'kernel': '4.15.0-1043-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkPad T495s',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8168',
'processor': 'AMD Ryzen 7 PRO 3700U w/ Radeon Vega Mobile '
'Gfx',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - 1002:15d8',
'wireless': 'Intel - 8086:2526'},
'201907-27210': {'audio_name': 'GP106 High Definition Audio Controller',
'audio_pciid': '10de:10f1',
'codename': 'P330-Refresh-3.0-1',
'form_factor': 'Desktop',
'kernel': '4.15.0-1045-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'P330 Workstation 2nd Gen',
'network': 'Intel - 8086:15bb',
'processor': 'Intel(R) Core(TM) i9-9900 CPU @ 3.10GHz',
'video': 'nVidia - 10de:1c31',
'wireless': ''},
'201907-27211': {'audio_name': '200 Series PCH HD Audio',
'audio_pciid': '8086:a2f0',
'codename': 'P520c-Refresh-4.0',
'form_factor': 'Desktop',
'kernel': '4.15.0-1045-oem',
'location': 'beijing',
'make': 'Lenovo',
'model': 'ThinkStation P520c',
'network': 'Intel - 8086:15b7',
'processor': 'Intel(R) Xeon(R) W-2133 CPU @ 3.60GHz',
'video': 'nVidia - 10de:1cb1',
'wireless': ''},
'201907-27224': {'audio_name': 'Unknown',
'audio_pciid': '8086:02c8',
'codename': 'Loki N5 15 MLK CML',
'form_factor': 'Laptop',
'kernel': '4.15.0-1042-oem',
'location': 'ceqa',
'make': 'Dell',
'model': 'Inspiron 5594',
'network': 'Realtek Semiconductor Co., Ltd. - 10ec:8136',
'processor': 'Intel(R) Core(TM) i7-10510U CPU @ 1.80GHz',
'video': 'Advanced Micro Devices, Inc. [AMD/ATI] - '
'1002:6665, Intel - 8086:9b41',
'wireless': 'Intel - 8086:02f0'}}
| 51.114618
| 80
| 0.434741
| 14,128
| 153,855
| 4.682899
| 0.053298
| 0.055366
| 0.030336
| 0.033706
| 0.986034
| 0.984462
| 0.979671
| 0.976481
| 0.97166
| 0.967065
| 0
| 0.154225
| 0.407163
| 153,855
| 3,009
| 81
| 51.131605
| 0.571126
| 0
| 0
| 0.937168
| 0
| 0
| 0.503364
| 0.000858
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7812fd3d8be452c007df1df98bf8dea436fc77e9
| 2,139
|
py
|
Python
|
tests/etl/workflow/treatment_and_component_helper/test_treatment_and_component_helper.py
|
PDCMFinder/pdcm-etl
|
df0006e4ad5ca2ddf9c1387e28a0b7fb24f195de
|
[
"Apache-2.0"
] | 1
|
2022-01-28T16:01:59.000Z
|
2022-01-28T16:01:59.000Z
|
tests/etl/workflow/treatment_and_component_helper/test_treatment_and_component_helper.py
|
PDCMFinder/pdcm-etl
|
df0006e4ad5ca2ddf9c1387e28a0b7fb24f195de
|
[
"Apache-2.0"
] | 37
|
2022-02-09T18:19:13.000Z
|
2022-03-29T12:14:19.000Z
|
tests/etl/workflow/treatment_and_component_helper/test_treatment_and_component_helper.py
|
PDCMFinder/pdcm-etl
|
df0006e4ad5ca2ddf9c1387e28a0b7fb24f195de
|
[
"Apache-2.0"
] | null | null | null |
from etl.jobs.transformation.treatment_and_component_helper_transformer_job import \
transform_treatment_and_component_helper
from tests.etl.workflow.treatment_and_component_helper.expected_outputs import *
from tests.etl.workflow.treatment_and_component_helper.input_data import *
from tests.util import convert_to_dataframe, assert_df_are_equal_ignore_id
def test_one_treatment_one_dose(spark_session):
    """A protocol with one treatment and one dose transforms to the expected helper rows."""
    input_df = convert_to_dataframe(spark_session, treatment_protocol_one_treatment_one_dose)
    actual_df = transform_treatment_and_component_helper(input_df)
    expected_df = convert_to_dataframe(spark_session, expected_one_treatment_one_dose)
    assert_df_are_equal_ignore_id(actual_df, expected_df)
def test_several_treatment_several_dose(spark_session):
    """A protocol with several treatments, each with several doses, transforms correctly."""
    input_df = convert_to_dataframe(spark_session, treatment_protocol_several_treatment_several_doses)
    actual_df = transform_treatment_and_component_helper(input_df)
    expected_df = convert_to_dataframe(spark_session, expected_several_treatment_several_doses)
    assert_df_are_equal_ignore_id(actual_df, expected_df)
def test_one_treatment_several_dose(spark_session):
    """A protocol with one treatment but several doses transforms correctly."""
    input_df = convert_to_dataframe(spark_session, treatment_protocol_one_treatment_several_doses)
    actual_df = transform_treatment_and_component_helper(input_df)
    expected_df = convert_to_dataframe(spark_session, expected_one_treatment_several_doses)
    assert_df_are_equal_ignore_id(actual_df, expected_df)
def test_several_treatment_one_dose(spark_session):
    """A protocol with several treatments sharing a single dose transforms correctly."""
    input_df = convert_to_dataframe(spark_session, treatment_protocol_several_treatments_one_dose)
    actual_df = transform_treatment_and_component_helper(input_df)
    expected_df = convert_to_dataframe(spark_session, expected_several_treatments_one_dose)
    assert_df_are_equal_ignore_id(actual_df, expected_df)
| 56.289474
| 115
| 0.886863
| 291
| 2,139
| 5.845361
| 0.130584
| 0.112875
| 0.197531
| 0.253968
| 0.895356
| 0.874192
| 0.860082
| 0.860082
| 0.804821
| 0.804821
| 0
| 0
| 0.073866
| 2,139
| 37
| 116
| 57.810811
| 0.858657
| 0
| 0
| 0.32
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.16
| false
| 0
| 0.16
| 0
| 0.32
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
7836cada81f90ded99c58d5942eea4c3477f58fc
| 208
|
py
|
Python
|
ldm/modules/image_degradation/__init__.py
|
samedii/latent-diffusion
|
f13bf9bf463d95b5a16aeadd2b02abde31f769f8
|
[
"MIT"
] | 563
|
2021-12-21T02:26:38.000Z
|
2022-03-31T05:54:51.000Z
|
ldm/modules/image_degradation/__init__.py
|
samedii/latent-diffusion
|
f13bf9bf463d95b5a16aeadd2b02abde31f769f8
|
[
"MIT"
] | 23
|
2021-12-22T10:00:00.000Z
|
2022-03-24T20:43:49.000Z
|
ldm/modules/image_degradation/__init__.py
|
samedii/latent-diffusion
|
f13bf9bf463d95b5a16aeadd2b02abde31f769f8
|
[
"MIT"
] | 51
|
2021-12-21T02:27:04.000Z
|
2022-03-23T12:30:31.000Z
|
from ldm.modules.image_degradation.bsrgan import degradation_bsrgan_variant as degradation_fn_bsr
from ldm.modules.image_degradation.bsrgan_light import degradation_bsrgan_variant as degradation_fn_bsr_light
| 69.333333
| 109
| 0.913462
| 30
| 208
| 5.933333
| 0.4
| 0.382022
| 0.157303
| 0.213483
| 0.94382
| 0.94382
| 0.539326
| 0.539326
| 0
| 0
| 0
| 0
| 0.057692
| 208
| 2
| 110
| 104
| 0.908163
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
7873892c1e9518e37f309d502ac9defb07e75b07
| 8,223
|
py
|
Python
|
esgetfamily/tests/test_esgetfamily.py
|
Saurabh3012/esgetfamily-python
|
86f1a6f477deae1f2d240b22b7f463f82430245b
|
[
"MIT"
] | null | null | null |
esgetfamily/tests/test_esgetfamily.py
|
Saurabh3012/esgetfamily-python
|
86f1a6f477deae1f2d240b22b7f463f82430245b
|
[
"MIT"
] | null | null | null |
esgetfamily/tests/test_esgetfamily.py
|
Saurabh3012/esgetfamily-python
|
86f1a6f477deae1f2d240b22b7f463f82430245b
|
[
"MIT"
] | null | null | null |
import unittest
class TestRequirements(unittest.TestCase):
def test01(self):
try:
from elasticsearch import Elasticsearch
except ImportError:
self.assert_(False, "Elasticsearch not installed")
class Test01SingleParent(unittest.TestCase):
    """esgetfamily.parent_child with a single parent and two children.

    Requires a reachable local Elasticsearch instance; setUp indexes the
    fixture documents and tearDown removes them again.
    """

    def setUp(self):
        from elasticsearch import Elasticsearch
        try:
            self.es_db = Elasticsearch()
        except Exception:
            # Narrowed from a bare "except:" (which also swallowed
            # SystemExit/KeyboardInterrupt); self.fail states the intent
            # that assertFalse("...") on a non-empty string only implied.
            self.fail("Elasticsearch Connection can't be made.")
        self.parent_index = "parents"
        self.parent_type = "pars"
        self.child_type = "childs"
        # Creating mapping: the child type declares a _parent relation to
        # the parent type (pre-7.x mapping-types API, hence doc_type below).
        mapping = {
            "mappings": {
                self.parent_type: {
                    "properties": {
                        "id": {"type": "integer"},
                        "name": {"type": "text"},
                        "age": {"type": "integer"}
                    }
                },
                self.child_type: {
                    "_parent": {
                        "type": self.parent_type
                    },
                    "properties": {
                        "id": {"type": "integer"},
                        "name": {"type": "text"}
                    }
                }
            }
        }
        # ignore=400 tolerates "index already exists" from a previous run.
        self.es_db.indices.create(index=self.parent_index, ignore=400,
                                  body=mapping)
        # Indexing Parent
        self.es_db.index(index=self.parent_index, doc_type=self.parent_type,
                         body={u"id": 1, u"name": 'Example', u"age": 1}, id=1)
        # Indexing Children
        self.es_db.index(index=self.parent_index, doc_type=self.child_type,
                         body={u"id": 1, u"name": 'Child1'}, id=1, parent=1)
        self.es_db.index(index=self.parent_index, doc_type=self.child_type,
                         body={u"id": 2, u"name": 'Child2'}, id=2, parent=1)

        import esgetfamily
        self.parents = self.es_db.get(index=self.parent_index,
                                      doc_type=self.parent_type, id=1)
        self.result = esgetfamily.parent_child(self.es_db, self.parents,
                                               self.child_type)

    def test01_keys(self):
        """Each result entry carries both a 'parent' and a 'child' value."""
        for r in self.result:
            # The original computed a success flag and then asserted the
            # literal strings "Test Passed"/"Test Failed" (always truthy);
            # assert the real condition instead.
            self.assertTrue(self.result[r]['child'] and self.result[r]['parent'])

    def test02_children(self):
        """Both indexed children are attached to the single parent."""
        for r in self.result:
            self.assertEqual(len(self.result[r]['child']), 2)

    def test03_query(self):
        """A child-side query filters which children are returned."""
        import esgetfamily
        query = {
            "match": {
                "id": 1
            }
        }
        result = esgetfamily.parent_child(self.es_db, self.parents,
                                          self.child_type, query)
        for r in self.result:
            self.assertEqual(len(result[r]['child']), 1)

    def tearDown(self):
        # Delete the fixture documents so consecutive runs stay independent.
        self.es_db.delete(index=self.parent_index,
                          doc_type=self.parent_type, id=1)
        self.es_db.delete(index=self.parent_index,
                          doc_type=self.child_type, id=1, parent=1)
        self.es_db.delete(index=self.parent_index,
                          doc_type=self.child_type, id=2, parent=1)
class Test02MultipleParent(unittest.TestCase):
    """esgetfamily.parent_child with a list of parents, two children each.

    Requires a reachable local Elasticsearch instance; setUp indexes the
    fixture documents and tearDown removes them again.
    """

    def setUp(self):
        from elasticsearch import Elasticsearch
        try:
            self.es_db = Elasticsearch()
        except Exception:
            # Narrowed from a bare "except:"; self.fail replaces the
            # assertFalse("...") idiom that only failed by accident of the
            # string being truthy.
            self.fail("Elasticsearch Connection can't be made.")
        self.parent_index = "parents"
        self.parent_type = "pars"
        self.child_type = "childs"
        # Creating mapping: the child type declares a _parent relation to
        # the parent type (pre-7.x mapping-types API, hence doc_type below).
        mapping = {
            "mappings": {
                self.parent_type: {
                    "properties": {
                        "id": {"type": "integer"},
                        "name": {"type": "text"},
                        "age": {"type": "integer"}
                    }
                },
                self.child_type: {
                    "_parent": {
                        "type": self.parent_type
                    },
                    "properties": {
                        "id": {"type": "integer"},
                        "name": {"type": "text"}
                    }
                }
            }
        }
        # ignore=400 tolerates "index already exists" from a previous run.
        self.es_db.indices.create(index=self.parent_index, ignore=400,
                                  body=mapping)
        # Indexing Parents
        self.es_db.index(index=self.parent_index, doc_type=self.parent_type,
                         body={u"id": 1, u"name": 'Example1', u"age": 1}, id=1)
        self.es_db.index(index=self.parent_index, doc_type=self.parent_type,
                         body={u"id": 2, u"name": 'Example2', u"age": 1}, id=2)
        # Indexing Children (two per parent)
        self.es_db.index(index=self.parent_index, doc_type=self.child_type,
                         body={u"id": 1, u"name": 'Child1'}, id=1, parent=1)
        self.es_db.index(index=self.parent_index, doc_type=self.child_type,
                         body={u"id": 2, u"name": 'Child2'}, id=2, parent=1)
        self.es_db.index(index=self.parent_index, doc_type=self.child_type,
                         body={u"id": 1, u"name": 'Child1'}, id=1, parent=2)
        self.es_db.index(index=self.parent_index, doc_type=self.child_type,
                         body={u"id": 2, u"name": 'Child2'}, id=2, parent=2)

        import esgetfamily
        self.parents = [self.es_db.get(index=self.parent_index,
                                       doc_type=self.parent_type, id=1),
                        self.es_db.get(index=self.parent_index,
                                       doc_type=self.parent_type, id=2)]
        self.result = esgetfamily.parent_child(self.es_db, self.parents,
                                               self.child_type)

    def test01_keys(self):
        """Each result entry carries both a 'parent' and a 'child' value."""
        for r in self.result:
            # Assert the real condition instead of the literal strings
            # "Test Passed"/"Test Failed" the original passed to
            # assertTrue/assertFalse.
            self.assertTrue(self.result[r]['child'] and self.result[r]['parent'])

    def test02_children(self):
        """All four children (two per parent) are present across the results."""
        number_of_children = 0
        for r in self.result:
            number_of_children += len(self.result[r]['child'])
        self.assertEqual(number_of_children, 4)

    def test03_query(self):
        """A child-side query filters to one matching child per parent."""
        import esgetfamily
        number_of_children = 0
        query = {
            "match": {
                "id": 1
            }
        }
        result = esgetfamily.parent_child(self.es_db, self.parents,
                                          self.child_type, query)
        for r in self.result:
            number_of_children += len(result[r]['child'])
        self.assertEqual(number_of_children, 2)

    def tearDown(self):
        # Delete the fixture documents so consecutive runs stay independent.
        self.es_db.delete(index=self.parent_index,
                          doc_type=self.parent_type, id=1)
        self.es_db.delete(index=self.parent_index,
                          doc_type=self.parent_type, id=2)
        self.es_db.delete(index=self.parent_index,
                          doc_type=self.child_type, id=1, parent=1)
        self.es_db.delete(index=self.parent_index,
                          doc_type=self.child_type, id=2, parent=1)
        self.es_db.delete(index=self.parent_index,
                          doc_type=self.child_type, id=1, parent=2)
        self.es_db.delete(index=self.parent_index,
                          doc_type=self.child_type, id=2, parent=2)
# Allow running this module directly (python test_esgetfamily.py) in
# addition to discovery by an external test runner.
if __name__ == "__main__":
    unittest.main()
| 33.70082
| 79
| 0.496048
| 888
| 8,223
| 4.423423
| 0.099099
| 0.101833
| 0.059063
| 0.117108
| 0.92057
| 0.916752
| 0.916752
| 0.914969
| 0.914969
| 0.914969
| 0
| 0.016929
| 0.389396
| 8,223
| 243
| 80
| 33.839506
| 0.765385
| 0.012404
| 0
| 0.756614
| 0
| 0
| 0.081711
| 0
| 0
| 0
| 0
| 0
| 0.079365
| 1
| 0.058201
| false
| 0.031746
| 0.047619
| 0
| 0.121693
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
788ee7fdc0cd899a43981d0e5312966277eec428
| 66,675
|
py
|
Python
|
pirates/leveleditor/worldData/piratesWorld_original.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 81
|
2018-04-08T18:14:24.000Z
|
2022-01-11T07:22:15.000Z
|
pirates/leveleditor/worldData/piratesWorld_original.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 4
|
2018-09-13T20:41:22.000Z
|
2022-01-08T06:57:00.000Z
|
pirates/leveleditor/worldData/piratesWorld_original.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 26
|
2018-05-26T12:49:27.000Z
|
2021-09-11T09:11:59.000Z
|
from pandac.PandaModules import Point3, VBase3, Vec4, Vec3
objectStruct = {'AmbientColors': {0: Vec4(0.207843, 0.243137, 0.447059, 1),2: Vec4(0.666667, 0.721569, 0.792157, 1),4: Vec4(0.721569, 0.611765, 0.619608, 1),6: Vec4(0.207843, 0.243137, 0.447059, 1),8: Vec4(0.384314, 0.419608, 0.564706, 1)},'DirectionalColors': {0: Vec4(0.956863, 0.909804, 0.894118, 1),2: Vec4(1, 1, 1, 1),4: Vec4(0.439216, 0.176471, 0, 1),6: Vec4(0.513726, 0.482353, 0.639216, 1),8: Vec4(0.447059, 0.439216, 0.537255, 1)},'FogColors': {0: Vec4(0.172549, 0.180392, 0.290196, 1),2: Vec4(0.894118, 0.894118, 1, 1),4: Vec4(0.231373, 0.203922, 0.184314, 1),6: Vec4(0.172549, 0.180392, 0.290196, 1),8: Vec4(0.129412, 0.137255, 0.203922, 1)},'FogRanges': {0: 0.000699999975040555,2: 0.00019999999494757503,4: 0.00039999998989515007,6: 0.000699999975040555,8: 0.0},'Objects': {'1141410776.53sdnaik': {'Type': 'Region','Name': 'default','Objects': {'1150922126.8dzlu': {'Type': 'Island','Name': 'Port Royal','File': 'PortRoyalIsland','Environment': 'Interior','Hpr': Point3(-180.0, 0.0, 0.0),'Minimap': False,'Objects': {'1157060429.94sdnaik': {'Type': 'LOD Sphere','Hpr': VBase3(136.467, 0.0, 0.0),'Pos': Point3(524.742, 2115.765, 580.947),'Radi': [4500.0, 5000.0, 7500.0],'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': ''}},'Pos': Point3(-186.051, 1925.536, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Undockable': False,'VisSize': '','Visibility': 'Grid','Visual': {'Model': 'models/islands/pir_m_are_isl_portRoyal'}},'1156207188.95dzlu': {'Type': 'Island','Name': 'Tortuga','File': 'tortugaIsland','Hpr': VBase3(-180.0, 0.0, 0.0),'Minimap': False,'Objects': {'1158214327.11sdnaik': {'Type': 'LOD Sphere','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-166.294, -732.313, 210.853),'Radi': [2700.0, 3200.0, 6000.0],'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': ''}},'Phase': 1,'Pos': Point3(10568.492, 16760.082, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Undockable': False,'VisSize': '','Visual': {'Model': 'models/islands/pir_m_are_isl_tortuga'}},'1156359855.24bbathen': {'Type': 'Island','Name': 'Isla 
Cangrejos','File': 'CangrejosIsland','Environment': 'Interior','Hpr': Point3(0.0, 0.0, 0.0),'Minimap': False,'Objects': {'1158296490.13sdnaik': {'Type': 'LOD Sphere','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-40.882, 79.287, 47.831),'Radi': [700.0, 1100.0, 4500.0],'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': ''}},'Phase': 1,'Pos': Point3(20199.543, 24930.109, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Undockable': False,'VisSize': '','Visibility': 'Grid','Visual': {'Model': 'models/islands/pir_m_are_isl_cangrejos'}},'1159933206.48sdnaik': {'Type': 'Island','Name': 'Kingshead','File': 'KingsheadIsland','Environment': 'Interior','Hpr': Point3(0.0, 0.0, 0.0),'Minimap': False,'Objects': {'1162600600.5sdnaik': {'Type': 'LOD Sphere','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-96.153, -25.37, 136.443),'Radi': [1550.0, 2100.0, 6000.0],'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': ''}},'Phase': 1,'Pos': Point3(21048.535, -12029.463, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Undockable': False,'VisSize': '','Visibility': 'Grid','Visual': {'Model': 'models/islands/pir_m_are_isl_kingshead'}},'1160614528.73sdnaik': {'Type': 'Island','Name': 'Cuba','File': 'CubaIsland','Environment': 'Interior','Hpr': Point3(0.0, 0.0, 0.0),'Minimap': False,'Objects': {'1163119750.53sdnaik': {'Type': 'LOD Sphere','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(154.735, 0.0, 121.512),'Radi': [2200.0, 2700.0, 7000.0],'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': ''}},'Phase': 1,'Pos': Point3(-16098.668, 1857.619, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Undockable': False,'VisSize': '','Visibility': 'Grid','Visual': {'Model': 'models/islands/pir_m_are_isl_cuba'}},'1161282725.84kmuller': {'Type': 'Island','Name': "Rumrunner's Isle",'File': 'RumrunnerIsland','Hpr': VBase3(0.0, 0.0, 0.0),'Minimap': False,'Objects': {'1161664293.39sdnaik': {'Type': 'LOD Sphere','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-3.644, 24.436, -0.33),'Radi': [800.0, 1200.0, 4500.0],'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': ''}},'Phase': 1,'Pos': 
Point3(656.261, 21799.172, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Undockable': False,'VisSize': '','Visual': {'Model': 'models/islands/pir_m_are_isl_rumRunner'}},'1164135492.81dzlu': {'Type': 'Island','Name': "Devil's Anvil",'File': 'AnvilIsland','Environment': 'Interior','Hpr': VBase3(0.0, 0.0, 0.0),'Minimap': False,'Objects': {'1164760719.72sdnaik': {'Type': 'LOD Sphere','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-51.333, -49.722, 287.083),'Radi': [800.0, 1300.0, 4500.0],'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': ''}},'Phase': 1,'Pos': Point3(9109.271, 7179.465, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Undockable': False,'VisSize': '','Visibility': 'Grid','Visual': {'Model': 'models/islands/pir_m_are_isl_devilsAnvil'}},'1164150392.42dzlu': {'Type': 'Island','Name': 'Isla Tormenta','File': 'TormentaIsland','Hpr': Point3(0.0, 0.0, 0.0),'Objects': {'1173381974.5sdnaik': {'Type': 'LOD Sphere','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(616.474, 321.018, -18.171),'Radi': [1800.0, 2300.0, 4500.0],'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': ''}},'Phase': 1,'Pos': Point3(-16722.863, -10071.313, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Undockable': False,'Visual': {'Model': 'models/islands/pir_m_are_isl_tormenta'}},'1164157132.99dzlu': {'Type': 'Island','Name': 'Isla Perdida','File': 'PerdidaIsland','Environment': 'Interior','Hpr': Point3(0.0, 0.0, 0.0),'Minimap': False,'Objects': {'1164763977.22sdnaik': {'Type': 'LOD Sphere','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-29.059, 204.24, 439.628),'Radi': [1300.0, 1800.0, 6000.0],'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': ''}},'Phase': 1,'Pos': Point3(-20199.961, 18180.18, 0.32),'Scale': VBase3(1.0, 1.0, 1.0),'Undockable': False,'VisSize': '','Visibility': 'Grid','Visual': {'Model': 'models/islands/pir_m_are_isl_perdida'}},'1164763706.66sdnaik': {'Type': 'Island','Name': 'Driftwood Island','File': 'DriftwoodIsland','Environment': 'Interior','Hpr': VBase3(0.0, 0.0, 0.0),'Minimap': False,'Objects': {'1164763735.42sdnaik': {'Type': 'LOD 
Sphere','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-40.882, 79.287, 47.831),'Radi': [650.0, 1100.0, 4500.0],'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': ''}},'Phase': 1,'Pos': Point3(-9286.006, 11719.344, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Undockable': False,'VisSize': '','Visibility': 'Grid','Visual': {'Model': 'models/islands/pir_m_are_isl_driftwood'}},'1173381952.2sdnaik': {'Type': 'Island','Name': 'Outcast Isle','File': 'OutcastIsland','Hpr': Point3(0.0, 0.0, 0.0),'Minimap': False,'Objects': {'1164760526.77sdnaik': {'Type': 'LOD Sphere','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(19.047, 304.658, 475.384),'Radi': [700.0, 1100.0, 4500.0],'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': ''}},'Phase': 1,'Pos': Point3(-10389.661, -19767.42, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Undockable': False,'VisSize': '','Visual': {'Model': 'models/islands/pir_m_are_isl_outcast'}},'1173382404.64sdnaik': {'Type': 'Island','Name': 'Cutthroat Isle','File': 'CutthroatIsland','Environment': 'Interior','Hpr': Point3(0.0, 0.0, 0.0),'Minimap': False,'Objects': {'1173382432.38sdnaik': {'Type': 'LOD Sphere','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(82.898, 17.675, 160.281),'Radi': [1200.0, 1600.0, 4500.0],'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': ''}},'Phase': 1,'Pos': Point3(13299.626, -10296.977, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Undockable': False,'VisSize': '','Visibility': 'Grid','Visual': {'Model': 'models/islands/pir_m_are_isl_cutthroat'}},'1185235968.0dxschafe0': {'Type': 'Ship Spawn Node','Flagship': True,'Hpr': Point3(0.0, 0.0, 0.0),'Level': '3','Pos': Point3(7536.718, 20771.877, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'NAVY_KINGFISHER','Team': 'default','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1185235968.0dxschafe1': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-20005.879, 15829.349, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1185236224.0dxschafe': 
{'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-24653.135, -1986.28, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1185236224.0dxschafe3': {'Type': 'Ship Spawn Node','Flagship': True,'Hpr': Point3(0.0, 0.0, 0.0),'Level': '3','Pos': Point3(-20880.082, 16510.27, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'EITC_BARRACUDA','Team': 'default','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1185236480.0dxschafe': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(18753.035, -13291.3, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1185236480.0dxschafe1': {'Type': 'Ship Spawn Node','Flagship': True,'Hpr': Point3(0.0, 0.0, 0.0),'Level': '3','Pos': Point3(22723.27, -11160.567, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'NAVY_MAN_O_WAR','Team': 'default','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1185236736.0dxschafe': {'Type': 'Ship Spawn Node','Flagship': True,'Hpr': Point3(0.0, 0.0, 0.0),'Level': '3','Pos': Point3(-23213.016, -3726.756, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'EITC_SENTINEL','Team': 'default','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1185236864.0dxschafe': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(15434.296, 23497.52, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1185237120.0dxschafe': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-25238.758, -5313.116, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1185237120.0dxschafe1': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-10399.86, -20861.783, 39.996),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.65, 0, 0, 1),'Model': 
'models/misc/smiley'}},'1185237120.0dxschafe2': {'Type': 'Ship Spawn Node','Flagship': True,'Hpr': Point3(0.0, 0.0, 0.0),'Level': '3','Pos': Point3(-9856.596, -17695.881, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'NAVY_COLOSSUS','Team': 'default','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1185237248.0dxschafe': {'Type': 'Ship Spawn Node','Flagship': True,'Hpr': Point3(0.0, 0.0, 0.0),'Level': '3','Pos': Point3(22025.379, -9170.477, 0.004),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'EITC_CORVETTE','Team': 'default','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1196970035.53sdnaik': {'Type': 'Island','Name': 'Isla de la Avaricia','File': 'pvpShipIsland2','Environment': 'Interior','Hpr': VBase3(0.0, 0.0, 0.0),'Minimap': False,'Objects': {'1196970432.69sdnaik': {'Type': 'LOD Sphere','Hpr': VBase3(-24.178, 0.0, 0.0),'Pos': Point3(-1.631, -36.854, 0.254),'Radi': [600.0, 1100.0, 4500.0],'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': ''}},'PVPTeam': 2,'Pos': Point3(20505.961, -106.404, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Undockable': False,'VisSize': '','Visibility': 'Grid','Visual': {'Model': 'models/islands/pir_m_are_isl_pvpSpanish'}},'1196970080.56sdnaik': {'Type': 'Island','Name': "Ile d'Etable de Porc",'File': 'pvpShipIsland1','Environment': 'Interior','Hpr': VBase3(0.0, 0.0, 0.0),'Minimap': False,'Objects': {'1196970440.66sdnaik': {'Type': 'LOD Sphere','Hpr': VBase3(155.709, 0.0, 0.0),'Pos': Point3(70.641, -22.088, 0.0),'Radi': [600.0, 1100.0, 4500.0],'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': ''}},'PVPTeam': 1,'Pos': Point3(25429.918, 11463.408, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Undockable': False,'VisSize': '','Visibility': 'Grid','Visual': {'Model': 'models/islands/pir_m_are_isl_pvpFrench'}},'1201636857.8kmuller': {'Type': 'Island','File': 'pvp_rock_big_1','Environment': 'Interior','Hpr': VBase3(-44.631, 0.0, 0.0),'Minimap': False,'Objects': {'1210981042.09kmuller': {'Type': 'LOD Sphere','Hpr': Point3(0.0, 
0.0, 0.0),'Pos': Point3(68.294, 79.498, 0.0),'Radi': [300.0, 1000.0, 3000.0],'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': ''}},'Pos': Point3(18809.854, 5488.201, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Undockable': True,'VisSize': '','Visibility': 'Grid','Visual': {'Model': 'models/islands/pir_m_are_isl_rockBig'}},'1201641372.09kmuller': {'Type': 'Island','File': 'pvp_rock_med_1','Environment': 'Interior','Hpr': VBase3(-22.62, 0.0, 0.0),'Minimap': False,'Objects': {'1210981113.03kmuller': {'Type': 'LOD Sphere','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(47.431, 46.449, 0.0),'Radi': [300.0, 1000.0, 3000.0],'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': ''}},'Pos': Point3(20269.568, 5435.323, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Undockable': True,'VisSize': '','Visibility': 'Grid','Visual': {'Model': 'models/islands/pir_m_are_isl_rockMed'}},'1201641393.2kmuller': {'Type': 'Island','File': 'pvp_rock_big_3','Environment': 'Interior','Hpr': VBase3(333.579, 0.0, 0.0),'Minimap': False,'Objects': {'1210981144.78kmuller': {'Type': 'LOD Sphere','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(50.724, -12.372, 0.0),'Radi': [300.0, 1000.0, 3000.0],'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': ''}},'Pos': Point3(25643.311, 5591.078, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Undockable': True,'VisSize': '','Visibility': 'Grid','Visual': {'Model': 'models/islands/pir_m_are_isl_rockBig'}},'1201641405.3kmuller': {'Type': 'Island','File': 'pvp_rock_med_2','Environment': 'Interior','Hpr': VBase3(163.401, 0.0, 0.0),'Minimap': False,'Objects': {'1210981205.23kmuller': {'Type': 'LOD Sphere','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(20.789, 44.607, 0.0),'Radi': [500.0, 1000.0, 3000.0],'Scale': VBase3(0.511, 0.511, 0.511),'VisSize': ''}},'Pos': Point3(28189.699, 7544.893, 0.0),'Scale': VBase3(1.956, 1.956, 1.956),'Undockable': True,'VisSize': '','Visibility': 'Grid','Visual': {'Model': 'models/islands/pir_m_are_isl_rockMed'}},'1201641438.69kmuller': {'Type': 'Island','File': 'pvp_rock_big_2','Environment': 
'Interior','Hpr': VBase3(140.533, 0.0, 0.0),'Minimap': False,'Objects': {'1210981266.57kmuller': {'Type': 'LOD Sphere','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(50.724, -12.372, 0.0),'Radi': [400.0, 1000.0, 3000.0],'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': ''}},'Pos': Point3(27261.391, 1133.732, 0.001),'Scale': VBase3(1.0, 1.0, 1.0),'Undockable': True,'VisSize': '','Visibility': 'Grid','Visual': {'Model': 'models/islands/pir_m_are_isl_rockBig'}},'1201641487.41kmuller': {'Type': 'Island','File': 'pvp_rock_sml_1','Environment': 'Interior','Hpr': VBase3(0.0, 0.0, 0.0),'Minimap': False,'Objects': {'1210981190.74kmuller': {'Type': 'LOD Sphere','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(11.657, 10.594, 0.0),'Radi': [100.0, 400.0, 1200.0],'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': ''}},'Pos': Point3(24676.654, 6182.396, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Undockable': True,'VisSize': '','Visibility': 'Grid','Visual': {'Model': 'models/islands/pir_m_are_isl_rockSmall'}},'1210197632.0WDIG': {'Type': 'Ship Spawn Node','Flagship': True,'Hpr': Point3(0.0, 0.0, 0.0),'Level': '3','Pos': Point3(14231.835, 30915.141, 0.004),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'SKEL_SHADOW_CROW_FR','Team': 'default','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1210197760.0WDIG': {'Type': 'Ship Spawn Node','Flagship': True,'Hpr': Point3(0.0, 0.0, 0.0),'Level': '3','Pos': Point3(26959.834, 31042.932, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'SKEL_HELLHOUND_FR','Team': 'default','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1210197760.0WDIG0': {'Type': 'Ship Spawn Node','Flagship': True,'Hpr': Point3(0.0, 0.0, 0.0),'Level': '3','Pos': Point3(27378.008, 17669.408, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'SKEL_BLOOD_SCOURGE_FR','Team': 'default','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1210197760.0WDIG1': {'Type': 'Ship Spawn Node','Flagship': True,'Hpr': Point3(0.0, 0.0, 0.0),'Level': '3','Pos': 
Point3(8032.644, -5340.035, 39.992),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'SKEL_SHADOW_CROW_SP','Team': 'default','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1210197760.0WDIG2': {'Type': 'Ship Spawn Node','Flagship': True,'Hpr': Point3(0.0, 0.0, 0.0),'Level': '3','Pos': Point3(18731.025, -5261.008, -0.004),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'SKEL_HELLHOUND_SP','Team': 'default','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1210197760.0WDIG3': {'Type': 'Ship Spawn Node','Flagship': True,'Hpr': Point3(0.0, 0.0, 0.0),'Level': '3','Pos': Point3(13163.561, -15632.86, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'SKEL_BLOOD_SCOURGE_SP','Team': 'default','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1210197760.0WDIG4': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(24100.469, 29200.008, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1210197760.0WDIG5': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(24590.801, 20762.211, 39.988),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1210197888.0WDIG': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(18258.734, 20859.678, 39.992),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1210197888.0WDIG0': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(15509.665, -7156.37, 40.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1210197888.0WDIG1': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(15791.191, -12857.672, -0.012),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1210197888.0WDIG2': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': 
Point3(8795.961, -8645.371, 39.996),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1233100928.0akelts': {'Type': 'Island','Name': 'Padres Del Fuego','File': 'DelFuegoIsland','Hpr': VBase3(-59.144, 0.0, 0.0),'Objects': {'1142029069.97sdnaik': {'Type': 'LOD Sphere','Hpr': VBase3(96.557, 0.0, 0.0),'Pos': Point3(-444.148, -440.057, 454.457),'Radi': [2000.0, 2600.0, 6000.0],'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': ''}},'Phase': 1,'Pos': Point3(8757.1, -24577.305, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/islands/pir_m_are_isl_delFuego'}},'1264194816.0kanpatel': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(17948.6, -11892.882, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0.0, 0.0, 1.0),'Model': 'models/misc/smiley'}},'1264194816.0kanpatel0': {'Type': 'Ship Movement Node','Name': 'TF_KINGSHEAD_10','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(14286.487, -12852.007, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264194944.0kanpatel': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(8615.047, -12498.265, 40.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264194944.0kanpatel0': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-2018.253, -9970.679, 40.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264194944.0kanpatel1': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-5417.421, -7878.882, 40.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264194944.0kanpatel2': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-7247.745, -3869.609, 40.008),'Scale': 
VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264195072.0kanpatel': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-7509.217, 2057.153, 40.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264195072.0kanpatel0': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-6116.716, 7115.231, 0.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264195072.0kanpatel1': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-4111.274, 12085.251, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264195072.0kanpatel2': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-4721.626, 18014.387, 0.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264195072.0kanpatel3': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-7596.376, 22103.539, 40.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264195072.0kanpatel4': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-11954.285, 26199.977, 40.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264195200.0kanpatel0': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-20154.824, 29872.664, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264195968.0kanpatel': {'Type': 'Ship Movement Node','Name': 'TF_PADRES','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(3910.5, -24099.93, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 
0, 1),'Model': 'models/misc/smiley'}},'1264196096.0kanpatel': {'Type': 'Ship Movement Node','Name': 'TF_PADRES','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-100.775, -21911.348, 40.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264196096.0kanpatel0': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-4110.049, -17989.227, 40.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264196096.0kanpatel1': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-8032.167, -13979.952, 40.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264196096.0kanpatel2': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-10734.073, -9883.515, 40.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264196224.0kanpatel': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-12500.0, -4600.0, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264196224.0kanpatel0': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-11954.285, 836.938, 40.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264196224.0kanpatel1': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-12743.399, 6068.915, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264196224.0kanpatel2': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-15353.454, 9988.548, 40.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 
'models/misc/smiley'}},'1264196224.0kanpatel3': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-20000.0, 13500.0, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264196224.0kanpatel4': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-25376.643, 17048.363, 40.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264196352.0kanpatel': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-29048.529, 22025.273, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264196352.0kanpatel0': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-30000.0, 26000.0, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264196352.0kanpatel3': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-29658.879, 29959.852, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264196864.0kanpatel': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(29894.07, -4001.898, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264196864.0kanpatel0': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(24740.074, 14.989, 0.004),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264196864.0kanpatel1': {'Type': 'Ship Movement Node','Name': 'TF_C','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(19952.059, 2101.175, 40.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264196992.0kanpatel0': {'Type': 'Ship Movement 
Node','Name': 'TF_C','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(12159.232, 3342.153, 0.004),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264196992.0kanpatel1': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(5976.99, 4854.626, 40.004),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264196992.0kanpatel2': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(1189.987, 8644.823, 0.004),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264196992.0kanpatel3': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-776.762, 14829.401, 40.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264196992.0kanpatel4': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-1617.306, 20861.758, 0.004),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264196992.0kanpatel5': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-4216.654, 26216.41, 0.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264196992.0kanpatel6': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-7958.929, 29929.738, 0.004),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264197376.0kanpatel': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-27915.018, -21963.695, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264197376.0kanpatel0': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-19893.244, -19958.254, 
0.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264197376.0kanpatel1': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-12569.015, -15947.366, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264197504.0kanpatel': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-8990.906, -10014.253, 40.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264197504.0kanpatel0': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-8000.0, -1500.0, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264197504.0kanpatel1': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-9252.385, 6022.852, 40.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264197504.0kanpatel2': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-6291.105, 12216.032, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264197632.0kanpatel': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-1000.0, 17500.0, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264197632.0kanpatel0': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(5913.142, 22147.117, 40.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264197632.0kanpatel1': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(10000.0, 30000.0, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 
'models/misc/smiley'}},'1264198016.0kanpatel': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-10036.806, -28143.156, 40.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264198016.0kanpatel0': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-3152.149, -24056.336, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264198016.0kanpatel1': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(2079.441, -17952.809, 0.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264198016.0kanpatel2': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(6000.299, -11931.737, 40.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264198016.0kanpatel3': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(10183.891, -3826.019, 40.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264198016.0kanpatel4': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(13931.692, 4105.375, 40.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264198144.0kanpatel0': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(15943.161, 12390.423, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264198144.0kanpatel1': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(20642.869, 19881.004, 40.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264198144.0kanpatel2': {'Type': 'Ship Movement 
Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(25883.184, 26602.914, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264198144.0kanpatel3': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(30068.457, 29916.262, -0.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264198272.0kanpatel': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-27991.391, -11931.74, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264198400.0kanpatel': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-21018.736, -8096.781, 40.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264198400.0kanpatel0': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-14743.349, -4000.343, 40.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264198400.0kanpatel1': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-10476.377, 1014.43, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264198400.0kanpatel2': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-10040.411, 6071.64, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264198400.0kanpatel3': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-12958.66, 10216.03, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264198400.0kanpatel4': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-12000.0, 16050.68, 0.0),'Scale': 
VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264198400.0kanpatel5': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-7000.91, 22147.119, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264198400.0kanpatel6': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-1000.54, 27000.15, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1264198400.0kanpatel7': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(5218.399, 30003.445, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1271348547.01akelts': {'Type': 'Island','Name': "Raven's Cove",'File': 'RavensCoveIsland','Environment': 'OpenSky','Hpr': VBase3(-45.724, 0.0, 0.0),'Minimap': False,'Objects': {'1264624863.65caoconno': {'Type': 'LOD Sphere','Hpr': VBase3(-89.652, 0.0, 0.0),'Pos': Point3(0.0, 0.0, 0.0),'Radi': [1300.0, 2000.0, 7000.0],'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': ''}},'Pos': Point3(-31670.057, 12398.319, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Undockable': False,'VisSize': '','Visibility': 'Section','Visual': {'Model': 'models/islands/pir_m_are_isl_ravensCove'}},'1301961642.84jloehrle': {'Type': 'Ship Movement Node','Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(499.578, -18144.457, 39.994),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1301961658.36jloehrle': {'Type': 'Ship Movement Node','Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-5394.646, -13968.187, 39.996),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1301961787.78jloehrle': {'Type': 'Ship Movement Node','Hpr': VBase3(-45.724, 0.0, 0.0),'Pos': Point3(-13357.119, -14372.964, 0.006),'Scale': 
VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1301961875.5jloehrle': {'Type': 'Ship Movement Node','Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-19551.619, -15266.349, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1301961893.49jloehrle': {'Type': 'Ship Movement Node','Hpr': VBase3(-45.724, 0.0, 0.0),'Pos': Point3(-23662.166, -5910.755, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1301961921.19jloehrle': {'Type': 'Ship Movement Node','Hpr': VBase3(-45.724, 0.0, 0.0),'Pos': Point3(-10665.203, -4943.347, 40.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1301961947.74jloehrle': {'Type': 'Ship Movement Node','Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(538.598, -12235.405, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1302027202.34jloehrle': {'Type': 'Ship Movement Node','Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(14559.385, 11029.28, -0.004),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1302027210.82jloehrle': {'Type': 'Ship Movement Node','Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(5122.244, 12460.488, 39.996),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1302027247.31jloehrle': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(4148.283, 18201.814, -0.004),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1302027276.39jloehrle': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(7703.354, 24400.418, 39.992),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 
1),'Model': 'models/misc/smiley'}},'1302027297.28jloehrle': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(17793.635, 29823.191, -0.004),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1302027328.71jloehrle': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(25174.354, 28335.57, -0.004),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1302027345.21jloehrle': {'Type': 'Ship Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(24028.979, 18421.74, -0.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1302027700.44jloehrle': {'Type': 'Ship Movement Node','Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-24435.404, 3179.066, -0.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1302027714.3jloehrle': {'Type': 'Ship Movement Node','Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-17657.785, 11250.238, -0.004),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1302027734.68jloehrle': {'Type': 'Ship Movement Node','Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-12281.977, 19226.277, -0.012),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1302027809.08jloehrle': {'Type': 'Ship Movement Node','Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-15691.225, 26470.641, -0.008),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1302027835.68jloehrle': {'Type': 'Ship Movement Node','Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-25023.926, 27671.133, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1302027852.97jloehrle': {'Type': 
'Ship Movement Node','Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-28043.447, 19938.322, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1302027866.65jloehrle': {'Type': 'Ship Movement Node','Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-24063.785, 12952.978, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1302027883.94jloehrle': {'Type': 'Ship Movement Node','Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-25928.664, 7070.766, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}}}}},'Ocean Areas': [[Point3(11000, -1000, 0), Point3(-3000, 15000, 0), 'Windward_Passage', '1180566400.0dxschafe0'], [Point3(-29000, 21000, 0), Point3(-15000, 9000, 0), 'Brigand_Bay', '1180567552.0dxschafe'], [Point3(11000, 27000, 0), Point3(-3000, 21000, 0), 'Blackheart_Strait', '1180568320.0dxschafe'], [Point3(13000, 27000, 0), Point3(25000, 15000, 0), 'Salty_Flats', '1180569600.0dxschafe'], [Point3(25000, 13000, 0), Point3(13000, -1000, 0), 'Mar_de_Plata', '1180569728.0dxschafe0'], [Point3(17000, -3000, 0), Point3(3000, -15000, 0), 'Smugglers_Run', '1180570240.0dxschafe'], [Point3(19000, -9000, 0), Point3(21000, -15000, 0), 'Smugglers_Run', '1180570240.0dxschafe0'], [Point3(25000, -13000, 0), Point3(23000, -15000, 0), 'The_Hinterseas', '1180570368.0dxschafe'], [Point3(15000, -17000, 0), Point3(25000, -29000, 0), 'The_Hinterseas', '1180570368.0dxschafe0'], [Point3(25000, -11000, 0), Point3(23000, -9000, 0), 'Mar_de_Plata', '1180570496.0dxschafe'], [Point3(19000, -7000, 0), Point3(25000, -3000, 0), 'Mar_de_Plata', '1180570496.0dxschafe0'], [Point3(13000, -23000, 0), Point3(3000, -17000, 0), 'Boiling_Bay', '1180571008.0dxschafe'], [Point3(13000, -25000, 0), Point3(-15000, -29000, 0), 'Mariners_Reef', '1180571008.0dxschafe0'], [Point3(-3000, 19000, 0), Point3(3000, 17000, 0), 'Blackheart_Strait', 
'1180571264.0dxschafe'], [Point3(5000, 19000, 0), Point3(11000, 17000, 0), 'Windward_Passage', '1180571776.0dxschafe'], [Point3(-13000, 9000, 0), Point3(-5000, 27000, 0), 'Scurvy_Shallows', '1191538304.0dxschafe'], [Point3(-29000, -7000, 0), Point3(-13000, -15000, 0), 'Dead_Mans_Trough', '1191539072.0dxschafe0'], [Point3(-11000, -23000, 0), Point3(1000, -7000, 0), 'Leeward_Passage', '1191539328.0dxschafe'], [Point3(-15000, -17000, 0), Point3(-13000, -23000, 0), 'Leeward_Passage', '1191539328.0dxschafe0'], [Point3(-29000, 7000, 0), Point3(-5000, -5000, 0), 'Bloody_Bayou', '1191539328.0dxschafe1'], [Point3(-3000, -3000, 0), Point3(1000, -5000, 0), 'Bloody_Bayou', '1191539328.0dxschafe2']],'Node Links': [['1185235968.0dxschafe1', '1185235968.0dxschafe0', 'Bi-directional'], ['1185236224.0dxschafe', '1185236224.0dxschafe3', 'Bi-directional'], ['1185236480.0dxschafe', '1185236736.0dxschafe', 'Bi-directional'], ['1185236864.0dxschafe', '1185236480.0dxschafe1', 'Bi-directional'], ['1185237120.0dxschafe2', '1185237120.0dxschafe', 'Bi-directional'], ['1185237248.0dxschafe', '1185237120.0dxschafe1', 'Bi-directional'], ['1210197760.0WDIG4', '1210197632.0WDIG', 'Bi-directional'], ['1210197760.0WDIG5', '1210197760.0WDIG', 'Bi-directional'], ['1210197888.0WDIG', '1210197760.0WDIG0', 'Bi-directional'], ['1210197888.0WDIG0', '1210197760.0WDIG1', 'Bi-directional'], ['1210197760.0WDIG2', '1210197888.0WDIG1', 'Bi-directional'], ['1210197888.0WDIG2', '1210197760.0WDIG3', 'Bi-directional'], ['1264194816.0kanpatel', '1264194816.0kanpatel0', 'Direction 2'], ['1264194816.0kanpatel0', '1264194944.0kanpatel', 'Direction 2'], ['1264194944.0kanpatel', '1264194944.0kanpatel0', 'Direction 2'], ['1264194944.0kanpatel0', '1264194944.0kanpatel1', 'Direction 2'], ['1264194944.0kanpatel1', '1264194944.0kanpatel2', 'Direction 2'], ['1264194944.0kanpatel2', '1264195072.0kanpatel', 'Direction 2'], ['1264195072.0kanpatel', '1264195072.0kanpatel0', 'Direction 2'], ['1264195072.0kanpatel0', 
'1264195072.0kanpatel1', 'Direction 2'], ['1264195072.0kanpatel1', '1264195072.0kanpatel2', 'Direction 2'], ['1264195072.0kanpatel2', '1264195072.0kanpatel3', 'Direction 2'], ['1264195072.0kanpatel3', '1264195072.0kanpatel4', 'Direction 2'], ['1264195072.0kanpatel4', '1264195200.0kanpatel0', 'Direction 2'], ['1264195968.0kanpatel', '1264196096.0kanpatel', 'Direction 2'], ['1264196096.0kanpatel', '1264196096.0kanpatel0', 'Direction 2'], ['1264196096.0kanpatel0', '1264196096.0kanpatel1', 'Direction 2'], ['1264196096.0kanpatel1', '1264196096.0kanpatel2', 'Direction 2'], ['1264196096.0kanpatel2', '1264196224.0kanpatel', 'Direction 2'], ['1264196224.0kanpatel', '1264196224.0kanpatel0', 'Direction 2'], ['1264196224.0kanpatel0', '1264196224.0kanpatel1', 'Direction 2'], ['1264196224.0kanpatel1', '1264196224.0kanpatel2', 'Direction 2'], ['1264196224.0kanpatel2', '1264196224.0kanpatel3', 'Direction 2'], ['1264196224.0kanpatel3', '1264196224.0kanpatel4', 'Direction 2'], ['1264196224.0kanpatel4', '1264196352.0kanpatel', 'Direction 2'], ['1264196352.0kanpatel', '1264196352.0kanpatel0', 'Direction 2'], ['1264196352.0kanpatel0', '1264196352.0kanpatel3', 'Direction 2'], ['1264196864.0kanpatel', '1264196864.0kanpatel0', 'Direction 2'], ['1264196864.0kanpatel0', '1264196864.0kanpatel1', 'Direction 2'], ['1264196864.0kanpatel1', '1264196992.0kanpatel0', 'Direction 2'], ['1264196992.0kanpatel0', '1264196992.0kanpatel1', 'Direction 2'], ['1264196992.0kanpatel1', '1264196992.0kanpatel2', 'Direction 2'], ['1264196992.0kanpatel2', '1264196992.0kanpatel3', 'Direction 2'], ['1264196992.0kanpatel3', '1264196992.0kanpatel4', 'Direction 2'], ['1264196992.0kanpatel4', '1264196992.0kanpatel5', 'Direction 2'], ['1264196992.0kanpatel5', '1264196992.0kanpatel6', 'Direction 2'], ['1264197632.0kanpatel0', '1264197632.0kanpatel1', 'Direction 2'], ['1264197632.0kanpatel', '1264197632.0kanpatel0', 'Direction 2'], ['1264197504.0kanpatel2', '1264197632.0kanpatel', 'Direction 2'], ['1264197504.0kanpatel1', 
'1264197504.0kanpatel2', 'Direction 2'], ['1264197504.0kanpatel0', '1264197504.0kanpatel1', 'Direction 2'], ['1264197504.0kanpatel', '1264197504.0kanpatel0', 'Direction 2'], ['1264197376.0kanpatel1', '1264197504.0kanpatel', 'Direction 2'], ['1264197376.0kanpatel0', '1264197376.0kanpatel1', 'Direction 2'], ['1264197376.0kanpatel', '1264197376.0kanpatel0', 'Direction 2'], ['1264198016.0kanpatel', '1264198016.0kanpatel0', 'Direction 2'], ['1264198016.0kanpatel0', '1264198016.0kanpatel1', 'Direction 2'], ['1264198016.0kanpatel1', '1264198016.0kanpatel2', 'Direction 2'], ['1264198016.0kanpatel2', '1264198016.0kanpatel3', 'Direction 2'], ['1264198016.0kanpatel3', '1264198016.0kanpatel4', 'Direction 2'], ['1264198016.0kanpatel4', '1264198144.0kanpatel0', 'Direction 2'], ['1264198144.0kanpatel0', '1264198144.0kanpatel1', 'Direction 2'], ['1264198144.0kanpatel1', '1264198144.0kanpatel2', 'Direction 2'], ['1264198144.0kanpatel2', '1264198144.0kanpatel3', 'Direction 2'], ['1264198272.0kanpatel', '1264198400.0kanpatel', 'Direction 2'], ['1264198400.0kanpatel', '1264198400.0kanpatel0', 'Direction 2'], ['1264198400.0kanpatel0', '1264198400.0kanpatel1', 'Direction 2'], ['1264198400.0kanpatel1', '1264198400.0kanpatel2', 'Direction 2'], ['1264198400.0kanpatel2', '1264198400.0kanpatel3', 'Direction 2'], ['1264198400.0kanpatel3', '1264198400.0kanpatel4', 'Direction 2'], ['1264198400.0kanpatel4', '1264198400.0kanpatel5', 'Direction 2'], ['1264198400.0kanpatel5', '1264198400.0kanpatel6', 'Direction 2'], ['1264198400.0kanpatel6', '1264198400.0kanpatel7', 'Direction 2'], ['1301961642.84jloehrle', '1301961658.36jloehrle', 'Direction 2'], ['1301961658.36jloehrle', '1301961787.78jloehrle', 'Direction 2'], ['1301961787.78jloehrle', '1301961875.5jloehrle', 'Direction 2'], ['1301961875.5jloehrle', '1301961893.49jloehrle', 'Direction 2'], ['1301961893.49jloehrle', '1301961921.19jloehrle', 'Direction 2'], ['1301961921.19jloehrle', '1301961947.74jloehrle', 'Direction 2'], 
['1301961642.84jloehrle', '1301961947.74jloehrle', 'Direction 1'], ['1302027202.34jloehrle', '1302027210.82jloehrle', 'Direction 2'], ['1302027210.82jloehrle', '1302027247.31jloehrle', 'Direction 2'], ['1302027247.31jloehrle', '1302027276.39jloehrle', 'Direction 2'], ['1302027276.39jloehrle', '1302027297.28jloehrle', 'Direction 2'], ['1302027297.28jloehrle', '1302027328.71jloehrle', 'Direction 2'], ['1302027328.71jloehrle', '1302027345.21jloehrle', 'Direction 2'], ['1302027202.34jloehrle', '1302027345.21jloehrle', 'Direction 1'], ['1302027700.44jloehrle', '1302027714.3jloehrle', 'Direction 2'], ['1302027714.3jloehrle', '1302027734.68jloehrle', 'Direction 2'], ['1302027734.68jloehrle', '1302027809.08jloehrle', 'Direction 2'], ['1302027809.08jloehrle', '1302027835.68jloehrle', 'Direction 2'], ['1302027835.68jloehrle', '1302027852.97jloehrle', 'Direction 2'], ['1302027852.97jloehrle', '1302027866.65jloehrle', 'Direction 2'], ['1302027866.65jloehrle', '1302027883.94jloehrle', 'Direction 2'], ['1302027700.44jloehrle', '1302027883.94jloehrle', 'Direction 1']],'Layers': {'Collisions': ['1184008208.59kmuller', '1184016064.62kmuller', '1184013852.84kmuller', '1185822696.06kmuller', '1184006140.32kmuller', '1184002350.98kmuller', '1184007573.29kmuller', '1184021176.59kmuller', '1184005963.59kmuller', '1188324241.31akelts', '1184006537.34kmuller', '1184006605.81kmuller', '1187139568.33kmuller', '1188324186.98akelts', '1184006730.66kmuller', '1184007538.51kmuller', '1184006188.41kmuller', '1184021084.27kmuller', '1185824396.94kmuller', '1185824250.16kmuller', '1185823630.52kmuller', '1185823760.23kmuller', '1185824497.83kmuller', '1185824751.45kmuller', '1187739103.34akelts', '1188323993.34akelts', '1184016538.29kmuller', '1185822200.97kmuller', '1184016225.99kmuller', '1195241421.34akelts', '1195242796.08akelts', '1184020642.13kmuller', '1195237994.63akelts', '1184020756.88kmuller', '1184020833.4kmuller', '1185820992.97kmuller', '1185821053.83kmuller', '1184015068.54kmuller', 
'1184014935.82kmuller', '1185821432.88kmuller', '1185821701.86kmuller', '1195240137.55akelts', '1195241539.38akelts', '1195238422.3akelts', '1195238473.22akelts', '1185821453.17kmuller', '1184021269.96kmuller', '1185821310.89kmuller', '1185821165.59kmuller', '1185821199.36kmuller', '1185822035.98kmuller', '1184015806.59kmuller', '1185822059.48kmuller', '1185920461.76kmuller', '1194984449.66akelts', '1185824206.22kmuller', '1184003446.23kmuller', '1184003254.85kmuller', '1184003218.74kmuller', '1184002700.44kmuller', '1186705073.11kmuller', '1187658531.86akelts', '1186705214.3kmuller', '1185824927.28kmuller', '1184014204.54kmuller', '1184014152.84kmuller']},'ObjectIds': {'1141410776.53sdnaik': '["Objects"]["1141410776.53sdnaik"]','1142029069.97sdnaik': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1233100928.0akelts"]["Objects"]["1142029069.97sdnaik"]','1150922126.8dzlu': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1150922126.8dzlu"]','1156207188.95dzlu': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1156207188.95dzlu"]','1156359855.24bbathen': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1156359855.24bbathen"]','1157060429.94sdnaik': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1150922126.8dzlu"]["Objects"]["1157060429.94sdnaik"]','1158214327.11sdnaik': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1156207188.95dzlu"]["Objects"]["1158214327.11sdnaik"]','1158296490.13sdnaik': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1156359855.24bbathen"]["Objects"]["1158296490.13sdnaik"]','1159933206.48sdnaik': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1159933206.48sdnaik"]','1160614528.73sdnaik': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1160614528.73sdnaik"]','1161282725.84kmuller': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1161282725.84kmuller"]','1161664293.39sdnaik': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1161282725.84kmuller"]["Objects"]["1161664293.39sdnaik"]','1162600600.5sdnaik': 
'["Objects"]["1141410776.53sdnaik"]["Objects"]["1159933206.48sdnaik"]["Objects"]["1162600600.5sdnaik"]','1163119750.53sdnaik': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1160614528.73sdnaik"]["Objects"]["1163119750.53sdnaik"]','1164135492.81dzlu': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1164135492.81dzlu"]','1164150392.42dzlu': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1164150392.42dzlu"]','1164157132.99dzlu': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1164157132.99dzlu"]','1164760526.77sdnaik': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1173381952.2sdnaik"]["Objects"]["1164760526.77sdnaik"]','1164760719.72sdnaik': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1164135492.81dzlu"]["Objects"]["1164760719.72sdnaik"]','1164763706.66sdnaik': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1164763706.66sdnaik"]','1164763735.42sdnaik': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1164763706.66sdnaik"]["Objects"]["1164763735.42sdnaik"]','1164763977.22sdnaik': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1164157132.99dzlu"]["Objects"]["1164763977.22sdnaik"]','1173381952.2sdnaik': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1173381952.2sdnaik"]','1173381974.5sdnaik': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1164150392.42dzlu"]["Objects"]["1173381974.5sdnaik"]','1173382404.64sdnaik': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1173382404.64sdnaik"]','1173382432.38sdnaik': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1173382404.64sdnaik"]["Objects"]["1173382432.38sdnaik"]','1185235968.0dxschafe0': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1185235968.0dxschafe0"]','1185235968.0dxschafe1': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1185235968.0dxschafe1"]','1185236224.0dxschafe': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1185236224.0dxschafe"]','1185236224.0dxschafe3': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1185236224.0dxschafe3"]','1185236480.0dxschafe': 
'["Objects"]["1141410776.53sdnaik"]["Objects"]["1185236480.0dxschafe"]','1185236480.0dxschafe1': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1185236480.0dxschafe1"]','1185236736.0dxschafe': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1185236736.0dxschafe"]','1185236864.0dxschafe': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1185236864.0dxschafe"]','1185237120.0dxschafe': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1185237120.0dxschafe"]','1185237120.0dxschafe1': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1185237120.0dxschafe1"]','1185237120.0dxschafe2': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1185237120.0dxschafe2"]','1185237248.0dxschafe': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1185237248.0dxschafe"]','1196970035.53sdnaik': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1196970035.53sdnaik"]','1196970080.56sdnaik': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1196970080.56sdnaik"]','1196970432.69sdnaik': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1196970035.53sdnaik"]["Objects"]["1196970432.69sdnaik"]','1196970440.66sdnaik': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1196970080.56sdnaik"]["Objects"]["1196970440.66sdnaik"]','1201636857.8kmuller': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1201636857.8kmuller"]','1201641372.09kmuller': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1201641372.09kmuller"]','1201641393.2kmuller': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1201641393.2kmuller"]','1201641405.3kmuller': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1201641405.3kmuller"]','1201641438.69kmuller': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1201641438.69kmuller"]','1201641487.41kmuller': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1201641487.41kmuller"]','1210197632.0WDIG': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1210197632.0WDIG"]','1210197760.0WDIG': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1210197760.0WDIG"]','1210197760.0WDIG0': 
'["Objects"]["1141410776.53sdnaik"]["Objects"]["1210197760.0WDIG0"]','1210197760.0WDIG1': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1210197760.0WDIG1"]','1210197760.0WDIG2': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1210197760.0WDIG2"]','1210197760.0WDIG3': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1210197760.0WDIG3"]','1210197760.0WDIG4': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1210197760.0WDIG4"]','1210197760.0WDIG5': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1210197760.0WDIG5"]','1210197888.0WDIG': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1210197888.0WDIG"]','1210197888.0WDIG0': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1210197888.0WDIG0"]','1210197888.0WDIG1': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1210197888.0WDIG1"]','1210197888.0WDIG2': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1210197888.0WDIG2"]','1210981042.09kmuller': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1201636857.8kmuller"]["Objects"]["1210981042.09kmuller"]','1210981113.03kmuller': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1201641372.09kmuller"]["Objects"]["1210981113.03kmuller"]','1210981144.78kmuller': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1201641393.2kmuller"]["Objects"]["1210981144.78kmuller"]','1210981190.74kmuller': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1201641487.41kmuller"]["Objects"]["1210981190.74kmuller"]','1210981205.23kmuller': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1201641405.3kmuller"]["Objects"]["1210981205.23kmuller"]','1210981266.57kmuller': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1201641438.69kmuller"]["Objects"]["1210981266.57kmuller"]','1233100928.0akelts': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1233100928.0akelts"]','1264194816.0kanpatel': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264194816.0kanpatel"]','1264194816.0kanpatel0': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264194816.0kanpatel0"]','1264194944.0kanpatel': 
'["Objects"]["1141410776.53sdnaik"]["Objects"]["1264194944.0kanpatel"]','1264194944.0kanpatel0': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264194944.0kanpatel0"]','1264194944.0kanpatel1': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264194944.0kanpatel1"]','1264194944.0kanpatel2': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264194944.0kanpatel2"]','1264195072.0kanpatel': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264195072.0kanpatel"]','1264195072.0kanpatel0': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264195072.0kanpatel0"]','1264195072.0kanpatel1': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264195072.0kanpatel1"]','1264195072.0kanpatel2': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264195072.0kanpatel2"]','1264195072.0kanpatel3': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264195072.0kanpatel3"]','1264195072.0kanpatel4': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264195072.0kanpatel4"]','1264195200.0kanpatel0': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264195200.0kanpatel0"]','1264195968.0kanpatel': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264195968.0kanpatel"]','1264196096.0kanpatel': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264196096.0kanpatel"]','1264196096.0kanpatel0': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264196096.0kanpatel0"]','1264196096.0kanpatel1': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264196096.0kanpatel1"]','1264196096.0kanpatel2': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264196096.0kanpatel2"]','1264196224.0kanpatel': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264196224.0kanpatel"]','1264196224.0kanpatel0': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264196224.0kanpatel0"]','1264196224.0kanpatel1': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264196224.0kanpatel1"]','1264196224.0kanpatel2': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264196224.0kanpatel2"]','1264196224.0kanpatel3': 
'["Objects"]["1141410776.53sdnaik"]["Objects"]["1264196224.0kanpatel3"]','1264196224.0kanpatel4': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264196224.0kanpatel4"]','1264196352.0kanpatel': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264196352.0kanpatel"]','1264196352.0kanpatel0': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264196352.0kanpatel0"]','1264196352.0kanpatel3': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264196352.0kanpatel3"]','1264196864.0kanpatel': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264196864.0kanpatel"]','1264196864.0kanpatel0': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264196864.0kanpatel0"]','1264196864.0kanpatel1': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264196864.0kanpatel1"]','1264196992.0kanpatel0': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264196992.0kanpatel0"]','1264196992.0kanpatel1': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264196992.0kanpatel1"]','1264196992.0kanpatel2': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264196992.0kanpatel2"]','1264196992.0kanpatel3': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264196992.0kanpatel3"]','1264196992.0kanpatel4': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264196992.0kanpatel4"]','1264196992.0kanpatel5': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264196992.0kanpatel5"]','1264196992.0kanpatel6': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264196992.0kanpatel6"]','1264197376.0kanpatel': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264197376.0kanpatel"]','1264197376.0kanpatel0': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264197376.0kanpatel0"]','1264197376.0kanpatel1': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264197376.0kanpatel1"]','1264197504.0kanpatel': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264197504.0kanpatel"]','1264197504.0kanpatel0': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264197504.0kanpatel0"]','1264197504.0kanpatel1': 
'["Objects"]["1141410776.53sdnaik"]["Objects"]["1264197504.0kanpatel1"]','1264197504.0kanpatel2': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264197504.0kanpatel2"]','1264197632.0kanpatel': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264197632.0kanpatel"]','1264197632.0kanpatel0': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264197632.0kanpatel0"]','1264197632.0kanpatel1': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264197632.0kanpatel1"]','1264198016.0kanpatel': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264198016.0kanpatel"]','1264198016.0kanpatel0': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264198016.0kanpatel0"]','1264198016.0kanpatel1': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264198016.0kanpatel1"]','1264198016.0kanpatel2': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264198016.0kanpatel2"]','1264198016.0kanpatel3': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264198016.0kanpatel3"]','1264198016.0kanpatel4': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264198016.0kanpatel4"]','1264198144.0kanpatel0': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264198144.0kanpatel0"]','1264198144.0kanpatel1': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264198144.0kanpatel1"]','1264198144.0kanpatel2': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264198144.0kanpatel2"]','1264198144.0kanpatel3': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264198144.0kanpatel3"]','1264198272.0kanpatel': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264198272.0kanpatel"]','1264198400.0kanpatel': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264198400.0kanpatel"]','1264198400.0kanpatel0': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264198400.0kanpatel0"]','1264198400.0kanpatel1': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264198400.0kanpatel1"]','1264198400.0kanpatel2': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264198400.0kanpatel2"]','1264198400.0kanpatel3': 
'["Objects"]["1141410776.53sdnaik"]["Objects"]["1264198400.0kanpatel3"]','1264198400.0kanpatel4': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264198400.0kanpatel4"]','1264198400.0kanpatel5': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264198400.0kanpatel5"]','1264198400.0kanpatel6': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264198400.0kanpatel6"]','1264198400.0kanpatel7': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1264198400.0kanpatel7"]','1264624863.65caoconno': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1271348547.01akelts"]["Objects"]["1264624863.65caoconno"]','1271348547.01akelts': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1271348547.01akelts"]','1301961642.84jloehrle': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1301961642.84jloehrle"]','1301961658.36jloehrle': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1301961658.36jloehrle"]','1301961787.78jloehrle': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1301961787.78jloehrle"]','1301961875.5jloehrle': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1301961875.5jloehrle"]','1301961893.49jloehrle': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1301961893.49jloehrle"]','1301961921.19jloehrle': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1301961921.19jloehrle"]','1301961947.74jloehrle': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1301961947.74jloehrle"]','1302027202.34jloehrle': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1302027202.34jloehrle"]','1302027210.82jloehrle': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1302027210.82jloehrle"]','1302027247.31jloehrle': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1302027247.31jloehrle"]','1302027276.39jloehrle': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1302027276.39jloehrle"]','1302027297.28jloehrle': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1302027297.28jloehrle"]','1302027328.71jloehrle': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1302027328.71jloehrle"]','1302027345.21jloehrle': 
'["Objects"]["1141410776.53sdnaik"]["Objects"]["1302027345.21jloehrle"]','1302027700.44jloehrle': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1302027700.44jloehrle"]','1302027714.3jloehrle': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1302027714.3jloehrle"]','1302027734.68jloehrle': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1302027734.68jloehrle"]','1302027809.08jloehrle': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1302027809.08jloehrle"]','1302027835.68jloehrle': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1302027835.68jloehrle"]','1302027852.97jloehrle': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1302027852.97jloehrle"]','1302027866.65jloehrle': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1302027866.65jloehrle"]','1302027883.94jloehrle': '["Objects"]["1141410776.53sdnaik"]["Objects"]["1302027883.94jloehrle"]'}}
# Editor viewpoint metadata saved with the world data: main camera pose and
# an 'overhead' preset.  Values are Panda3D Point3/VBase3 instances.
extraInfo = {
    'camPos': Point3(-33114.7, -40175.2, 54648.8),
    'camHpr': VBase3(-11.9216, -46.9344, 0),
    'focalLength': 1.39999997616,
    'skyState': 2,
    'fog': 0,
    'cameraSettings': {
        'overhead': {
            'camPos': Point3(-1059.51, -121.796, 99609.5),
            'camHpr': VBase3(0, -90, 0),
            'focalLength': 1.39999997616,
        },
    },
}
| 22,225
| 66,327
| 0.660847
| 9,326
| 66,675
| 4.707377
| 0.122453
| 0.044236
| 0.042368
| 0.040364
| 0.605476
| 0.485
| 0.429034
| 0.424159
| 0.418852
| 0.417621
| 0
| 0.291302
| 0.059753
| 66,675
| 3
| 66,328
| 22,225
| 0.408974
| 0
| 0
| 0
| 0
| 0
| 0.5734
| 0.2864
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.333333
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
|
0
| 7
|
7890e40e743ec50a5c905d44300364e6c3123d77
| 55,886
|
py
|
Python
|
python/fmm3dpy/fmm3d.py
|
LiangJiuyang/FMM3D
|
2221ef08ef1b0ee227b6dcade2b2800dc0060c7c
|
[
"Apache-2.0"
] | 71
|
2019-06-03T21:22:37.000Z
|
2022-03-03T01:15:45.000Z
|
python/fmm3dpy/fmm3d.py
|
LiangJiuyang/FMM3D
|
2221ef08ef1b0ee227b6dcade2b2800dc0060c7c
|
[
"Apache-2.0"
] | 14
|
2019-08-22T19:58:36.000Z
|
2022-02-08T19:01:06.000Z
|
python/fmm3dpy/fmm3d.py
|
LiangJiuyang/FMM3D
|
2221ef08ef1b0ee227b6dcade2b2800dc0060c7c
|
[
"Apache-2.0"
] | 23
|
2019-09-13T21:30:35.000Z
|
2022-02-26T12:34:42.000Z
|
from . import hfmm3d_fortran as hfmm
from . import lfmm3d_fortran as lfmm
from . import emfmm3d_fortran as emfmm
from . import stfmm3d_fortran as stfmm
import numpy as np
import numpy.linalg as la
class Output():
    # Result container returned by every wrapper in this module
    # (hfmm3d, lfmm3d, emfmm3d, stfmm3d, h3ddir, l3ddir, ...).
    # All fields default to None/0; each wrapper fills in only the
    # fields corresponding to the outputs the caller requested.
    pot = None       # potential at source locations
    grad = None      # gradient at source locations
    hess = None      # hessian at source locations
    pottarg = None   # potential at target locations
    gradtarg = None  # gradient at target locations
    hesstarg = None  # hessian at target locations
    E = None         # E field at targets (emfmm3d/em3ddir)
    curlE = None     # curl of E at targets
    divE = None      # divergence of E at targets
    Etarg = None     # unused by the visible wrappers; kept for API compatibility
    curlEtarg = None
    divEtarg = None
    pre = None       # pressure at sources (stfmm3d)
    pretarg = None   # pressure at targets (stfmm3d)
    ier = 0          # error flag from the Fortran library; 0 means success
def hfmm3d(*,eps,zk,sources,charges=None,dipvec=None,
        targets=None,pg=0,pgt=0,nd=1):
    r"""
    Compute the N-body Helmholtz interactions in three dimensions where
    the interaction kernel is given by e^{ikr}/r, and its gradients.

    .. math::

        u(x) = \sum_{j=1}^{N} c_{j} \frac{e^{ik \|x-x_{j}\|}}{\|x-x_{j}\|} - v_{j} \cdot \nabla \left( \frac{e^{ik \|x-x_{j}\|}}{\|x-x_{j}\|} \right) \, ,

    where $c_{j}$ are the charge densities,
    $v_{j}$ are the dipole orientation vectors, and
    $x_{j}$ are the source locations.
    When $x=x_{m}$, the term corresponding to $x_{m}$ is dropped from the
    sum.

    Args:
        eps (float): precision requested
        zk (complex): Helmholtz parameter
        sources (float(3,n)): source locations ($x_{j}$)
        charges (complex(nd,n) or complex(n)): charge densities ($c_{j}$)
        dipvec (complex(nd,3,n) or complex(3,n)): dipole orientation vectors ($v_{j}$)
        targets (float(3,nt)): target locations (x)
        pg (integer): source eval flag. Potential at sources evaluated if pg = 1.
            Potential and gradient at sources evaluated if pg = 2.
        pgt (integer): target eval flag. Potential at targets evaluated if pgt = 1.
            Potential and gradient at targets evaluated if pgt = 2.
        nd (integer): number of densities

    Returns:
        Returns an object of type Output (out) with the following variables:
        out.pot: potential at source locations if requested
        out.grad: gradient at source locations if requested
        out.pottarg: potential at target locations if requested
        out.gradtarg: gradient at target locations if requested
        out.ier: error flag from the Fortran library (0 on success)

    Example:
        see hfmmexample.py
    """
    out = Output()
    assert sources.shape[0] == 3, "The first dimension of sources must be 3"
    if(np.size(np.shape(sources))==2):
        ns = sources.shape[1]
    if(np.size(np.shape(sources))==1):
        ns = 1
    ifcharge = 0
    ifdipole = 0
    iftarg = 0
    if(pg == 0 and pgt == 0):
        print("Nothing to compute, set either pg or pgt to non-zero")
        return out
    if charges is not None:
        if nd == 1:
            assert charges.shape[0] == ns, "Charges must be same length as second dimension of sources"
        if nd>1:
            assert charges.shape[0] == nd and charges.shape[1]==ns, "Charges must be of shape [nd,ns] where nd is number of densities, and ns is number of sources"
        ifcharge = 1
    if(dipvec is not None):
        if nd == 1 and ns>1:
            assert dipvec.shape[0] == 3 and dipvec.shape[1] == ns, "dipole vectors must be of shape [3,number of sources]"
        if nd == 1 and ns==1:
            assert dipvec.shape[0] == 3, "dipole vectors must be of shape [3,number of sources]"
        if nd>1:
            assert dipvec.shape[0] == nd and dipvec.shape[1] == 3 and dipvec.shape[2] == ns, "Dipole vectors must be of shape [nd,3,ns] where nd is number of densities, and ns is number of sources"
        ifdipole = 1
    if(targets is not None):
        assert targets.shape[0] == 3, "The first dimension of targets must be 3"
        iftarg = 1
    #
    # sources -> sources routines
    #
    # BUG FIX: the original guard was (iftarg == 0 or pgt != 1 or pgt != 2),
    # which is a tautology (pgt cannot equal both 1 and 2), so the
    # source-only FMM always ran — even when the combined source+target
    # branch below would run the FMM a second time and overwrite the
    # results.  The source-only branch must run only when no target
    # output is requested.
    if(iftarg == 0 or (pgt != 1 and pgt != 2)):
        if(pg == 1 and ifcharge == 1 and ifdipole == 0):
            if(nd > 1):
                out.pot,out.ier = hfmm.hfmm3d_s_c_p_vec(eps,zk,sources,charges,nd)
            if(nd == 1):
                out.pot,out.ier = hfmm.hfmm3d_s_c_p(eps,zk,sources,charges)
        if(pg == 2 and ifcharge == 1 and ifdipole == 0):
            if(nd > 1):
                out.pot,out.grad,out.ier = hfmm.hfmm3d_s_c_g_vec(eps,zk,sources,charges,nd)
            if(nd == 1):
                out.pot,out.grad,out.ier = hfmm.hfmm3d_s_c_g(eps,zk,sources,charges)
        if(pg == 1 and ifcharge == 0 and ifdipole == 1):
            if(nd > 1):
                out.pot,out.ier = hfmm.hfmm3d_s_d_p_vec(eps,zk,sources,dipvec,nd)
            if(nd == 1):
                out.pot,out.ier = hfmm.hfmm3d_s_d_p(eps,zk,sources,dipvec)
        if(pg == 2 and ifcharge == 0 and ifdipole == 1):
            if(nd > 1):
                out.pot,out.grad,out.ier = hfmm.hfmm3d_s_d_g_vec(eps,zk,sources,dipvec,nd)
            if(nd == 1):
                out.pot,out.grad,out.ier = hfmm.hfmm3d_s_d_g(eps,zk,sources,dipvec)
        if(pg == 1 and ifcharge == 1 and ifdipole == 1):
            if(nd > 1):
                out.pot,out.ier = hfmm.hfmm3d_s_cd_p_vec(eps,zk,sources,charges,dipvec,nd)
            if(nd == 1):
                out.pot,out.ier = hfmm.hfmm3d_s_cd_p(eps,zk,sources,charges,dipvec)
        if(pg == 2 and ifcharge == 1 and ifdipole == 1):
            if(nd > 1):
                out.pot,out.grad,out.ier = hfmm.hfmm3d_s_cd_g_vec(eps,zk,sources,charges,dipvec,nd)
            if(nd == 1):
                out.pot,out.grad,out.ier = hfmm.hfmm3d_s_cd_g(eps,zk,sources,charges,dipvec)
    #
    # sources -> targets routines
    #
    if(pg !=1 and pg !=2 and targets is not None):
        if(pgt == 1 and ifcharge == 1 and ifdipole == 0):
            if(nd > 1):
                out.pottarg,out.ier = hfmm.hfmm3d_t_c_p_vec(eps,zk,sources,charges,targets,nd)
            if(nd == 1):
                out.pottarg,out.ier = hfmm.hfmm3d_t_c_p(eps,zk,sources,charges,targets)
        if(pgt == 2 and ifcharge == 1 and ifdipole == 0):
            if(nd > 1):
                out.pottarg,out.gradtarg,out.ier = hfmm.hfmm3d_t_c_g_vec(eps,zk,sources,charges,targets,nd)
            if(nd == 1):
                out.pottarg,out.gradtarg,out.ier = hfmm.hfmm3d_t_c_g(eps,zk,sources,charges,targets)
        if(pgt == 1 and ifcharge == 0 and ifdipole == 1):
            if(nd > 1):
                out.pottarg,out.ier = hfmm.hfmm3d_t_d_p_vec(eps,zk,sources,dipvec,targets,nd)
            if(nd == 1):
                out.pottarg,out.ier = hfmm.hfmm3d_t_d_p(eps,zk,sources,dipvec,targets)
        if(pgt == 2 and ifcharge == 0 and ifdipole == 1):
            if(nd > 1):
                out.pottarg,out.gradtarg,out.ier = hfmm.hfmm3d_t_d_g_vec(eps,zk,sources,dipvec,targets,nd)
            if(nd == 1):
                out.pottarg,out.gradtarg,out.ier = hfmm.hfmm3d_t_d_g(eps,zk,sources,dipvec,targets)
        if(pgt == 1 and ifcharge == 1 and ifdipole == 1):
            if(nd > 1):
                out.pottarg,out.ier = hfmm.hfmm3d_t_cd_p_vec(eps,zk,sources,charges,dipvec,targets,nd)
            if(nd == 1):
                out.pottarg,out.ier = hfmm.hfmm3d_t_cd_p(eps,zk,sources,charges,dipvec,targets)
        if(pgt == 2 and ifcharge == 1 and ifdipole == 1):
            if(nd > 1):
                out.pottarg,out.gradtarg,out.ier = hfmm.hfmm3d_t_cd_g_vec(eps,zk,sources,charges,dipvec,targets,nd)
            if(nd == 1):
                out.pottarg,out.gradtarg,out.ier = hfmm.hfmm3d_t_cd_g(eps,zk,sources,charges,dipvec,targets)
    #
    # sources -> sources + targets routines
    #
    if((pg == 1 or pg == 2) and targets is not None):
        assert pg == pgt, "if both potential or potential at gradient are requested at sources and targets, then the same pg must be equal to pgt"
        if(pgt == 1 and ifcharge == 1 and ifdipole == 0):
            if(nd > 1):
                out.pot,out.pottarg,out.ier = hfmm.hfmm3d_st_c_p_vec(eps,zk,sources,charges,targets,nd)
            if(nd == 1):
                out.pot,out.pottarg,out.ier = hfmm.hfmm3d_st_c_p(eps,zk,sources,charges,targets)
        if(pgt == 2 and ifcharge == 1 and ifdipole == 0):
            if(nd > 1):
                out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = hfmm.hfmm3d_st_c_g_vec(eps,zk,sources,charges,targets,nd)
            if(nd == 1):
                out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = hfmm.hfmm3d_st_c_g(eps,zk,sources,charges,targets)
        if(pgt == 1 and ifcharge == 0 and ifdipole == 1):
            if(nd > 1):
                out.pot,out.pottarg,out.ier = hfmm.hfmm3d_st_d_p_vec(eps,zk,sources,dipvec,targets,nd)
            if(nd == 1):
                out.pot,out.pottarg,out.ier = hfmm.hfmm3d_st_d_p(eps,zk,sources,dipvec,targets)
        if(pgt == 2 and ifcharge == 0 and ifdipole == 1):
            if(nd > 1):
                out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = hfmm.hfmm3d_st_d_g_vec(eps,zk,sources,dipvec,targets,nd)
            if(nd == 1):
                out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = hfmm.hfmm3d_st_d_g(eps,zk,sources,dipvec,targets)
        if(pgt == 1 and ifcharge == 1 and ifdipole == 1):
            if(nd > 1):
                out.pot,out.pottarg,out.ier = hfmm.hfmm3d_st_cd_p_vec(eps,zk,sources,charges,dipvec,targets,nd)
            if(nd == 1):
                out.pot,out.pottarg,out.ier = hfmm.hfmm3d_st_cd_p(eps,zk,sources,charges,dipvec,targets)
        if(pgt == 2 and ifcharge == 1 and ifdipole == 1):
            if(nd > 1):
                out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = hfmm.hfmm3d_st_cd_g_vec(eps,zk,sources,charges,dipvec,targets,nd)
            if(nd == 1):
                out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = hfmm.hfmm3d_st_cd_g(eps,zk,sources,charges,dipvec,targets)
    return out
def lfmm3d(*,eps,sources,charges=None,dipvec=None,
        targets=None,pg=0,pgt=0,nd=1):
    r"""
    Compute the N-body Laplace interactions in three dimensions where
    the interaction kernel is given by 1/r, and its gradients.

    .. math::

        u(x) = \sum_{j=1}^{N} c_{j} / \|x-x_{j}\| + v_{j} \cdot \nabla( 1/\|x-x_{j}\|) \, ,

    where $c_{j}$ are the charge densities,
    $v_{j}$ are the dipole orientation vectors, and
    $x_{j}$ are the source locations.
    When $x=x_{m}$, the term corresponding to $x_{m}$ is dropped from the
    sum.

    Args:
        eps: float
            precision requested
        sources: float(3,n)
            source locations (x_{j})
        charges: float(nd,n) or float(n)
            charge densities (c_{j})
        dipvec: float(nd,3,n) or float(3,n)
            dipole orientation vectors (v_{j})
        targets: float(3,nt)
            target locations (x)
        pg: integer
            source eval flag
            potential at sources evaluated if pg = 1
            potential and gradient at sources evaluated if pg = 2
            potential, gradient and hessian at sources evaluated if pg = 3
        pgt: integer
            target eval flag
            potential at targets evaluated if pgt = 1
            potential and gradient at targets evaluated if pgt = 2
            potential, gradient and hessian at targets evaluated if pgt = 3
        nd: integer
            number of densities

    Returns:
        out.pot: potential at source locations if requested
        out.grad: gradient at source locations if requested
        out.hess: hessian at source locations if requested
        out.pottarg: potential at target locations if requested
        out.gradtarg: gradient at target locations if requested
        out.hesstarg: hessian at target locations if requested

    Example:
        see lfmmexample.py
    """
    out = Output()
    assert sources.shape[0] == 3, "The first dimension of sources must be 3"
    if(np.size(np.shape(sources))==2):
        ns = sources.shape[1]
    if(np.size(np.shape(sources))==1):
        ns = 1
    ifcharge = 0
    ifdipole = 0
    iftarg = 0
    if(pg == 0 and pgt == 0):
        print("Nothing to compute, set either pg or pgt to non-zero")
        return out
    if charges is not None:
        if nd == 1:
            assert charges.shape[0] == ns, "Charges must be same length as second dimension of sources"
        if nd>1:
            assert charges.shape[0] == nd and charges.shape[1]==ns, "Charges must be of shape [nd,ns] where nd is number of densities, and ns is number of sources"
        ifcharge = 1
    if(dipvec is not None):
        if nd == 1 and ns>1:
            assert dipvec.shape[0] == 3 and dipvec.shape[1] == ns, "dipole vectors must be of shape [3,number of sources]"
        if nd == 1 and ns==1:
            assert dipvec.shape[0] == 3, "dipole vectors must be of shape [3,number of sources]"
        if nd>1:
            assert dipvec.shape[0] == nd and dipvec.shape[1] == 3 and dipvec.shape[2] == ns, "Dipole vectors must be of shape [nd,3,ns] where nd is number of densities, and ns is number of sources"
        ifdipole = 1
    if(targets is not None):
        assert targets.shape[0] == 3, "The first dimension of targets must be 3"
        iftarg = 1
    #
    # sources -> sources routines
    #
    # BUG FIX: the original guard was
    # (iftarg == 0 or pgt != 1 or pgt != 2 or pgt != 3), which is a
    # tautology, so the source-only FMM always ran — even when the
    # combined source+target branch below would run the FMM a second
    # time and overwrite the results.  The source-only branch must run
    # only when no target output is requested.
    if(iftarg == 0 or (pgt != 1 and pgt != 2 and pgt != 3)):
        if(pg == 1 and ifcharge == 1 and ifdipole == 0):
            if(nd > 1):
                out.pot,out.ier = lfmm.lfmm3d_s_c_p_vec(eps,sources,charges,nd)
            if(nd == 1):
                out.pot,out.ier = lfmm.lfmm3d_s_c_p(eps,sources,charges)
        if(pg == 2 and ifcharge == 1 and ifdipole == 0):
            if(nd > 1):
                out.pot,out.grad,out.ier = lfmm.lfmm3d_s_c_g_vec(eps,sources,charges,nd)
            if(nd == 1):
                out.pot,out.grad,out.ier = lfmm.lfmm3d_s_c_g(eps,sources,charges)
        if(pg == 3 and ifcharge == 1 and ifdipole == 0):
            if(nd > 1):
                out.pot,out.grad,out.hess,out.ier = lfmm.lfmm3d_s_c_h_vec(eps,sources,charges,nd)
            if(nd == 1):
                out.pot,out.grad,out.hess,out.ier = lfmm.lfmm3d_s_c_h(eps,sources,charges)
        if(pg == 1 and ifcharge == 0 and ifdipole == 1):
            if(nd > 1):
                out.pot,out.ier = lfmm.lfmm3d_s_d_p_vec(eps,sources,dipvec,nd)
            if(nd == 1):
                out.pot,out.ier = lfmm.lfmm3d_s_d_p(eps,sources,dipvec)
        if(pg == 2 and ifcharge == 0 and ifdipole == 1):
            if(nd > 1):
                out.pot,out.grad,out.ier = lfmm.lfmm3d_s_d_g_vec(eps,sources,dipvec,nd)
            if(nd == 1):
                out.pot,out.grad,out.ier = lfmm.lfmm3d_s_d_g(eps,sources,dipvec)
        if(pg == 3 and ifcharge == 0 and ifdipole == 1):
            if(nd > 1):
                out.pot,out.grad,out.hess,out.ier = lfmm.lfmm3d_s_d_h_vec(eps,sources,dipvec,nd)
            if(nd == 1):
                out.pot,out.grad,out.hess,out.ier = lfmm.lfmm3d_s_d_h(eps,sources,dipvec)
        if(pg == 1 and ifcharge == 1 and ifdipole == 1):
            if(nd > 1):
                out.pot,out.ier = lfmm.lfmm3d_s_cd_p_vec(eps,sources,charges,dipvec,nd)
            if(nd == 1):
                out.pot,out.ier = lfmm.lfmm3d_s_cd_p(eps,sources,charges,dipvec)
        if(pg == 2 and ifcharge == 1 and ifdipole == 1):
            if(nd > 1):
                out.pot,out.grad,out.ier = lfmm.lfmm3d_s_cd_g_vec(eps,sources,charges,dipvec,nd)
            if(nd == 1):
                out.pot,out.grad,out.ier = lfmm.lfmm3d_s_cd_g(eps,sources,charges,dipvec)
        if(pg == 3 and ifcharge == 1 and ifdipole == 1):
            if(nd > 1):
                out.pot,out.grad,out.hess,out.ier = lfmm.lfmm3d_s_cd_h_vec(eps,sources,charges,dipvec,nd)
            if(nd == 1):
                out.pot,out.grad,out.hess,out.ier = lfmm.lfmm3d_s_cd_h(eps,sources,charges,dipvec)
    #
    # sources -> targets routines
    #
    if(pg !=1 and pg !=2 and pg !=3 and targets is not None):
        if(pgt == 1 and ifcharge == 1 and ifdipole == 0):
            if(nd > 1):
                out.pottarg,out.ier = lfmm.lfmm3d_t_c_p_vec(eps,sources,charges,targets,nd)
            if(nd == 1):
                out.pottarg,out.ier = lfmm.lfmm3d_t_c_p(eps,sources,charges,targets)
        if(pgt == 2 and ifcharge == 1 and ifdipole == 0):
            if(nd > 1):
                out.pottarg,out.gradtarg,out.ier = lfmm.lfmm3d_t_c_g_vec(eps,sources,charges,targets,nd)
            if(nd == 1):
                out.pottarg,out.gradtarg,out.ier = lfmm.lfmm3d_t_c_g(eps,sources,charges,targets)
        if(pgt == 3 and ifcharge == 1 and ifdipole == 0):
            if(nd > 1):
                out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.lfmm3d_t_c_h_vec(eps,sources,charges,targets,nd)
            if(nd == 1):
                out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.lfmm3d_t_c_h(eps,sources,charges,targets)
        if(pgt == 1 and ifcharge == 0 and ifdipole == 1):
            if(nd > 1):
                out.pottarg,out.ier = lfmm.lfmm3d_t_d_p_vec(eps,sources,dipvec,targets,nd)
            if(nd == 1):
                out.pottarg,out.ier = lfmm.lfmm3d_t_d_p(eps,sources,dipvec,targets)
        if(pgt == 2 and ifcharge == 0 and ifdipole == 1):
            if(nd > 1):
                out.pottarg,out.gradtarg,out.ier = lfmm.lfmm3d_t_d_g_vec(eps,sources,dipvec,targets,nd)
            if(nd == 1):
                out.pottarg,out.gradtarg,out.ier = lfmm.lfmm3d_t_d_g(eps,sources,dipvec,targets)
        if(pgt == 3 and ifcharge == 0 and ifdipole == 1):
            if(nd > 1):
                out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.lfmm3d_t_d_h_vec(eps,sources,dipvec,targets,nd)
            if(nd == 1):
                out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.lfmm3d_t_d_h(eps,sources,dipvec,targets)
        if(pgt == 1 and ifcharge == 1 and ifdipole == 1):
            if(nd > 1):
                out.pottarg,out.ier = lfmm.lfmm3d_t_cd_p_vec(eps,sources,charges,dipvec,targets,nd)
            if(nd == 1):
                out.pottarg,out.ier = lfmm.lfmm3d_t_cd_p(eps,sources,charges,dipvec,targets)
        if(pgt == 2 and ifcharge == 1 and ifdipole == 1):
            if(nd > 1):
                out.pottarg,out.gradtarg,out.ier = lfmm.lfmm3d_t_cd_g_vec(eps,sources,charges,dipvec,targets,nd)
            if(nd == 1):
                out.pottarg,out.gradtarg,out.ier = lfmm.lfmm3d_t_cd_g(eps,sources,charges,dipvec,targets)
        if(pgt == 3 and ifcharge == 1 and ifdipole == 1):
            if(nd > 1):
                out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.lfmm3d_t_cd_h_vec(eps,sources,charges,dipvec,targets,nd)
            if(nd == 1):
                out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.lfmm3d_t_cd_h(eps,sources,charges,dipvec,targets)
    #
    # sources to sources + targets
    #
    if((pg == 1 or pg == 2 or pg == 3) and targets is not None):
        assert pg == pgt, "if output is requested at both sources and targets, then the same pg must be equal to pgt"
        if(pgt == 1 and ifcharge == 1 and ifdipole == 0):
            if(nd > 1):
                out.pot,out.pottarg,out.ier = lfmm.lfmm3d_st_c_p_vec(eps,sources,charges,targets,nd)
            if(nd == 1):
                out.pot,out.pottarg,out.ier = lfmm.lfmm3d_st_c_p(eps,sources,charges,targets)
        if(pgt == 2 and ifcharge == 1 and ifdipole == 0):
            if(nd > 1):
                out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = lfmm.lfmm3d_st_c_g_vec(eps,sources,charges,targets,nd)
            if(nd == 1):
                out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = lfmm.lfmm3d_st_c_g(eps,sources,charges,targets)
        if(pgt == 3 and ifcharge == 1 and ifdipole == 0):
            if(nd > 1):
                out.pot,out.grad,out.hess,out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.lfmm3d_st_c_h_vec(eps,sources,charges,targets,nd)
            if(nd == 1):
                out.pot,out.grad,out.hess,out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.lfmm3d_st_c_h(eps,sources,charges,targets)
        if(pgt == 1 and ifcharge == 0 and ifdipole == 1):
            if(nd > 1):
                out.pot,out.pottarg,out.ier = lfmm.lfmm3d_st_d_p_vec(eps,sources,dipvec,targets,nd)
            if(nd == 1):
                out.pot,out.pottarg,out.ier = lfmm.lfmm3d_st_d_p(eps,sources,dipvec,targets)
        if(pgt == 2 and ifcharge == 0 and ifdipole == 1):
            if(nd > 1):
                out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = lfmm.lfmm3d_st_d_g_vec(eps,sources,dipvec,targets,nd)
            if(nd == 1):
                out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = lfmm.lfmm3d_st_d_g(eps,sources,dipvec,targets)
        if(pgt == 3 and ifcharge == 0 and ifdipole == 1):
            if(nd > 1):
                out.pot,out.grad,out.hess,out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.lfmm3d_st_d_h_vec(eps,sources,dipvec,targets,nd)
            if(nd == 1):
                out.pot,out.grad,out.hess,out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.lfmm3d_st_d_h(eps,sources,dipvec,targets)
        if(pgt == 1 and ifcharge == 1 and ifdipole == 1):
            if(nd > 1):
                out.pot,out.pottarg,out.ier = lfmm.lfmm3d_st_cd_p_vec(eps,sources,charges,dipvec,targets,nd)
            if(nd == 1):
                out.pot,out.pottarg,out.ier = lfmm.lfmm3d_st_cd_p(eps,sources,charges,dipvec,targets)
        if(pgt == 2 and ifcharge == 1 and ifdipole == 1):
            if(nd > 1):
                out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = lfmm.lfmm3d_st_cd_g_vec(eps,sources,charges,dipvec,targets,nd)
            if(nd == 1):
                out.pot,out.grad,out.pottarg,out.gradtarg,out.ier = lfmm.lfmm3d_st_cd_g(eps,sources,charges,dipvec,targets)
        if(pgt == 3 and ifcharge == 1 and ifdipole == 1):
            if(nd > 1):
                out.pot,out.grad,out.hess,out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.lfmm3d_st_cd_h_vec(eps,sources,charges,dipvec,targets,nd)
            if(nd == 1):
                out.pot,out.grad,out.hess,out.pottarg,out.gradtarg,out.hesstarg,out.ier = lfmm.lfmm3d_st_cd_h(eps,sources,charges,dipvec,targets)
    return out
def emfmm3d(*,eps,zk,sources,h_current=None,e_current=None,e_charge=None,targets=None,ifE=0,ifcurlE=0,ifdivE=0,nd=1):
    r"""
    This subroutine computes

    E = curl S_{k}[h_current] + S_{k}[e_current] + grad S_{k}[e_charge]  -- (1)

    using the vector Helmholtz fmm.  The subroutine also computes
    divE and curlE with appropriate flags.

    Remark: the subroutine uses a stabilized representation
    for computing the divergence by using integration by parts
    wherever possible. If the divergence is not requested, then the
    Helmholtz fmm is called with 3*nd densities, while if the divergence
    is requested, then the Helmholtz fmm is called with 4*nd densities.

    Args:
        eps (float): precision requested
        zk (complex): Helmholtz parameter
        sources (float(3,n)): source locations
        h_current (complex(nd,3,n) or complex(3,n)): a vector source
        e_current (complex(nd,3,n) or complex(3,n)): b vector source
        e_charge (complex(nd,n) or complex(n)): e_charge source
        targets (float(3,nt)): target locations
        ifE (integer): E is returned at the target locations if ifE = 1
        ifcurlE (integer): curl E is returned at the target locations if ifcurlE = 1
        ifdivE (integer): div E is returned at the target locations if ifdivE = 1
        nd (integer): number of densities

    Returns:
        Returns an object of type Output (out) with the following variables:
        out.E: E field defined in (1) above at target locations if requested
        out.curlE: curl of E field at target locations if requested
        out.divE: divergence of E at target locations if requested

    Example:
        see emfmmexample.py
    """
    out = Output()
    # BUG FIX: was "targets == None", which performs a NumPy elementwise
    # comparison when targets is an ndarray and raises
    # "ValueError: the truth value of an array ... is ambiguous".
    if(targets is None):
        print("Nothing to compute, set targets")
        return out
    if(ifE == 0 and ifcurlE == 0 and ifdivE == 0):
        print("Nothing to compute, set either ifE, ifcurlE or ifdivE to non-zero")
        return out
    assert sources.shape[0] == 3, "The first dimension of sources must be 3"
    if(np.size(np.shape(sources))==2):
        ns = sources.shape[1]
    if(np.size(np.shape(sources))==1):
        ns = 1
    assert targets.shape[0] == 3, "The first dimension of targets must be 3"
    if(np.size(np.shape(targets))==2):
        nt = targets.shape[1]
    if(np.size(np.shape(targets))==1):
        nt = 1
    ifh_current = 0
    ife_current = 0
    ife_charge = 0
    if(h_current is not None):
        if(nd == 1 and ns>1):
            assert h_current.shape[0] == 3 and h_current.shape[1] == ns, "h_current vectors must be of shape [3,number of sources]"
        if(nd == 1 and ns==1):
            assert h_current.shape[0] == 3, "h_current vectors must be of shape [3,number of sources]"
        if(nd>1):
            assert h_current.shape[0] == nd and h_current.shape[1] == 3 and h_current.shape[2] == ns, "h_current vectors must be of shape [nd,3,ns] where nd is number of densities, and ns is number of sources"
        h_current = h_current.reshape([nd,3,ns])
        ifh_current = 1
    else:
        # Fortran interface requires a (possibly empty) array either way.
        h_current = np.zeros([nd,3,ns],dtype=complex)
    if(e_current is not None):
        if(nd == 1 and ns>1):
            assert e_current.shape[0] == 3 and e_current.shape[1] == ns, "e_current vectors must be of shape [3,number of sources]"
        if(nd == 1 and ns==1):
            assert e_current.shape[0] == 3, "e_current vectors must be of shape [3,number of sources]"
        if(nd>1):
            assert e_current.shape[0] == nd and e_current.shape[1] == 3 and e_current.shape[2] == ns, "e_current vectors must be of shape [nd,3,ns] where nd is number of densities, and ns is number of sources"
        e_current = e_current.reshape([nd,3,ns])
        ife_current = 1
    else:
        e_current = np.zeros([nd,3,ns],dtype=complex)
    if(e_charge is not None):
        if(nd == 1):
            assert e_charge.shape[0] == ns, "e_charge must be same length as second dimension of sources"
        if(nd>1):
            assert e_charge.shape[0] == nd and e_charge.shape[1]==ns, "e_charge must be of shape [nd,ns] where nd is number of densities, and ns is number of sources"
        e_charge = e_charge.reshape([nd,ns])
        ife_charge = 1
    else:
        e_charge = np.zeros([nd,ns],dtype=complex)
    out.E,out.curlE,out.divE,out.ier = emfmm.emfmm3d(eps,zk,sources,ifh_current,h_current,ife_current,e_current,ife_charge,e_charge,targets,ifE,ifcurlE,ifdivE,nd,ns,nt)
    # The Fortran call always returns all three arrays; drop the ones
    # the caller did not request.
    if(ifE==0):
        out.E = None
    if(ifcurlE==0):
        out.curlE = None
    if(ifdivE==0):
        out.divE = None
    return out
def stfmm3d(*,eps,sources,stoklet=None,strslet=None,strsvec=None,targets=None,ifppreg=0,ifppregtarg=0,nd=1):
    r"""
    Stokes FMM in R^{3}: evaluate all pairwise particle
    interactions (ignoring self-interactions) and
    interactions with targs.

    This routine computes sums of the form

        u(x) = sum_m G_{ij}(x,y^{(m)}) sigma^{(m)}_j
               + sum_m T_{ijk}(x,y^{(m)}) mu^{(m)}_j nu^{(m)}_k

    where sigma^{(m)} is the Stokeslet charge, mu^{(m)} is the
    stresslet charge, and nu^{(m)} is the stresslet orientation
    (note that each of these is a 3 vector per source point y^{(m)}).
    For x a source point, the self-interaction in the sum is omitted.

    Optionally, the associated pressure p(x) and gradient grad u(x)
    are returned

        p(x) = sum_m P_j(x,y^m) sigma^{(m)}_j
               + sum_m T_{ijk}(x,y^{(m)}) PI_{jk} mu^{(m)}_j nu^{(m)}_k

        grad u(x) = grad[sum_m G_{ij}(x,y^m) sigma^{(m)}_j
               + sum_m T_{ijk}(x,y^{(m)}) mu^{(m)}_j nu^{(m)}_k]

    Args:
        eps: float
            precision requested
        sources: float(3,n)
            source locations
        stoklet: float(nd,3,n) or float(3,n)
            Stokeslet charge strengths (sigma vectors above)
        strslet: float(nd,3,n) or float(3,n)
            stresslet strengths (mu vectors above)
        strsvec: float(nd,3,n) or float(3,n)
            stresslet orientations (nu vectors above)
        targets: float(3,nt)
            target locations (x)
        ifppreg: integer
            flag for evaluating potential, gradient, and pressure
            at the sources
            ifppreg = 1, only potential
            ifppreg = 2, potential and pressure
            ifppreg = 3, potential, pressure, and gradient
        ifppregtarg: integer
            flag for evaluating potential, gradient, and pressure
            at the targets
            ifppregtarg = 1, only potential
            ifppregtarg = 2, potential and pressure
            ifppregtarg = 3, potential, pressure, and gradient
        nd: integer
            number of densities

    Returns:
        out.pot: velocity at source locations if requested
        out.pre: pressure at source locations if requested
        out.grad: gradient of velocity at source locations if requested
        out.pottarg: velocity at target locations if requested
        out.pretarg: pressure at target locations if requested
        out.gradtarg: gradient of velocity at target locations if requested

    Example:
        see stfmmexample.py
    """
    out = Output()
    if(ifppreg == 0 and ifppregtarg == 0):
        print("Nothing to compute, set either ifppreg or ifppregtarg to non-zero")
        return out
    # BUG FIX: was "stoklet == None and ...", which performs NumPy
    # elementwise comparison when the argument is an ndarray and raises
    # "ValueError: the truth value of an array ... is ambiguous".
    if(stoklet is None and strslet is None and strsvec is None):
        print("Nothing to compute, set either stoklet or strslet+strsvec to non-None")
        return out
    # strslet/strsvec come in pairs: both or neither (typo "mush" fixed).
    if(strslet is not None and strsvec is None):
        print("strslet and strsvec must be both None or both not None")
        return out
    if(strslet is None and strsvec is not None):
        print("strslet and strsvec must be both None or both not None")
        return out
    assert sources.shape[0] == 3, "The first dimension of sources must be 3"
    if(np.size(np.shape(sources))==2):
        ns = sources.shape[1]
    if(np.size(np.shape(sources))==1):
        ns = 1
    if(targets is not None):
        assert targets.shape[0] == 3, "The first dimension of targets must be 3"
        if(np.size(np.shape(targets))==2):
            nt = targets.shape[1]
        if(np.size(np.shape(targets))==1):
            nt = 1
    else:
        # Fortran interface requires a target array even when none given.
        targets = np.zeros([3,0],dtype='double')
        nt = 0
    ifstoklet = 0
    ifstrslet = 0
    if(stoklet is not None):
        if(nd == 1 and ns>1):
            assert stoklet.shape[0] == 3 and stoklet.shape[1] == ns, "stoklet vectors must be of shape [3,number of sources]"
        if(nd == 1 and ns==1):
            assert stoklet.shape[0] == 3, "stoklet vectors must be of shape [3,number of sources]"
        if(nd>1):
            assert stoklet.shape[0] == nd and stoklet.shape[1] == 3 and stoklet.shape[2] == ns, "stoklet vectors must be of shape [nd,3,ns] where nd is number of densities, and ns is number of sources"
        stoklet = stoklet.reshape([nd,3,ns])
        ifstoklet = 1
    else:
        stoklet = np.zeros([nd,3,ns],dtype='double')
    if(strslet is not None and strsvec is not None):
        if(nd == 1 and ns>1):
            assert strslet.shape[0] == 3 and strslet.shape[1] == ns, "strslet vectors must be of shape [3,number of sources]"
            assert strsvec.shape[0] == 3 and strsvec.shape[1] == ns, "strsvec vectors must be of shape [3,number of sources]"
        if(nd == 1 and ns==1):
            assert strslet.shape[0] == 3, "strslet vectors must be of shape [3,number of sources]"
            assert strsvec.shape[0] == 3, "strsvec vectors must be of shape [3,number of sources]"
        if(nd>1):
            assert strslet.shape[0] == nd and strslet.shape[1] == 3 and strslet.shape[2] == ns, "strslet vectors must be of shape [nd,3,ns] where nd is number of densities, and ns is number of sources"
            assert strsvec.shape[0] == nd and strsvec.shape[1] == 3 and strsvec.shape[2] == ns, "strsvec vectors must be of shape [nd,3,ns] where nd is number of densities, and ns is number of sources"
        strslet = strslet.reshape([nd,3,ns])
        strsvec = strsvec.reshape([nd,3,ns])
        ifstrslet = 1
    else:
        strslet = np.zeros([nd,3,ns],dtype='double')
        strsvec = np.zeros([nd,3,ns],dtype='double')
    out.pot,out.pre,out.grad,out.pottarg,out.pretarg,out.gradtarg,out.ier = stfmm.stfmm3d(eps,sources,ifstoklet,stoklet,ifstrslet,strslet,strsvec,ifppreg,targets,ifppregtarg,nd,ns,nt)
    # The Fortran call always returns every array; null out the ones
    # below the requested evaluation level.
    if(ifppreg < 3):
        out.grad = None
    if(ifppregtarg < 3):
        out.gradtarg = None
    if(ifppreg < 2):
        out.pre = None
    if(ifppregtarg < 2):
        out.pretarg = None
    if(ifppreg < 1):
        out.pot = None
    if(ifppregtarg < 1):
        out.pottarg = None
    return out
def h3ddir(*,zk,sources,targets,charges=None,dipvec=None,
        pgt=0,nd=1,thresh=1e-16):
    r"""
    Direct (non-FMM) evaluation of the N-body Helmholtz interactions
    in three dimensions where the interaction kernel is given by
    $e^{ikr}/r$, and its gradients.

    .. math::

        u(x) = \sum_{j=1}^{N} c_{j} e^{ik |x-x_{j}|}/|x-x_{j}| - \nabla( e^{ik |x-x_{j}|}/|x-x_{j}|) \cdot v_{j} \, ,

    where $c_{j}$ are the charge densities,
    $v_{j}$ are the dipole orientation vectors, and
    $x_{j}$ are the source locations.
    When $|x-x_{m}| \leq$ thresh, the term corresponding to $x_{m}$ is
    dropped from the sum.

    Args:
        zk: complex
            Helmholtz parameter - k
        sources: float(3,n)
            source locations (x_{j})
        charges: complex(nd,n) or complex(n)
            charge densities (c_{j})
        dipvec: complex(nd,3,n) or complex(3,n)
            dipole orientation vectors (v_{j})
        targets: float(3,nt)
            target locations (x)
        pgt: integer
            target eval flag
            potential at targets evaluated if pgt = 1
            potential and gradient at targets evaluated if pgt = 2
        nd: integer
            number of densities
        thresh: contribution of source x_i, at location x ignored if |x-x_i|<=thresh

    Returns:
        out.pottarg - potential at target locations if requested
        out.gradtarg - gradient at target locations if requested

    Example:
        see hfmmexample.py
    """
    out = Output()
    assert sources.shape[0] == 3, "The first dimension of sources must be 3"
    if(np.size(np.shape(sources))==2):
        ns = sources.shape[1]
    if(np.size(np.shape(sources))==1):
        ns = 1
    ifcharge = 0
    ifdipole = 0
    if(pgt == 0):
        print("Nothing to compute, set either pg or pgt to non-zero")
        return out
    if charges is not None:
        if nd == 1:
            assert charges.shape[0] == ns, "Charges must be same length as second dimension of sources"
            # Fortran expects densities with an explicit leading nd axis.
            charges = charges.reshape(1,ns)
        if nd>1:
            assert charges.shape[0] == nd and charges.shape[1]==ns, "Charges must be of shape [nd,ns] where nd is number of densities, and ns is number of sources"
        ifcharge = 1
    if(dipvec is not None):
        if nd == 1 and ns>1:
            assert dipvec.shape[0] == 3 and dipvec.shape[1] == ns, "dipole vectors must be of shape [3,number of sources]"
            dipvec=dipvec.reshape(1,3,ns)
        if nd == 1 and ns==1:
            assert dipvec.shape[0] == 3, "dipole vectors must be of shape [3,number of sources]"
            dipvec=dipvec.reshape(1,3,ns)
        if nd>1:
            assert dipvec.shape[0] == nd and dipvec.shape[1] == 3 and dipvec.shape[2] == ns, "Dipole vectors must be of shape [nd,3,ns] where nd is number of densities, and ns is number of sources"
        ifdipole = 1
    assert targets.shape[0] == 3, "The first dimension of targets must be 3"
    nt = targets.shape[1]
    if(pgt == 1 and ifcharge == 1 and ifdipole == 0):
        out.pottarg = hfmm.h3ddirectcp(zk,sources,charges,targets,thresh)
    if(pgt == 2 and ifcharge == 1 and ifdipole == 0):
        out.pottarg,out.gradtarg = hfmm.h3ddirectcg(zk,sources,charges,targets,thresh)
    if(pgt == 1 and ifcharge == 0 and ifdipole == 1):
        out.pottarg = hfmm.h3ddirectdp(zk,sources,dipvec,targets,thresh)
    if(pgt == 2 and ifcharge == 0 and ifdipole == 1):
        out.pottarg,out.gradtarg = hfmm.h3ddirectdg(zk,sources,dipvec,targets,thresh)
    if(pgt == 1 and ifcharge == 1 and ifdipole == 1):
        out.pottarg = hfmm.h3ddirectcdp(zk,sources,charges,dipvec,targets,thresh)
    if(pgt == 2 and ifcharge == 1 and ifdipole == 1):
        out.pottarg,out.gradtarg = hfmm.h3ddirectcdg(zk,sources,charges,dipvec,targets,thresh)
    if(nd == 1):
        # Strip the artificial nd axis added above before returning.
        if(ifcharge==1):
            charges = charges.reshape(ns,)
        if(ifdipole==1):
            dipvec = dipvec.reshape(3,ns)
        if(pgt>0):
            out.pottarg = out.pottarg.reshape(nt,)
        if(pgt==2):
            out.gradtarg = out.gradtarg.reshape(3,nt)
    return out
def l3ddir(*,sources,targets,charges=None,dipvec=None,
        pgt=0,nd=1,thresh=1e-16):
    r"""
    Direct (non-FMM) evaluation of the N-body Laplace interactions
    in three dimensions, with kernel $1/r$ and its gradients:

    .. math::

        u(x) = \sum_{j=1}^{N} c_{j} /|x-x_{j}| - \nabla( 1/|x-x_{j}|) \cdot v_{j} \, ,

    where $c_{j}$ are the charge densities, $v_{j}$ the dipole
    orientation vectors, and $x_{j}$ the source locations.  Any
    contribution with $|x-x_{m}| \leq$ thresh is dropped from the sum.

    Args:
        sources: float(3,n)
            source locations (x_{j})
        charges: float(nd,n) or float(n)
            charge densities (c_{j})
        dipvec: float(nd,3,n) or float(3,n)
            dipole orientation vectors (v_{j})
        targets: float(3,nt)
            target locations (x)
        pgt: integer
            target eval flag: 1 -> potential, 2 -> + gradient, 3 -> + hessian
        nd: integer
            number of densities
        thresh: contribution of source x_i, at location x ignored if |x-x_i|<=thresh

    Returns:
        out.pottarg - potential at target locations if requested
        out.gradtarg - gradient at target locations if requested
        out.hesstarg - hessian at target locations if requested

    Example:
        see lfmmexample.py
    """
    res = Output()
    assert sources.shape[0] == 3, "The first dimension of sources must be 3"
    src_ndim = np.size(np.shape(sources))
    if src_ndim == 2:
        ns = sources.shape[1]
    if src_ndim == 1:
        ns = 1
    ifcharge = 0
    ifdipole = 0
    if pgt == 0:
        print("Nothing to compute, set either pg or pgt to non-zero")
        return res
    if charges is not None:
        if nd == 1:
            assert charges.shape[0] == ns, "Charges must be same length as second dimension of sources"
            charges = charges.reshape(1,ns)
        elif nd > 1:
            assert charges.shape[0] == nd and charges.shape[1]==ns, "Charges must be of shape [nd,ns] where nd is number of densities, and ns is number of sources"
        ifcharge = 1
    if dipvec is not None:
        if nd == 1 and ns > 1:
            assert dipvec.shape[0] == 3 and dipvec.shape[1] == ns, "dipole vectors must be of shape [3,number of sources]"
            dipvec = dipvec.reshape(1,3,ns)
        elif nd == 1 and ns == 1:
            assert dipvec.shape[0] == 3, "dipole vectors must be of shape [3,number of sources]"
            dipvec = dipvec.reshape(1,3,ns)
        elif nd > 1:
            assert dipvec.shape[0] == nd and dipvec.shape[1] == 3 and dipvec.shape[2] == ns, "Dipole vectors must be of shape [nd,3,ns] where nd is number of densities, and ns is number of sources"
        ifdipole = 1
    assert targets.shape[0] == 3, "The first dimension of targets must be 3"
    nt = targets.shape[1]
    # Dispatch: routine names follow the pattern l3ddirect{c|d|cd}{p|g|h}.
    density_tag = ('c' if ifcharge == 1 else '') + ('d' if ifdipole == 1 else '')
    if density_tag and pgt in (1, 2, 3):
        kernel = getattr(lfmm, 'l3ddirect' + density_tag + 'pgh'[pgt - 1])
        density_args = []
        if ifcharge == 1:
            density_args.append(charges)
        if ifdipole == 1:
            density_args.append(dipvec)
        result = kernel(sources, *density_args, targets, thresh)
        if pgt == 1:
            res.pottarg = result
        elif pgt == 2:
            res.pottarg, res.gradtarg = result
        else:
            res.pottarg, res.gradtarg, res.hesstarg = result
    if nd == 1:
        # Undo the artificial leading nd axis on both inputs and outputs.
        if ifcharge == 1:
            charges = charges.reshape(ns,)
        if ifdipole == 1:
            dipvec = dipvec.reshape(3,ns)
        if pgt > 0:
            res.pottarg = res.pottarg.reshape(nt,)
        if pgt == 2:
            res.gradtarg = res.gradtarg.reshape(3,nt)
        if pgt == 3:
            res.hesstarg = res.hesstarg.reshape(6,nt)
    return res
def em3ddir(*, eps, zk, sources, h_current=None, e_current=None, e_charge=None,
            targets=None, ifE=0, ifcurlE=0, ifdivE=0, nd=1, thresh=1e-16):
    r"""
    Compute, by direct pairwise evaluation,

        E = curl S_{k}[h_current] + S_{k}[e_current] + grad S_{k}[e_charge]   (1)

    using the vector Helmholtz kernels. divE and curlE are also computed
    when the corresponding flags are set.

    Remark: a stabilized representation is used for the divergence
    (integration by parts wherever possible). If the divergence is not
    requested the underlying routine is called with 3*nd densities, and
    with 4*nd densities otherwise.

    Args:
        eps (float): precision requested
        zk (complex): Helmholtz parameter
        sources (float(3,n)): source locations
        h_current (complex(nd,3,n) or complex(3,n)): a vector source
        e_current (complex(nd,3,n) or complex(3,n)): b vector source
        e_charge (complex(nd,n) or complex(n)): e_charge source
        targets (float(3,nt)): target locations
        ifE (integer): E is returned at the target locations if ifE = 1
        ifcurlE (integer): curl E is returned at the targets if ifcurlE = 1
        ifdivE (integer): div E is returned at the targets if ifdivE = 1
        nd (integer): number of densities
        thresh: contribution of source x_i, at location x ignored if |x-x_i|<=thresh

    Returns:
        Output object with out.E, out.curlE, out.divE at the target
        locations (each is None when not requested).

    Example:
        see emfmmexample.py
    """
    out = Output()
    # BUG FIX: the original used `targets == None`, which performs an
    # elementwise comparison when targets is a numpy array and raises
    # "truth value of an array is ambiguous". Identity check is correct.
    if targets is None:
        print("Nothing to compute, set targets")
        return out
    if ifE == 0 and ifcurlE == 0 and ifdivE == 0:
        print("Nothing to compute, set either ifE, ifcurlE or ifdivE to non-zero")
        return out

    assert sources.shape[0] == 3, "The first dimension of sources must be 3"
    ns = sources.shape[1] if sources.ndim == 2 else 1
    assert targets.shape[0] == 3, "The first dimension of targets must be 3"
    nt = targets.shape[1] if targets.ndim == 2 else 1

    def _validated(name, arr):
        # Shape-check a 3-vector density and normalize it to [nd, 3, ns].
        if nd == 1 and ns > 1:
            assert arr.shape[0] == 3 and arr.shape[1] == ns, \
                name + " vectors must be of shape [3,number of sources]"
        if nd == 1 and ns == 1:
            assert arr.shape[0] == 3, \
                name + " vectors must be of shape [3,number of sources]"
        if nd > 1:
            assert arr.shape[0] == nd and arr.shape[1] == 3 and arr.shape[2] == ns, \
                name + " vectors must be of shape [nd,3,ns] where nd is number of densities, and ns is number of sources"
        return arr.reshape([nd, 3, ns])

    ifh_current = 0
    ife_current = 0
    ife_charge = 0
    if h_current is not None:
        h_current = _validated("h_current", h_current)
        ifh_current = 1
    else:
        h_current = np.zeros([nd, 3, ns], dtype=complex)
    if e_current is not None:
        e_current = _validated("e_current", e_current)
        ife_current = 1
    else:
        e_current = np.zeros([nd, 3, ns], dtype=complex)
    if e_charge is not None:
        if nd == 1:
            assert e_charge.shape[0] == ns, \
                "e_charge must be same length as second dimension of sources"
        if nd > 1:
            assert e_charge.shape[0] == nd and e_charge.shape[1] == ns, \
                "e_charge must be of shape [nd,ns] where nd is number of densities, and ns is number of sources"
        e_charge = e_charge.reshape([nd, ns])
        ife_charge = 1
    else:
        e_charge = np.zeros([nd, ns], dtype=complex)

    out.E, out.curlE, out.divE = emfmm.em3ddirect(
        eps, zk, sources, ifh_current, h_current, ife_current, e_current,
        ife_charge, e_charge, targets, ifE, ifcurlE, ifdivE, thresh, nd, ns, nt)
    # Null out anything the caller did not ask for.
    if ifE == 0:
        out.E = None
    if ifcurlE == 0:
        out.curlE = None
    if ifdivE == 0:
        out.divE = None
    return out
def st3ddir(*, eps, sources, stoklet=None, strslet=None, strsvec=None, targets=None,
            ifppreg=0, ifppregtarg=0, nd=1, thresh=1e-16):
    r"""
    Evaluate all pairwise Stokes particle interactions (ignoring
    self-interactions) and interactions with targets:

        u(x) = sum_m G_{ij}(x,y^{(m)}) sigma^{(m)}_j
             + sum_m T_{ijk}(x,y^{(m)}) mu^{(m)}_j nu^{(m)}_k

    where sigma^{(m)} is the Stokeslet charge, mu^{(m)} the stresslet
    charge, and nu^{(m)} the stresslet orientation (each a 3-vector per
    source point y^{(m)}). For x a source point the self-interaction is
    omitted. Optionally the associated pressure p(x) and gradient
    grad u(x) are returned.

    Args:
        eps (float): precision requested
        sources (float(3,n)): source locations
        stoklet (float(nd,3,n) or float(3,n)): Stokeslet strengths (sigma)
        strslet (float(nd,3,n) or float(3,n)): stresslet strengths (mu)
        strsvec (float(nd,3,n) or float(3,n)): stresslet orientations (nu)
        targets (float(3,nt)): target locations (x)
        ifppreg (integer): source-side output flag
            1 = potential, 2 = +pressure, 3 = +gradient
        ifppregtarg (integer): target-side output flag (same meaning)
        nd (integer): number of densities
        thresh: contribution of source x_i, at location x ignored if |x-x_i|<=thresh

    Returns:
        Output object with out.pot/out.pre/out.grad (sources) and
        out.pottarg/out.pretarg/out.gradtarg (targets); entries not
        requested are None.

    Example:
        see stfmmexample.py
    """
    out = Output()
    if ifppreg == 0 and ifppregtarg == 0:
        print("Nothing to compute, set either ifppreg or ifppregtarg to non-zero")
        return out
    # BUG FIX: `== None` triggers elementwise numpy comparison for array
    # inputs (ambiguous truth value); identity tests are required here.
    if stoklet is None and strslet is None and strsvec is None:
        print("Nothing to compute, set either stoklet or strslet+strsvec to non-None")
        return out
    if (strslet is None) != (strsvec is None):
        # BUG FIX: message typo "mush be" -> "must be".
        print("strslet and strsvec must be both None or both not None")
        return out

    assert sources.shape[0] == 3, "The first dimension of sources must be 3"
    ns = sources.shape[1] if sources.ndim == 2 else 1
    if targets is not None:
        assert targets.shape[0] == 3, "The first dimension of targets must be 3"
        nt = targets.shape[1] if targets.ndim == 2 else 1
    else:
        targets = np.zeros([3, 0], dtype='double')
        nt = 0

    def _validated(name, arr):
        # Shape-check a 3-vector density and normalize it to [nd, 3, ns].
        if nd == 1 and ns > 1:
            assert arr.shape[0] == 3 and arr.shape[1] == ns, \
                name + " vectors must be of shape [3,number of sources]"
        if nd == 1 and ns == 1:
            assert arr.shape[0] == 3, \
                name + " vectors must be of shape [3,number of sources]"
        if nd > 1:
            assert arr.shape[0] == nd and arr.shape[1] == 3 and arr.shape[2] == ns, \
                name + " vectors must be of shape [nd,3,ns] where nd is number of densities, and ns is number of sources"
        return arr.reshape([nd, 3, ns])

    ifstoklet = 0
    ifstrslet = 0
    if stoklet is not None:
        stoklet = _validated("stoklet", stoklet)
        ifstoklet = 1
    else:
        stoklet = np.zeros([nd, 3, ns], dtype='double')
    if strslet is not None and strsvec is not None:
        strslet = _validated("strslet", strslet)
        strsvec = _validated("strsvec", strsvec)
        ifstrslet = 1
    else:
        strslet = np.zeros([nd, 3, ns], dtype='double')
        strsvec = np.zeros([nd, 3, ns], dtype='double')

    # NOTE(review): the source-side evaluation passes `nt` even though the
    # evaluation points are `sources` (ns points) — preserved from the
    # original; confirm against the f2py signature of stfmm.
    if ifstoklet == 1 and ifstrslet == 0:
        out.pot, out.pre, out.grad = stfmm.st3ddirectstokg(
            sources, stoklet, sources, thresh, nd, ns, nt)
        out.pottarg, out.pretarg, out.gradtarg = stfmm.st3ddirectstokg(
            sources, stoklet, targets, thresh, nd, ns, nt)
    else:
        out.pot, out.pre, out.grad = stfmm.st3ddirectstokstrsg(
            sources, stoklet, 1, strslet, strsvec, sources, thresh, nd, ns, nt)
        out.pottarg, out.pretarg, out.gradtarg = stfmm.st3ddirectstokstrsg(
            sources, stoklet, 1, strslet, strsvec, targets, thresh, nd, ns, nt)

    # Null out anything the caller did not ask for.
    if ifppreg < 3:
        out.grad = None
    if ifppregtarg < 3:
        out.gradtarg = None
    if ifppreg < 2:
        out.pre = None
    if ifppregtarg < 2:
        out.pretarg = None
    if ifppreg < 1:
        out.pot = None
    if ifppregtarg < 1:
        out.pottarg = None
    return out
def _norm_sq_pair(exact, approx):
    # Squared 2-norm of the reference data and of the error, taken over the
    # full arrays (la.norm on a 2-D array defaults to the Frobenius norm,
    # which equals the 2-norm of the flattened data — same as the original
    # reshape-then-norm code).
    ex = np.asarray(exact)
    ap = np.asarray(approx)
    return la.norm(ex) ** 2, la.norm(ex - ap) ** 2


def comperr(*, ntest, out, outex, pg=0, pgt=0, nd=1):
    """Relative l2 error of `out` against the reference `outex`.

    Compares the first `ntest` entries of pot/grad/hess (when pg >= 1/2/3)
    and pottarg/gradtarg/hesstarg (when pgt >= 1/2/3), accumulating
    |exact|^2 and |exact - computed|^2 over all requested fields, and
    returns sqrt(err / r).

    The original had eight near-identical stanzas (nd==1 vs nd>1, source
    vs target, pot/grad/hess); `[..., 0:ntest]` handles every rank
    (1-D pot, 2-D grad, 3-D multi-density arrays) uniformly, so the nd
    parameter no longer needs its own branch (kept for interface
    compatibility).
    """
    r = 0
    err = 0
    for suffix, flag in (("", pg), ("targ", pgt)):
        for attr, level in (("pot", 1), ("grad", 2), ("hess", 3)):
            if flag < level:
                continue
            name = attr + suffix
            dr, derr = _norm_sq_pair(getattr(outex, name)[..., 0:ntest],
                                     getattr(out, name)[..., 0:ntest])
            r = r + dr
            err = err + derr
    # As in the original, requesting nothing (r == 0) divides by zero.
    err = np.sqrt(err / r)
    return err
| 44.213608
| 209
| 0.591776
| 8,501
| 55,886
| 3.82602
| 0.035172
| 0.013928
| 0.021829
| 0.022137
| 0.964735
| 0.961076
| 0.951299
| 0.938263
| 0.925257
| 0.920246
| 0
| 0.027829
| 0.283076
| 55,886
| 1,263
| 210
| 44.248614
| 0.783956
| 0.249723
| 0
| 0.750323
| 0
| 0.027167
| 0.139344
| 0
| 0
| 0
| 0
| 0
| 0.093144
| 1
| 0.011643
| false
| 0
| 0.007762
| 0
| 0.072445
| 0.020699
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1531fc431d45203a3d223c9a9c97137da4ec4dc3
| 210
|
py
|
Python
|
tests/shell/src/shellfoundry_test_driver.py
|
QualiSystems/Shellfoundry-Traffic
|
967a6ab0208116506fcf42822bb3f293c3be18c6
|
[
"Apache-2.0"
] | null | null | null |
tests/shell/src/shellfoundry_test_driver.py
|
QualiSystems/Shellfoundry-Traffic
|
967a6ab0208116506fcf42822bb3f293c3be18c6
|
[
"Apache-2.0"
] | 4
|
2020-10-29T13:16:29.000Z
|
2020-11-22T09:00:05.000Z
|
tests/shell/src/shellfoundry_test_driver.py
|
QualiSystems/Shellfoundry-Traffic
|
967a6ab0208116506fcf42822bb3f293c3be18c6
|
[
"Apache-2.0"
] | null | null | null |
from cloudshell.shell.core.resource_driver_interface import ResourceDriverInterface
class ShellFoundry1Driver(ResourceDriverInterface):
    """Stub resource driver with no behavior of its own; everything is
    inherited from ResourceDriverInterface (presumably a shellfoundry
    test fixture — confirm against the test suite)."""
    pass
class ShellFoundry2Driver(ResourceDriverInterface):
    """Second stub resource driver, identical in content to
    ShellFoundry1Driver; adds nothing beyond ResourceDriverInterface."""
    pass
| 21
| 83
| 0.852381
| 17
| 210
| 10.411765
| 0.764706
| 0.305085
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010638
| 0.104762
| 210
| 9
| 84
| 23.333333
| 0.930851
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.4
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
157b8e1e56804217d32be4c30b01200b16bea713
| 145
|
py
|
Python
|
spyke/graphics/texturing/texture_proxy.py
|
m4reQ/spyke
|
4b1aa4c01c5c33e050f03eb9e8d396f9716f9e93
|
[
"Unlicense"
] | null | null | null |
spyke/graphics/texturing/texture_proxy.py
|
m4reQ/spyke
|
4b1aa4c01c5c33e050f03eb9e8d396f9716f9e93
|
[
"Unlicense"
] | 4
|
2022-01-24T01:35:28.000Z
|
2022-02-10T19:48:35.000Z
|
spyke/graphics/texturing/texture_proxy.py
|
m4reQ/spyke
|
4b1aa4c01c5c33e050f03eb9e8d396f9716f9e93
|
[
"Unlicense"
] | null | null | null |
class TextureProxy:
    """Lightweight read-only handle around an integer texture id."""

    def __init__(self, _id: int):
        # Stored privately; exposed through the read-only `id` property.
        self._id: int = _id

    @property
    def id(self) -> int:
        # BUG FIX: the original returned `self.id`, which re-enters this
        # property and recurses until RecursionError. Return the backing
        # attribute instead.
        return self._id
| 18.125
| 33
| 0.572414
| 19
| 145
| 4
| 0.473684
| 0.236842
| 0.236842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.317241
| 145
| 7
| 34
| 20.714286
| 0.767677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.166667
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
15bcc095bf40be3757e37468abc981ff700cef34
| 36
|
py
|
Python
|
Plugins/GUI/VFS_Window/__init__.py
|
bvbohnen/X4_Customizer
|
6f865008690916a66a44c97331d9a2692baedb35
|
[
"MIT"
] | 25
|
2018-12-10T12:52:11.000Z
|
2022-01-29T14:42:57.000Z
|
Plugins/GUI/VFS_Window/__init__.py
|
bvbohnen/X4_Customizer
|
6f865008690916a66a44c97331d9a2692baedb35
|
[
"MIT"
] | 4
|
2019-08-01T19:09:11.000Z
|
2022-01-02T01:47:42.000Z
|
Plugins/GUI/VFS_Window/__init__.py
|
bvbohnen/X4_Customizer
|
6f865008690916a66a44c97331d9a2692baedb35
|
[
"MIT"
] | 6
|
2019-02-16T08:39:04.000Z
|
2021-12-21T06:11:58.000Z
|
from .VFS_Window import VFS_Window
| 12
| 34
| 0.833333
| 6
| 36
| 4.666667
| 0.666667
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138889
| 36
| 2
| 35
| 18
| 0.903226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ec643d46e5739bc77e2a6dfc64067405326d947a
| 115
|
py
|
Python
|
app/core/helpers.py
|
NjengaSaruni/Accountant-Web-Server
|
e77124307f235a4a446e1251d2830d7587cb8719
|
[
"MIT"
] | null | null | null |
app/core/helpers.py
|
NjengaSaruni/Accountant-Web-Server
|
e77124307f235a4a446e1251d2830d7587cb8719
|
[
"MIT"
] | 19
|
2018-12-10T20:06:21.000Z
|
2021-06-10T21:03:31.000Z
|
app/core/helpers.py
|
NjengaSaruni/Accountant-Web-Server
|
e77124307f235a4a446e1251d2830d7587cb8719
|
[
"MIT"
] | null | null | null |
import random
def random_color():
    """Return a random hex color string such as '#A1B2C3'."""
    digits = "0123456789ABCDEF"
    return "#" + "".join(random.choice(digits) for _ in range(6))
| 23
| 79
| 0.678261
| 15
| 115
| 5.066667
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11
| 0.130435
| 115
| 5
| 79
| 23
| 0.65
| 0
| 0
| 0
| 0
| 0
| 0.560345
| 0.37931
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
ec8899065727013d02244db78d25a85de8563c64
| 421
|
py
|
Python
|
header.py
|
mejongetje/Dominoes
|
cefa513ef0b6c2d6cba095b7afa8222388c24b35
|
[
"MIT"
] | null | null | null |
header.py
|
mejongetje/Dominoes
|
cefa513ef0b6c2d6cba095b7afa8222388c24b35
|
[
"MIT"
] | null | null | null |
header.py
|
mejongetje/Dominoes
|
cefa513ef0b6c2d6cba095b7afa8222388c24b35
|
[
"MIT"
] | null | null | null |
def header():
    """Print the DOMINOES title banner to stdout."""
    filler = '|%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%|'
    banner = (
        ' =================================================== ',
        filler,
        '|%%%%%%%%%%%%%%%%%% D O M I N O E S %%%%%%%%%%%%%%%%|',
        filler,
        filler,
        '|===================================================|',
    )
    for row in banner:
        print(row)
| 60.142857
| 67
| 0.111639
| 16
| 421
| 2.9375
| 0.625
| 0.851064
| 0.638298
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114014
| 421
| 7
| 68
| 60.142857
| 0.126005
| 0
| 0
| 0.428571
| 0
| 0
| 0.764423
| 0.632212
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| true
| 0
| 0
| 0
| 0.142857
| 0.857143
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 9
|
ec9c6761b54ebd89cf2bba7ab3f434e2808cd24d
| 105
|
py
|
Python
|
app/txdb/__init__.py
|
tildecross/tildex-txdb
|
bc32e1875c55cedfbf4ce7d00174b4b7bf68525f
|
[
"BSD-3-Clause"
] | null | null | null |
app/txdb/__init__.py
|
tildecross/tildex-txdb
|
bc32e1875c55cedfbf4ce7d00174b4b7bf68525f
|
[
"BSD-3-Clause"
] | 1
|
2017-10-15T01:11:11.000Z
|
2019-10-18T20:02:32.000Z
|
app/txdb/__init__.py
|
tildecross/tildex-txdb
|
bc32e1875c55cedfbf4ce7d00174b4b7bf68525f
|
[
"BSD-3-Clause"
] | null | null | null |
from app.txdb.parser import TxDBParser
from app.txdb.core import TxDBCore
from app.txdb.plugins import *
| 26.25
| 38
| 0.819048
| 17
| 105
| 5.058824
| 0.529412
| 0.244186
| 0.383721
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 105
| 3
| 39
| 35
| 0.924731
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ecabb448df5dbf7c040fd25d0cfe19bdf6b67e7e
| 40,179
|
py
|
Python
|
picbackend/views/v2/navigator_views/tools/create_update_delete.py
|
bbcawodu/careadvisors-backend
|
5ebd3c0fc189b2486cea92b2a13c0bd8a0ee3838
|
[
"MIT"
] | null | null | null |
picbackend/views/v2/navigator_views/tools/create_update_delete.py
|
bbcawodu/careadvisors-backend
|
5ebd3c0fc189b2486cea92b2a13c0bd8a0ee3838
|
[
"MIT"
] | null | null | null |
picbackend/views/v2/navigator_views/tools/create_update_delete.py
|
bbcawodu/careadvisors-backend
|
5ebd3c0fc189b2486cea92b2a13c0bd8a0ee3838
|
[
"MIT"
] | null | null | null |
from django import forms
from django.core.validators import validate_email
from django.core.validators import URLValidator
from django.core.exceptions import ValidationError
import datetime
import pytz
from picbackend.views.utils import clean_int_value_from_dict_object
from picbackend.views.utils import clean_list_value_from_dict_object
from picbackend.views.utils import clean_string_value_from_dict_object
from picbackend.views.utils import clean_dict_value_from_dict_object
def validate_put_rqst_params(rqst_body, rqst_errors):
    """Validate the top level of a PUT body and dispatch on 'db_action'.

    Returns a dict with at least 'rqst_action'; 'update' and 'delete'
    also require an integer 'id'. 'create' and 'update' delegate the rest
    of the body to their row validators, which mutate the dict in place.
    """
    action = clean_string_value_from_dict_object(rqst_body, "root", "db_action", rqst_errors)
    validated_params = {'rqst_action': action}

    if action in ('update', 'delete'):
        validated_params['id'] = clean_int_value_from_dict_object(rqst_body, "root", "id", rqst_errors)

    if action == 'create':
        validate_create_row_params(rqst_body, validated_params, rqst_errors)
    elif action == 'update':
        validate_update_row_params(rqst_body, validated_params, rqst_errors)

    return validated_params
def validate_create_row_params(rqst_body, validated_params, rqst_errors):
    """Validate the body of a 'create' request, filling validated_params.

    All scalar fields are mandatory for create (no `in rqst_body` guards,
    unlike validate_update_row_params). Delegates the signup-related
    fields to validate_nav_signup_params at the end.
    """
    email = clean_string_value_from_dict_object(rqst_body, "root", "email", rqst_errors)
    if email and not rqst_errors:
        try:
            validate_email(email)
        except forms.ValidationError:
            rqst_errors.append("{!s} must be a valid email address".format(email))
    validated_params["email"] = email

    mpn = clean_string_value_from_dict_object(
        rqst_body, "root", "mpn", rqst_errors,
        empty_string_allowed=True, none_allowed=True)
    # A missing MPN is normalized to the empty string.
    validated_params["mpn"] = '' if mpn is None else mpn

    # first_name/last_name/county/type all followed the identical
    # clean-and-copy pattern; one loop replaces four copies (also avoids
    # shadowing the builtin `type`).
    for key in ("first_name", "last_name", "county", "type"):
        validated_params[key] = clean_string_value_from_dict_object(rqst_body, "root", key, rqst_errors)

    if 'add_base_locations' in rqst_body:
        raw_names = clean_list_value_from_dict_object(
            rqst_body, "root", "add_base_locations", rqst_errors, empty_list_allowed=True)
        validated_names = []
        for name in raw_names:
            if not isinstance(name, str):
                rqst_errors.append("Error: A base_location_name in 'add_base_locations' is not a string.")
                continue
            validated_names.append(name)
        validated_params['add_base_locations'] = validated_names

    if 'add_approved_clients_for_case_management' in rqst_body:
        raw_ids = clean_list_value_from_dict_object(
            rqst_body, "root", "add_approved_clients_for_case_management",
            rqst_errors, empty_list_allowed=True)
        validated_ids = []
        for client_id in raw_ids:
            if not isinstance(client_id, int):
                rqst_errors.append("Error: An approved_client_for_case_management in 'add_approved_clients_for_case_management' is not an integer.")
                continue
            validated_ids.append(client_id)
        validated_params['add_approved_clients_for_case_management'] = validated_ids

    validate_nav_signup_params(rqst_body, validated_params, rqst_errors)
def validate_update_row_params(rqst_body, validated_params, rqst_errors):
    """Validate the body of an 'update' request, filling validated_params.

    Every field is optional: a field is validated and copied only when its
    key is present in rqst_body. For each add_*/remove_* pair, 'add' takes
    precedence over 'remove' (if/elif, as in the original). Delegates the
    signup-related fields to validate_nav_signup_params at the end.
    """
    if "email" in rqst_body:
        email = clean_string_value_from_dict_object(rqst_body, "root", "email", rqst_errors)
        if email and not rqst_errors:
            try:
                validate_email(email)
            except forms.ValidationError:
                rqst_errors.append("{!s} must be a valid email address".format(email))
        validated_params["email"] = email

    if "mpn" in rqst_body:
        mpn = clean_string_value_from_dict_object(
            rqst_body, "root", "mpn", rqst_errors,
            empty_string_allowed=True, none_allowed=True)
        # A missing MPN is normalized to the empty string.
        validated_params["mpn"] = '' if mpn is None else mpn

    # The four scalar fields share the same clean-and-copy pattern.
    for key in ("first_name", "last_name", "county", "type"):
        if key in rqst_body:
            validated_params[key] = clean_string_value_from_dict_object(rqst_body, "root", key, rqst_errors)

    def _validate_typed_list(key, item_type, message, allow_empty):
        # Validate a homogeneous list field; non-matching items produce an
        # error message and are dropped. The 'add' variants tolerate an
        # empty list, the 'remove' variants do not.
        if allow_empty:
            raw = clean_list_value_from_dict_object(
                rqst_body, "root", key, rqst_errors, empty_list_allowed=True)
        else:
            raw = clean_list_value_from_dict_object(rqst_body, "root", key, rqst_errors)
        validated = []
        for item in raw:
            if not isinstance(item, item_type):
                rqst_errors.append(message)
                continue
            validated.append(item)
        validated_params[key] = validated

    if 'add_base_locations' in rqst_body:
        _validate_typed_list(
            'add_base_locations', str,
            "Error: A base_location_name in 'add_base_locations' is not a string.", True)
    elif 'remove_base_locations' in rqst_body:
        _validate_typed_list(
            'remove_base_locations', str,
            "Error: A base_location_name in 'remove_base_locations' is not a string.", False)

    if 'add_approved_clients_for_case_management' in rqst_body:
        _validate_typed_list(
            'add_approved_clients_for_case_management', int,
            "Error: An approved_client_for_case_management in 'add_approved_clients_for_case_management' is not an integer.", True)
    elif 'remove_approved_clients_for_case_management' in rqst_body:
        _validate_typed_list(
            'remove_approved_clients_for_case_management', int,
            "Error: An approved_client_for_case_management in 'remove_approved_clients_for_case_management' is not an integer.", False)

    validate_nav_signup_params(rqst_body, validated_params, rqst_errors)
def validate_nav_signup_params(rqst_body, validated_params, rqst_errors):
    """Validate navigator-signup fields, mutating validated_params in place.

    Handles three add_*/remove_* list pairs (locations worked, service
    expertises, insurance carrier specialties) — for each pair the 'add'
    key takes precedence over 'remove' — then a series of optional scalar
    address/contact fields, and finally delegates resume-row handling to
    validate_nav_signup_resume_params. The original spelled out each of
    the six list stanzas and nine field stanzas by hand; shared helpers
    below replace that duplication while emitting the exact same error
    messages.
    """

    def _clean_list(key, allow_empty):
        # 'add' variants tolerate an empty list; 'remove' variants do not.
        if allow_empty:
            return clean_list_value_from_dict_object(
                rqst_body, "root", key, rqst_errors, empty_list_allowed=True)
        return clean_list_value_from_dict_object(rqst_body, "root", key, rqst_errors)

    def _validate_locations(key, obj_label, allow_empty):
        # Entries must be dicts with 'name' and optional 'state_province'.
        validated = []
        for entry in _clean_list(key, allow_empty):
            if not isinstance(entry, dict):
                # "a object" (sic) preserved from the original message.
                rqst_errors.append("Error: A location object in '{}' is not a object.".format(key))
                continue
            info = {
                "name": clean_string_value_from_dict_object(entry, obj_label, 'name', rqst_errors),
                "state_province": clean_string_value_from_dict_object(
                    entry, obj_label, 'state_province', rqst_errors, none_allowed=True),
            }
            if not info['state_province']:
                info['state_province'] = 'not available'
            validated.append(info)
        validated_params[key] = validated

    def _validate_expertises(key, allow_empty):
        # Entries must be plain strings; anything else is reported and dropped.
        validated = []
        for entry in _clean_list(key, allow_empty):
            if not isinstance(entry, str):
                rqst_errors.append("Error: A service_expertise in '{}' is not a string.".format(key))
                continue
            validated.append(entry)
        validated_params[key] = validated

    def _validate_carriers(key, allow_empty):
        # Entries must be dicts; unlike locations, both sub-fields allow
        # empty strings and the label is shared between add and remove.
        validated = []
        for entry in _clean_list(key, allow_empty):
            if not isinstance(entry, dict):
                # "a object" (sic) preserved from the original message.
                rqst_errors.append("Error: An insurance_carrier object in '{}' is not a object.".format(key))
                continue
            info = {
                "name": clean_string_value_from_dict_object(
                    entry, "insurance_carrier_object", 'name', rqst_errors,
                    empty_string_allowed=True),
                "state_province": clean_string_value_from_dict_object(
                    entry, "insurance_carrier_object", 'state_province', rqst_errors,
                    empty_string_allowed=True),
            }
            if not info['state_province']:
                info['state_province'] = 'not available'
            validated.append(info)
        validated_params[key] = validated

    def _validate_field(key, **kwargs):
        # Optional scalar string field copied straight into validated_params.
        if key in rqst_body:
            validated_params[key] = clean_string_value_from_dict_object(
                rqst_body, "root", key, rqst_errors, **kwargs)

    if 'add_healthcare_locations_worked' in rqst_body:
        _validate_locations('add_healthcare_locations_worked', 'add_location_object', True)
    elif 'remove_healthcare_locations_worked' in rqst_body:
        _validate_locations('remove_healthcare_locations_worked', 'remove_location_object', False)

    if 'add_healthcare_service_expertises' in rqst_body:
        _validate_expertises('add_healthcare_service_expertises', True)
    elif 'remove_healthcare_service_expertises' in rqst_body:
        _validate_expertises('remove_healthcare_service_expertises', False)

    if 'add_insurance_carrier_specialties' in rqst_body:
        _validate_carriers('add_insurance_carrier_specialties', True)
    elif 'remove_insurance_carrier_specialties' in rqst_body:
        _validate_carriers('remove_insurance_carrier_specialties', False)

    _validate_field("address_line_1", empty_string_allowed=True)
    if "address_line_2" in rqst_body:
        address_line_2 = clean_string_value_from_dict_object(
            rqst_body, "root", "address_line_2", rqst_errors, empty_string_allowed=True)
        # A None secondary address line is normalized to ''.
        validated_params["address_line_2"] = '' if address_line_2 is None else address_line_2
    _validate_field("city", empty_string_allowed=True)
    _validate_field("state_province", empty_string_allowed=True)
    _validate_field("zipcode", empty_string_allowed=True)
    _validate_field("phone", none_allowed=True)
    _validate_field("reported_region", none_allowed=True)
    if "video_link" in rqst_body:
        video_link = clean_string_value_from_dict_object(
            rqst_body, "root", "video_link", rqst_errors, none_allowed=True)
        if video_link:
            try:
                URLValidator()(video_link)
            except ValidationError:
                rqst_errors.append("'video_link' is not a valid url. value is: {}".format(video_link))
        validated_params["video_link"] = video_link
    _validate_field("navigator_organization", none_allowed=True)

    validate_nav_signup_resume_params(rqst_body, validated_params, rqst_errors)
def validate_nav_signup_resume_params(rqst_body, validated_params, rqst_errors):
    """Validate at most one resume-row operation present in rqst_body.

    Exactly one of 'create_resume_row', 'update_resume_row' or
    'delete_resume_row' is handled per request; the first key found wins
    (mirrors the original if/elif chain). The validated sub-dict is stored
    into validated_params under the same key. Validation errors are appended
    to rqst_errors by the cleaning/validation helpers.
    """
    operation_validators = (
        ("create_resume_row", validate_create_resume_row_params),
        ("update_resume_row", validate_update_resume_row_params),
        ("delete_resume_row", validate_delete_resume_row_params),
    )
    for operation_key, row_validator in operation_validators:
        if operation_key not in rqst_body:
            continue
        resume_row_params = clean_dict_value_from_dict_object(
            rqst_body,
            "root",
            operation_key,
            rqst_errors
        )
        # A missing/invalid sub-dict yields an empty validated dict, matching
        # the original behavior.
        if resume_row_params:
            validated_params[operation_key] = row_validator(resume_row_params, rqst_errors)
        else:
            validated_params[operation_key] = {}
        break
def validate_create_resume_row_params(resume_row_params, rqst_errors):
    """Validate the payload for creating a resume row.

    'profile_description' is always extracted (empty string and None are both
    accepted); the nested 'create_education_rows' and 'create_job_rows' lists
    are validated only when their keys are present. Returns the dict of
    validated values; problems are appended to rqst_errors.
    """
    validated_resume_row_params = {
        'profile_description': clean_string_value_from_dict_object(
            resume_row_params,
            "create_resume_row",
            "profile_description",
            rqst_errors,
            empty_string_allowed=True,
            none_allowed=True
        ),
    }

    # Both nested lists are independent (not mutually exclusive).
    nested_list_specs = (
        ("create_education_rows", validate_create_education_row_params),
        ("create_job_rows", validate_create_job_row_params),
    )
    for list_key, row_validator in nested_list_specs:
        if list_key not in resume_row_params:
            continue
        raw_rows = clean_list_value_from_dict_object(
            resume_row_params,
            "create_resume_row",
            list_key,
            rqst_errors,
            empty_list_allowed=True
        )
        if raw_rows:
            validated_resume_row_params[list_key] = [
                row_validator(row_dict, row_index, rqst_errors)
                for row_index, row_dict in enumerate(raw_rows)
            ]
        else:
            validated_resume_row_params[list_key] = []

    return validated_resume_row_params
def _validate_resume_row_list_operation(resume_row_params, validated_resume_row_params,
                                        rqst_errors, operation_specs):
    """Process at most one list operation (create/update/delete) for one row category.

    operation_specs is an ordered sequence of
    (key, row_validator, empty_list_allowed) tuples. Only the FIRST key found
    in resume_row_params is handled — this replicates the original if/elif
    chain, so e.g. 'update_education_rows' is ignored when
    'create_education_rows' is also present. The validated list (possibly
    empty) is stored into validated_resume_row_params under the same key.
    """
    for list_key, row_validator, empty_list_allowed in operation_specs:
        if list_key not in resume_row_params:
            continue
        # Only the create variant passed empty_list_allowed in the original;
        # forward the kwarg only in that case to stay exactly faithful.
        extra_kwargs = {'empty_list_allowed': True} if empty_list_allowed else {}
        row_params = clean_list_value_from_dict_object(
            resume_row_params,
            "update_resume_row",
            list_key,
            rqst_errors,
            **extra_kwargs
        )
        validated_rows = []
        if row_params:
            for row_index, row_dict in enumerate(row_params):
                validated_rows.append(row_validator(row_dict, row_index, rqst_errors))
        validated_resume_row_params[list_key] = validated_rows
        break


def validate_update_resume_row_params(resume_row_params, rqst_errors):
    """Validate the payload for updating a resume row.

    'id' is required. 'profile_description' is optional. For each of the two
    nested row categories (education, job) at most one operation —
    create / update / delete — is processed, first match wins. Returns the
    dict of validated values; problems are appended to rqst_errors.
    """
    validated_resume_row_params = {
        'id': clean_int_value_from_dict_object(
            resume_row_params,
            "update_resume_row",
            "id",
            rqst_errors
        )
    }
    if 'profile_description' in resume_row_params:
        validated_resume_row_params['profile_description'] = clean_string_value_from_dict_object(
            resume_row_params,
            "update_resume_row",
            "profile_description",
            rqst_errors,
            empty_string_allowed=True,
            none_allowed=True
        )

    _validate_resume_row_list_operation(
        resume_row_params,
        validated_resume_row_params,
        rqst_errors,
        (
            ("create_education_rows", validate_create_education_row_params, True),
            ("update_education_rows", validate_update_education_row_params, False),
            ("delete_education_rows", validate_delete_education_row_params, False),
        )
    )
    _validate_resume_row_list_operation(
        resume_row_params,
        validated_resume_row_params,
        rqst_errors,
        (
            ("create_job_rows", validate_create_job_row_params, True),
            ("update_job_rows", validate_update_job_row_params, False),
            ("delete_job_rows", validate_delete_job_row_params, False),
        )
    )

    return validated_resume_row_params
def validate_delete_resume_row_params(resume_row_params, rqst_errors):
    """Validate the payload for deleting a resume row.

    Deletion requires only the row's database 'id'; any problem found by the
    cleaning helper is appended to rqst_errors.
    """
    resume_row_id = clean_int_value_from_dict_object(
        resume_row_params,
        "delete_resume_row",
        "id",
        rqst_errors
    )
    return {'id': resume_row_id}
def validate_create_education_row_params(education_row_dict, education_row_index, rqst_errors):
    """Validate one education row from a 'create_education_rows' list.

    'school' is required; 'major' and 'degree_type' accept None. A missing or
    empty 'degree_type' falls back to the placeholder "Not Available". The
    optional 'start_year_datetime'/'end_year_datetime' fields must be
    4-digit-year strings and are parsed into UTC-aware datetimes (None when
    absent or unparseable). Problems are appended to rqst_errors.
    """
    location = "create_education_row[{}]".format(education_row_index)
    validated_education_row_dict = {
        'school': clean_string_value_from_dict_object(
            education_row_dict,
            location,
            "school",
            rqst_errors,
        ),
        'major': clean_string_value_from_dict_object(
            education_row_dict,
            location,
            "major",
            rqst_errors,
            none_allowed=True
        ),
        'degree_type': clean_string_value_from_dict_object(
            education_row_dict,
            location,
            "degree_type",
            rqst_errors,
            none_allowed=True
        ),
    }
    # Empty/None degree_type becomes a display placeholder.
    if not validated_education_row_dict['degree_type']:
        validated_education_row_dict['degree_type'] = "Not Available"

    for year_key in ("start_year_datetime", "end_year_datetime"):
        if year_key in education_row_dict:
            # BUG FIX: the location label was "root" here while every other
            # field in this function reports "create_education_row[i]" —
            # error messages now name the actual row.
            year_string = clean_string_value_from_dict_object(
                education_row_dict, location, year_key, rqst_errors, none_allowed=True
            )
            validated_year_datetime = None
            if year_string:
                try:
                    validated_year_datetime = datetime.datetime.strptime(year_string, "%Y").replace(tzinfo=pytz.UTC)
                except ValueError:
                    rqst_errors.append(
                        '{} must be a properly formatted datetime string in UTC, eg. YYYY. Value is : {}'.format(
                            year_key, year_string)
                    )
            validated_education_row_dict[year_key] = validated_year_datetime

    return validated_education_row_dict
def validate_update_education_row_params(education_row_dict, education_row_index, rqst_errors):
    """Validate one education row from an 'update_education_rows' list.

    'id' is required; every other field is optional and only validated when
    its key is present. An explicitly empty/None 'degree_type' falls back to
    "Not Available". The optional year fields must be 4-digit-year strings and
    are parsed into UTC-aware datetimes (None when absent or unparseable).
    Problems are appended to rqst_errors.
    """
    location = "update_education_row[{}]".format(education_row_index)
    validated_education_row_dict = {
        'id': clean_int_value_from_dict_object(
            education_row_dict,
            location,
            "id",
            rqst_errors
        )
    }
    if 'school' in education_row_dict:
        validated_education_row_dict['school'] = clean_string_value_from_dict_object(
            education_row_dict,
            location,
            "school",
            rqst_errors,
        )
    if 'major' in education_row_dict:
        validated_education_row_dict['major'] = clean_string_value_from_dict_object(
            education_row_dict,
            location,
            "major",
            rqst_errors,
            none_allowed=True
        )
    if 'degree_type' in education_row_dict:
        validated_education_row_dict['degree_type'] = clean_string_value_from_dict_object(
            education_row_dict,
            location,
            "degree_type",
            rqst_errors,
            none_allowed=True
        )
        # BUG FIX: the placeholder fallback is guarded behind key presence so
        # a request without 'degree_type' cannot hit a KeyError on the lookup.
        if not validated_education_row_dict['degree_type']:
            validated_education_row_dict['degree_type'] = "Not Available"

    for year_key in ("start_year_datetime", "end_year_datetime"):
        if year_key in education_row_dict:
            # BUG FIX: location label was "root"; use the row location for
            # consistent error messages.
            year_string = clean_string_value_from_dict_object(
                education_row_dict, location, year_key, rqst_errors, none_allowed=True
            )
            validated_year_datetime = None
            if year_string:
                try:
                    validated_year_datetime = datetime.datetime.strptime(year_string, "%Y").replace(tzinfo=pytz.UTC)
                except ValueError:
                    rqst_errors.append(
                        '{} must be a properly formatted datetime string in UTC, eg. YYYY. Value is : {}'.format(
                            year_key, year_string)
                    )
            validated_education_row_dict[year_key] = validated_year_datetime

    return validated_education_row_dict
def validate_delete_education_row_params(education_row_dict, education_row_index, rqst_errors):
    """Validate one education row from a 'delete_education_rows' list.

    Deletion requires only the row's database 'id'; any problem found by the
    cleaning helper is appended to rqst_errors.
    """
    education_row_id = clean_int_value_from_dict_object(
        education_row_dict,
        "delete_education_row[{}]".format(education_row_index),
        "id",
        rqst_errors
    )
    return {'id': education_row_id}
def validate_create_job_row_params(job_row_dict, job_row_index, rqst_errors):
    """Validate one job row from a 'create_job_rows' list.

    'title' and 'company' are required; 'description' accepts None. The
    optional 'start_year_datetime'/'end_year_datetime' fields must be
    4-digit-year strings and are parsed into UTC-aware datetimes (None when
    absent or unparseable). Problems are appended to rqst_errors.
    """
    location = "create_job_row[{}]".format(job_row_index)
    validated_job_row_dict = {
        'title': clean_string_value_from_dict_object(
            job_row_dict,
            location,
            "title",
            rqst_errors,
        ),
        'company': clean_string_value_from_dict_object(
            job_row_dict,
            location,
            "company",
            rqst_errors,
        ),
        'description': clean_string_value_from_dict_object(
            job_row_dict,
            location,
            "description",
            rqst_errors,
            none_allowed=True
        ),
    }

    for year_key in ("start_year_datetime", "end_year_datetime"):
        if year_key in job_row_dict:
            # BUG FIX: the location label was "root" here while every other
            # field in this function reports "create_job_row[i]" — error
            # messages now name the actual row.
            year_string = clean_string_value_from_dict_object(
                job_row_dict, location, year_key, rqst_errors, none_allowed=True
            )
            validated_year_datetime = None
            if year_string:
                try:
                    validated_year_datetime = datetime.datetime.strptime(year_string, "%Y").replace(tzinfo=pytz.UTC)
                except ValueError:
                    rqst_errors.append(
                        '{} must be a properly formatted datetime string in UTC, eg. YYYY. Value is : {}'.format(
                            year_key, year_string)
                    )
            validated_job_row_dict[year_key] = validated_year_datetime

    return validated_job_row_dict
def validate_update_job_row_params(job_row_dict, job_row_index, rqst_errors):
    """Validate one job row from an 'update_job_rows' list.

    'id' is required; 'title', 'company' and 'description' are optional and
    only validated when present ('description' accepts None). The optional
    year fields must be 4-digit-year strings and are parsed into UTC-aware
    datetimes (None when absent or unparseable). Problems are appended to
    rqst_errors.
    """
    location = "update_job_row[{}]".format(job_row_index)
    validated_job_row_dict = {
        'id': clean_int_value_from_dict_object(
            job_row_dict,
            location,
            "id",
            rqst_errors
        )
    }
    if 'title' in job_row_dict:
        validated_job_row_dict['title'] = clean_string_value_from_dict_object(
            job_row_dict,
            location,
            "title",
            rqst_errors,
        )
    if 'company' in job_row_dict:
        validated_job_row_dict['company'] = clean_string_value_from_dict_object(
            job_row_dict,
            location,
            "company",
            rqst_errors,
        )
    if 'description' in job_row_dict:
        validated_job_row_dict['description'] = clean_string_value_from_dict_object(
            job_row_dict,
            location,
            "description",
            rqst_errors,
            none_allowed=True
        )

    for year_key in ("start_year_datetime", "end_year_datetime"):
        if year_key in job_row_dict:
            # BUG FIX: location label was "root"; use the row location for
            # consistent error messages.
            year_string = clean_string_value_from_dict_object(
                job_row_dict, location, year_key, rqst_errors, none_allowed=True
            )
            validated_year_datetime = None
            if year_string:
                try:
                    validated_year_datetime = datetime.datetime.strptime(year_string, "%Y").replace(tzinfo=pytz.UTC)
                except ValueError:
                    rqst_errors.append(
                        '{} must be a properly formatted datetime string in UTC, eg. YYYY. Value is : {}'.format(
                            year_key, year_string)
                    )
            validated_job_row_dict[year_key] = validated_year_datetime

    return validated_job_row_dict
def validate_delete_job_row_params(job_row_dict, job_row_index, rqst_errors):
    """Validate one job row from a 'delete_job_rows' list.

    Deletion requires only the row's database 'id'; any problem found by the
    cleaning helper is appended to rqst_errors.
    """
    job_row_id = clean_int_value_from_dict_object(
        job_row_dict,
        "delete_job_row[{}]".format(job_row_index),
        "id",
        rqst_errors
    )
    return {'id': job_row_id}
| 39.702569
| 153
| 0.641753
| 4,458
| 40,179
| 5.244729
| 0.030731
| 0.059022
| 0.048373
| 0.070698
| 0.941448
| 0.924982
| 0.905607
| 0.866687
| 0.839742
| 0.814721
| 0
| 0.000424
| 0.295229
| 40,179
| 1,011
| 154
| 39.74184
| 0.825264
| 0
| 0
| 0.700344
| 0
| 0
| 0.146694
| 0.0447
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016073
| false
| 0
| 0.011481
| 0
| 0.039036
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ecb5e6b35792099201a91da60705f05da00c1598
| 139
|
py
|
Python
|
site/ENV/lib/python3.7/site-packages/testapp/views.py
|
paulohenriquerosa/gnss-iot-server
|
6e7ff39bc83276d6ad86121083eb48d134d00f9d
|
[
"MIT"
] | null | null | null |
site/ENV/lib/python3.7/site-packages/testapp/views.py
|
paulohenriquerosa/gnss-iot-server
|
6e7ff39bc83276d6ad86121083eb48d134d00f9d
|
[
"MIT"
] | null | null | null |
site/ENV/lib/python3.7/site-packages/testapp/views.py
|
paulohenriquerosa/gnss-iot-server
|
6e7ff39bc83276d6ad86121083eb48d134d00f9d
|
[
"MIT"
] | null | null | null |
from testapp.websocket import echo
from testapp.websocket.chat import MyChat
from testapp.websocket.module_progressbar import MyProgressBar
| 46.333333
| 62
| 0.884892
| 18
| 139
| 6.777778
| 0.555556
| 0.270492
| 0.491803
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079137
| 139
| 3
| 62
| 46.333333
| 0.953125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
01ad6ff56fee17aeed2ea1fe8b8cb8dd65099335
| 99
|
py
|
Python
|
bindings/python/idocp/solver/__init__.py
|
z8674558/idocp
|
946524db7ae4591b578be2409ca619961572e7be
|
[
"BSD-3-Clause"
] | 43
|
2020-10-13T03:43:45.000Z
|
2021-09-23T05:29:48.000Z
|
bindings/python/idocp/solver/__init__.py
|
z8674558/idocp
|
946524db7ae4591b578be2409ca619961572e7be
|
[
"BSD-3-Clause"
] | 32
|
2020-10-21T09:40:16.000Z
|
2021-10-24T00:00:04.000Z
|
bindings/python/idocp/solver/__init__.py
|
z8674558/idocp
|
946524db7ae4591b578be2409ca619961572e7be
|
[
"BSD-3-Clause"
] | 4
|
2020-10-08T05:47:16.000Z
|
2021-10-15T12:15:26.000Z
|
from .ocp_solver import *
from .unconstr_ocp_solver import *
from .unconstr_parnmpc_solver import *
| 33
| 38
| 0.828283
| 14
| 99
| 5.5
| 0.428571
| 0.467532
| 0.38961
| 0.493506
| 0.701299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 99
| 3
| 38
| 33
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
01b74a2fa9fce81fdb95ca5ed4b9d8927283b63f
| 81
|
py
|
Python
|
pyislands/permutation/tsp/__init__.py
|
sglumac/pyislands
|
a5eaceb68a0f21bd8bc8586fdf8cf0d9b7a0134f
|
[
"MIT"
] | null | null | null |
pyislands/permutation/tsp/__init__.py
|
sglumac/pyislands
|
a5eaceb68a0f21bd8bc8586fdf8cf0d9b7a0134f
|
[
"MIT"
] | null | null | null |
pyislands/permutation/tsp/__init__.py
|
sglumac/pyislands
|
a5eaceb68a0f21bd8bc8586fdf8cf0d9b7a0134f
|
[
"MIT"
] | null | null | null |
import pyislands.permutation.tsp.graph
import pyislands.permutation.tsp.mutation
| 27
| 41
| 0.876543
| 10
| 81
| 7.1
| 0.6
| 0.422535
| 0.732394
| 0.816901
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.049383
| 81
| 2
| 42
| 40.5
| 0.922078
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
bf1496dc5f26400e2f1caf8b44d13ca1e63f3b43
| 87
|
py
|
Python
|
test/buildd/bf-projects/bf_selfbuild_project/bf_selfbuild_module.py
|
busunkim96/bigflow
|
967e262bf1a0c0958ff41af81191d48f286b43c1
|
[
"Apache-2.0"
] | 63
|
2020-08-15T19:02:06.000Z
|
2022-03-29T16:19:00.000Z
|
test/buildd/bf-projects/bf_selfbuild_project/bf_selfbuild_module.py
|
busunkim96/bigflow
|
967e262bf1a0c0958ff41af81191d48f286b43c1
|
[
"Apache-2.0"
] | 133
|
2020-08-18T03:51:05.000Z
|
2022-03-05T13:43:22.000Z
|
test/buildd/bf-projects/bf_selfbuild_project/bf_selfbuild_module.py
|
busunkim96/bigflow
|
967e262bf1a0c0958ff41af81191d48f286b43c1
|
[
"Apache-2.0"
] | 10
|
2020-08-25T05:19:31.000Z
|
2022-02-03T10:33:41.000Z
|
import bigflow.build.reflect as r
def project_spec():
return r.get_project_spec()
| 17.4
| 33
| 0.758621
| 14
| 87
| 4.5
| 0.785714
| 0.349206
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149425
| 87
| 4
| 34
| 21.75
| 0.851351
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
17699dc7df31ebae7bfd805690d43039644f7a27
| 7,300
|
py
|
Python
|
micropython/badger2040_modules_py/badge_image.py
|
nathanmayall/pimoroni-pico
|
ee12d846a125770a76e7ed331d290ce83f41a0b3
|
[
"MIT"
] | 1
|
2022-03-12T13:54:28.000Z
|
2022-03-12T13:54:28.000Z
|
micropython/badger2040_modules_py/badge_image.py
|
nathanmayall/pimoroni-pico
|
ee12d846a125770a76e7ed331d290ce83f41a0b3
|
[
"MIT"
] | null | null | null |
micropython/badger2040_modules_py/badge_image.py
|
nathanmayall/pimoroni-pico
|
ee12d846a125770a76e7ed331d290ce83f41a0b3
|
[
"MIT"
] | null | null | null |
# Code generated by data_to_py.py.
version = '0.1'
_data =\
b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x71\x0b\x40\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x09\xfc\x8b\xa8\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x04\xbf\x42\xee\x88\x00\x00\x00\x00\x80\x00'\
b'\x00\x18\x43\xd5\x6f\xb7\xf0\x00\x00\x04\x5b\x1a\x00\x00\x04\x0f'\
b'\x6f\xe3\x6d\x4d\x00\x00\x2b\xcd\xbc\x90\x00\x0e\xb5\xfa\xb7\xb7'\
b'\xec\x00\x00\x07\xfe\xee\x31\xa4\x27\x76\xaf\xf5\xf5\xbc\x00\x00'\
b'\x1a\xd7\x6b\x39\xe4\x05\xdd\xfa\xbe\xbe\xf2\x00\x00\x0f\x7a\xbe'\
b'\xb7\xdc\x27\x47\x5f\x6f\xe7\xa8\x00\x00\x2b\xaf\xd5\x1f\xee\x13'\
b'\x63\x7f\xfd\x7d\xf8\x00\x00\x0d\xf5\xff\xdf\xfd\x53\xb1\xde\xef'\
b'\xad\x44\x00\x00\x2e\xde\xae\xef\xf1\x0d\xf3\xef\xff\xf7\x98\x00'\
b'\x00\x0b\xfb\xfb\xfe\xfd\x0f\xad\xfe\xfd\x5e\xf0\x00\x00\x1d\x2d'\
b'\xaf\xb7\xee\x06\xbe\xff\x5f\xeb\xa0\x00\x00\x0d\xfe\xf5\xbf\xf7'\
b'\x0b\xd7\xff\xff\xfe\xe0\x00\x00\x73\x6b\x5f\xd7\xdb\x1e\xfa\xff'\
b'\xff\xdd\xa0\x00\x00\x07\xbf\xfd\xbf\xba\x03\xcd\xff\x6f\xf7\xe8'\
b'\x00\x00\x2e\xda\xd6\xff\xf4\x1f\xff\xff\xff\xfb\x20\x00\x00\x13'\
b'\xff\xff\x7b\xd6\x1d\xff\xfb\xad\xfb\xc0\x00\x00\x0b\x5e\xaf\xbf'\
b'\xf9\x05\xff\xbe\xdf\xfe\xd0\x00\x00\x57\xfb\xf6\xfd\xbb\x07\x5f'\
b'\xe7\x7b\xfa\x00\x00\x00\x45\x6d\x5f\x77\xee\x07\xdf\xf7\xbf\xff'\
b'\x20\x04\x00\x27\xbd\xfb\xbf\x7b\x0b\xff\xfb\xfd\xfb\x88\x00\x00'\
b'\x05\xdf\x3d\xff\xaf\x0a\xff\xf6\xff\xfe\x80\x00\x00\x15\x6d\xef'\
b'\xff\x7f\x01\xdf\xff\xbf\x7f\x00\x00\x00\x03\xff\xf6\xff\xff\x05'\
b'\x6f\xf6\xff\xef\x80\x00\x00\x4d\x5a\xff\xfd\xff\x02\xbb\xff\xff'\
b'\xf9\x80\x00\x00\x0f\x6f\x5d\xff\x7f\x07\x6f\x9b\xff\xfc\xa0\x00'\
b'\x00\x5d\xff\xff\xff\xf7\x00\xee\xee\xff\xee\x00\x00\x00\x17\x95'\
b'\x57\xfb\xff\x00\x37\xff\x7f\xff\xa0\x00\x00\x95\xff\xfd\xff\xff'\
b'\x22\x9d\xff\xff\xee\x80\x00\x00\x7e\xaa\xdf\xff\xff\x17\x97\x5f'\
b'\xff\xff\xc0\x40\x20\x2b\xff\xe7\xff\xff\x0f\x72\xef\xef\xfb\x60'\
b'\x00\x00\x1f\xf5\x3f\x7f\xff\x09\x3b\x7f\xff\xfd\x90\x20\x00\x4b'\
b'\x3f\xff\xff\xff\x00\x0d\xd7\xff\xef\x40\x00\x00\x5b\xad\xb7\xff'\
b'\xff\x04\x0f\x7f\xfd\xdb\x00\x00\x00\x1e\xf7\x7f\xff\xfe\x0a\x01'\
b'\xf7\xff\xfd\x90\x00\x00\x2f\xad\xff\xff\xfb\x18\x05\xbd\xff\xed'\
b'\xc0\x00\x04\x2b\xf6\xff\xff\xfb\x08\x10\xef\xff\xfe\x20\x04\x00'\
b'\xae\xbb\x7f\xff\xfc\x01\x02\xb5\xff\xf7\x00\x00\x00\x1b\xdf\xdf'\
b'\xff\xfe\x08\x00\xff\xff\xfd\x80\x00\x00\x1d\xf5\x7f\xfe\xee\x00'\
b'\x00\x16\xff\xef\x00\x00\x00\x16\xfd\xbf\xff\x65\x00\x02\xf7\xbf'\
b'\xf6\x80\x00\x00\x3b\x56\xff\xfe\x30\x10\x01\x3b\xe7\xda\x10\x00'\
b'\x00\x3f\xff\x7f\xe8\x80\x18\x00\x0f\x5f\xee\x00\x08\x00\x0d\xab'\
b'\xff\xf2\x04\x02\x00\x55\xf5\xf6\x02\x00\x00\x57\xee\xff\xc9\x80'\
b'\x10\x00\x0e\xbf\x77\x00\x00\x00\x1f\xb7\x7f\xc4\x00\x38\x00\x17'\
b'\xf7\x7c\x00\x00\x00\x0b\xff\xff\x50\x00\x11\x80\x0c\xe7\x7e\x00'\
b'\x00\x00\x0d\xd7\xff\xe0\x00\x01\x00\x05\xf0\x58\x00\x00\x00\x1e'\
b'\xfb\xbf\xc0\x01\x01\x00\x1f\xcd\xec\x00\x00\x00\x0f\xcf\xfe\x00'\
b'\x01\x03\x00\x07\xf7\x7c\x00\x00\x00\x0e\x8a\xdf\x00\x01\x0a\x00'\
b'\x05\xf8\x0c\x00\x00\x00\x0f\xcd\xfe\x80\x00\x0a\x00\x05\xd1\xc6'\
b'\x00\x00\x00\x08\x24\xfe\x80\x00\x20\x00\x05\x22\x76\x00\x00\x00'\
b'\x0c\x01\x7a\x00\x00\x00\x00\x03\x5f\xd6\x00\x00\x00\x3f\x70\xfc'\
b'\x00\x00\x07\x40\x01\x3f\xf3\x00\x00\x00\x1b\xf8\xfe\x00\x00\x09'\
b'\xe0\x00\x3f\xf3\x00\x00\x00\x1b\xfa\xfa\x00\x00\x09\x20\x03\x8f'\
b'\xf3\x00\x00\x00\x1d\xf9\xfc\x00\x00\x04\x20\x01\xcf\xfc\x80\x00'\
b'\x00\x3b\xf8\xf8\x00\x00\x62\x00\x00\xcf\xe3\x00\x00\x00\x32\xe1'\
b'\xf0\x00\x01\x72\x80\x01\xe0\x03\x00\x00\x00\x36\x71\xf0\x00\x00'\
b'\x17\x00\x01\xd9\x4b\x00\x00\x00\x39\x83\xf0\x00\x00\x0e\x82\x05'\
b'\xc9\x83\x80\x08\x00\x28\x83\xf0\x00\x07\x01\x81\x00\xfa\x8b\x00'\
b'\x00\x00\x3c\x07\xf8\x00\x23\x55\x80\x00\xff\xbf\x00\x40\x00\x17'\
b'\x2f\xfc\x00\x07\x00\xc1\x00\x7e\xd5\x85\x00\x00\x12\x3f\xc0\x00'\
b'\x0b\x15\x20\x11\x7e\x3f\x00\x04\x00\x3f\x7f\xf0\x08\x25\x82\xc4'\
b'\x00\x7d\xeb\x00\x00\x00\x2c\xdf\xe8\x01\x17\x51\xe0\x00\x3f\xff'\
b'\x00\x00\x00\x2f\xff\xc0\x00\x4b\x15\x61\x00\x1f\xd7\x00\x00\x00'\
b'\x3f\xff\xc0\x05\x0e\x09\xd4\x00\x57\xfe\x00\x00\x0a\x27\xff\xa0'\
b'\x00\x3f\x6b\x70\xa0\x0f\xea\x00\x00\x00\x1d\xff\x00\x00\x8d\x01'\
b'\xf8\x00\x07\xfe\x00\x00\x00\x17\xf8\x80\x4a\x76\x56\xbc\x00\x1f'\
b'\xfa\x00\x00\x00\x3f\xfd\x40\x08\x3f\x0b\x75\x50\x03\xfe\x00\x08'\
b'\x00\x17\xec\x00\x01\x7b\x25\xae\x00\x0d\xfb\x00\x00\x00\x1b\xfc'\
b'\x00\x15\xdd\x10\xfb\x48\x05\xfe\x00\x00\x00\x0f\xfc\x01\x41\xef'\
b'\x0f\x7d\x44\x07\xfe\x00\x00\x00\x17\xfc\x00\x36\xfb\x05\xaf\x90'\
b'\x0a\xf2\x00\x00\x00\x1f\xd8\x00\x8b\x77\x01\xfa\xa8\x03\x7c\x80'\
b'\x04\x00\x1b\xf4\x02\x3d\xfe\x00\x5f\xa4\x01\xfc\x00\x02\x00\x4d'\
b'\xf0\x08\x9f\xbf\x05\xaa\xfe\x01\x74\x80\x00\x20\x2f\xe0\x06\xed'\
b'\xd7\x12\x2f\xb5\x01\xdd\x00\x00\x10\x05\xe0\x11\x77\xff\x08\x5a'\
b'\xdf\x81\xf0\x00\x00\x00\x01\xb0\x0b\xbd\xdf\x05\x0f\x6b\xc0\xca'\
b'\x00\x94\x8a\x00\xe0\x2d\xee\xfb\x00\x97\xbd\xe3\x00\x02\x42\x41'\
b'\x00\xe0\xad\x7f\xff\x00\x7f\xff\x71\xa0\x08\x34\xf8\x00\x00\xff'\
b'\xae\xef\x05\xe9\xfb\xa9\x80\x03\xd7\xbe\x80\x61\xfe\xdb\xbf\x00'\
b'\x17\xfd\xe1\x00\x3f\x7a\xd6\x00\x60\xbb\xee\xff\x00\x5b\xfe\xf1'\
b'\x00\x2b\xdf\x7b\xc0\x03\xfd\x7f\xff\x05\xbd\xfb\xd8\x00\xfe\xeb'\
b'\xd7\x60\x23\xaf\xf7\x7f\x00\x56\xdf\xf9\x03\xeb\x5c\x5d\xf0\x03'\
b'\xde\xff\xff\x0a\x1f\x7f\xf8\x01\x3d\xec\x4d\xb0\x27\xef\x7f\xff'\
b'\x01\x27\xff\xfd\x03\xdd\x65\x26\xd0\x06\xb7\xd7\xff\x09\xe8\xbf'\
b'\xec\x02\xef\xb3\x2b\xf0\x2f\xff\xff\xff\x00\x01\xff\xfc\x13\x58'\
b'\xb5\x57\x50\x0d\xff\xff\xff\x00\x2e\xff\x6e\x0b\xeb\xa2\x65\xf8'\
b'\x2f\xff\xff\xff\x02\x7e\xef\xee\x05\xba\xf5\x76\xba\x3f\xdf\xff'\
b'\xff\x01\xab\xba\xfe\x13\x5b\xb1\xbb\xd8\x2f\x7f\xff\xff\x00\x36'\
b'\xff\x9b\x05\xef\xde\xdf\x78\x3f\xff\xff\xff\x05\x68\x5d\xef\x07'\
b'\x7f\xef\xff\xf8\xab\xff\xff\xff\x04\x4b\xef\xf7\x83\xbf\xf4\xff'\
b'\x78\x7a\xff\xff\xff\x02\x20\xb6\xff\x42\xff\xc3\xff\xf8\xff\x7f'\
b'\xff\xff\x00\x84\xdf\xbd\xc1\xd7\xf4\xea\xf0\xff\xff\xfe\xff\x00'\
b'\x23\x6d\xfe\xc0\xfb\x77\x7f\xc1\x3f\xff\xff\xff\x00\x15\xf7\xff'\
b'\xf0\x5b\x76\xd7\xc3\x4f\xff\xff\xff\x00\x09\x5f\xff\xd0\x3d\xbb'\
b'\x7f\x07\x75\xff\xff\xff\x00\x45\xff\xff\x78\x17\xef\xfc\x1b\x2f'\
b'\xff\xff\xff\x01\x04\xaf\xff\x57\x01\x77\xf0\x5f\xad\xff\xff\xff'\
b'\x00\x06\x3f\xff\xe5\x81\x3f\x02\xf6\xe7\xff\xff\xff\x01\x22\x1b'\
b'\xfd\xad\xa8\x00\x59\x9f\xff\xff\xff\xff\x04\x90\x6f\xff\xeb\xcf'\
b'\xdb\xed\x55\xea\xff\xff\xff\x04\x00\x37\xbf\x6e\xf5\xfe\xfe\xff'\
b'\xfe\xff\xff\xff\x00\x00\xff\xff\xf7\x5e\xb7\x6f\x7f\xeb\x7f\xff'\
b'\xff\x00\x01\xed\xfd\xfa\xf5\xfd\xf5\xff\xff\xbf\xff\xff\x00\x07'\
b'\x97\x7f\xbf\xbf\xff\xfe\xfb\xfa\xff\xff\xff\x00\x4c\x4f\xff\xef'\
b'\xf5\xff\x7f\xff\xff\x7f\xff\xff\x00\x00\x2f\xff\xff\x7f\xff\xff'\
b'\xff\xfb\x97\xff\xff\x00\x16\xdd\xff\xef\xff\xff\xff\xff\xfd\x4d'\
b'\xff\xff\x00\x71\x97\xff\xff\xff\xff\xff\xfa\xfe\x2f\x7f\xff\x00'\
b'\x12\x7d\xff\xef\xff\xff\xff\xff\xff\xa1\xff\xff\x01\x09\x57\xfd'\
b'\xff\xff\xff\xff\xff\x7e\x36\xbf\xff\x00\x85\xff\xff\xff\xff\xff'\
b'\xff\xff\xab\x5b\xeb\xff\x00\x18\x54\xff\xfb\xff\xff\xff\xff\xff'\
b'\x80\xbf\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff'\
_mvdata = memoryview(_data)
def data():
return _mvdata
| 63.478261
| 68
| 0.709452
| 1,786
| 7,300
| 2.896417
| 0.120941
| 0.164701
| 0.104388
| 0.076551
| 0.088343
| 0.064566
| 0.034216
| 0.034216
| 0.034216
| 0.028223
| 0
| 0.251186
| 0.017808
| 7,300
| 114
| 69
| 64.035088
| 0.470293
| 0.004384
| 0
| 0
| 1
| 0.954128
| 0.91646
| 0.916047
| 0
| 1
| 0
| 0
| 0
| 1
| 0.009174
| false
| 0
| 0
| 0.009174
| 0.018349
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bd9f0d838f96c1f1d71591933aae70bfbeaaab2e
| 208
|
py
|
Python
|
python_modules/libraries/dagster-docker/dagster_docker/__init__.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | 1
|
2021-07-03T09:05:58.000Z
|
2021-07-03T09:05:58.000Z
|
python_modules/libraries/dagster-docker/dagster_docker/__init__.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | 1
|
2021-06-21T18:30:02.000Z
|
2021-06-25T21:18:39.000Z
|
python_modules/libraries/dagster-docker/dagster_docker/__init__.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | 1
|
2021-11-30T21:40:46.000Z
|
2021-11-30T21:40:46.000Z
|
from dagster.core.utils import check_dagster_package_version
from .docker_run_launcher import DockerRunLauncher
from .version import __version__
check_dagster_package_version("dagster-docker", __version__)
| 29.714286
| 60
| 0.875
| 26
| 208
| 6.384615
| 0.461538
| 0.144578
| 0.228916
| 0.313253
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 208
| 6
| 61
| 34.666667
| 0.864583
| 0
| 0
| 0
| 0
| 0
| 0.067308
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
bda705e98e99df43b5bc9b7328e2966fc163569c
| 205
|
py
|
Python
|
detect_secrets/filters/__init__.py
|
paulo-sampaio/detect-secrets
|
73ffbc35a72cb316d9e1842cc131b6098cf3c36a
|
[
"Apache-2.0"
] | 2,212
|
2018-04-03T20:58:42.000Z
|
2022-03-31T17:58:38.000Z
|
detect_secrets/filters/__init__.py
|
paulo-sampaio/detect-secrets
|
73ffbc35a72cb316d9e1842cc131b6098cf3c36a
|
[
"Apache-2.0"
] | 354
|
2018-04-03T16:29:55.000Z
|
2022-03-31T18:26:26.000Z
|
detect_secrets/filters/__init__.py
|
paulo-sampaio/detect-secrets
|
73ffbc35a72cb316d9e1842cc131b6098cf3c36a
|
[
"Apache-2.0"
] | 298
|
2018-04-02T19:35:15.000Z
|
2022-03-28T04:52:14.000Z
|
from . import allowlist # noqa: F401
from . import gibberish # noqa: F401
from . import heuristic # noqa: F401
from . import regex # noqa: F401
from . import wordlist # noqa: F401
| 34.166667
| 40
| 0.634146
| 25
| 205
| 5.2
| 0.36
| 0.384615
| 0.369231
| 0.553846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 0.292683
| 205
| 5
| 41
| 41
| 0.793103
| 0.263415
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
bdc21f20f0c1fa44dd648a7cdaa5eae113f3db9a
| 1,121
|
py
|
Python
|
tests/odm/documents/test_replace.py
|
yo-mo/beanie
|
1641dd81be64dd1dc11af667deb2e50feb2de2be
|
[
"Apache-2.0"
] | 574
|
2021-03-16T12:49:12.000Z
|
2022-03-30T11:45:33.000Z
|
tests/odm/documents/test_replace.py
|
yo-mo/beanie
|
1641dd81be64dd1dc11af667deb2e50feb2de2be
|
[
"Apache-2.0"
] | 148
|
2021-03-16T22:02:37.000Z
|
2022-03-31T21:04:47.000Z
|
tests/odm/documents/test_replace.py
|
yo-mo/beanie
|
1641dd81be64dd1dc11af667deb2e50feb2de2be
|
[
"Apache-2.0"
] | 53
|
2021-03-16T21:53:14.000Z
|
2022-03-31T12:51:51.000Z
|
from tests.odm.models import Sample
async def test_replace_one(preset_documents):
count_1_before = await Sample.find_many(Sample.integer == 1).count()
count_2_before = await Sample.find_many(Sample.integer == 2).count()
a_2 = await Sample.find_one(Sample.integer == 2)
await Sample.find_one(Sample.integer == 1).replace_one(a_2)
count_1_after = await Sample.find_many(Sample.integer == 1).count()
count_2_after = await Sample.find_many(Sample.integer == 2).count()
assert count_1_after == count_1_before - 1
assert count_2_after == count_2_before + 1
async def test_replace_self(preset_documents):
count_1_before = await Sample.find_many(Sample.integer == 1).count()
count_2_before = await Sample.find_many(Sample.integer == 2).count()
a_1 = await Sample.find_one(Sample.integer == 1)
a_1.integer = 2
await a_1.replace()
count_1_after = await Sample.find_many(Sample.integer == 1).count()
count_2_after = await Sample.find_many(Sample.integer == 2).count()
assert count_1_after == count_1_before - 1
assert count_2_after == count_2_before + 1
| 36.16129
| 72
| 0.724353
| 177
| 1,121
| 4.276836
| 0.141243
| 0.159841
| 0.217966
| 0.200793
| 0.852048
| 0.852048
| 0.852048
| 0.72391
| 0.72391
| 0.72391
| 0
| 0.039362
| 0.161463
| 1,121
| 30
| 73
| 37.366667
| 0.765957
| 0
| 0
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0
| false
| 0
| 0.05
| 0
| 0.05
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
da473dbc1afe25ce1d504ecef141a244da6126a2
| 144,597
|
py
|
Python
|
optimizations/experiments/cse_benchmark.py
|
andrejjakovljevic/arkouda-optimizations
|
233ea5b970cea89c853186a2f297b4bdf11d836d
|
[
"MIT"
] | null | null | null |
optimizations/experiments/cse_benchmark.py
|
andrejjakovljevic/arkouda-optimizations
|
233ea5b970cea89c853186a2f297b4bdf11d836d
|
[
"MIT"
] | null | null | null |
optimizations/experiments/cse_benchmark.py
|
andrejjakovljevic/arkouda-optimizations
|
233ea5b970cea89c853186a2f297b4bdf11d836d
|
[
"MIT"
] | null | null | null |
import arkouda as ak
import math
import time
# Source : https://www.growingwiththeweb.com/sorting/radix-sort-lsd/
def radix_sort(array, radix=10):
"""
Performs an LSD radix sort on an array given a radix.
"""
if len(array) == 0:
return array
# Determine minimum and maximum values
min_value = array[0]
max_value = array[0]
for i in range(1, len(array)):
if array[i] < min_value:
min_value = array[i]
elif array[i] > max_value:
max_value = array[i]
# Perform counting sort on each exponent/digit, starting at the least
# significant digit
exponent = 1
while (max_value - min_value) / exponent >= 1:
array = counting_sort_by_digit(array, radix, exponent, min_value)
exponent *= radix
return array
def counting_sort_by_digit(array, radix, exponent, min_value):
bucket_index = -1
buckets = ak.zeros(radix, dtype=ak.int64)
output = ak.zeros(len(array), dtype=ak.int64)
# Count frequencies
for i in range(0, len(array)):
bucket_index = math.floor(((array[i] - min_value) / exponent) % radix)
buckets[bucket_index] += 1
# Compute cumulates
for i in range(1, radix):
buckets[i] += buckets[i - 1]
# Move records
for i in range(len(array) - 1, -1, -1):
bucket_index = math.floor(((array[i] - min_value) / exponent) % radix)
buckets[bucket_index] -= 1
output[buckets[bucket_index]] = array[i]
return output
# Source: https://gist.github.com/CTimmerman/2f8edd2de074ff3c28ebb148cc25426d
def sort_v2(source):
RADIX = 10
# buckets = tuple([] for i in range(RADIX))
buckets = [[] for i in range(RADIX)] # 3% faster than a tuple in Python 3.7.4 32-bit on Windows 10 Home.
# sort
maxLength = False
tmp = -1;
placement = 1
while not maxLength:
maxLength = True
# split input between lists
for i in range(len(source)):
j = source[i]
tmp = j // placement
buckets[tmp % RADIX].append(j)
if maxLength and tmp > 0:
maxLength = False
# empty lists into input array
a = 0
for bucket in buckets:
for i in bucket:
source[a] = i
a += 1
bucket.clear()
# move to next digit
placement *= RADIX
# Source: https://gitter.im/numba/numba?at=5c920df48126720abc1228b4
def cse(L, M):
R = ak.zeros(32, dtype=ak.int64)
D = ak.zeros(32, dtype=ak.int64)
C = ak.zeros(32, dtype=ak.int64)
K = ak.zeros(32, dtype=ak.int64)
V = ak.zeros(32, dtype=ak.int64)
Mtemp = ak.zeros(32, dtype=ak.int64)
Ltemp = ak.zeros(32, dtype=ak.int64)
Ltemp[17] = L[17] / 2.0 + L[18] / 2.0; # e1 ^ (e2 ^ einf)
Ltemp[18] = (-L[17]) + L[18]; # e1 ^ (e2 ^ e0)
Ltemp[19] = L[19] / 2.0 + L[20] / 2.0; # e1 ^ (e3 ^ einf)
Ltemp[20] = (-L[19]) + L[20]; # e1 ^ (e3 ^ e0)
Ltemp[22] = L[22] / 2.0 + L[23] / 2.0; # e2 ^ (e3 ^ einf)
Ltemp[23] = (-L[22]) + L[23]; # e2 ^ (e3 ^ e0)
Mtemp[17] = M[17] / 2.0 + M[18] / 2.0; # e1 ^ (e2 ^ einf)
Mtemp[18] = (-M[17]) + M[18]; # e1 ^ (e2 ^ e0)
Mtemp[19] = M[19] / 2.0 + M[20] / 2.0; # e1 ^ (e3 ^ einf)
Mtemp[20] = (-M[19]) + M[20]; # e1 ^ (e3 ^ e0)
Mtemp[22] = M[22] / 2.0 + M[23] / 2.0; # e2 ^ (e3 ^ einf)
Mtemp[23] = (-M[22]) + M[23]; # e2 ^ (e3 ^ e0)
K[0] = 2.0 + (-(M[16] * L[16])) + Mtemp[17] * Ltemp[18] + Mtemp[18] * Ltemp[17] + Mtemp[19] * Ltemp[20] + Mtemp[
20] * Ltemp[19] + M[21] * L[21] + Mtemp[22] * Ltemp[23] + Mtemp[23] * Ltemp[22] + M[24] * L[24] + M[25] * L[
25] + (-(L[16] * M[16])) + Ltemp[17] * Mtemp[18] + Ltemp[18] * Mtemp[17] + Ltemp[19] * Mtemp[20] + Ltemp[
20] * Mtemp[19] + L[21] * M[21] + Ltemp[22] * Mtemp[23] + Ltemp[23] * Mtemp[22] + L[24] * M[24] + L[25] * \
M[25]; # 1.0
K[6] = Mtemp[19] * Ltemp[23] + Mtemp[20] * Ltemp[22] + M[21] * L[24] + (-(Mtemp[22] * Ltemp[20])) + (
-(Mtemp[23] * Ltemp[19])) + (-(M[24] * L[21])) + Ltemp[19] * Mtemp[23] + Ltemp[20] * Mtemp[22] + L[21] * M[
24] + (-(Ltemp[22] * Mtemp[20])) + (-(Ltemp[23] * Mtemp[19])) + (-(L[24] * M[21])); # e1 ^ e2
K[7] = (-(Mtemp[17] * Ltemp[23])) + (-(Mtemp[18] * Ltemp[22])) + M[21] * L[25] + Mtemp[22] * Ltemp[18] + Mtemp[23] * \
Ltemp[17] + (-(M[25] * L[21])) + (-(Ltemp[17] * Mtemp[23])) + (-(Ltemp[18] * Mtemp[22])) + L[21] * M[25] + \
Ltemp[22] * Mtemp[18] + Ltemp[23] * Mtemp[17] + (-(L[25] * M[21])); # e1 ^ e3
K[8] = (-(M[16] * Ltemp[22])) + (-(Mtemp[17] * L[24])) + (-(Mtemp[19] * L[25])) + Mtemp[22] * L[16] + M[24] * Ltemp[
17] + M[25] * Ltemp[19] + (-(L[16] * Mtemp[22])) + (-(Ltemp[17] * M[24])) + (-(Ltemp[19] * M[25])) + Ltemp[22] * \
M[16] + L[24] * Mtemp[17] + L[25] * Mtemp[19]; # e1 ^ einf
K[9] = (-(M[16] * Ltemp[23])) + Mtemp[18] * L[24] + Mtemp[20] * L[25] + Mtemp[23] * L[16] + (
-(M[24] * Ltemp[18])) + (-(M[25] * Ltemp[20])) + (-(L[16] * Mtemp[23])) + Ltemp[18] * M[24] + Ltemp[20] * M[
25] + Ltemp[23] * M[16] + (-(L[24] * Mtemp[18])) + (-(L[25] * Mtemp[20])); # e1 ^ e0
K[10] = Mtemp[17] * Ltemp[20] + Mtemp[18] * Ltemp[19] + (-(Mtemp[19] * Ltemp[18])) + (-(Mtemp[20] * Ltemp[17])) + M[
24] * L[25] + (-(M[25] * L[24])) + Ltemp[17] * Mtemp[20] + Ltemp[18] * Mtemp[19] + (
-(Ltemp[19] * Mtemp[18])) + (-(Ltemp[20] * Mtemp[17])) + L[24] * M[25] + (-(L[25] * M[24])); # e2 ^ e3
K[11] = M[16] * Ltemp[19] + Mtemp[17] * L[21] + (-(Mtemp[19] * L[16])) + (-(M[21] * Ltemp[17])) + (
-(Mtemp[22] * L[25])) + M[25] * Ltemp[22] + L[16] * Mtemp[19] + Ltemp[17] * M[21] + (-(Ltemp[19] * M[16])) + (
-(L[21] * Mtemp[17])) + (-(Ltemp[22] * M[25])) + L[25] * Mtemp[22]; # e2 ^ einf
K[12] = M[16] * Ltemp[20] + (-(Mtemp[18] * L[21])) + (-(Mtemp[20] * L[16])) + M[21] * Ltemp[18] + Mtemp[23] * L[
25] + (-(M[25] * Ltemp[23])) + L[16] * Mtemp[20] + (-(Ltemp[18] * M[21])) + (-(Ltemp[20] * M[16])) + L[21] * \
Mtemp[18] + Ltemp[23] * M[25] + (-(L[25] * Mtemp[23])); # e2 ^ e0
K[13] = (-(M[16] * Ltemp[17])) + Mtemp[17] * L[16] + Mtemp[19] * L[21] + (-(M[21] * Ltemp[19])) + Mtemp[22] * L[
24] + (-(M[24] * Ltemp[22])) + (-(L[16] * Mtemp[17])) + Ltemp[17] * M[16] + Ltemp[19] * M[21] + (
-(L[21] * Mtemp[19])) + Ltemp[22] * M[24] + (-(L[24] * Mtemp[22])); # e3 ^ einf
K[14] = (-(M[16] * Ltemp[18])) + Mtemp[18] * L[16] + (-(Mtemp[20] * L[21])) + M[21] * Ltemp[20] + (
-(Mtemp[23] * L[24])) + M[24] * Ltemp[23] + (-(L[16] * Mtemp[18])) + Ltemp[18] * M[16] + (
-(Ltemp[20] * M[21])) + L[21] * Mtemp[20] + (-(Ltemp[23] * M[24])) + L[24] * Mtemp[23]; # e3 ^ e0
K[15] = (-(Mtemp[17] * Ltemp[18])) + Mtemp[18] * Ltemp[17] + (-(Mtemp[19] * Ltemp[20])) + Mtemp[20] * Ltemp[19] + (
-(Mtemp[22] * Ltemp[23])) + Mtemp[23] * Ltemp[22] + (-(Ltemp[17] * Mtemp[18])) + Ltemp[18] * Mtemp[17] + (
-(Ltemp[19] * Mtemp[20])) + Ltemp[20] * Mtemp[19] + (-(Ltemp[22] * Mtemp[23])) + Ltemp[23] * Mtemp[
22]; # einf ^ e0
K[26] = (-(Mtemp[17] * L[25])) + Mtemp[19] * L[24] + (-(M[21] * Ltemp[22])) + (-(Mtemp[22] * L[21])) + M[24] * \
Ltemp[19] + (-(M[25] * Ltemp[17])) + (-(Ltemp[17] * M[25])) + Ltemp[19] * M[24] + (-(L[21] * Mtemp[22])) + (
-(Ltemp[22] * M[21])) + L[24] * Mtemp[19] + (-(L[25] * Mtemp[17])); # e1 ^ (e2 ^ (e3 ^ einf))
K[27] = Mtemp[18] * L[25] + (-(Mtemp[20] * L[24])) + M[21] * Ltemp[23] + Mtemp[23] * L[21] + (
-(M[24] * Ltemp[20])) + M[25] * Ltemp[18] + Ltemp[18] * M[25] + (-(Ltemp[20] * M[24])) + L[21] * Mtemp[23] + \
Ltemp[23] * M[21] + (-(L[24] * Mtemp[20])) + L[25] * Mtemp[18]; # e1 ^ (e2 ^ (e3 ^ e0))
K[28] = M[16] * L[25] + (-(Mtemp[19] * Ltemp[23])) + Mtemp[20] * Ltemp[22] + Mtemp[22] * Ltemp[20] + (
-(Mtemp[23] * Ltemp[19])) + M[25] * L[16] + L[16] * M[25] + (-(Ltemp[19] * Mtemp[23])) + Ltemp[20] * Mtemp[22] + \
Ltemp[22] * Mtemp[20] + (-(Ltemp[23] * Mtemp[19])) + L[25] * M[16]; # e1 ^ (e2 ^ (einf ^ e0))
K[29] = (-(M[16] * L[24])) + Mtemp[17] * Ltemp[23] + (-(Mtemp[18] * Ltemp[22])) + (-(Mtemp[22] * Ltemp[18])) + \
Mtemp[23] * Ltemp[17] + (-(M[24] * L[16])) + (-(L[16] * M[24])) + Ltemp[17] * Mtemp[23] + (
-(Ltemp[18] * Mtemp[22])) + (-(Ltemp[22] * Mtemp[18])) + Ltemp[23] * Mtemp[17] + (
-(L[24] * M[16])); # e1 ^ (e3 ^ (einf ^ e0))
K[30] = M[16] * L[21] + (-(Mtemp[17] * Ltemp[20])) + Mtemp[18] * Ltemp[19] + Mtemp[19] * Ltemp[18] + (
-(Mtemp[20] * Ltemp[17])) + M[21] * L[16] + L[16] * M[21] + (-(Ltemp[17] * Mtemp[20])) + Ltemp[18] * Mtemp[19] + \
Ltemp[19] * Mtemp[18] + (-(Ltemp[20] * Mtemp[17])) + L[21] * M[16]; # e2 ^ (e3 ^ (einf ^ e0))
C[0] = 2.0 * (K[0] - K[15]); # 1.0
C[6] = 2.0 * (K[6] - K[28]); # e1 ^ e2
C[7] = 2.0 * (K[7] - K[29]); # e1 ^ e3
C[8] = 2.0 * (K[8] - K[8]); # e1 ^ einf
C[9] = 2.0 * K[9]; # e1 ^ e0
C[10] = 2.0 * (K[10] - K[30]); # e2 ^ e3
C[11] = 2.0 * (K[11] - K[11]); # e2 ^ einf
C[12] = 2.0 * K[12]; # e2 ^ e0
C[13] = 2.0 * (K[13] - K[13]); # e3 ^ einf
C[14] = 2.0 * K[14]; # e3 ^ e0
C[15] = 2.0 * (K[15] - K[15]); # einf ^ e0
C[26] = 2.0 * (K[26] - K[26]); # e1 ^ (e2 ^ (e3 ^ einf))
C[27] = 2.0 * K[27]; # e1 ^ (e2 ^ (e3 ^ e0))
C[28] = 2.0 * (K[28] - K[28]); # e1 ^ (e2 ^ (einf ^ e0))
C[29] = 2.0 * (K[29] - K[29]); # e1 ^ (e3 ^ (einf ^ e0))
C[30] = 2.0 * (K[30] - K[30]); # e2 ^ (e3 ^ (einf ^ e0))
D[0] = 1.0 - (K[15] * C[0] / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))) + (-(K[28] * (-C[6]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))))) + (-(K[29] * (-C[7]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))))) + K[8] * (-C[9]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (
-C[8]) + (-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[
14] * (-C[13]) + C[15] * (-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (
-(C[28] * C[28])) + (-(C[29] * C[29])) + (-(C[30] * C[30]))) + (-(K[30] * (-C[10]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))))) + K[11] * (-C[12]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (
-C[8]) + (-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[
14] * (-C[13]) + C[15] * (-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (
-(C[28] * C[28])) + (-(C[29] * C[29])) + (-(C[30] * C[30]))) + K[13] * (-C[14]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (
-C[8]) + (-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[
14] * (-C[13]) + C[15] * (-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (
-(C[28] * C[28])) + (-(C[29] * C[29])) + (-(C[30] * C[30]))) + K[15] * (-C[15]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (
-C[8]) + (-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[
14] * (-C[13]) + C[15] * (-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (
-(C[28] * C[28])) + (-(C[29] * C[29])) + (-(C[30] * C[30]))) + (-(K[26] * C[27] / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))))) + (-(K[28] * C[28] / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))))) + (-(K[29] * C[29] / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))))) + (-(K[30] * C[30] / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30])))))); # 1.0
D[6] = (-(K[15] * (-C[6]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))) + K[28] * C[0] / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) +
C[15] * (-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (
-(C[29] * C[29])) + (-(C[30] * C[30]))) + (-(K[29] * (-C[10]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))))) + K[8] * (-C[12]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) +
C[15] * (-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (
-(C[29] * C[29])) + (-(C[30] * C[30]))) + K[30] * (-C[7]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) +
C[15] * (-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (
-(C[29] * C[29])) + (-(C[30] * C[30]))) + (-(K[11] * (-C[9]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))))) + K[13] * C[27] / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) +
C[15] * (-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (
-(C[29] * C[29])) + (-(C[30] * C[30]))) + K[15] * C[28] / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) +
C[15] * (-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (
-(C[29] * C[29])) + (-(C[30] * C[30]))) + K[26] * (-C[14]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) +
C[15] * (-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (
-(C[29] * C[29])) + (-(C[30] * C[30]))) + K[28] * (-C[15]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) +
C[15] * (-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (
-(C[29] * C[29])) + (-(C[30] * C[30]))) + (-(K[29] * C[30] / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))))) + K[30] * C[29] / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) +
C[15] * (-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (
-(C[29] * C[29])) + (-(C[30] * C[30]))))); # e1 ^ e2
D[7] = (-(K[15] * (-C[7]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))) + K[28] * (-C[10]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) +
C[15] * (-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (
-(C[29] * C[29])) + (-(C[30] * C[30]))) + K[29] * C[0] / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) +
C[15] * (-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (
-(C[29] * C[29])) + (-(C[30] * C[30]))) + K[8] * (-C[14]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) +
C[15] * (-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (
-(C[29] * C[29])) + (-(C[30] * C[30]))) + (-(K[30] * (-C[6]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))))) + (-(K[11] * C[27] / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))))) + (-(K[13] * (-C[9]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))))) + K[15] * C[29] / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) +
C[15] * (-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (
-(C[29] * C[29])) + (-(C[30] * C[30]))) + (-(K[26] * (-C[12]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))))) + K[28] * C[30] / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) +
C[15] * (-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (
-(C[29] * C[29])) + (-(C[30] * C[30]))) + K[29] * (-C[15]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) +
C[15] * (-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (
-(C[29] * C[29])) + (-(C[30] * C[30]))) + (-(K[30] * C[28] / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))))))); # e1 ^ e3
D[8] = (-(K[15] * (-C[8]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))) + K[28] * (-C[11]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) +
C[15] * (-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (
-(C[29] * C[29])) + (-(C[30] * C[30]))) + K[29] * (-C[13]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) +
C[15] * (-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (
-(C[29] * C[29])) + (-(C[30] * C[30]))) + K[8] * C[0] / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) +
C[15] * (-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (
-(C[29] * C[29])) + (-(C[30] * C[30]))) + K[8] * (-C[15]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) +
C[15] * (-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (
-(C[29] * C[29])) + (-(C[30] * C[30]))) + (-(K[30] * C[26] / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))))) + (-(K[11] * (-C[6]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))))) + (-(K[11] * C[28] / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))))) + (-(K[13] * (-C[7]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))))) + (-(K[13] * C[29] / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))))) + (-(K[15] * (-C[8]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))))) + (-(K[26] * (-C[10]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))))) + (-(K[26] * C[30] / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))))) + (-(K[28] * (-C[11]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))))) + (-(K[29] * (-C[13]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))))) + K[30] * C[26] / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) +
C[15] * (-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (
-(C[29] * C[29])) + (-(C[30] * C[30]))))); # e1 ^ einf
D[9] = (-(K[15] * (-C[9]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))) + K[28] * (-C[12]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) +
C[15] * (-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (
-(C[29] * C[29])) + (-(C[30] * C[30]))) + K[29] * (-C[14]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) +
C[15] * (-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (
-(C[29] * C[29])) + (-(C[30] * C[30]))) + (-(K[30] * C[27] / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))))) + K[15] * (-C[9]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) +
C[15] * (-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (
-(C[29] * C[29])) + (-(C[30] * C[30]))) + K[28] * (-C[12]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) +
C[15] * (-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (
-(C[29] * C[29])) + (-(C[30] * C[30]))) + K[29] * (-C[14]) / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) +
C[15] * (-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (
-(C[29] * C[29])) + (-(C[30] * C[30]))) + (-(K[30] * C[27] / (
C[0] * C[0] + (-(C[6] * (-C[6]))) + (-(C[7] * (-C[7]))) + C[8] * (-C[9]) + C[9] * (-C[8]) + (
-(C[10] * (-C[10]))) + C[11] * (-C[12]) + C[12] * (-C[11]) + C[13] * (-C[14]) + C[14] * (-C[13]) + C[15] * (
-C[15]) + (-(C[26] * C[27])) + (-(C[27] * C[26])) + (-(C[28] * C[28])) + (-(C[29] * C[29])) + (
-(C[30] * C[30]))))))); # e1 ^ e0
# D[10] (e2 ^ e3 component) — generated geometric-algebra code.
# The original repeated one identical denominator expression for all 12
# terms; it is hoisted into _denom (no value in it changes mid-statement).
# Double negations such as -(C[6] * (-C[6])) were folded to C[6] * C[6]:
# IEEE-754 negation is exact, so every folded form is bit-identical.
# Each term keeps its own "/ _denom" and the original term order, so the
# rounding sequence — and therefore the result — matches the original.
_denom = (C[0] * C[0] + C[6] * C[6] + C[7] * C[7]
          - C[8] * C[9] - C[9] * C[8] + C[10] * C[10]
          - C[11] * C[12] - C[12] * C[11]
          - C[13] * C[14] - C[14] * C[13] - C[15] * C[15]
          - C[26] * C[27] - C[27] * C[26]
          - C[28] * C[28] - C[29] * C[29] - C[30] * C[30])
D[10] = -(-K[15] * C[10] / _denom + K[28] * C[7] / _denom
          - K[29] * C[6] / _denom + K[8] * C[27] / _denom
          + K[30] * C[0] / _denom - K[11] * C[14] / _denom
          + K[13] * C[12] / _denom + K[15] * C[30] / _denom
          - K[26] * C[9] / _denom - K[28] * C[29] / _denom
          + K[29] * C[28] / _denom - K[30] * C[15] / _denom)  # e2 ^ e3
# D[11] (e2 ^ einf component) — generated geometric-algebra code.
# The identical denominator was repeated for all 16 terms; hoisted into
# _denom. Double negations folded (IEEE-754 negation is exact, so the
# value is bit-identical). Term order and per-term "/ _denom" preserved
# so the rounding sequence matches the original. Some term pairs cancel
# algebraically but are kept so the float evaluation is unchanged.
_denom = (C[0] * C[0] + C[6] * C[6] + C[7] * C[7]
          - C[8] * C[9] - C[9] * C[8] + C[10] * C[10]
          - C[11] * C[12] - C[12] * C[11]
          - C[13] * C[14] - C[14] * C[13] - C[15] * C[15]
          - C[26] * C[27] - C[27] * C[26]
          - C[28] * C[28] - C[29] * C[29] - C[30] * C[30])
D[11] = -(-K[15] * C[11] / _denom + K[28] * C[8] / _denom
          + K[29] * C[26] / _denom - K[8] * C[6] / _denom
          + K[8] * C[28] / _denom - K[30] * C[13] / _denom
          + K[11] * C[0] / _denom - K[11] * C[15] / _denom
          + K[13] * C[10] / _denom - K[13] * C[30] / _denom
          + K[15] * C[11] / _denom - K[26] * C[7] / _denom
          + K[26] * C[29] / _denom - K[28] * C[8] / _denom
          - K[29] * C[26] / _denom + K[30] * C[13] / _denom)  # e2 ^ einf
# D[12] (e2 ^ e0 component) — generated geometric-algebra code.
# The identical denominator was repeated for all 8 terms; hoisted into
# _denom. Double negations folded (IEEE-754 negation is exact → value
# bit-identical). Term order and per-term "/ _denom" preserved so the
# rounding sequence matches the original. The generator emitted each of
# the four distinct terms twice; kept verbatim to preserve the result.
_denom = (C[0] * C[0] + C[6] * C[6] + C[7] * C[7]
          - C[8] * C[9] - C[9] * C[8] + C[10] * C[10]
          - C[11] * C[12] - C[12] * C[11]
          - C[13] * C[14] - C[14] * C[13] - C[15] * C[15]
          - C[26] * C[27] - C[27] * C[26]
          - C[28] * C[28] - C[29] * C[29] - C[30] * C[30])
D[12] = -(-K[15] * C[12] / _denom + K[28] * C[9] / _denom
          + K[29] * C[27] / _denom - K[30] * C[14] / _denom
          - K[15] * C[12] / _denom + K[28] * C[9] / _denom
          + K[29] * C[27] / _denom - K[30] * C[14] / _denom)  # e2 ^ e0
# D[13] (e3 ^ einf component) — generated geometric-algebra code.
# The identical denominator was repeated for all 16 terms; hoisted into
# _denom. Double negations folded (IEEE-754 negation is exact → value
# bit-identical). Term order and per-term "/ _denom" preserved so the
# rounding sequence matches the original.
_denom = (C[0] * C[0] + C[6] * C[6] + C[7] * C[7]
          - C[8] * C[9] - C[9] * C[8] + C[10] * C[10]
          - C[11] * C[12] - C[12] * C[11]
          - C[13] * C[14] - C[14] * C[13] - C[15] * C[15]
          - C[26] * C[27] - C[27] * C[26]
          - C[28] * C[28] - C[29] * C[29] - C[30] * C[30])
D[13] = -(-K[15] * C[13] / _denom - K[28] * C[26] / _denom
          + K[29] * C[8] / _denom - K[8] * C[7] / _denom
          + K[8] * C[29] / _denom + K[30] * C[11] / _denom
          - K[11] * C[10] / _denom + K[11] * C[30] / _denom
          + K[13] * C[0] / _denom - K[13] * C[15] / _denom
          + K[15] * C[13] / _denom + K[26] * C[6] / _denom
          - K[26] * C[28] / _denom + K[28] * C[26] / _denom
          - K[29] * C[8] / _denom - K[30] * C[11] / _denom)  # e3 ^ einf
# D[14] (e3 ^ e0 component) — generated geometric-algebra code.
# The identical denominator was repeated for all 8 terms; hoisted into
# _denom. Double negations folded (IEEE-754 negation is exact → value
# bit-identical). Term order and per-term "/ _denom" preserved so the
# rounding sequence matches the original. The generator emitted each of
# the four distinct terms twice; kept verbatim to preserve the result.
_denom = (C[0] * C[0] + C[6] * C[6] + C[7] * C[7]
          - C[8] * C[9] - C[9] * C[8] + C[10] * C[10]
          - C[11] * C[12] - C[12] * C[11]
          - C[13] * C[14] - C[14] * C[13] - C[15] * C[15]
          - C[26] * C[27] - C[27] * C[26]
          - C[28] * C[28] - C[29] * C[29] - C[30] * C[30])
D[14] = -(-K[15] * C[14] / _denom - K[28] * C[27] / _denom
          + K[29] * C[9] / _denom + K[30] * C[12] / _denom
          - K[15] * C[14] / _denom - K[28] * C[27] / _denom
          + K[29] * C[9] / _denom + K[30] * C[12] / _denom)  # e3 ^ e0
# D[15] (einf ^ e0 component) — generated geometric-algebra code.
# The identical denominator was repeated for all 12 terms; hoisted into
# _denom. Double negations folded (IEEE-754 negation is exact → value
# bit-identical). Term order and per-term "/ _denom" preserved so the
# rounding sequence matches the original.
_denom = (C[0] * C[0] + C[6] * C[6] + C[7] * C[7]
          - C[8] * C[9] - C[9] * C[8] + C[10] * C[10]
          - C[11] * C[12] - C[12] * C[11]
          - C[13] * C[14] - C[14] * C[13] - C[15] * C[15]
          - C[26] * C[27] - C[27] * C[26]
          - C[28] * C[28] - C[29] * C[29] - C[30] * C[30])
D[15] = -(-K[15] * C[15] / _denom - K[28] * C[28] / _denom
          - K[29] * C[29] / _denom + K[8] * C[9] / _denom
          - K[30] * C[30] / _denom + K[11] * C[12] / _denom
          + K[13] * C[14] / _denom + K[15] * C[0] / _denom
          + K[26] * C[27] / _denom + K[28] * C[6] / _denom
          + K[29] * C[7] / _denom + K[30] * C[10] / _denom)  # einf ^ e0
# D[26] (e1 ^ (e2 ^ (e3 ^ einf)) component) — generated geometric-algebra
# code. The identical denominator was repeated for all 16 terms; hoisted
# into _denom. Double negations folded (IEEE-754 negation is exact →
# value bit-identical). Term order and per-term "/ _denom" preserved so
# the rounding sequence matches the original.
_denom = (C[0] * C[0] + C[6] * C[6] + C[7] * C[7]
          - C[8] * C[9] - C[9] * C[8] + C[10] * C[10]
          - C[11] * C[12] - C[12] * C[11]
          - C[13] * C[14] - C[14] * C[13] - C[15] * C[15]
          - C[26] * C[27] - C[27] * C[26]
          - C[28] * C[28] - C[29] * C[29] - C[30] * C[30])
D[26] = -(K[15] * C[26] / _denom - K[28] * C[13] / _denom
          + K[29] * C[11] / _denom - K[8] * C[10] / _denom
          + K[8] * C[30] / _denom - K[30] * C[8] / _denom
          + K[11] * C[7] / _denom - K[11] * C[29] / _denom
          - K[13] * C[6] / _denom + K[13] * C[28] / _denom
          - K[15] * C[26] / _denom + K[26] * C[0] / _denom
          - K[26] * C[15] / _denom + K[28] * C[13] / _denom
          - K[29] * C[11] / _denom + K[30] * C[8] / _denom)  # e1 ^ (e2 ^ (e3 ^ einf))
# D[27] (e1 ^ (e2 ^ (e3 ^ e0)) component) — generated geometric-algebra
# code. The identical denominator was repeated for all 8 terms; hoisted
# into _denom. Double negations folded (IEEE-754 negation is exact →
# value bit-identical). Term order and per-term "/ _denom" preserved so
# the rounding sequence matches the original. The generator emitted each
# of the four distinct terms twice; kept verbatim to preserve the result.
_denom = (C[0] * C[0] + C[6] * C[6] + C[7] * C[7]
          - C[8] * C[9] - C[9] * C[8] + C[10] * C[10]
          - C[11] * C[12] - C[12] * C[11]
          - C[13] * C[14] - C[14] * C[13] - C[15] * C[15]
          - C[26] * C[27] - C[27] * C[26]
          - C[28] * C[28] - C[29] * C[29] - C[30] * C[30])
D[27] = -(K[15] * C[27] / _denom - K[28] * C[14] / _denom
          + K[29] * C[12] / _denom - K[30] * C[9] / _denom
          + K[15] * C[27] / _denom - K[28] * C[14] / _denom
          + K[29] * C[12] / _denom - K[30] * C[9] / _denom)  # e1 ^ (e2 ^ (e3 ^ e0))
# D[28]: coefficient of the e1 ^ (e2 ^ (einf ^ e0)) blade.
# The generated code re-evaluated the identical 16-term denominator for every
# one of the 12 fractions below; it is computed once here as `_den` (term order
# preserved, so the value is bit-identical to each inline copy).
# Sign negations are folded exactly: in IEEE 754, K * (-C) / d == -(K * C / d)
# and -(C * (-C)) == C * C, so every term below equals the generated form.
_den = (C[0] * C[0] + C[6] * C[6] + C[7] * C[7]
        - C[8] * C[9] - C[9] * C[8]
        + C[10] * C[10]
        - C[11] * C[12] - C[12] * C[11]
        - C[13] * C[14] - C[14] * C[13]
        - C[15] * C[15]
        - C[26] * C[27] - C[27] * C[26]
        - C[28] * C[28] - C[29] * C[29] - C[30] * C[30])
D[28] = -(K[15] * C[28] / _den
          - K[28] * C[15] / _den
          - K[29] * C[30] / _den
          + K[8] * C[12] / _den
          + K[30] * C[29] / _den
          - K[11] * C[9] / _den
          - K[13] * C[27] / _den
          - K[15] * C[6] / _den
          + K[26] * C[14] / _den
          + K[28] * C[0] / _den
          + K[29] * C[10] / _den
          - K[30] * C[7] / _den)  # e1 ^ (e2 ^ (einf ^ e0))
# D[29]: coefficient of the e1 ^ (e3 ^ (einf ^ e0)) blade.
# The generated code re-evaluated the identical 16-term denominator for every
# one of the 12 fractions below; it is computed once here as `_den` (term order
# preserved, so the value is bit-identical to each inline copy).
# Sign negations are folded exactly: in IEEE 754, K * (-C) / d == -(K * C / d)
# and -(C * (-C)) == C * C, so every term below equals the generated form.
_den = (C[0] * C[0] + C[6] * C[6] + C[7] * C[7]
        - C[8] * C[9] - C[9] * C[8]
        + C[10] * C[10]
        - C[11] * C[12] - C[12] * C[11]
        - C[13] * C[14] - C[14] * C[13]
        - C[15] * C[15]
        - C[26] * C[27] - C[27] * C[26]
        - C[28] * C[28] - C[29] * C[29] - C[30] * C[30])
D[29] = -(K[15] * C[29] / _den
          + K[28] * C[30] / _den
          - K[29] * C[15] / _den
          + K[8] * C[14] / _den
          - K[30] * C[28] / _den
          + K[11] * C[27] / _den
          - K[13] * C[9] / _den
          - K[15] * C[7] / _den
          - K[26] * C[12] / _den
          - K[28] * C[10] / _den
          + K[29] * C[0] / _den
          + K[30] * C[6] / _den)  # e1 ^ (e3 ^ (einf ^ e0))
# D[30]: coefficient of the e2 ^ (e3 ^ (einf ^ e0)) blade.
# The generated code re-evaluated the identical 16-term denominator for every
# one of the 12 fractions below; it is computed once here as `_den` (term order
# preserved, so the value is bit-identical to each inline copy).
# Sign negations are folded exactly: in IEEE 754, K * (-C) / d == -(K * C / d)
# and -(C * (-C)) == C * C, so every term below equals the generated form.
_den = (C[0] * C[0] + C[6] * C[6] + C[7] * C[7]
        - C[8] * C[9] - C[9] * C[8]
        + C[10] * C[10]
        - C[11] * C[12] - C[12] * C[11]
        - C[13] * C[14] - C[14] * C[13]
        - C[15] * C[15]
        - C[26] * C[27] - C[27] * C[26]
        - C[28] * C[28] - C[29] * C[29] - C[30] * C[30])
D[30] = -(K[15] * C[30] / _den
          - K[28] * C[29] / _den
          + K[29] * C[28] / _den
          - K[8] * C[27] / _den
          - K[30] * C[15] / _den
          + K[11] * C[14] / _den
          - K[13] * C[12] / _den
          - K[15] * C[10] / _den
          + K[26] * C[9] / _den
          + K[28] * C[7] / _den
          - K[29] * C[6] / _den
          + K[30] * C[0] / _den)  # e2 ^ (e3 ^ (einf ^ e0))
R[0] = D[0] * (1.0 + (-(M[16] * L[16])) + Mtemp[17] * Ltemp[18] + Mtemp[18] * Ltemp[17] + Mtemp[19] * Ltemp[20] +
Mtemp[20] * Ltemp[19] + M[21] * L[21] + Mtemp[22] * Ltemp[23] + Mtemp[23] * Ltemp[22] + M[24] * L[
24] + M[25] * L[25]) + (-(D[6] * (
Mtemp[19] * Ltemp[23] + Mtemp[20] * Ltemp[22] + M[21] * L[24] + (-(Mtemp[22] * Ltemp[20])) + (
-(Mtemp[23] * Ltemp[19])) + (-(M[24] * L[21]))))) + (-(D[7] * (
(-(Mtemp[17] * Ltemp[23])) + (-(Mtemp[18] * Ltemp[22])) + M[21] * L[25] + Mtemp[22] * Ltemp[18] + Mtemp[
23] * Ltemp[17] + (-(M[25] * L[21]))))) + D[8] * (
(-(M[16] * Ltemp[23])) + Mtemp[18] * L[24] + Mtemp[20] * L[25] + Mtemp[23] * L[16] + (
-(M[24] * Ltemp[18])) + (-(M[25] * Ltemp[20]))) + D[9] * (
(-(M[16] * Ltemp[22])) + (-(Mtemp[17] * L[24])) + (-(Mtemp[19] * L[25])) + Mtemp[22] * L[16] + M[
24] * Ltemp[17] + M[25] * Ltemp[19]) + (-(D[10] * (
Mtemp[17] * Ltemp[20] + Mtemp[18] * Ltemp[19] + (-(Mtemp[19] * Ltemp[18])) + (
-(Mtemp[20] * Ltemp[17])) + M[24] * L[25] + (-(M[25] * L[24]))))) + D[11] * (
M[16] * Ltemp[20] + (-(Mtemp[18] * L[21])) + (-(Mtemp[20] * L[16])) + M[21] * Ltemp[18] + Mtemp[
23] * L[25] + (-(M[25] * Ltemp[23]))) + D[12] * (
M[16] * Ltemp[19] + Mtemp[17] * L[21] + (-(Mtemp[19] * L[16])) + (-(M[21] * Ltemp[17])) + (
-(Mtemp[22] * L[25])) + M[25] * Ltemp[22]) + D[13] * (
(-(M[16] * Ltemp[18])) + Mtemp[18] * L[16] + (-(Mtemp[20] * L[21])) + M[21] * Ltemp[20] + (
-(Mtemp[23] * L[24])) + M[24] * Ltemp[23]) + D[14] * (
(-(M[16] * Ltemp[17])) + Mtemp[17] * L[16] + Mtemp[19] * L[21] + (-(M[21] * Ltemp[19])) + Mtemp[
22] * L[24] + (-(M[24] * Ltemp[22]))) + D[15] * (
(-(Mtemp[17] * Ltemp[18])) + Mtemp[18] * Ltemp[17] + (-(Mtemp[19] * Ltemp[20])) + Mtemp[20] *
Ltemp[19] + (-(Mtemp[22] * Ltemp[23])) + Mtemp[23] * Ltemp[22]) + (-(D[26] * (
Mtemp[18] * L[25] + (-(Mtemp[20] * L[24])) + M[21] * Ltemp[23] + Mtemp[23] * L[21] + (
-(M[24] * Ltemp[20])) + M[25] * Ltemp[18]))) + (-(D[27] * (
(-(Mtemp[17] * L[25])) + Mtemp[19] * L[24] + (-(M[21] * Ltemp[22])) + (-(Mtemp[22] * L[21])) + M[24] *
Ltemp[19] + (-(M[25] * Ltemp[17]))))) + (-(D[28] * (
M[16] * L[25] + (-(Mtemp[19] * Ltemp[23])) + Mtemp[20] * Ltemp[22] + Mtemp[22] * Ltemp[20] + (
-(Mtemp[23] * Ltemp[19])) + M[25] * L[16]))) + (-(D[29] * (
(-(M[16] * L[24])) + Mtemp[17] * Ltemp[23] + (-(Mtemp[18] * Ltemp[22])) + (-(Mtemp[22] * Ltemp[18])) +
Mtemp[23] * Ltemp[17] + (-(M[24] * L[16]))))) + (-(D[30] * (
M[16] * L[21] + (-(Mtemp[17] * Ltemp[20])) + Mtemp[18] * Ltemp[19] + Mtemp[19] * Ltemp[18] + (
-(Mtemp[20] * Ltemp[17])) + M[21] * L[16]))); # 1.0
# R[6]: e1 ^ e2 component — sum of D[i] times 6-/11-term bilinear forms in
# M/Mtemp and L/Ltemp. NOTE(review): auto-generated, kept byte-identical.
R[6] = D[0] * (Mtemp[19] * Ltemp[23] + Mtemp[20] * Ltemp[22] + M[21] * L[24] + (-(Mtemp[22] * Ltemp[20])) + (
-(Mtemp[23] * Ltemp[19])) + (-(M[24] * L[21]))) + D[6] * (
1.0 + (-(M[16] * L[16])) + Mtemp[17] * Ltemp[18] + Mtemp[18] * Ltemp[17] + Mtemp[19] * Ltemp[
20] + Mtemp[20] * Ltemp[19] + M[21] * L[21] + Mtemp[22] * Ltemp[23] + Mtemp[23] * Ltemp[22] + M[24] *
L[24] + M[25] * L[25]) + (-(D[7] * (
Mtemp[17] * Ltemp[20] + Mtemp[18] * Ltemp[19] + (-(Mtemp[19] * Ltemp[18])) + (
-(Mtemp[20] * Ltemp[17])) + M[24] * L[25] + (-(M[25] * L[24]))))) + D[8] * (
M[16] * Ltemp[20] + (-(Mtemp[18] * L[21])) + (-(Mtemp[20] * L[16])) + M[21] * Ltemp[18] + Mtemp[
23] * L[25] + (-(M[25] * Ltemp[23]))) + D[9] * (
M[16] * Ltemp[19] + Mtemp[17] * L[21] + (-(Mtemp[19] * L[16])) + (-(M[21] * Ltemp[17])) + (
-(Mtemp[22] * L[25])) + M[25] * Ltemp[22]) + D[10] * (
(-(Mtemp[17] * Ltemp[23])) + (-(Mtemp[18] * Ltemp[22])) + M[21] * L[25] + Mtemp[22] * Ltemp[18] +
Mtemp[23] * Ltemp[17] + (-(M[25] * L[21]))) + (-(D[11] * (
(-(M[16] * Ltemp[23])) + Mtemp[18] * L[24] + Mtemp[20] * L[25] + Mtemp[23] * L[16] + (
-(M[24] * Ltemp[18])) + (-(M[25] * Ltemp[20]))))) + (-(D[12] * (
(-(M[16] * Ltemp[22])) + (-(Mtemp[17] * L[24])) + (-(Mtemp[19] * L[25])) + Mtemp[22] * L[16] + M[24] *
Ltemp[17] + M[25] * Ltemp[19]))) + D[13] * (
Mtemp[18] * L[25] + (-(Mtemp[20] * L[24])) + M[21] * Ltemp[23] + Mtemp[23] * L[21] + (
-(M[24] * Ltemp[20])) + M[25] * Ltemp[18]) + D[14] * (
(-(Mtemp[17] * L[25])) + Mtemp[19] * L[24] + (-(M[21] * Ltemp[22])) + (-(Mtemp[22] * L[21])) + M[
24] * Ltemp[19] + (-(M[25] * Ltemp[17]))) + D[15] * (
M[16] * L[25] + (-(Mtemp[19] * Ltemp[23])) + Mtemp[20] * Ltemp[22] + Mtemp[22] * Ltemp[20] + (
-(Mtemp[23] * Ltemp[19])) + M[25] * L[16]) + D[26] * (
(-(M[16] * Ltemp[18])) + Mtemp[18] * L[16] + (-(Mtemp[20] * L[21])) + M[21] * Ltemp[20] + (
-(Mtemp[23] * L[24])) + M[24] * Ltemp[23]) + D[27] * (
(-(M[16] * Ltemp[17])) + Mtemp[17] * L[16] + Mtemp[19] * L[21] + (-(M[21] * Ltemp[19])) + Mtemp[
22] * L[24] + (-(M[24] * Ltemp[22]))) + D[28] * (
(-(Mtemp[17] * Ltemp[18])) + Mtemp[18] * Ltemp[17] + (-(Mtemp[19] * Ltemp[20])) + Mtemp[20] *
Ltemp[19] + (-(Mtemp[22] * Ltemp[23])) + Mtemp[23] * Ltemp[22]) + (-(D[29] * (
M[16] * L[21] + (-(Mtemp[17] * Ltemp[20])) + Mtemp[18] * Ltemp[19] + Mtemp[19] * Ltemp[18] + (
-(Mtemp[20] * Ltemp[17])) + M[21] * L[16]))) + D[30] * (
(-(M[16] * L[24])) + Mtemp[17] * Ltemp[23] + (-(Mtemp[18] * Ltemp[22])) + (
-(Mtemp[22] * Ltemp[18])) + Mtemp[23] * Ltemp[17] + (-(M[24] * L[16]))); # e1 ^ e2
# R[7]: e1 ^ e3 component — same generated structure as R[6] with permuted
# factors/signs. NOTE(review): kept byte-identical.
R[7] = D[0] * (
(-(Mtemp[17] * Ltemp[23])) + (-(Mtemp[18] * Ltemp[22])) + M[21] * L[25] + Mtemp[22] * Ltemp[18] + Mtemp[
23] * Ltemp[17] + (-(M[25] * L[21]))) + D[6] * (
Mtemp[17] * Ltemp[20] + Mtemp[18] * Ltemp[19] + (-(Mtemp[19] * Ltemp[18])) + (
-(Mtemp[20] * Ltemp[17])) + M[24] * L[25] + (-(M[25] * L[24]))) + D[7] * (
1.0 + (-(M[16] * L[16])) + Mtemp[17] * Ltemp[18] + Mtemp[18] * Ltemp[17] + Mtemp[19] * Ltemp[
20] + Mtemp[20] * Ltemp[19] + M[21] * L[21] + Mtemp[22] * Ltemp[23] + Mtemp[23] * Ltemp[22] + M[24] *
L[24] + M[25] * L[25]) + D[8] * (
(-(M[16] * Ltemp[18])) + Mtemp[18] * L[16] + (-(Mtemp[20] * L[21])) + M[21] * Ltemp[20] + (
-(Mtemp[23] * L[24])) + M[24] * Ltemp[23]) + D[9] * (
(-(M[16] * Ltemp[17])) + Mtemp[17] * L[16] + Mtemp[19] * L[21] + (-(M[21] * Ltemp[19])) + Mtemp[
22] * L[24] + (-(M[24] * Ltemp[22]))) + (-(D[10] * (
Mtemp[19] * Ltemp[23] + Mtemp[20] * Ltemp[22] + M[21] * L[24] + (-(Mtemp[22] * Ltemp[20])) + (
-(Mtemp[23] * Ltemp[19])) + (-(M[24] * L[21]))))) + (-(D[11] * (
Mtemp[18] * L[25] + (-(Mtemp[20] * L[24])) + M[21] * Ltemp[23] + Mtemp[23] * L[21] + (
-(M[24] * Ltemp[20])) + M[25] * Ltemp[18]))) + (-(D[12] * (
(-(Mtemp[17] * L[25])) + Mtemp[19] * L[24] + (-(M[21] * Ltemp[22])) + (-(Mtemp[22] * L[21])) + M[24] *
Ltemp[19] + (-(M[25] * Ltemp[17]))))) + (-(D[13] * (
(-(M[16] * Ltemp[23])) + Mtemp[18] * L[24] + Mtemp[20] * L[25] + Mtemp[23] * L[16] + (
-(M[24] * Ltemp[18])) + (-(M[25] * Ltemp[20]))))) + (-(D[14] * (
(-(M[16] * Ltemp[22])) + (-(Mtemp[17] * L[24])) + (-(Mtemp[19] * L[25])) + Mtemp[22] * L[16] + M[24] *
Ltemp[17] + M[25] * Ltemp[19]))) + D[15] * (
(-(M[16] * L[24])) + Mtemp[17] * Ltemp[23] + (-(Mtemp[18] * Ltemp[22])) + (
-(Mtemp[22] * Ltemp[18])) + Mtemp[23] * Ltemp[17] + (-(M[24] * L[16]))) + (-(D[26] * (
M[16] * Ltemp[20] + (-(Mtemp[18] * L[21])) + (-(Mtemp[20] * L[16])) + M[21] * Ltemp[18] + Mtemp[23] * L[
25] + (-(M[25] * Ltemp[23]))))) + (-(D[27] * (
M[16] * Ltemp[19] + Mtemp[17] * L[21] + (-(Mtemp[19] * L[16])) + (-(M[21] * Ltemp[17])) + (
-(Mtemp[22] * L[25])) + M[25] * Ltemp[22]))) + D[28] * (
M[16] * L[21] + (-(Mtemp[17] * Ltemp[20])) + Mtemp[18] * Ltemp[19] + Mtemp[19] * Ltemp[18] + (
-(Mtemp[20] * Ltemp[17])) + M[21] * L[16]) + D[29] * (
(-(Mtemp[17] * Ltemp[18])) + Mtemp[18] * Ltemp[17] + (-(Mtemp[19] * Ltemp[20])) + Mtemp[20] *
Ltemp[19] + (-(Mtemp[22] * Ltemp[23])) + Mtemp[23] * Ltemp[22]) + (-(D[30] * (
M[16] * L[25] + (-(Mtemp[19] * Ltemp[23])) + Mtemp[20] * Ltemp[22] + Mtemp[22] * Ltemp[20] + (
-(Mtemp[23] * Ltemp[19])) + M[25] * L[16]))); # e1 ^ e3
# R[8]: e1 ^ einf component. NOTE(review): D[8], D[11], D[13] and D[26] each
# appear in two terms while D[9], D[12], D[14], D[27] do not appear at all —
# this mirrors the complementary pattern in R[9], so it is presumably
# generator-intended for the null (einf/e0) basis; confirm against the
# generator before touching. Kept byte-identical.
R[8] = D[0] * (
(-(M[16] * Ltemp[22])) + (-(Mtemp[17] * L[24])) + (-(Mtemp[19] * L[25])) + Mtemp[22] * L[16] + M[24] *
Ltemp[17] + M[25] * Ltemp[19]) + D[6] * (
M[16] * Ltemp[19] + Mtemp[17] * L[21] + (-(Mtemp[19] * L[16])) + (-(M[21] * Ltemp[17])) + (
-(Mtemp[22] * L[25])) + M[25] * Ltemp[22]) + D[7] * (
(-(M[16] * Ltemp[17])) + Mtemp[17] * L[16] + Mtemp[19] * L[21] + (-(M[21] * Ltemp[19])) + Mtemp[
22] * L[24] + (-(M[24] * Ltemp[22]))) + D[8] * (
1.0 + (-(M[16] * L[16])) + Mtemp[17] * Ltemp[18] + Mtemp[18] * Ltemp[17] + Mtemp[19] * Ltemp[
20] + Mtemp[20] * Ltemp[19] + M[21] * L[21] + Mtemp[22] * Ltemp[23] + Mtemp[23] * Ltemp[22] + M[24] *
L[24] + M[25] * L[25]) + D[8] * (
(-(Mtemp[17] * Ltemp[18])) + Mtemp[18] * Ltemp[17] + (-(Mtemp[19] * Ltemp[20])) + Mtemp[20] *
Ltemp[19] + (-(Mtemp[22] * Ltemp[23])) + Mtemp[23] * Ltemp[22]) + (-(D[10] * (
(-(Mtemp[17] * L[25])) + Mtemp[19] * L[24] + (-(M[21] * Ltemp[22])) + (-(Mtemp[22] * L[21])) + M[24] *
Ltemp[19] + (-(M[25] * Ltemp[17]))))) + (-(D[11] * (
Mtemp[19] * Ltemp[23] + Mtemp[20] * Ltemp[22] + M[21] * L[24] + (-(Mtemp[22] * Ltemp[20])) + (
-(Mtemp[23] * Ltemp[19])) + (-(M[24] * L[21]))))) + (-(D[11] * (
M[16] * L[25] + (-(Mtemp[19] * Ltemp[23])) + Mtemp[20] * Ltemp[22] + Mtemp[22] * Ltemp[20] + (
-(Mtemp[23] * Ltemp[19])) + M[25] * L[16]))) + (-(D[13] * (
(-(Mtemp[17] * Ltemp[23])) + (-(Mtemp[18] * Ltemp[22])) + M[21] * L[25] + Mtemp[22] * Ltemp[18] + Mtemp[
23] * Ltemp[17] + (-(M[25] * L[21]))))) + (-(D[13] * (
(-(M[16] * L[24])) + Mtemp[17] * Ltemp[23] + (-(Mtemp[18] * Ltemp[22])) + (-(Mtemp[22] * Ltemp[18])) +
Mtemp[23] * Ltemp[17] + (-(M[24] * L[16]))))) + (-(D[15] * (
(-(M[16] * Ltemp[22])) + (-(Mtemp[17] * L[24])) + (-(Mtemp[19] * L[25])) + Mtemp[22] * L[16] + M[24] *
Ltemp[17] + M[25] * Ltemp[19]))) + (-(D[26] * (
Mtemp[17] * Ltemp[20] + Mtemp[18] * Ltemp[19] + (-(Mtemp[19] * Ltemp[18])) + (
-(Mtemp[20] * Ltemp[17])) + M[24] * L[25] + (-(M[25] * L[24]))))) + (-(D[26] * (
M[16] * L[21] + (-(Mtemp[17] * Ltemp[20])) + Mtemp[18] * Ltemp[19] + Mtemp[19] * Ltemp[18] + (
-(Mtemp[20] * Ltemp[17])) + M[21] * L[16]))) + (-(D[28] * (
M[16] * Ltemp[19] + Mtemp[17] * L[21] + (-(Mtemp[19] * L[16])) + (-(M[21] * Ltemp[17])) + (
-(Mtemp[22] * L[25])) + M[25] * Ltemp[22]))) + (-(D[29] * (
(-(M[16] * Ltemp[17])) + Mtemp[17] * L[16] + Mtemp[19] * L[21] + (-(M[21] * Ltemp[19])) + Mtemp[22] * L[
24] + (-(M[24] * Ltemp[22]))))) + D[30] * (
(-(Mtemp[17] * L[25])) + Mtemp[19] * L[24] + (-(M[21] * Ltemp[22])) + (-(Mtemp[22] * L[21])) + M[
24] * Ltemp[19] + (-(M[25] * Ltemp[17]))); # e1 ^ einf
# R[9]: e1 ^ e0 component. NOTE(review): D[9], D[12], D[14], D[27] each appear
# in two terms while D[8], D[11], D[13], D[26] are absent — complementary to
# R[8]'s pattern, presumably generator-intended; confirm against the generator.
# Kept byte-identical.
R[9] = D[0] * ((-(M[16] * Ltemp[23])) + Mtemp[18] * L[24] + Mtemp[20] * L[25] + Mtemp[23] * L[16] + (
-(M[24] * Ltemp[18])) + (-(M[25] * Ltemp[20]))) + D[6] * (
M[16] * Ltemp[20] + (-(Mtemp[18] * L[21])) + (-(Mtemp[20] * L[16])) + M[21] * Ltemp[18] + Mtemp[
23] * L[25] + (-(M[25] * Ltemp[23]))) + D[7] * (
(-(M[16] * Ltemp[18])) + Mtemp[18] * L[16] + (-(Mtemp[20] * L[21])) + M[21] * Ltemp[20] + (
-(Mtemp[23] * L[24])) + M[24] * Ltemp[23]) + D[9] * (
1.0 + (-(M[16] * L[16])) + Mtemp[17] * Ltemp[18] + Mtemp[18] * Ltemp[17] + Mtemp[19] * Ltemp[
20] + Mtemp[20] * Ltemp[19] + M[21] * L[21] + Mtemp[22] * Ltemp[23] + Mtemp[23] * Ltemp[22] + M[24] *
L[24] + M[25] * L[25]) + (-(D[9] * (
(-(Mtemp[17] * Ltemp[18])) + Mtemp[18] * Ltemp[17] + (-(Mtemp[19] * Ltemp[20])) + Mtemp[20] * Ltemp[
19] + (-(Mtemp[22] * Ltemp[23])) + Mtemp[23] * Ltemp[22]))) + (-(D[10] * (
Mtemp[18] * L[25] + (-(Mtemp[20] * L[24])) + M[21] * Ltemp[23] + Mtemp[23] * L[21] + (
-(M[24] * Ltemp[20])) + M[25] * Ltemp[18]))) + (-(D[12] * (
Mtemp[19] * Ltemp[23] + Mtemp[20] * Ltemp[22] + M[21] * L[24] + (-(Mtemp[22] * Ltemp[20])) + (
-(Mtemp[23] * Ltemp[19])) + (-(M[24] * L[21]))))) + D[12] * (
M[16] * L[25] + (-(Mtemp[19] * Ltemp[23])) + Mtemp[20] * Ltemp[22] + Mtemp[22] * Ltemp[20] + (
-(Mtemp[23] * Ltemp[19])) + M[25] * L[16]) + (-(D[14] * (
(-(Mtemp[17] * Ltemp[23])) + (-(Mtemp[18] * Ltemp[22])) + M[21] * L[25] + Mtemp[22] * Ltemp[18] + Mtemp[
23] * Ltemp[17] + (-(M[25] * L[21]))))) + D[14] * (
(-(M[16] * L[24])) + Mtemp[17] * Ltemp[23] + (-(Mtemp[18] * Ltemp[22])) + (
-(Mtemp[22] * Ltemp[18])) + Mtemp[23] * Ltemp[17] + (-(M[24] * L[16]))) + D[15] * (
(-(M[16] * Ltemp[23])) + Mtemp[18] * L[24] + Mtemp[20] * L[25] + Mtemp[23] * L[16] + (
-(M[24] * Ltemp[18])) + (-(M[25] * Ltemp[20]))) + (-(D[27] * (
Mtemp[17] * Ltemp[20] + Mtemp[18] * Ltemp[19] + (-(Mtemp[19] * Ltemp[18])) + (
-(Mtemp[20] * Ltemp[17])) + M[24] * L[25] + (-(M[25] * L[24]))))) + D[27] * (
M[16] * L[21] + (-(Mtemp[17] * Ltemp[20])) + Mtemp[18] * Ltemp[19] + Mtemp[19] * Ltemp[18] + (
-(Mtemp[20] * Ltemp[17])) + M[21] * L[16]) + D[28] * (
M[16] * Ltemp[20] + (-(Mtemp[18] * L[21])) + (-(Mtemp[20] * L[16])) + M[21] * Ltemp[18] + Mtemp[
23] * L[25] + (-(M[25] * Ltemp[23]))) + D[29] * (
(-(M[16] * Ltemp[18])) + Mtemp[18] * L[16] + (-(Mtemp[20] * L[21])) + M[21] * Ltemp[20] + (
-(Mtemp[23] * L[24])) + M[24] * Ltemp[23]) + (-(D[30] * (
Mtemp[18] * L[25] + (-(Mtemp[20] * L[24])) + M[21] * Ltemp[23] + Mtemp[23] * L[21] + (
-(M[24] * Ltemp[20])) + M[25] * Ltemp[18]))); # e1 ^ e0
# R[10]: e2 ^ e3 component — sum of D[i] times 6-/11-term bilinear forms in
# M/Mtemp and L/Ltemp. NOTE(review): auto-generated, kept byte-identical.
R[10] = D[0] * (Mtemp[17] * Ltemp[20] + Mtemp[18] * Ltemp[19] + (-(Mtemp[19] * Ltemp[18])) + (
-(Mtemp[20] * Ltemp[17])) + M[24] * L[25] + (-(M[25] * L[24]))) + (-(D[6] * (
(-(Mtemp[17] * Ltemp[23])) + (-(Mtemp[18] * Ltemp[22])) + M[21] * L[25] + Mtemp[22] * Ltemp[18] + Mtemp[
23] * Ltemp[17] + (-(M[25] * L[21]))))) + D[7] * (
Mtemp[19] * Ltemp[23] + Mtemp[20] * Ltemp[22] + M[21] * L[24] + (-(Mtemp[22] * Ltemp[20])) + (
-(Mtemp[23] * Ltemp[19])) + (-(M[24] * L[21]))) + D[8] * (
Mtemp[18] * L[25] + (-(Mtemp[20] * L[24])) + M[21] * Ltemp[23] + Mtemp[23] * L[21] + (
-(M[24] * Ltemp[20])) + M[25] * Ltemp[18]) + D[9] * (
(-(Mtemp[17] * L[25])) + Mtemp[19] * L[24] + (-(M[21] * Ltemp[22])) + (-(Mtemp[22] * L[21])) +
M[24] * Ltemp[19] + (-(M[25] * Ltemp[17]))) + D[10] * (
1.0 + (-(M[16] * L[16])) + Mtemp[17] * Ltemp[18] + Mtemp[18] * Ltemp[17] + Mtemp[19] * Ltemp[
20] + Mtemp[20] * Ltemp[19] + M[21] * L[21] + Mtemp[22] * Ltemp[23] + Mtemp[23] * Ltemp[22] + M[
24] * L[24] + M[25] * L[25]) + D[11] * (
(-(M[16] * Ltemp[18])) + Mtemp[18] * L[16] + (-(Mtemp[20] * L[21])) + M[21] * Ltemp[20] + (
-(Mtemp[23] * L[24])) + M[24] * Ltemp[23]) + D[12] * (
(-(M[16] * Ltemp[17])) + Mtemp[17] * L[16] + Mtemp[19] * L[21] + (-(M[21] * Ltemp[19])) + Mtemp[
22] * L[24] + (-(M[24] * Ltemp[22]))) + (-(D[13] * (
M[16] * Ltemp[20] + (-(Mtemp[18] * L[21])) + (-(Mtemp[20] * L[16])) + M[21] * Ltemp[18] + Mtemp[23] * L[
25] + (-(M[25] * Ltemp[23]))))) + (-(D[14] * (
M[16] * Ltemp[19] + Mtemp[17] * L[21] + (-(Mtemp[19] * L[16])) + (-(M[21] * Ltemp[17])) + (
-(Mtemp[22] * L[25])) + M[25] * Ltemp[22]))) + D[15] * (
M[16] * L[21] + (-(Mtemp[17] * Ltemp[20])) + Mtemp[18] * Ltemp[19] + Mtemp[19] * Ltemp[18] + (
-(Mtemp[20] * Ltemp[17])) + M[21] * L[16]) + D[26] * (
(-(M[16] * Ltemp[23])) + Mtemp[18] * L[24] + Mtemp[20] * L[25] + Mtemp[23] * L[16] + (
-(M[24] * Ltemp[18])) + (-(M[25] * Ltemp[20]))) + D[27] * (
(-(M[16] * Ltemp[22])) + (-(Mtemp[17] * L[24])) + (-(Mtemp[19] * L[25])) + Mtemp[22] * L[16] +
M[24] * Ltemp[17] + M[25] * Ltemp[19]) + (-(D[28] * (
(-(M[16] * L[24])) + Mtemp[17] * Ltemp[23] + (-(Mtemp[18] * Ltemp[22])) + (-(Mtemp[22] * Ltemp[18])) +
Mtemp[23] * Ltemp[17] + (-(M[24] * L[16]))))) + D[29] * (
M[16] * L[25] + (-(Mtemp[19] * Ltemp[23])) + Mtemp[20] * Ltemp[22] + Mtemp[22] * Ltemp[20] + (
-(Mtemp[23] * Ltemp[19])) + M[25] * L[16]) + D[30] * (
(-(Mtemp[17] * Ltemp[18])) + Mtemp[18] * Ltemp[17] + (-(Mtemp[19] * Ltemp[20])) + Mtemp[20] *
Ltemp[19] + (-(Mtemp[22] * Ltemp[23])) + Mtemp[23] * Ltemp[22]); # e2 ^ e3
# R[11]: e2 ^ einf component. NOTE(review): D[8], D[11], D[13], D[26] appear
# twice each and D[9]/D[12]/D[14]/D[27] are absent — same pairing as R[8],
# presumably generator-intended; confirm against the generator. Kept byte-identical.
R[11] = D[0] * (M[16] * Ltemp[19] + Mtemp[17] * L[21] + (-(Mtemp[19] * L[16])) + (-(M[21] * Ltemp[17])) + (
-(Mtemp[22] * L[25])) + M[25] * Ltemp[22]) + (-(D[6] * (
(-(M[16] * Ltemp[22])) + (-(Mtemp[17] * L[24])) + (-(Mtemp[19] * L[25])) + Mtemp[22] * L[16] + M[24] *
Ltemp[17] + M[25] * Ltemp[19]))) + D[7] * (
(-(Mtemp[17] * L[25])) + Mtemp[19] * L[24] + (-(M[21] * Ltemp[22])) + (-(Mtemp[22] * L[21])) +
M[24] * Ltemp[19] + (-(M[25] * Ltemp[17]))) + D[8] * (
Mtemp[19] * Ltemp[23] + Mtemp[20] * Ltemp[22] + M[21] * L[24] + (-(Mtemp[22] * Ltemp[20])) + (
-(Mtemp[23] * Ltemp[19])) + (-(M[24] * L[21]))) + D[8] * (
M[16] * L[25] + (-(Mtemp[19] * Ltemp[23])) + Mtemp[20] * Ltemp[22] + Mtemp[22] * Ltemp[20] + (
-(Mtemp[23] * Ltemp[19])) + M[25] * L[16]) + D[10] * (
(-(M[16] * Ltemp[17])) + Mtemp[17] * L[16] + Mtemp[19] * L[21] + (-(M[21] * Ltemp[19])) + Mtemp[
22] * L[24] + (-(M[24] * Ltemp[22]))) + D[11] * (
1.0 + (-(M[16] * L[16])) + Mtemp[17] * Ltemp[18] + Mtemp[18] * Ltemp[17] + Mtemp[19] * Ltemp[
20] + Mtemp[20] * Ltemp[19] + M[21] * L[21] + Mtemp[22] * Ltemp[23] + Mtemp[23] * Ltemp[22] + M[
24] * L[24] + M[25] * L[25]) + D[11] * (
(-(Mtemp[17] * Ltemp[18])) + Mtemp[18] * Ltemp[17] + (-(Mtemp[19] * Ltemp[20])) + Mtemp[20] *
Ltemp[19] + (-(Mtemp[22] * Ltemp[23])) + Mtemp[23] * Ltemp[22]) + (-(D[13] * (
Mtemp[17] * Ltemp[20] + Mtemp[18] * Ltemp[19] + (-(Mtemp[19] * Ltemp[18])) + (
-(Mtemp[20] * Ltemp[17])) + M[24] * L[25] + (-(M[25] * L[24]))))) + (-(D[13] * (
M[16] * L[21] + (-(Mtemp[17] * Ltemp[20])) + Mtemp[18] * Ltemp[19] + Mtemp[19] * Ltemp[18] + (
-(Mtemp[20] * Ltemp[17])) + M[21] * L[16]))) + (-(D[15] * (
M[16] * Ltemp[19] + Mtemp[17] * L[21] + (-(Mtemp[19] * L[16])) + (-(M[21] * Ltemp[17])) + (
-(Mtemp[22] * L[25])) + M[25] * Ltemp[22]))) + D[26] * (
(-(Mtemp[17] * Ltemp[23])) + (-(Mtemp[18] * Ltemp[22])) + M[21] * L[25] + Mtemp[22] * Ltemp[
18] + Mtemp[23] * Ltemp[17] + (-(M[25] * L[21]))) + D[26] * (
(-(M[16] * L[24])) + Mtemp[17] * Ltemp[23] + (-(Mtemp[18] * Ltemp[22])) + (
-(Mtemp[22] * Ltemp[18])) + Mtemp[23] * Ltemp[17] + (-(M[24] * L[16]))) + D[28] * (
(-(M[16] * Ltemp[22])) + (-(Mtemp[17] * L[24])) + (-(Mtemp[19] * L[25])) + Mtemp[22] * L[16] +
M[24] * Ltemp[17] + M[25] * Ltemp[19]) + (-(D[29] * (
(-(Mtemp[17] * L[25])) + Mtemp[19] * L[24] + (-(M[21] * Ltemp[22])) + (-(Mtemp[22] * L[21])) + M[24] *
Ltemp[19] + (-(M[25] * Ltemp[17]))))) + (-(D[30] * (
(-(M[16] * Ltemp[17])) + Mtemp[17] * L[16] + Mtemp[19] * L[21] + (-(M[21] * Ltemp[19])) + Mtemp[22] * L[
24] + (-(M[24] * Ltemp[22]))))); # e2 ^ einf
# R[12]: e2 ^ e0 component. NOTE(review): D[9], D[12], D[14], D[27] appear
# twice each and D[8]/D[11]/D[13]/D[26] are absent — complementary to R[11],
# presumably generator-intended; confirm against the generator. Kept byte-identical.
R[12] = D[0] * (
M[16] * Ltemp[20] + (-(Mtemp[18] * L[21])) + (-(Mtemp[20] * L[16])) + M[21] * Ltemp[18] + Mtemp[23] * L[
25] + (-(M[25] * Ltemp[23]))) + (-(D[6] * (
(-(M[16] * Ltemp[23])) + Mtemp[18] * L[24] + Mtemp[20] * L[25] + Mtemp[23] * L[16] + (
-(M[24] * Ltemp[18])) + (-(M[25] * Ltemp[20]))))) + D[7] * (
Mtemp[18] * L[25] + (-(Mtemp[20] * L[24])) + M[21] * Ltemp[23] + Mtemp[23] * L[21] + (
-(M[24] * Ltemp[20])) + M[25] * Ltemp[18]) + D[9] * (
Mtemp[19] * Ltemp[23] + Mtemp[20] * Ltemp[22] + M[21] * L[24] + (-(Mtemp[22] * Ltemp[20])) + (
-(Mtemp[23] * Ltemp[19])) + (-(M[24] * L[21]))) + (-(D[9] * (
M[16] * L[25] + (-(Mtemp[19] * Ltemp[23])) + Mtemp[20] * Ltemp[22] + Mtemp[22] * Ltemp[20] + (
-(Mtemp[23] * Ltemp[19])) + M[25] * L[16]))) + D[10] * (
(-(M[16] * Ltemp[18])) + Mtemp[18] * L[16] + (-(Mtemp[20] * L[21])) + M[21] * Ltemp[20] + (
-(Mtemp[23] * L[24])) + M[24] * Ltemp[23]) + D[12] * (
1.0 + (-(M[16] * L[16])) + Mtemp[17] * Ltemp[18] + Mtemp[18] * Ltemp[17] + Mtemp[19] * Ltemp[
20] + Mtemp[20] * Ltemp[19] + M[21] * L[21] + Mtemp[22] * Ltemp[23] + Mtemp[23] * Ltemp[22] + M[
24] * L[24] + M[25] * L[25]) + (-(D[12] * (
(-(Mtemp[17] * Ltemp[18])) + Mtemp[18] * Ltemp[17] + (-(Mtemp[19] * Ltemp[20])) + Mtemp[20] * Ltemp[
19] + (-(Mtemp[22] * Ltemp[23])) + Mtemp[23] * Ltemp[22]))) + (-(D[14] * (
Mtemp[17] * Ltemp[20] + Mtemp[18] * Ltemp[19] + (-(Mtemp[19] * Ltemp[18])) + (
-(Mtemp[20] * Ltemp[17])) + M[24] * L[25] + (-(M[25] * L[24]))))) + D[14] * (
M[16] * L[21] + (-(Mtemp[17] * Ltemp[20])) + Mtemp[18] * Ltemp[19] + Mtemp[19] * Ltemp[18] + (
-(Mtemp[20] * Ltemp[17])) + M[21] * L[16]) + D[15] * (
M[16] * Ltemp[20] + (-(Mtemp[18] * L[21])) + (-(Mtemp[20] * L[16])) + M[21] * Ltemp[18] + Mtemp[
23] * L[25] + (-(M[25] * Ltemp[23]))) + D[27] * (
(-(Mtemp[17] * Ltemp[23])) + (-(Mtemp[18] * Ltemp[22])) + M[21] * L[25] + Mtemp[22] * Ltemp[
18] + Mtemp[23] * Ltemp[17] + (-(M[25] * L[21]))) + (-(D[27] * (
(-(M[16] * L[24])) + Mtemp[17] * Ltemp[23] + (-(Mtemp[18] * Ltemp[22])) + (-(Mtemp[22] * Ltemp[18])) +
Mtemp[23] * Ltemp[17] + (-(M[24] * L[16]))))) + (-(D[28] * (
(-(M[16] * Ltemp[23])) + Mtemp[18] * L[24] + Mtemp[20] * L[25] + Mtemp[23] * L[16] + (
-(M[24] * Ltemp[18])) + (-(M[25] * Ltemp[20]))))) + D[29] * (
Mtemp[18] * L[25] + (-(Mtemp[20] * L[24])) + M[21] * Ltemp[23] + Mtemp[23] * L[21] + (
-(M[24] * Ltemp[20])) + M[25] * Ltemp[18]) + D[30] * (
(-(M[16] * Ltemp[18])) + Mtemp[18] * L[16] + (-(Mtemp[20] * L[21])) + M[21] * Ltemp[20] + (
-(Mtemp[23] * L[24])) + M[24] * Ltemp[23]); # e2 ^ e0
R[13] = D[0] * (
(-(M[16] * Ltemp[17])) + Mtemp[17] * L[16] + Mtemp[19] * L[21] + (-(M[21] * Ltemp[19])) + Mtemp[22] * L[
24] + (-(M[24] * Ltemp[22]))) + (-(D[6] * (
(-(Mtemp[17] * L[25])) + Mtemp[19] * L[24] + (-(M[21] * Ltemp[22])) + (-(Mtemp[22] * L[21])) + M[24] *
Ltemp[19] + (-(M[25] * Ltemp[17]))))) + (-(D[7] * (
(-(M[16] * Ltemp[22])) + (-(Mtemp[17] * L[24])) + (-(Mtemp[19] * L[25])) + Mtemp[22] * L[16] + M[24] *
Ltemp[17] + M[25] * Ltemp[19]))) + D[8] * (
(-(Mtemp[17] * Ltemp[23])) + (-(Mtemp[18] * Ltemp[22])) + M[21] * L[25] + Mtemp[22] * Ltemp[
18] + Mtemp[23] * Ltemp[17] + (-(M[25] * L[21]))) + D[8] * (
(-(M[16] * L[24])) + Mtemp[17] * Ltemp[23] + (-(Mtemp[18] * Ltemp[22])) + (
-(Mtemp[22] * Ltemp[18])) + Mtemp[23] * Ltemp[17] + (-(M[24] * L[16]))) + (-(D[10] * (
M[16] * Ltemp[19] + Mtemp[17] * L[21] + (-(Mtemp[19] * L[16])) + (-(M[21] * Ltemp[17])) + (
-(Mtemp[22] * L[25])) + M[25] * Ltemp[22]))) + D[11] * (
Mtemp[17] * Ltemp[20] + Mtemp[18] * Ltemp[19] + (-(Mtemp[19] * Ltemp[18])) + (
-(Mtemp[20] * Ltemp[17])) + M[24] * L[25] + (-(M[25] * L[24]))) + D[11] * (
M[16] * L[21] + (-(Mtemp[17] * Ltemp[20])) + Mtemp[18] * Ltemp[19] + Mtemp[19] * Ltemp[18] + (
-(Mtemp[20] * Ltemp[17])) + M[21] * L[16]) + D[13] * (
1.0 + (-(M[16] * L[16])) + Mtemp[17] * Ltemp[18] + Mtemp[18] * Ltemp[17] + Mtemp[19] * Ltemp[
20] + Mtemp[20] * Ltemp[19] + M[21] * L[21] + Mtemp[22] * Ltemp[23] + Mtemp[23] * Ltemp[22] + M[
24] * L[24] + M[25] * L[25]) + D[13] * (
(-(Mtemp[17] * Ltemp[18])) + Mtemp[18] * Ltemp[17] + (-(Mtemp[19] * Ltemp[20])) + Mtemp[20] *
Ltemp[19] + (-(Mtemp[22] * Ltemp[23])) + Mtemp[23] * Ltemp[22]) + (-(D[15] * (
(-(M[16] * Ltemp[17])) + Mtemp[17] * L[16] + Mtemp[19] * L[21] + (-(M[21] * Ltemp[19])) + Mtemp[22] * L[
24] + (-(M[24] * Ltemp[22]))))) + (-(D[26] * (
Mtemp[19] * Ltemp[23] + Mtemp[20] * Ltemp[22] + M[21] * L[24] + (-(Mtemp[22] * Ltemp[20])) + (
-(Mtemp[23] * Ltemp[19])) + (-(M[24] * L[21]))))) + (-(D[26] * (
M[16] * L[25] + (-(Mtemp[19] * Ltemp[23])) + Mtemp[20] * Ltemp[22] + Mtemp[22] * Ltemp[20] + (
-(Mtemp[23] * Ltemp[19])) + M[25] * L[16]))) + D[28] * (
(-(Mtemp[17] * L[25])) + Mtemp[19] * L[24] + (-(M[21] * Ltemp[22])) + (-(Mtemp[22] * L[21])) +
M[24] * Ltemp[19] + (-(M[25] * Ltemp[17]))) + D[29] * (
(-(M[16] * Ltemp[22])) + (-(Mtemp[17] * L[24])) + (-(Mtemp[19] * L[25])) + Mtemp[22] * L[16] +
M[24] * Ltemp[17] + M[25] * Ltemp[19]) + D[30] * (
M[16] * Ltemp[19] + Mtemp[17] * L[21] + (-(Mtemp[19] * L[16])) + (-(M[21] * Ltemp[17])) + (
-(Mtemp[22] * L[25])) + M[25] * Ltemp[22]); # e3 ^ einf
R[14] = D[0] * ((-(M[16] * Ltemp[18])) + Mtemp[18] * L[16] + (-(Mtemp[20] * L[21])) + M[21] * Ltemp[20] + (
-(Mtemp[23] * L[24])) + M[24] * Ltemp[23]) + (-(D[6] * (
Mtemp[18] * L[25] + (-(Mtemp[20] * L[24])) + M[21] * Ltemp[23] + Mtemp[23] * L[21] + (
-(M[24] * Ltemp[20])) + M[25] * Ltemp[18]))) + (-(D[7] * (
(-(M[16] * Ltemp[23])) + Mtemp[18] * L[24] + Mtemp[20] * L[25] + Mtemp[23] * L[16] + (
-(M[24] * Ltemp[18])) + (-(M[25] * Ltemp[20]))))) + D[9] * (
(-(Mtemp[17] * Ltemp[23])) + (-(Mtemp[18] * Ltemp[22])) + M[21] * L[25] + Mtemp[22] * Ltemp[
18] + Mtemp[23] * Ltemp[17] + (-(M[25] * L[21]))) + (-(D[9] * (
(-(M[16] * L[24])) + Mtemp[17] * Ltemp[23] + (-(Mtemp[18] * Ltemp[22])) + (-(Mtemp[22] * Ltemp[18])) +
Mtemp[23] * Ltemp[17] + (-(M[24] * L[16]))))) + (-(D[10] * (
M[16] * Ltemp[20] + (-(Mtemp[18] * L[21])) + (-(Mtemp[20] * L[16])) + M[21] * Ltemp[18] + Mtemp[23] * L[
25] + (-(M[25] * Ltemp[23]))))) + D[12] * (
Mtemp[17] * Ltemp[20] + Mtemp[18] * Ltemp[19] + (-(Mtemp[19] * Ltemp[18])) + (
-(Mtemp[20] * Ltemp[17])) + M[24] * L[25] + (-(M[25] * L[24]))) + (-(D[12] * (
M[16] * L[21] + (-(Mtemp[17] * Ltemp[20])) + Mtemp[18] * Ltemp[19] + Mtemp[19] * Ltemp[18] + (
-(Mtemp[20] * Ltemp[17])) + M[21] * L[16]))) + D[14] * (
1.0 + (-(M[16] * L[16])) + Mtemp[17] * Ltemp[18] + Mtemp[18] * Ltemp[17] + Mtemp[19] * Ltemp[
20] + Mtemp[20] * Ltemp[19] + M[21] * L[21] + Mtemp[22] * Ltemp[23] + Mtemp[23] * Ltemp[22] + M[
24] * L[24] + M[25] * L[25]) + (-(D[14] * (
(-(Mtemp[17] * Ltemp[18])) + Mtemp[18] * Ltemp[17] + (-(Mtemp[19] * Ltemp[20])) + Mtemp[20] * Ltemp[
19] + (-(Mtemp[22] * Ltemp[23])) + Mtemp[23] * Ltemp[22]))) + D[15] * (
(-(M[16] * Ltemp[18])) + Mtemp[18] * L[16] + (-(Mtemp[20] * L[21])) + M[21] * Ltemp[20] + (
-(Mtemp[23] * L[24])) + M[24] * Ltemp[23]) + (-(D[27] * (
Mtemp[19] * Ltemp[23] + Mtemp[20] * Ltemp[22] + M[21] * L[24] + (-(Mtemp[22] * Ltemp[20])) + (
-(Mtemp[23] * Ltemp[19])) + (-(M[24] * L[21]))))) + D[27] * (
M[16] * L[25] + (-(Mtemp[19] * Ltemp[23])) + Mtemp[20] * Ltemp[22] + Mtemp[22] * Ltemp[20] + (
-(Mtemp[23] * Ltemp[19])) + M[25] * L[16]) + (-(D[28] * (
Mtemp[18] * L[25] + (-(Mtemp[20] * L[24])) + M[21] * Ltemp[23] + Mtemp[23] * L[21] + (
-(M[24] * Ltemp[20])) + M[25] * Ltemp[18]))) + (-(D[29] * (
(-(M[16] * Ltemp[23])) + Mtemp[18] * L[24] + Mtemp[20] * L[25] + Mtemp[23] * L[16] + (
-(M[24] * Ltemp[18])) + (-(M[25] * Ltemp[20]))))) + (-(D[30] * (
M[16] * Ltemp[20] + (-(Mtemp[18] * L[21])) + (-(Mtemp[20] * L[16])) + M[21] * Ltemp[18] + Mtemp[23] * L[
25] + (-(M[25] * Ltemp[23]))))); # e3 ^ e0
R[15] = D[0] * ((-(Mtemp[17] * Ltemp[18])) + Mtemp[18] * Ltemp[17] + (-(Mtemp[19] * Ltemp[20])) + Mtemp[20] * Ltemp[
19] + (-(Mtemp[22] * Ltemp[23])) + Mtemp[23] * Ltemp[22]) + (-(D[6] * (
M[16] * L[25] + (-(Mtemp[19] * Ltemp[23])) + Mtemp[20] * Ltemp[22] + Mtemp[22] * Ltemp[20] + (
-(Mtemp[23] * Ltemp[19])) + M[25] * L[16]))) + (-(D[7] * (
(-(M[16] * L[24])) + Mtemp[17] * Ltemp[23] + (-(Mtemp[18] * Ltemp[22])) + (-(Mtemp[22] * Ltemp[18])) +
Mtemp[23] * Ltemp[17] + (-(M[24] * L[16]))))) + (-(D[8] * (
(-(M[16] * Ltemp[23])) + Mtemp[18] * L[24] + Mtemp[20] * L[25] + Mtemp[23] * L[16] + (
-(M[24] * Ltemp[18])) + (-(M[25] * Ltemp[20]))))) + D[9] * (
(-(M[16] * Ltemp[22])) + (-(Mtemp[17] * L[24])) + (-(Mtemp[19] * L[25])) + Mtemp[22] * L[16] +
M[24] * Ltemp[17] + M[25] * Ltemp[19]) + (-(D[10] * (
M[16] * L[21] + (-(Mtemp[17] * Ltemp[20])) + Mtemp[18] * Ltemp[19] + Mtemp[19] * Ltemp[18] + (
-(Mtemp[20] * Ltemp[17])) + M[21] * L[16]))) + (-(D[11] * (
M[16] * Ltemp[20] + (-(Mtemp[18] * L[21])) + (-(Mtemp[20] * L[16])) + M[21] * Ltemp[18] + Mtemp[23] * L[
25] + (-(M[25] * Ltemp[23]))))) + D[12] * (
M[16] * Ltemp[19] + Mtemp[17] * L[21] + (-(Mtemp[19] * L[16])) + (-(M[21] * Ltemp[17])) + (
-(Mtemp[22] * L[25])) + M[25] * Ltemp[22]) + (-(D[13] * (
(-(M[16] * Ltemp[18])) + Mtemp[18] * L[16] + (-(Mtemp[20] * L[21])) + M[21] * Ltemp[20] + (
-(Mtemp[23] * L[24])) + M[24] * Ltemp[23]))) + D[14] * (
(-(M[16] * Ltemp[17])) + Mtemp[17] * L[16] + Mtemp[19] * L[21] + (-(M[21] * Ltemp[19])) + Mtemp[
22] * L[24] + (-(M[24] * Ltemp[22]))) + D[15] * (
1.0 + (-(M[16] * L[16])) + Mtemp[17] * Ltemp[18] + Mtemp[18] * Ltemp[17] + Mtemp[19] * Ltemp[
20] + Mtemp[20] * Ltemp[19] + M[21] * L[21] + Mtemp[22] * Ltemp[23] + Mtemp[23] * Ltemp[22] + M[
24] * L[24] + M[25] * L[25]) + D[26] * (
Mtemp[18] * L[25] + (-(Mtemp[20] * L[24])) + M[21] * Ltemp[23] + Mtemp[23] * L[21] + (
-(M[24] * Ltemp[20])) + M[25] * Ltemp[18]) + (-(D[27] * (
(-(Mtemp[17] * L[25])) + Mtemp[19] * L[24] + (-(M[21] * Ltemp[22])) + (-(Mtemp[22] * L[21])) + M[24] *
Ltemp[19] + (-(M[25] * Ltemp[17]))))) + (-(D[28] * (
Mtemp[19] * Ltemp[23] + Mtemp[20] * Ltemp[22] + M[21] * L[24] + (-(Mtemp[22] * Ltemp[20])) + (
-(Mtemp[23] * Ltemp[19])) + (-(M[24] * L[21]))))) + (-(D[29] * (
(-(Mtemp[17] * Ltemp[23])) + (-(Mtemp[18] * Ltemp[22])) + M[21] * L[25] + Mtemp[22] * Ltemp[18] + Mtemp[
23] * Ltemp[17] + (-(M[25] * L[21]))))) + (-(D[30] * (
Mtemp[17] * Ltemp[20] + Mtemp[18] * Ltemp[19] + (-(Mtemp[19] * Ltemp[18])) + (
-(Mtemp[20] * Ltemp[17])) + M[24] * L[25] + (-(M[25] * L[24]))))); # einf ^ e0
R[26] = D[0] * (
(-(Mtemp[17] * L[25])) + Mtemp[19] * L[24] + (-(M[21] * Ltemp[22])) + (-(Mtemp[22] * L[21])) + M[24] *
Ltemp[19] + (-(M[25] * Ltemp[17]))) + D[6] * (
(-(M[16] * Ltemp[17])) + Mtemp[17] * L[16] + Mtemp[19] * L[21] + (-(M[21] * Ltemp[19])) + Mtemp[
22] * L[24] + (-(M[24] * Ltemp[22]))) + (-(D[7] * (
M[16] * Ltemp[19] + Mtemp[17] * L[21] + (-(Mtemp[19] * L[16])) + (-(M[21] * Ltemp[17])) + (
-(Mtemp[22] * L[25])) + M[25] * Ltemp[22]))) + D[8] * (
Mtemp[17] * Ltemp[20] + Mtemp[18] * Ltemp[19] + (-(Mtemp[19] * Ltemp[18])) + (
-(Mtemp[20] * Ltemp[17])) + M[24] * L[25] + (-(M[25] * L[24]))) + D[8] * (
M[16] * L[21] + (-(Mtemp[17] * Ltemp[20])) + Mtemp[18] * Ltemp[19] + Mtemp[19] * Ltemp[18] + (
-(Mtemp[20] * Ltemp[17])) + M[21] * L[16]) + D[10] * (
(-(M[16] * Ltemp[22])) + (-(Mtemp[17] * L[24])) + (-(Mtemp[19] * L[25])) + Mtemp[22] * L[16] +
M[24] * Ltemp[17] + M[25] * Ltemp[19]) + (-(D[11] * (
(-(Mtemp[17] * Ltemp[23])) + (-(Mtemp[18] * Ltemp[22])) + M[21] * L[25] + Mtemp[22] * Ltemp[18] + Mtemp[
23] * Ltemp[17] + (-(M[25] * L[21]))))) + (-(D[11] * (
(-(M[16] * L[24])) + Mtemp[17] * Ltemp[23] + (-(Mtemp[18] * Ltemp[22])) + (-(Mtemp[22] * Ltemp[18])) +
Mtemp[23] * Ltemp[17] + (-(M[24] * L[16]))))) + D[13] * (
Mtemp[19] * Ltemp[23] + Mtemp[20] * Ltemp[22] + M[21] * L[24] + (-(Mtemp[22] * Ltemp[20])) + (
-(Mtemp[23] * Ltemp[19])) + (-(M[24] * L[21]))) + D[13] * (
M[16] * L[25] + (-(Mtemp[19] * Ltemp[23])) + Mtemp[20] * Ltemp[22] + Mtemp[22] * Ltemp[20] + (
-(Mtemp[23] * Ltemp[19])) + M[25] * L[16]) + (-(D[15] * (
(-(Mtemp[17] * L[25])) + Mtemp[19] * L[24] + (-(M[21] * Ltemp[22])) + (-(Mtemp[22] * L[21])) + M[24] *
Ltemp[19] + (-(M[25] * Ltemp[17]))))) + D[26] * (
1.0 + (-(M[16] * L[16])) + Mtemp[17] * Ltemp[18] + Mtemp[18] * Ltemp[17] + Mtemp[19] * Ltemp[
20] + Mtemp[20] * Ltemp[19] + M[21] * L[21] + Mtemp[22] * Ltemp[23] + Mtemp[23] * Ltemp[22] + M[
24] * L[24] + M[25] * L[25]) + D[26] * (
(-(Mtemp[17] * Ltemp[18])) + Mtemp[18] * Ltemp[17] + (-(Mtemp[19] * Ltemp[20])) + Mtemp[20] *
Ltemp[19] + (-(Mtemp[22] * Ltemp[23])) + Mtemp[23] * Ltemp[22]) + (-(D[28] * (
(-(M[16] * Ltemp[17])) + Mtemp[17] * L[16] + Mtemp[19] * L[21] + (-(M[21] * Ltemp[19])) + Mtemp[22] * L[
24] + (-(M[24] * Ltemp[22]))))) + D[29] * (
M[16] * Ltemp[19] + Mtemp[17] * L[21] + (-(Mtemp[19] * L[16])) + (-(M[21] * Ltemp[17])) + (
-(Mtemp[22] * L[25])) + M[25] * Ltemp[22]) + (-(D[30] * (
(-(M[16] * Ltemp[22])) + (-(Mtemp[17] * L[24])) + (-(Mtemp[19] * L[25])) + Mtemp[22] * L[16] + M[24] *
Ltemp[17] + M[25] * Ltemp[19]))); # e1 ^ (e2 ^ (e3 ^ einf))
R[27] = D[0] * (Mtemp[18] * L[25] + (-(Mtemp[20] * L[24])) + M[21] * Ltemp[23] + Mtemp[23] * L[21] + (
-(M[24] * Ltemp[20])) + M[25] * Ltemp[18]) + D[6] * (
(-(M[16] * Ltemp[18])) + Mtemp[18] * L[16] + (-(Mtemp[20] * L[21])) + M[21] * Ltemp[20] + (
-(Mtemp[23] * L[24])) + M[24] * Ltemp[23]) + (-(D[7] * (
M[16] * Ltemp[20] + (-(Mtemp[18] * L[21])) + (-(Mtemp[20] * L[16])) + M[21] * Ltemp[18] + Mtemp[23] * L[
25] + (-(M[25] * Ltemp[23]))))) + D[9] * (
Mtemp[17] * Ltemp[20] + Mtemp[18] * Ltemp[19] + (-(Mtemp[19] * Ltemp[18])) + (
-(Mtemp[20] * Ltemp[17])) + M[24] * L[25] + (-(M[25] * L[24]))) + (-(D[9] * (
M[16] * L[21] + (-(Mtemp[17] * Ltemp[20])) + Mtemp[18] * Ltemp[19] + Mtemp[19] * Ltemp[18] + (
-(Mtemp[20] * Ltemp[17])) + M[21] * L[16]))) + D[10] * (
(-(M[16] * Ltemp[23])) + Mtemp[18] * L[24] + Mtemp[20] * L[25] + Mtemp[23] * L[16] + (
-(M[24] * Ltemp[18])) + (-(M[25] * Ltemp[20]))) + (-(D[12] * (
(-(Mtemp[17] * Ltemp[23])) + (-(Mtemp[18] * Ltemp[22])) + M[21] * L[25] + Mtemp[22] * Ltemp[18] + Mtemp[
23] * Ltemp[17] + (-(M[25] * L[21]))))) + D[12] * (
(-(M[16] * L[24])) + Mtemp[17] * Ltemp[23] + (-(Mtemp[18] * Ltemp[22])) + (
-(Mtemp[22] * Ltemp[18])) + Mtemp[23] * Ltemp[17] + (-(M[24] * L[16]))) + D[14] * (
Mtemp[19] * Ltemp[23] + Mtemp[20] * Ltemp[22] + M[21] * L[24] + (-(Mtemp[22] * Ltemp[20])) + (
-(Mtemp[23] * Ltemp[19])) + (-(M[24] * L[21]))) + (-(D[14] * (
M[16] * L[25] + (-(Mtemp[19] * Ltemp[23])) + Mtemp[20] * Ltemp[22] + Mtemp[22] * Ltemp[20] + (
-(Mtemp[23] * Ltemp[19])) + M[25] * L[16]))) + D[15] * (
Mtemp[18] * L[25] + (-(Mtemp[20] * L[24])) + M[21] * Ltemp[23] + Mtemp[23] * L[21] + (
-(M[24] * Ltemp[20])) + M[25] * Ltemp[18]) + D[27] * (
1.0 + (-(M[16] * L[16])) + Mtemp[17] * Ltemp[18] + Mtemp[18] * Ltemp[17] + Mtemp[19] * Ltemp[
20] + Mtemp[20] * Ltemp[19] + M[21] * L[21] + Mtemp[22] * Ltemp[23] + Mtemp[23] * Ltemp[22] + M[
24] * L[24] + M[25] * L[25]) + (-(D[27] * (
(-(Mtemp[17] * Ltemp[18])) + Mtemp[18] * Ltemp[17] + (-(Mtemp[19] * Ltemp[20])) + Mtemp[20] * Ltemp[
19] + (-(Mtemp[22] * Ltemp[23])) + Mtemp[23] * Ltemp[22]))) + D[28] * (
(-(M[16] * Ltemp[18])) + Mtemp[18] * L[16] + (-(Mtemp[20] * L[21])) + M[21] * Ltemp[20] + (
-(Mtemp[23] * L[24])) + M[24] * Ltemp[23]) + (-(D[29] * (
M[16] * Ltemp[20] + (-(Mtemp[18] * L[21])) + (-(Mtemp[20] * L[16])) + M[21] * Ltemp[18] + Mtemp[23] * L[
25] + (-(M[25] * Ltemp[23]))))) + D[30] * (
(-(M[16] * Ltemp[23])) + Mtemp[18] * L[24] + Mtemp[20] * L[25] + Mtemp[23] * L[16] + (
-(M[24] * Ltemp[18])) + (-(M[25] * Ltemp[20]))); # e1 ^ (e2 ^ (e3 ^ e0))
R[28] = D[0] * (M[16] * L[25] + (-(Mtemp[19] * Ltemp[23])) + Mtemp[20] * Ltemp[22] + Mtemp[22] * Ltemp[20] + (
-(Mtemp[23] * Ltemp[19])) + M[25] * L[16]) + D[6] * (
(-(Mtemp[17] * Ltemp[18])) + Mtemp[18] * Ltemp[17] + (-(Mtemp[19] * Ltemp[20])) + Mtemp[20] *
Ltemp[19] + (-(Mtemp[22] * Ltemp[23])) + Mtemp[23] * Ltemp[22]) + (-(D[7] * (
M[16] * L[21] + (-(Mtemp[17] * Ltemp[20])) + Mtemp[18] * Ltemp[19] + Mtemp[19] * Ltemp[18] + (
-(Mtemp[20] * Ltemp[17])) + M[21] * L[16]))) + (-(D[8] * (
M[16] * Ltemp[20] + (-(Mtemp[18] * L[21])) + (-(Mtemp[20] * L[16])) + M[21] * Ltemp[18] + Mtemp[23] * L[
25] + (-(M[25] * Ltemp[23]))))) + D[9] * (
M[16] * Ltemp[19] + Mtemp[17] * L[21] + (-(Mtemp[19] * L[16])) + (-(M[21] * Ltemp[17])) + (
-(Mtemp[22] * L[25])) + M[25] * Ltemp[22]) + D[10] * (
(-(M[16] * L[24])) + Mtemp[17] * Ltemp[23] + (-(Mtemp[18] * Ltemp[22])) + (
-(Mtemp[22] * Ltemp[18])) + Mtemp[23] * Ltemp[17] + (-(M[24] * L[16]))) + D[11] * (
(-(M[16] * Ltemp[23])) + Mtemp[18] * L[24] + Mtemp[20] * L[25] + Mtemp[23] * L[16] + (
-(M[24] * Ltemp[18])) + (-(M[25] * Ltemp[20]))) + (-(D[12] * (
(-(M[16] * Ltemp[22])) + (-(Mtemp[17] * L[24])) + (-(Mtemp[19] * L[25])) + Mtemp[22] * L[16] + M[24] *
Ltemp[17] + M[25] * Ltemp[19]))) + (-(D[13] * (
Mtemp[18] * L[25] + (-(Mtemp[20] * L[24])) + M[21] * Ltemp[23] + Mtemp[23] * L[21] + (
-(M[24] * Ltemp[20])) + M[25] * Ltemp[18]))) + D[14] * (
(-(Mtemp[17] * L[25])) + Mtemp[19] * L[24] + (-(M[21] * Ltemp[22])) + (-(Mtemp[22] * L[21])) +
M[24] * Ltemp[19] + (-(M[25] * Ltemp[17]))) + D[15] * (
Mtemp[19] * Ltemp[23] + Mtemp[20] * Ltemp[22] + M[21] * L[24] + (-(Mtemp[22] * Ltemp[20])) + (
-(Mtemp[23] * Ltemp[19])) + (-(M[24] * L[21]))) + (-(D[26] * (
(-(M[16] * Ltemp[18])) + Mtemp[18] * L[16] + (-(Mtemp[20] * L[21])) + M[21] * Ltemp[20] + (
-(Mtemp[23] * L[24])) + M[24] * Ltemp[23]))) + D[27] * (
(-(M[16] * Ltemp[17])) + Mtemp[17] * L[16] + Mtemp[19] * L[21] + (-(M[21] * Ltemp[19])) + Mtemp[
22] * L[24] + (-(M[24] * Ltemp[22]))) + D[28] * (
1.0 + (-(M[16] * L[16])) + Mtemp[17] * Ltemp[18] + Mtemp[18] * Ltemp[17] + Mtemp[19] * Ltemp[
20] + Mtemp[20] * Ltemp[19] + M[21] * L[21] + Mtemp[22] * Ltemp[23] + Mtemp[23] * Ltemp[22] + M[
24] * L[24] + M[25] * L[25]) + (-(D[29] * (
Mtemp[17] * Ltemp[20] + Mtemp[18] * Ltemp[19] + (-(Mtemp[19] * Ltemp[18])) + (
-(Mtemp[20] * Ltemp[17])) + M[24] * L[25] + (-(M[25] * L[24]))))) + D[30] * (
(-(Mtemp[17] * Ltemp[23])) + (-(Mtemp[18] * Ltemp[22])) + M[21] * L[25] + Mtemp[22] * Ltemp[
18] + Mtemp[23] * Ltemp[17] + (-(M[25] * L[21]))); # e1 ^ (e2 ^ (einf ^ e0))
R[29] = D[0] * (
(-(M[16] * L[24])) + Mtemp[17] * Ltemp[23] + (-(Mtemp[18] * Ltemp[22])) + (-(Mtemp[22] * Ltemp[18])) +
Mtemp[23] * Ltemp[17] + (-(M[24] * L[16]))) + D[6] * (
M[16] * L[21] + (-(Mtemp[17] * Ltemp[20])) + Mtemp[18] * Ltemp[19] + Mtemp[19] * Ltemp[18] + (
-(Mtemp[20] * Ltemp[17])) + M[21] * L[16]) + D[7] * (
(-(Mtemp[17] * Ltemp[18])) + Mtemp[18] * Ltemp[17] + (-(Mtemp[19] * Ltemp[20])) + Mtemp[20] *
Ltemp[19] + (-(Mtemp[22] * Ltemp[23])) + Mtemp[23] * Ltemp[22]) + (-(D[8] * (
(-(M[16] * Ltemp[18])) + Mtemp[18] * L[16] + (-(Mtemp[20] * L[21])) + M[21] * Ltemp[20] + (
-(Mtemp[23] * L[24])) + M[24] * Ltemp[23]))) + D[9] * (
(-(M[16] * Ltemp[17])) + Mtemp[17] * L[16] + Mtemp[19] * L[21] + (-(M[21] * Ltemp[19])) + Mtemp[
22] * L[24] + (-(M[24] * Ltemp[22]))) + (-(D[10] * (
M[16] * L[25] + (-(Mtemp[19] * Ltemp[23])) + Mtemp[20] * Ltemp[22] + Mtemp[22] * Ltemp[20] + (
-(Mtemp[23] * Ltemp[19])) + M[25] * L[16]))) + D[11] * (
Mtemp[18] * L[25] + (-(Mtemp[20] * L[24])) + M[21] * Ltemp[23] + Mtemp[23] * L[21] + (
-(M[24] * Ltemp[20])) + M[25] * Ltemp[18]) + (-(D[12] * (
(-(Mtemp[17] * L[25])) + Mtemp[19] * L[24] + (-(M[21] * Ltemp[22])) + (-(Mtemp[22] * L[21])) + M[24] *
Ltemp[19] + (-(M[25] * Ltemp[17]))))) + D[13] * (
(-(M[16] * Ltemp[23])) + Mtemp[18] * L[24] + Mtemp[20] * L[25] + Mtemp[23] * L[16] + (
-(M[24] * Ltemp[18])) + (-(M[25] * Ltemp[20]))) + (-(D[14] * (
(-(M[16] * Ltemp[22])) + (-(Mtemp[17] * L[24])) + (-(Mtemp[19] * L[25])) + Mtemp[22] * L[16] + M[24] *
Ltemp[17] + M[25] * Ltemp[19]))) + D[15] * (
(-(Mtemp[17] * Ltemp[23])) + (-(Mtemp[18] * Ltemp[22])) + M[21] * L[25] + Mtemp[22] * Ltemp[
18] + Mtemp[23] * Ltemp[17] + (-(M[25] * L[21]))) + D[26] * (
M[16] * Ltemp[20] + (-(Mtemp[18] * L[21])) + (-(Mtemp[20] * L[16])) + M[21] * Ltemp[18] + Mtemp[
23] * L[25] + (-(M[25] * Ltemp[23]))) + (-(D[27] * (
M[16] * Ltemp[19] + Mtemp[17] * L[21] + (-(Mtemp[19] * L[16])) + (-(M[21] * Ltemp[17])) + (
-(Mtemp[22] * L[25])) + M[25] * Ltemp[22]))) + D[28] * (
Mtemp[17] * Ltemp[20] + Mtemp[18] * Ltemp[19] + (-(Mtemp[19] * Ltemp[18])) + (
-(Mtemp[20] * Ltemp[17])) + M[24] * L[25] + (-(M[25] * L[24]))) + D[29] * (
1.0 + (-(M[16] * L[16])) + Mtemp[17] * Ltemp[18] + Mtemp[18] * Ltemp[17] + Mtemp[19] * Ltemp[
20] + Mtemp[20] * Ltemp[19] + M[21] * L[21] + Mtemp[22] * Ltemp[23] + Mtemp[23] * Ltemp[22] + M[
24] * L[24] + M[25] * L[25]) + (-(D[30] * (
Mtemp[19] * Ltemp[23] + Mtemp[20] * Ltemp[22] + M[21] * L[24] + (-(Mtemp[22] * Ltemp[20])) + (
-(Mtemp[23] * Ltemp[19])) + (-(M[24] * L[21]))))); # e1 ^ (e3 ^ (einf ^ e0))
R[30] = D[0] * (M[16] * L[21] + (-(Mtemp[17] * Ltemp[20])) + Mtemp[18] * Ltemp[19] + Mtemp[19] * Ltemp[18] + (
-(Mtemp[20] * Ltemp[17])) + M[21] * L[16]) + (-(D[6] * (
(-(M[16] * L[24])) + Mtemp[17] * Ltemp[23] + (-(Mtemp[18] * Ltemp[22])) + (-(Mtemp[22] * Ltemp[18])) +
Mtemp[23] * Ltemp[17] + (-(M[24] * L[16]))))) + D[7] * (
M[16] * L[25] + (-(Mtemp[19] * Ltemp[23])) + Mtemp[20] * Ltemp[22] + Mtemp[22] * Ltemp[20] + (
-(Mtemp[23] * Ltemp[19])) + M[25] * L[16]) + (-(D[8] * (
Mtemp[18] * L[25] + (-(Mtemp[20] * L[24])) + M[21] * Ltemp[23] + Mtemp[23] * L[21] + (
-(M[24] * Ltemp[20])) + M[25] * Ltemp[18]))) + D[9] * (
(-(Mtemp[17] * L[25])) + Mtemp[19] * L[24] + (-(M[21] * Ltemp[22])) + (-(Mtemp[22] * L[21])) +
M[24] * Ltemp[19] + (-(M[25] * Ltemp[17]))) + D[10] * (
(-(Mtemp[17] * Ltemp[18])) + Mtemp[18] * Ltemp[17] + (-(Mtemp[19] * Ltemp[20])) + Mtemp[20] *
Ltemp[19] + (-(Mtemp[22] * Ltemp[23])) + Mtemp[23] * Ltemp[22]) + (-(D[11] * (
(-(M[16] * Ltemp[18])) + Mtemp[18] * L[16] + (-(Mtemp[20] * L[21])) + M[21] * Ltemp[20] + (
-(Mtemp[23] * L[24])) + M[24] * Ltemp[23]))) + D[12] * (
(-(M[16] * Ltemp[17])) + Mtemp[17] * L[16] + Mtemp[19] * L[21] + (-(M[21] * Ltemp[19])) + Mtemp[
22] * L[24] + (-(M[24] * Ltemp[22]))) + D[13] * (
M[16] * Ltemp[20] + (-(Mtemp[18] * L[21])) + (-(Mtemp[20] * L[16])) + M[21] * Ltemp[18] + Mtemp[
23] * L[25] + (-(M[25] * Ltemp[23]))) + (-(D[14] * (
M[16] * Ltemp[19] + Mtemp[17] * L[21] + (-(Mtemp[19] * L[16])) + (-(M[21] * Ltemp[17])) + (
-(Mtemp[22] * L[25])) + M[25] * Ltemp[22]))) + D[15] * (
Mtemp[17] * Ltemp[20] + Mtemp[18] * Ltemp[19] + (-(Mtemp[19] * Ltemp[18])) + (
-(Mtemp[20] * Ltemp[17])) + M[24] * L[25] + (-(M[25] * L[24]))) + (-(D[26] * (
(-(M[16] * Ltemp[23])) + Mtemp[18] * L[24] + Mtemp[20] * L[25] + Mtemp[23] * L[16] + (
-(M[24] * Ltemp[18])) + (-(M[25] * Ltemp[20]))))) + D[27] * (
(-(M[16] * Ltemp[22])) + (-(Mtemp[17] * L[24])) + (-(Mtemp[19] * L[25])) + Mtemp[22] * L[16] +
M[24] * Ltemp[17] + M[25] * Ltemp[19]) + (-(D[28] * (
(-(Mtemp[17] * Ltemp[23])) + (-(Mtemp[18] * Ltemp[22])) + M[21] * L[25] + Mtemp[22] * Ltemp[18] + Mtemp[
23] * Ltemp[17] + (-(M[25] * L[21]))))) + D[29] * (
Mtemp[19] * Ltemp[23] + Mtemp[20] * Ltemp[22] + M[21] * L[24] + (-(Mtemp[22] * Ltemp[20])) + (
-(Mtemp[23] * Ltemp[19])) + (-(M[24] * L[21]))) + D[30] * (
1.0 + (-(M[16] * L[16])) + Mtemp[17] * Ltemp[18] + Mtemp[18] * Ltemp[17] + Mtemp[19] * Ltemp[
20] + Mtemp[20] * Ltemp[19] + M[21] * L[21] + Mtemp[22] * Ltemp[23] + Mtemp[23] * Ltemp[22] + M[
24] * L[24] + M[25] * L[25]); # e2 ^ (e3 ^ (einf ^ e0))
return V
def run_benchmarks():
    """Benchmark the sorting implementations on small random arkouda arrays.

    Connects to a running arkouda server, generates three 32-element random
    integer pdarrays, times ``radix_sort`` ("sort_v1") and ``sort_v2`` on
    them, prints each timing, and finally shuts down the server connection.

    Side effects: network connection to the arkouda server, console output.
    """
    # NOTE(review): hard-coded host — change connect_url for other machines.
    ak.connect(connect_url='tcp://andrej-X556UQ:5555')
    # ak.connect(connect_url='tcp://nid01551:5555')
    test_pd_array_one = ak.randint(0, 100, 32)
    test_pd_array_two = ak.randint(0, 100, 32)
    test_pd_array_three = ak.randint(0, 100, 32)

    # Print OUTSIDE the timed window so the measurement covers only the
    # sort itself, not console I/O (the original timed the prints too).
    print(test_pd_array_one)
    start = time.perf_counter()
    test_pd_array_one = radix_sort(test_pd_array_one)
    end = time.perf_counter()
    print(test_pd_array_one)
    print(f"sort_v1 took {end - start:0.9f} seconds")

    print(test_pd_array_two)
    start = time.perf_counter()
    sort_v2(test_pd_array_two)  # sorts in place
    end = time.perf_counter()
    print(test_pd_array_two)
    print(f"sort_v2 took {end - start:0.9f} seconds")
    #
    # start = time.perf_counter()
    # print(cse(test_pd_array_two, test_pd_array_three))
    # print(test_pd_array_three)
    # end = time.perf_counter()
    # print(f"cse took {end - start:0.9f} seconds")
    ak.shutdown()
# Guard the entry point so importing this module does not trigger a
# network connect and a multi-minute benchmark run.
if __name__ == "__main__":
    run_benchmarks()
# Reference timings from a previous run:
# sort_v1 took 117.116571640 seconds
# sort_v2 took 36.270469763 seconds
# cse took 5.724895335 seconds
| 93.955166
| 122
| 0.284121
| 21,874
| 144,597
| 1.874463
| 0.008046
| 0.031316
| 0.029267
| 0.019901
| 0.940247
| 0.929223
| 0.92054
| 0.913663
| 0.907151
| 0.902127
| 0
| 0.207522
| 0.354917
| 144,597
| 1,538
| 123
| 94.016255
| 0.232051
| 0.013659
| 0
| 0.637407
| 0
| 0
| 0.000716
| 0.000168
| 0
| 0
| 0
| 0
| 0
| 1
| 0.003376
| false
| 0
| 0.002026
| 0
| 0.008103
| 0.004051
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
da4a47e2a58eecbff008ac5ee5be9c3c69e85b13
| 43
|
py
|
Python
|
test/include_lv0.py
|
pavelkukov/fsimport
|
7020b941259fe78e20829029b9aa828d2de8300d
|
[
"MIT"
] | null | null | null |
test/include_lv0.py
|
pavelkukov/fsimport
|
7020b941259fe78e20829029b9aa828d2de8300d
|
[
"MIT"
] | null | null | null |
test/include_lv0.py
|
pavelkukov/fsimport
|
7020b941259fe78e20829029b9aa828d2de8300d
|
[
"MIT"
] | null | null | null |
def get_text():
    """Return the fixed identifying string for this lv0 test module."""
    message = 'text from lv0'
    return message
| 14.333333
| 26
| 0.651163
| 7
| 43
| 3.857143
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030303
| 0.232558
| 43
| 2
| 27
| 21.5
| 0.787879
| 0
| 0
| 0
| 0
| 0
| 0.302326
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
e55f955f4ce84d43e85212b0bc7649d546810e41
| 19,411
|
py
|
Python
|
spacy/lang/it/tag_map.py
|
algteam/spacy_zh_model
|
0b0cba1a3964aa426e5f96087849c90e69e2a89d
|
[
"MIT"
] | 5
|
2019-04-19T06:27:29.000Z
|
2019-12-02T13:30:47.000Z
|
spacy/lang/it/tag_map.py
|
algteam/spacy_zh_model
|
0b0cba1a3964aa426e5f96087849c90e69e2a89d
|
[
"MIT"
] | null | null | null |
spacy/lang/it/tag_map.py
|
algteam/spacy_zh_model
|
0b0cba1a3964aa426e5f96087849c90e69e2a89d
|
[
"MIT"
] | 2
|
2019-04-19T06:27:18.000Z
|
2019-10-04T12:39:15.000Z
|
# coding: utf8
from __future__ import unicode_literals
from ...symbols import POS, PUNCT, SYM, ADJ, NUM, DET, ADV, ADP, X, VERB
from ...symbols import NOUN, PROPN, PART, INTJ, SPACE, PRON, SCONJ, AUX, CONJ
# Fine-to-coarse part-of-speech table for the Italian tag set.
# Each key is a fine-grained tag: a short prefix (e.g. "A", "PC", "VA")
# followed by "__" and a "|"-separated list of morphological features;
# a key ending in "___" carries no features.  Each value assigns the
# coarse Universal POS symbol for tokens bearing that tag.
TAG_MAP = {
    # AP__*: possessive (Poss=Yes|PronType=Prs) forms -> DET
    "AP__Gender=Fem|Number=Plur|Poss=Yes|PronType=Prs": {POS: DET},
    "AP__Gender=Fem|Number=Sing|Poss=Yes|PronType=Prs": {POS: DET},
    "AP__Gender=Masc|Number=Plur|Poss=Yes|PronType=Prs": {POS: DET},
    "AP__Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs": {POS: DET},
    "AP__Gender=Masc|Poss=Yes|PronType=Prs": {POS: DET},
    "AP__Number=Sing|Poss=Yes|PronType=Prs": {POS: DET},
    "AP__Poss=Yes|PronType=Prs": {POS: DET},
    # A__*: adjective tags -> ADJ
    "A__Degree=Abs|Gender=Fem|Number=Plur": {POS: ADJ},
    "A__Degree=Abs|Gender=Fem|Number=Sing": {POS: ADJ},
    "A__Degree=Abs|Gender=Masc|Number=Plur": {POS: ADJ},
    "A__Degree=Abs|Gender=Masc|Number=Sing": {POS: ADJ},
    "A__Degree=Cmp": {POS: ADJ},
    "A__Degree=Cmp|Number=Plur": {POS: ADJ},
    "A__Degree=Cmp|Number=Sing": {POS: ADJ},
    "A__Gender=Fem|Number=Plur": {POS: ADJ},
    "A__Gender=Fem|Number=Sing": {POS: ADJ},
    "A__Gender=Fem|Number=Sing|Poss=Yes|PronType=Prs": {POS: ADJ},
    "A__Gender=Masc": {POS: ADJ},
    "A__Gender=Masc|Number=Plur": {POS: ADJ},
    "A__Gender=Masc|Number=Sing": {POS: ADJ},
    "A__Number=Plur": {POS: ADJ},
    "A__Number=Sing": {POS: ADJ},
    "A___": {POS: ADJ},
    # B*/BN: adverb tags -> ADV
    "BN__PronType=Neg": {POS: ADV},
    "B__Degree=Abs": {POS: ADV},
    "B__Degree=Abs|Gender=Masc|Number=Sing": {POS: ADV},
    "B___": {POS: ADV},
    # CC/CS: coordinating and subordinating conjunctions
    "CC___": {POS: CONJ},
    "CS___": {POS: SCONJ},
    # D*: determiner tags (demonstrative, exclamative, indefinite,
    # interrogative, relative) -> DET
    "DD__Gender=Fem|Number=Plur|PronType=Dem": {POS: DET},
    "DD__Gender=Fem|Number=Sing|PronType=Dem": {POS: DET},
    "DD__Gender=Masc|Number=Plur|PronType=Dem": {POS: DET},
    "DD__Gender=Masc|Number=Sing|PronType=Dem": {POS: DET},
    "DD__Gender=Masc|PronType=Dem": {POS: DET},
    "DD__Number=Plur|PronType=Dem": {POS: DET},
    "DD__Number=Sing|PronType=Dem": {POS: DET},
    "DE__PronType=Exc": {POS: DET},
    "DI__Definite=Def|Gender=Fem|Number=Plur|PronType=Art": {POS: DET},
    "DI__Gender=Fem|Number=Plur": {POS: DET},
    "DI__Gender=Fem|Number=Plur|PronType=Ind": {POS: DET},
    "DI__Gender=Fem|Number=Sing|PronType=Ind": {POS: DET},
    "DI__Gender=Masc|Number=Plur": {POS: DET},
    "DI__Gender=Masc|Number=Plur|PronType=Ind": {POS: DET},
    "DI__Gender=Masc|Number=Sing|PronType=Ind": {POS: DET},
    "DI__Number=Sing|PronType=Art": {POS: DET},
    "DI__Number=Sing|PronType=Ind": {POS: DET},
    "DI__PronType=Ind": {POS: DET},
    "DQ__Gender=Fem|Number=Plur|PronType=Int": {POS: DET},
    "DQ__Gender=Fem|Number=Sing|PronType=Int": {POS: DET},
    "DQ__Gender=Masc|Number=Plur|PronType=Int": {POS: DET},
    "DQ__Gender=Masc|Number=Sing|PronType=Int": {POS: DET},
    "DQ__Number=Plur|PronType=Int": {POS: DET},
    "DQ__Number=Sing|PronType=Int": {POS: DET},
    "DQ__PronType=Int": {POS: DET},
    "DQ___": {POS: DET},
    "DR__Number=Plur|PronType=Rel": {POS: DET},
    "DR__PronType=Rel": {POS: DET},
    # E__*: adpositions
    "E__Gender=Masc|Number=Sing": {POS: ADP},
    "E___": {POS: ADP},
    # F*: punctuation
    "FB___": {POS: PUNCT},
    "FC___": {POS: PUNCT},
    "FF___": {POS: PUNCT},
    "FS___": {POS: PUNCT},
    # I__*: interjections
    "I__Polarity=Neg": {POS: INTJ},
    "I__Polarity=Pos": {POS: INTJ},
    "I___": {POS: INTJ},
    # NO__*: ordinal numerals mapped to ADJ; N__*: numerals -> NUM
    "NO__Gender=Fem|Number=Plur|NumType=Ord": {POS: ADJ},
    "NO__Gender=Fem|Number=Sing|NumType=Ord": {POS: ADJ},
    "NO__Gender=Masc|Number=Plur": {POS: ADJ},
    "NO__Gender=Masc|Number=Plur|NumType=Ord": {POS: ADJ},
    "NO__Gender=Masc|Number=Sing|NumType=Ord": {POS: ADJ},
    "NO__NumType=Ord": {POS: ADJ},
    "NO__Number=Sing|NumType=Ord": {POS: ADJ},
    "NO___": {POS: ADJ},
    "N__Gender=Masc|Number=Sing": {POS: NUM},
    "N__NumType=Card": {POS: NUM},
    "N__NumType=Range": {POS: NUM},
    "N___": {POS: NUM},
    "PART___": {POS: PART},
    # PC__*: clitic pronouns -> PRON
    "PC__Clitic=Yes|Definite=Def|Gender=Fem|Number=Plur|PronType=Art": {POS: PRON},
    "PC__Clitic=Yes|Gender=Fem|Number=Plur|Person=3|PronType=Prs": {POS: PRON},
    "PC__Clitic=Yes|Gender=Fem|Number=Plur|PronType=Prs": {POS: PRON},
    "PC__Clitic=Yes|Gender=Fem|Number=Sing|Person=3|PronType=Prs": {POS: PRON},
    "PC__Clitic=Yes|Gender=Fem|Person=3|PronType=Prs": {POS: PRON},
    "PC__Clitic=Yes|Gender=Masc|Number=Plur|Person=3|PronType=Prs": {POS: PRON},
    "PC__Clitic=Yes|Gender=Masc|Number=Sing|Person=3|PronType=Prs": {POS: PRON},
    "PC__Clitic=Yes|Gender=Masc|Number=Sing|PronType=Prs": {POS: PRON},
    "PC__Clitic=Yes|Number=Plur|Person=1|PronType=Prs": {POS: PRON},
    "PC__Clitic=Yes|Number=Plur|Person=2|PronType=Prs": {POS: PRON},
    "PC__Clitic=Yes|Number=Plur|Person=3|PronType=Prs": {POS: PRON},
    "PC__Clitic=Yes|Number=Plur|PronType=Prs": {POS: PRON},
    "PC__Clitic=Yes|Number=Sing|Person=1|PronType=Prs": {POS: PRON},
    "PC__Clitic=Yes|Number=Sing|Person=2|PronType=Prs": {POS: PRON},
    "PC__Clitic=Yes|Number=Sing|Person=3|PronType=Prs": {POS: PRON},
    "PC__Clitic=Yes|Person=3|PronType=Prs": {POS: PRON},
    "PC__Clitic=Yes|PronType=Prs": {POS: PRON},
    # PD/PE/PI/PP/PQ/PR: remaining pronoun classes -> PRON
    "PD__Gender=Fem|Number=Plur|PronType=Dem": {POS: PRON},
    "PD__Gender=Fem|Number=Sing|PronType=Dem": {POS: PRON},
    "PD__Gender=Masc|Number=Plur|PronType=Dem": {POS: PRON},
    "PD__Gender=Masc|Number=Sing|PronType=Dem": {POS: PRON},
    "PD__Number=Plur|PronType=Dem": {POS: PRON},
    "PD__Number=Sing|PronType=Dem": {POS: PRON},
    "PD__PronType=Dem": {POS: PRON},
    "PE__Gender=Fem|Number=Plur|Person=3|PronType=Prs": {POS: PRON},
    "PE__Gender=Fem|Number=Sing|Person=3|PronType=Prs": {POS: PRON},
    "PE__Gender=Masc|Number=Plur|Person=3|PronType=Prs": {POS: PRON},
    "PE__Gender=Masc|Number=Sing|Person=3|PronType=Prs": {POS: PRON},
    "PE__Number=Plur|Person=1|PronType=Prs": {POS: PRON},
    "PE__Number=Plur|Person=2|PronType=Prs": {POS: PRON},
    "PE__Number=Plur|Person=3|PronType=Prs": {POS: PRON},
    "PE__Number=Sing|Person=1|PronType=Prs": {POS: PRON},
    "PE__Number=Sing|Person=2|PronType=Prs": {POS: PRON},
    "PE__Number=Sing|Person=3|PronType=Prs": {POS: PRON},
    "PE__Person=3|PronType=Prs": {POS: PRON},
    "PE__PronType=Prs": {POS: PRON},
    "PI__Gender=Fem|Number=Plur|PronType=Ind": {POS: PRON},
    "PI__Gender=Fem|Number=Sing|PronType=Ind": {POS: PRON},
    "PI__Gender=Masc|Number=Plur|PronType=Ind": {POS: PRON},
    "PI__Gender=Masc|Number=Sing": {POS: PRON},
    "PI__Gender=Masc|Number=Sing|PronType=Ind": {POS: PRON},
    "PI__Number=Plur|PronType=Ind": {POS: PRON},
    "PI__Number=Sing|PronType=Ind": {POS: PRON},
    "PI__PronType=Ind": {POS: PRON},
    "PP__Gender=Fem|Number=Sing|Poss=Yes|PronType=Prs": {POS: PRON},
    "PP__Gender=Masc|Number=Plur|Poss=Yes|PronType=Prs": {POS: PRON},
    "PP__Gender=Masc|Number=Sing|Poss=Yes|PronType=Prs": {POS: PRON},
    "PP__Number=Plur|Poss=Yes|PronType=Prs": {POS: PRON},
    "PP__Number=Sing|Poss=Yes|PronType=Prs": {POS: PRON},
    "PQ__Gender=Fem|Number=Plur|PronType=Int": {POS: PRON},
    "PQ__Gender=Fem|Number=Sing|PronType=Int": {POS: PRON},
    "PQ__Gender=Masc|Number=Plur|PronType=Int": {POS: PRON},
    "PQ__Gender=Masc|Number=Sing|PronType=Int": {POS: PRON},
    "PQ__Number=Plur|PronType=Int": {POS: PRON},
    "PQ__Number=Sing|PronType=Int": {POS: PRON},
    "PQ__PronType=Int": {POS: PRON},
    "PR__Gender=Masc|Number=Plur|PronType=Rel": {POS: PRON},
    "PR__Gender=Masc|Number=Sing|PronType=Rel": {POS: PRON},
    "PR__Gender=Masc|PronType=Rel": {POS: PRON},
    "PR__Number=Plur|PronType=Rel": {POS: PRON},
    "PR__Number=Sing|PronType=Rel": {POS: PRON},
    "PR__Person=3|PronType=Rel": {POS: PRON},
    "PR__PronType=Rel": {POS: PRON},
    # RD/RI: definite and indefinite articles -> DET
    "RD__Definite=Def": {POS: DET},
    "RD__Definite=Def|Gender=Fem": {POS: DET},
    "RD__Definite=Def|Gender=Fem|Number=Plur|PronType=Art": {POS: DET},
    "RD__Definite=Def|Gender=Fem|Number=Sing|PronType=Art": {POS: DET},
    "RD__Definite=Def|Gender=Masc|Number=Plur|PronType=Art": {POS: DET},
    "RD__Definite=Def|Gender=Masc|Number=Sing|PronType=Art": {POS: DET},
    "RD__Definite=Def|Number=Plur|PronType=Art": {POS: DET},
    "RD__Definite=Def|Number=Sing|PronType=Art": {POS: DET},
    "RD__Definite=Def|PronType=Art": {POS: DET},
    "RD__Gender=Fem|Number=Sing": {POS: DET},
    "RD__Gender=Masc|Number=Sing": {POS: DET},
    "RD__Number=Sing": {POS: DET},
    "RD__Number=Sing|PronType=Art": {POS: DET},
    "RI__Definite=Ind|Gender=Fem|Number=Plur|PronType=Art": {POS: DET},
    "RI__Definite=Ind|Gender=Fem|Number=Sing|PronType=Art": {POS: DET},
    "RI__Definite=Ind|Gender=Masc|Number=Plur|PronType=Art": {POS: DET},
    "RI__Definite=Ind|Gender=Masc|Number=Sing|PronType=Art": {POS: DET},
    "RI__Definite=Ind|Number=Sing|PronType=Art": {POS: DET},
    "RI__Definite=Ind|PronType=Art": {POS: DET},
    # SP: proper nouns; SW/Sw: foreign material -> X; SYM: symbols
    "SP__Gender=Fem|Number=Plur": {POS: PROPN},
    "SP__NumType=Card": {POS: PROPN},
    "SP___": {POS: PROPN},
    "SW__Foreign=Yes": {POS: X},
    "SW__Foreign=Yes|Gender=Masc": {POS: X},
    "SW__Foreign=Yes|Number=Sing": {POS: X},
    "SYM___": {POS: SYM},
    # S__*: common nouns -> NOUN
    "S__Gender=Fem": {POS: NOUN},
    "S__Gender=Fem|Number=Plur": {POS: NOUN},
    "S__Gender=Fem|Number=Sing": {POS: NOUN},
    "S__Gender=Masc": {POS: NOUN},
    "S__Gender=Masc|Number=Plur": {POS: NOUN},
    "S__Gender=Masc|Number=Sing": {POS: NOUN},
    "S__Gender=Masc|Number=Sing|Tense=Past|VerbForm=Part": {POS: NOUN},
    "S__Number=Plur": {POS: NOUN},
    "S__Number=Sing": {POS: NOUN},
    "S___": {POS: NOUN},
    # NOTE(review): lowercase "Sw___" coexists with the "SW__*" keys above;
    # presumably a distinct tag in the treebank -- confirm before merging.
    "Sw___": {POS: X},
    # T__*: PronType=Tot forms -> DET
    "T__Gender=Fem|Number=Plur|PronType=Tot": {POS: DET},
    "T__Gender=Fem|Number=Sing": {POS: DET},
    "T__Gender=Fem|Number=Sing|PronType=Tot": {POS: DET},
    "T__Gender=Masc|Number=Plur|PronType=Tot": {POS: DET},
    "T__Gender=Masc|Number=Sing|PronType=Tot": {POS: DET},
    "T__Number=Plur|PronType=Tot": {POS: DET},
    "T__PronType=Tot": {POS: DET},
    # VA__*: auxiliary verb forms -> AUX
    "VA__Gender=Fem|Number=Plur|Tense=Past|VerbForm=Part": {POS: AUX},
    "VA__Gender=Fem|Number=Sing|Tense=Past|VerbForm=Part": {POS: AUX},
    "VA__Gender=Masc|Number=Plur|Tense=Past|VerbForm=Part": {POS: AUX},
    "VA__Gender=Masc|Number=Sing|Tense=Past|VerbForm=Part": {POS: AUX},
    "VA__Mood=Cnd|Number=Plur|Person=1|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VA__Mood=Cnd|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VA__Mood=Cnd|Number=Sing|Person=1|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VA__Mood=Cnd|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VA__Mood=Ind|Number=Plur|Person=1|Tense=Fut|VerbForm=Fin": {POS: AUX},
    "VA__Mood=Ind|Number=Plur|Person=1|Tense=Imp|VerbForm=Fin": {POS: AUX},
    "VA__Mood=Ind|Number=Plur|Person=1|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VA__Mood=Ind|Number=Plur|Person=2|Tense=Fut|VerbForm=Fin": {POS: AUX},
    "VA__Mood=Ind|Number=Plur|Person=2|Tense=Imp|VerbForm=Fin": {POS: AUX},
    "VA__Mood=Ind|Number=Plur|Person=2|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VA__Mood=Ind|Number=Plur|Person=3|Tense=Fut|VerbForm=Fin": {POS: AUX},
    "VA__Mood=Ind|Number=Plur|Person=3|Tense=Imp|VerbForm=Fin": {POS: AUX},
    "VA__Mood=Ind|Number=Plur|Person=3|Tense=Past|VerbForm=Fin": {POS: AUX},
    "VA__Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VA__Mood=Ind|Number=Sing|Person=1|Tense=Fut|VerbForm=Fin": {POS: AUX},
    "VA__Mood=Ind|Number=Sing|Person=1|Tense=Imp|VerbForm=Fin": {POS: AUX},
    "VA__Mood=Ind|Number=Sing|Person=1|Tense=Past|VerbForm=Fin": {POS: AUX},
    "VA__Mood=Ind|Number=Sing|Person=1|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VA__Mood=Ind|Number=Sing|Person=2|Tense=Fut|VerbForm=Fin": {POS: AUX},
    "VA__Mood=Ind|Number=Sing|Person=2|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VA__Mood=Ind|Number=Sing|Person=3|Tense=Fut|VerbForm=Fin": {POS: AUX},
    "VA__Mood=Ind|Number=Sing|Person=3|Tense=Imp|VerbForm=Fin": {POS: AUX},
    "VA__Mood=Ind|Number=Sing|Person=3|Tense=Past|VerbForm=Fin": {POS: AUX},
    "VA__Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VA__Mood=Sub|Number=Plur|Person=2|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VA__Mood=Sub|Number=Plur|Person=3|Tense=Imp|VerbForm=Fin": {POS: AUX},
    "VA__Mood=Sub|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VA__Mood=Sub|Number=Sing|Person=1|Tense=Imp|VerbForm=Fin": {POS: AUX},
    "VA__Mood=Sub|Number=Sing|Person=3|Tense=Imp|VerbForm=Fin": {POS: AUX},
    "VA__Mood=Sub|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VA__VerbForm=Ger": {POS: AUX},
    "VA__VerbForm=Inf": {POS: AUX},
    # VM__*: modal verb forms, also mapped to AUX
    "VM__Gender=Fem|Number=Sing|Tense=Past|VerbForm=Part": {POS: AUX},
    "VM__Gender=Masc|Number=Sing|Tense=Past|VerbForm=Part": {POS: AUX},
    "VM__Mood=Cnd|Number=Plur|Person=1|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Cnd|Number=Plur|Person=2|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Cnd|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Cnd|Number=Sing|Person=1|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Cnd|Number=Sing|Person=2|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Cnd|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Imp|Number=Plur|Person=2|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Imp|Number=Sing|Person=2|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Ind|Number=Plur|Person=1|Tense=Fut|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Ind|Number=Plur|Person=1|Tense=Imp|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Ind|Number=Plur|Person=1|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Ind|Number=Plur|Person=2|Tense=Fut|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Ind|Number=Plur|Person=2|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Ind|Number=Plur|Person=3|Tense=Fut|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Ind|Number=Plur|Person=3|Tense=Imp|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Ind|Number=Plur|Person=3|Tense=Past|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Ind|Number=Sing|Person=1|Tense=Imp|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Ind|Number=Sing|Person=1|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Ind|Number=Sing|Person=2|Tense=Fut|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Ind|Number=Sing|Person=2|Tense=Imp|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Ind|Number=Sing|Person=2|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Ind|Number=Sing|Person=3|Tense=Fut|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Ind|Number=Sing|Person=3|Tense=Imp|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Ind|Number=Sing|Person=3|Tense=Past|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Sub|Number=Plur|Person=1|Tense=Imp|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Sub|Number=Plur|Person=1|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Sub|Number=Plur|Person=3|Tense=Imp|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Sub|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Sub|Number=Sing|Person=3|Tense=Imp|VerbForm=Fin": {POS: AUX},
    "VM__Mood=Sub|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin": {POS: AUX},
    "VM__VerbForm=Ger": {POS: AUX},
    "VM__VerbForm=Inf": {POS: AUX},
    # V__*: main verb forms -> VERB
    "V__Gender=Fem|Number=Plur|Tense=Past|VerbForm=Part": {POS: VERB},
    "V__Gender=Fem|Number=Sing|Tense=Past|VerbForm=Part": {POS: VERB},
    "V__Gender=Masc|Number=Plur|Tense=Past|VerbForm=Fin": {POS: VERB},
    "V__Gender=Masc|Number=Plur|Tense=Past|VerbForm=Part": {POS: VERB},
    "V__Gender=Masc|Number=Sing|Tense=Past|VerbForm=Part": {POS: VERB},
    "V__Mood=Cnd|Number=Plur|Person=1|Tense=Pres|VerbForm=Fin": {POS: VERB},
    "V__Mood=Cnd|Number=Plur|Person=2|Tense=Pres|VerbForm=Fin": {POS: VERB},
    "V__Mood=Cnd|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin": {POS: VERB},
    "V__Mood=Cnd|Number=Sing|Person=1|Tense=Pres|VerbForm=Fin": {POS: VERB},
    "V__Mood=Cnd|Number=Sing|Person=2|Tense=Pres|VerbForm=Fin": {POS: VERB},
    "V__Mood=Cnd|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin": {POS: VERB},
    "V__Mood=Imp|Number=Plur|Person=1|Tense=Pres|VerbForm=Fin": {POS: VERB},
    "V__Mood=Imp|Number=Plur|Person=2|Tense=Pres|VerbForm=Fin": {POS: VERB},
    "V__Mood=Imp|Number=Sing|Person=2|Tense=Pres|VerbForm=Fin": {POS: VERB},
    "V__Mood=Imp|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin": {POS: VERB},
    "V__Mood=Ind|Number=Plur|Person=1|Tense=Fut|VerbForm=Fin": {POS: VERB},
    "V__Mood=Ind|Number=Plur|Person=1|Tense=Imp|VerbForm=Fin": {POS: VERB},
    "V__Mood=Ind|Number=Plur|Person=1|Tense=Past|VerbForm=Fin": {POS: VERB},
    "V__Mood=Ind|Number=Plur|Person=1|Tense=Pres|VerbForm=Fin": {POS: VERB},
    "V__Mood=Ind|Number=Plur|Person=2|Tense=Fut|VerbForm=Fin": {POS: VERB},
    "V__Mood=Ind|Number=Plur|Person=2|Tense=Pres|VerbForm=Fin": {POS: VERB},
    "V__Mood=Ind|Number=Plur|Person=3|Tense=Fut|VerbForm=Fin": {POS: VERB},
    "V__Mood=Ind|Number=Plur|Person=3|Tense=Imp|VerbForm=Fin": {POS: VERB},
    "V__Mood=Ind|Number=Plur|Person=3|Tense=Past|VerbForm=Fin": {POS: VERB},
    "V__Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin": {POS: VERB},
    "V__Mood=Ind|Number=Sing|Person=1|Tense=Fut|VerbForm=Fin": {POS: VERB},
    "V__Mood=Ind|Number=Sing|Person=1|Tense=Imp|VerbForm=Fin": {POS: VERB},
    "V__Mood=Ind|Number=Sing|Person=1|Tense=Past|VerbForm=Fin": {POS: VERB},
    "V__Mood=Ind|Number=Sing|Person=1|Tense=Pres|VerbForm=Fin": {POS: VERB},
    "V__Mood=Ind|Number=Sing|Person=2|Tense=Fut|VerbForm=Fin": {POS: VERB},
    "V__Mood=Ind|Number=Sing|Person=2|Tense=Pres|VerbForm=Fin": {POS: VERB},
    "V__Mood=Ind|Number=Sing|Person=3|Tense=Fut|VerbForm=Fin": {POS: VERB},
    "V__Mood=Ind|Number=Sing|Person=3|Tense=Imp|VerbForm=Fin": {POS: VERB},
    "V__Mood=Ind|Number=Sing|Person=3|Tense=Past|VerbForm=Fin": {POS: VERB},
    "V__Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin": {POS: VERB},
    "V__Mood=Ind|Person=3|Tense=Pres|VerbForm=Fin": {POS: VERB},
    "V__Mood=Ind|Tense=Pres|VerbForm=Fin": {POS: VERB},
    "V__Mood=Sub|Number=Plur|Person=1|Tense=Imp|VerbForm=Fin": {POS: VERB},
    "V__Mood=Sub|Number=Plur|Person=1|Tense=Pres|VerbForm=Fin": {POS: VERB},
    "V__Mood=Sub|Number=Plur|Person=2|Tense=Imp|VerbForm=Fin": {POS: VERB},
    "V__Mood=Sub|Number=Plur|Person=2|Tense=Pres|VerbForm=Fin": {POS: VERB},
    "V__Mood=Sub|Number=Plur|Person=3|Tense=Imp|VerbForm=Fin": {POS: VERB},
    "V__Mood=Sub|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin": {POS: VERB},
    "V__Mood=Sub|Number=Sing|Person=1|Tense=Imp|VerbForm=Fin": {POS: VERB},
    "V__Mood=Sub|Number=Sing|Person=1|Tense=Pres|VerbForm=Fin": {POS: VERB},
    "V__Mood=Sub|Number=Sing|Person=2|Tense=Pres|VerbForm=Fin": {POS: VERB},
    "V__Mood=Sub|Number=Sing|Person=3|Tense=Imp|VerbForm=Fin": {POS: VERB},
    "V__Mood=Sub|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin": {POS: VERB},
    "V__Mood=Sub|Number=Sing|Person=3|VerbForm=Fin": {POS: VERB},
    "V__Number=Plur|Tense=Pres|VerbForm=Part": {POS: VERB},
    "V__Number=Sing|Tense=Pres|VerbForm=Part": {POS: VERB},
    "V__Tense=Past|VerbForm=Part": {POS: VERB},
    "V__VerbForm=Ger": {POS: VERB},
    "V__VerbForm=Inf": {POS: VERB},
    # Catch-alls: unknown material and the whitespace pseudo-tag
    "X___": {POS: X},
    "_SP": {POS: SPACE}
}
| 59.910494
| 84
| 0.663335
| 3,020
| 19,411
| 4.047682
| 0.036424
| 0.104712
| 0.122546
| 0.086224
| 0.930792
| 0.90126
| 0.873691
| 0.764316
| 0.666558
| 0.605612
| 0
| 0.007715
| 0.131884
| 19,411
| 323
| 85
| 60.095975
| 0.717702
| 0.000618
| 0
| 0
| 0
| 0
| 0.652511
| 0.619797
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.009404
| 0
| 0.009404
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e5b9fc1ed9ccb2e33f860192f1815ce8ad608e3d
| 6,644
|
py
|
Python
|
tests/sapcai/apis/request/models/test_response.py
|
SAPConversationalAI/SDK-python
|
68cf1502ce61d1930db7f3bf623114f7d7990ce5
|
[
"MIT"
] | 9
|
2019-02-07T06:57:55.000Z
|
2022-02-23T07:18:31.000Z
|
tests/sapcai/apis/request/models/test_response.py
|
SAPConversationalAI/SDK-python
|
68cf1502ce61d1930db7f3bf623114f7d7990ce5
|
[
"MIT"
] | 1
|
2020-06-26T18:31:18.000Z
|
2021-08-31T17:23:46.000Z
|
tests/sapcai/apis/request/models/test_response.py
|
SAPConversationalAI/SDK-python
|
68cf1502ce61d1930db7f3bf623114f7d7990ce5
|
[
"MIT"
] | 9
|
2019-01-15T10:40:50.000Z
|
2021-06-17T00:10:27.000Z
|
# coding: utf-8
import json
from sapcai import Response
from sapcai import Intent
from sapcai import Entity
class TestResponse(object):
    """Tests for sapcai.Response: construction, attributes, and helper queries."""

    @staticmethod
    def _payload(intents=({'slug': 'weather', 'confidence': 0.67},)):
        """Return the canonical /request API JSON body used by every test.

        ``intents`` is the sequence of intent dicts embedded in the payload;
        it defaults to the single 'weather' intent and is taken as a tuple
        so the default argument is immutable.  Factoring the payload out
        replaces four near-identical copies of the same ~900-char literal.
        """
        return json.dumps({
            'results': {
                'uuid': 'db4837b0-8359-4505-9678-c4081a6f2ad8',
                'source': 'What is the weather in London tomorrow? And in Paris?',
                'intents': list(intents),
                'act': 'wh-query',
                'type': 'desc:desc',
                'sentiment': 'neutral',
                'entities': {
                    'action': [{'agent': 'the weather in London', 'tense': 'present', 'raw': 'is', 'confidence': 0.89}],
                    'location': [
                        {'formated': 'London, London, Greater London, England, United Kingdom', 'lat': 51.5073509, 'lng': -0.1277583, 'raw': 'London', 'confidence': 0.97},
                        {'formated': 'Paris, Paris, Île-de-France, France', 'lat': 48.856614, 'lng': 2.3522219, 'raw': 'Paris', 'confidence': 0.83},
                    ],
                    'datetime': [{'iso': '2016-07-11T10:00:00+00:00', 'raw': 'tomorrow', 'confidence': 0.95}],
                },
                'language': 'en',
                'processing_language': 'en',
                'version': '2.1.1',
                'timestamp': '2016-07-10T23:17:59+02:00',
                'status': 200,
            },
            'message': 'Requests rendered with success',
        })

    def test_instanciable(self):
        # Building a Response from a complete payload must not raise.
        Response(self._payload())

    def test_attributes(self):
        # Every top-level attribute mirrors the corresponding payload field.
        response = Response(self._payload())
        assert response.raw == self._payload()
        assert response.source == 'What is the weather in London tomorrow? And in Paris?'
        assert type(response.intents) is list
        assert type(response.intents[0]) is Intent
        assert response.act == 'wh-query'
        assert response.type == 'desc:desc'
        assert response.sentiment == 'neutral'
        assert type(response.entities) is list
        assert type(response.entities[0]) is Entity
        assert response.language == 'en'
        assert response.processing_language == 'en'
        assert response.version == '2.1.1'
        assert response.timestamp == '2016-07-10T23:17:59+02:00'
        assert response.status == 200

    def test_helpers(self):
        response = Response(self._payload())
        # intent() is the first (highest-ranked) parsed intent.
        assert response.intent() == response.intents[0]
        # get() returns the first entity whose lowercased name matches.
        entity = next(
            (e for e in response.entities if e.name.lower() == 'location'), None)
        assert response.get('location') == entity
        # all() returns every matching entity, in order.
        entities = [e for e in response.entities if e.name.lower() == 'location']
        assert response.all('location') == entities
        assert response.is_assert() is False
        assert response.is_command() is False
        assert response.is_wh_query() is True
        assert response.is_yn_query() is False
        assert response.is_abbreviation() is False
        assert response.is_entity() is False
        assert response.is_description() is True
        assert response.is_human() is False
        assert response.is_location() is False
        assert response.is_number() is False
        assert response.is_vpositive() is False
        assert response.is_positive() is False
        assert response.is_neutral() is True
        assert response.is_negative() is False
        assert response.is_vnegative() is False

    def test_missing_array(self):
        # With an empty 'intents' array, intent() yields None instead of raising.
        response = Response(self._payload(intents=()))
        assert response.intent() is None
| 102.215385
| 948
| 0.652017
| 886
| 6,644
| 4.857788
| 0.142212
| 0.094331
| 0.055762
| 0.046004
| 0.828067
| 0.742797
| 0.742797
| 0.742797
| 0.736292
| 0.736292
| 0
| 0.102339
| 0.124925
| 6,644
| 64
| 949
| 103.8125
| 0.637943
| 0.001957
| 0
| 0.109091
| 0
| 0
| 0.45663
| 0.068638
| 0
| 0
| 0
| 0
| 0.6
| 1
| 0.072727
| false
| 0
| 0.072727
| 0
| 0.163636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e5e7eecbb5c389b0bd104435bdcc130302db71ac
| 285
|
py
|
Python
|
tests/fixtures/devalidation/B.py
|
Nitnelav/synpp
|
b2b2136a99701ce77fd4fea939f8efb521f67c21
|
[
"MIT"
] | 6
|
2020-04-01T12:06:20.000Z
|
2021-11-02T19:10:27.000Z
|
tests/fixtures/devalidation/B.py
|
Nitnelav/synpp
|
b2b2136a99701ce77fd4fea939f8efb521f67c21
|
[
"MIT"
] | 26
|
2019-12-08T12:25:39.000Z
|
2022-02-28T07:24:56.000Z
|
tests/fixtures/devalidation/B.py
|
Nitnelav/synpp
|
b2b2136a99701ce77fd4fea939f8efb521f67c21
|
[
"MIT"
] | 8
|
2020-06-19T15:49:46.000Z
|
2021-07-06T10:15:37.000Z
|
def configure(context):
    """Register the two upstream devalidation fixture stages as dependencies."""
    for stage_name in ("tests.fixtures.devalidation.A1",
                       "tests.fixtures.devalidation.A2"):
        context.stage(stage_name)
def execute(context):
    """Return the sum of the outputs of the A1 and A2 fixture stages."""
    first = context.stage("tests.fixtures.devalidation.A1")
    second = context.stage("tests.fixtures.devalidation.A2")
    return first + second
| 28.5
| 56
| 0.722807
| 35
| 285
| 5.885714
| 0.314286
| 0.23301
| 0.330097
| 0.485437
| 0.76699
| 0.76699
| 0
| 0
| 0
| 0
| 0
| 0.032389
| 0.133333
| 285
| 9
| 57
| 31.666667
| 0.801619
| 0
| 0
| 0
| 0
| 0
| 0.421053
| 0.421053
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
970e2d95250579c5f3f3d0215b166d1cf1c473e0
| 6,078
|
py
|
Python
|
test/api/test_api_actors.py
|
bplusv/ufs-casting-agency
|
87f7835de142d1cf528cfab1d0219258d3938510
|
[
"MIT"
] | null | null | null |
test/api/test_api_actors.py
|
bplusv/ufs-casting-agency
|
87f7835de142d1cf528cfab1d0219258d3938510
|
[
"MIT"
] | 1
|
2020-10-08T18:46:06.000Z
|
2020-10-08T18:47:14.000Z
|
test/api/test_api_actors.py
|
bplusv/ufs-casting-agency
|
87f7835de142d1cf528cfab1d0219258d3938510
|
[
"MIT"
] | 1
|
2020-10-13T01:51:00.000Z
|
2020-10-13T01:51:00.000Z
|
from flask_sqlalchemy import SQLAlchemy
from src.models import Actor
from src.auth import UserRole
db = SQLAlchemy()
def test_get_actor(client, auth):
    """GET /api/actors/<id> returns the actor with every expected field."""
    actor_id = 1
    headers = {'Authorization': auth.bearer_token(UserRole.CASTING_ASSISTANT)}
    response = client.get(f'/api/actors/{actor_id}', headers=headers)
    body = response.get_json()
    assert response.status_code == 200
    for field in ('id', 'name', 'age', 'gender', 'movies'):
        assert field in body
def test_get_actor_not_found(client, auth):
    """GET /api/actors/<id> for an unknown id yields a 404 error envelope.

    Fix: the request path was 'api/actors/...' — the only URL in this file
    missing its leading slash; normalized to root-relative '/api/actors/...'
    to match every other request.
    """
    actor_id = 99
    res = client.get(f'/api/actors/{actor_id}', headers={
        'Authorization': auth.bearer_token(UserRole.CASTING_ASSISTANT)})
    data = res.get_json()
    assert res.status_code == 404
    assert data['success'] is False
    assert data['error'] == 404
    assert data['message'] == 'Actor not found'
def test_get_actors(client, auth):
    """GET /api/actors lists all actors, each carrying the full field set."""
    headers = {'Authorization': auth.bearer_token(UserRole.CASTING_ASSISTANT)}
    response = client.get('/api/actors', headers=headers)
    body = response.get_json()
    assert response.status_code == 200
    assert isinstance(body, list)
    for entry in body:
        for field in ('id', 'name', 'age', 'gender', 'movies'):
            assert field in entry
def test_get_actors_not_found(client, auth):
    """GET /api/actors returns a 404 envelope once every actor is removed."""
    Actor.query.delete()
    db.session.commit()
    headers = {'Authorization': auth.bearer_token(UserRole.CASTING_ASSISTANT)}
    response = client.get('/api/actors', headers=headers)
    body = response.get_json()
    assert response.status_code == 404
    assert isinstance(body, dict)
    assert body['success'] is False
    assert body['error'] == 404
    assert body['message'] == 'No Actors added yet'
def test_delete_actor(client, auth):
    """DELETE /api/actors/<id> removes the row and reports success."""
    actor_id = 1
    headers = {'Authorization': auth.bearer_token(UserRole.CASTING_DIRECTOR)}
    response = client.delete(f'/api/actors/{actor_id}', headers=headers)
    body = response.get_json()
    assert response.status_code == 200
    assert isinstance(body, dict)
    assert body['success'] is True
    # The row must actually be gone from the database.
    assert Actor.query.get(actor_id) is None
def test_delete_actor_not_found(client, auth):
    """DELETE /api/actors/<id> for an unknown id yields a 404 error envelope."""
    actor_id = 99
    headers = {'Authorization': auth.bearer_token(UserRole.CASTING_DIRECTOR)}
    response = client.delete(f'/api/actors/{actor_id}', headers=headers)
    body = response.get_json()
    assert response.status_code == 404
    assert isinstance(body, dict)
    assert body['success'] is False
    assert body['error'] == 404
    assert body['message'] == 'Actor not found'
def test_post_actor_without_movies(client, auth):
    """POST /api/actors creates an actor when no movie ids are supplied."""
    post_data = {
        'name': 'Lisa Mcdowell',
        'age': 70,
        'gender': 'female'
    }
    headers = {'Authorization': auth.bearer_token(UserRole.CASTING_DIRECTOR)}
    response = client.post('/api/actors', json=post_data, headers=headers)
    body = response.get_json()
    assert response.status_code == 200
    assert isinstance(body, dict)
    assert body['success'] is True
    assert 'actor_id' in body
    # The persisted row must mirror the submitted fields.
    created = Actor.query.get(body['actor_id'])
    assert created is not None
    assert created.name == post_data['name']
    assert created.age == post_data['age']
    assert created.gender.name.lower() == post_data['gender']
def test_post_actor_with_movies(client, auth):
    """POST /api/actors creates an actor and links the given movie ids."""
    post_data = {
        'name': 'Lisa Mcdowell',
        'age': 70,
        'gender': 'female',
        'movies': [2, 1]
    }
    headers = {'Authorization': auth.bearer_token(UserRole.CASTING_DIRECTOR)}
    response = client.post('/api/actors', json=post_data, headers=headers)
    body = response.get_json()
    assert response.status_code == 200
    assert isinstance(body, dict)
    assert body['success'] is True
    assert 'actor_id' in body
    created = Actor.query.get(body['actor_id'])
    assert created is not None
    assert created.name == post_data['name']
    assert created.age == post_data['age']
    assert created.gender.name.lower() == post_data['gender']
    # Linked movies match the submitted ids, order-insensitively.
    assert {movie.id for movie in created.movies} == set(post_data['movies'])
def test_post_actor_unprocessable(client, auth):
    """POST /api/actors with null required fields yields a 422 envelope."""
    post_data = {
        'name': 'Angela Rubius',
        'age': None,
        'gender': None
    }
    headers = {'Authorization': auth.bearer_token(UserRole.CASTING_DIRECTOR)}
    response = client.post('/api/actors', json=post_data, headers=headers)
    body = response.get_json()
    assert response.status_code == 422
    assert isinstance(body, dict)
    assert body['success'] is False
    assert body['error'] == 422
    assert body['message'] == 'Unprocessable request to add new Actor'
def test_patch_actor_with_name_age(client, auth):
    """PATCH /api/actors/<id> updates only the supplied name and age fields."""
    actor_id = 2
    patch_data = {
        'name': 'Jane Gainwell',
        'age': 21
    }
    headers = {'Authorization': auth.bearer_token(UserRole.CASTING_DIRECTOR)}
    response = client.patch(f'/api/actors/{actor_id}', json=patch_data,
                            headers=headers)
    body = response.get_json()
    assert response.status_code == 200
    assert isinstance(body, dict)
    assert body['success'] is True
    # The persisted row reflects the patched fields.
    updated = Actor.query.get(actor_id)
    assert updated.name == patch_data['name']
    assert updated.age == patch_data['age']
def test_patch_actor_with_gender_movies(client, auth):
    """PATCH /api/actors/<id> updates the actor's gender and movie links."""
    actor_id = 2
    changes = {
        'gender': 'female',
        'movies': [2, 1]
    }
    headers = {'Authorization': auth.bearer_token(UserRole.CASTING_DIRECTOR)}
    response = client.patch(f'/api/actors/{actor_id}', json=changes, headers=headers)
    body = response.get_json()

    assert response.status_code == 200
    assert isinstance(body, dict)
    assert body['success'] is True

    # The database row must reflect the patched fields.
    updated = Actor.query.get(actor_id)
    assert updated.gender.name.lower() == changes['gender']
    assert {movie.id for movie in updated.movies} == set(changes['movies'])
def test_patch_actor_not_found(client, auth):
    """PATCH on a nonexistent actor id returns a 404 error body."""
    actor_id = 99
    changes = {
        'age': 10
    }
    headers = {'Authorization': auth.bearer_token(UserRole.CASTING_DIRECTOR)}
    response = client.patch(f'/api/actors/{actor_id}', json=changes, headers=headers)
    body = response.get_json()

    assert response.status_code == 404
    assert isinstance(body, dict)
    assert body['success'] is False
    assert body['error'] == 404
    assert body['message'] == 'Actor not found'
| 32.15873
| 79
| 0.65696
| 814
| 6,078
| 4.738329
| 0.103194
| 0.038113
| 0.074669
| 0.093337
| 0.824994
| 0.788955
| 0.788955
| 0.767695
| 0.747472
| 0.747472
| 0
| 0.015231
| 0.211418
| 6,078
| 188
| 80
| 32.329787
| 0.789485
| 0
| 0
| 0.658385
| 0
| 0
| 0.141165
| 0.025173
| 0
| 0
| 0
| 0
| 0.42236
| 1
| 0.074534
| false
| 0
| 0.018634
| 0
| 0.093168
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8add1020a6e3068b9d3b313aa6f82e7f1f80fdd7
| 4,032
|
py
|
Python
|
tarpan/plot/kde_test.py
|
evgenyneu/tarpan
|
34a5dd8d98b09341e28946ef6aa03da03e62cf1c
|
[
"Unlicense"
] | 2
|
2020-10-12T15:12:09.000Z
|
2022-03-01T03:10:12.000Z
|
tarpan/plot/kde_test.py
|
evgenyneu/tarpan
|
34a5dd8d98b09341e28946ef6aa03da03e62cf1c
|
[
"Unlicense"
] | 1
|
2022-03-01T13:05:54.000Z
|
2022-03-01T13:05:54.000Z
|
tarpan/plot/kde_test.py
|
evgenyneu/tarpan
|
34a5dd8d98b09341e28946ef6aa03da03e62cf1c
|
[
"Unlicense"
] | null | null | null |
import os
import shutil
import pytest
from pytest import approx
from tarpan.plot.kde import (
gaussian_kde, save_scatter_and_kde, scatter_and_kde)
def test_kde_with_uncerts():
    """KDE evaluated at two query points matches known reference densities."""
    densities = gaussian_kde([-10.1, 9.8], [-10, 10], [1.4, 1.8])

    assert densities.shape[0] == 2
    assert densities[0] == approx(0.14211638124953146, rel=1e-15)
    assert densities[1] == approx(0.11013534965419111, rel=1e-15)
def test_kde_with_uncerts_unequal_data():
    """A ValueError is raised when data and uncertainty lengths differ."""
    eval_points = [-10.1, 9.8]
    data = [-10, 10]
    uncertainties = [1.4, 1.8, 4.3]  # one more than data

    with pytest.raises(ValueError):
        gaussian_kde(eval_points, data, uncertainties)
def test_kde_with_uncerts_empty_arrays():
    """Empty data and uncertainty arrays produce an empty density result."""
    densities = gaussian_kde([-10.1, 9.8], [], [])
    assert densities.shape[0] == 0
# save_scatter_and_kde
# ----------------------
def test_save_scatter_and_kde_single():
    """Saving a single-group scatter/KDE plot writes the expected PDF file."""
    outdir = "tarpan/plot/model_info/kde_test"
    # Start from a clean output directory so the file-exists assertion is meaningful.
    if os.path.isdir(outdir):
        shutil.rmtree(outdir)

    sodium_abundances = [
        -1.22, -1.15, -0.97, -0.68, -0.37, -0.48, -0.73, -0.61, -1.32,
        -0.62, -1.13, -0.65, -0.90, -1.29, -1.19, -0.54, -0.64, -0.45,
        -1.21, -0.75, -0.66, -0.71, -0.61, -0.59, -1.07, -0.65, -0.59]

    sodium_uncertainties = [
        0.13, 0.14, 0.17, 0.07, 0.11, 0.12, 0.23, 0.05, 0.04,
        0.30, 0.11, 0.13, 0.16, 0.03, 0.18, 0.20, 0.16, 0.16,
        0.11, 0.09, 0.20, 0.10, 0.08, 0.04, 0.04, 0.23, 0.19]

    save_scatter_and_kde(
        values=[sodium_abundances],
        uncertainties=[sodium_uncertainties],
        title="Sodium abundances in RGB stars of NGC 288",
        xlabel="Sodium abundance [Na/H]",
        ylabel=["Star number", "Probability density"])

    assert os.path.isfile(os.path.join(outdir, "scatter_kde.pdf"))
def test_save_scatter_and_kde_multiple():
    """Saving a two-group scatter/KDE plot writes the PDF and returns two axes."""
    outdir = "tarpan/plot/model_info/kde_test"
    # Start from a clean output directory so the file-exists assertion is meaningful.
    if os.path.isdir(outdir):
        shutil.rmtree(outdir)

    agb_values = [-0.99, -1.37, -1.38, -1.51, -1.29, -1.34, -1.50, -0.93, -0.83,
                  -1.46, -1.07, -1.28, -0.73]

    agb_uncertainties = [0.12, 0.05, 0.11, 0.18, 0.03, 0.19, 0.18, 0.12, 0.19,
                         0.09, 0.11, 0.16, 0.08]

    rgb_values = [
        -1.22, -1.15, -0.97, -0.68, -0.37, -0.48, -0.73, -0.61, -1.32,
        -0.62, -1.13, -0.65, -0.90, -1.29, -1.19, -0.54, -0.64, -0.45,
        -1.21, -0.75, -0.66, -0.71, -0.61, -0.59, -1.07, -0.65, -0.59]

    rgb_uncertainties = [
        0.13, 0.14, 0.17, 0.07, 0.11, 0.12, 0.23, 0.05, 0.04,
        0.30, 0.11, 0.13, 0.16, 0.03, 0.18, 0.20, 0.16, 0.16,
        0.11, 0.09, 0.20, 0.10, 0.08, 0.04, 0.04, 0.23, 0.19]

    fig, axes = save_scatter_and_kde(
        values=[agb_values, rgb_values],
        uncertainties=[agb_uncertainties, rgb_uncertainties],
        title="Sodium abundances in RGB stars of NGC 288",
        xlabel="Sodium abundance [Na/H]",
        ylabel=["Star number", "Probability density"],
        legend_labels=["AGB", "RGB"])

    assert os.path.isfile(os.path.join(outdir, "scatter_kde.pdf"))
    assert len(axes) == 2
def test_scatter_and_kde_multiple():
    """scatter_and_kde with two groups returns a figure with two axes."""
    agb_values = [-0.99, -1.37, -1.38, -1.51, -1.29, -1.34, -1.50, -0.93, -0.83,
                  -1.46, -1.07, -1.28, -0.73]

    agb_uncertainties = [0.12, 0.05, 0.11, 0.18, 0.03, 0.19, 0.18, 0.12, 0.19,
                         0.09, 0.11, 0.16, 0.08]

    rgb_values = [
        -1.22, -1.15, -0.97, -0.68, -0.37, -0.48, -0.73, -0.61, -1.32,
        -0.62, -1.13, -0.65, -0.90, -1.29, -1.19, -0.54, -0.64, -0.45,
        -1.21, -0.75, -0.66, -0.71, -0.61, -0.59, -1.07, -0.65, -0.59]

    rgb_uncertainties = [
        0.13, 0.14, 0.17, 0.07, 0.11, 0.12, 0.23, 0.05, 0.04,
        0.30, 0.11, 0.13, 0.16, 0.03, 0.18, 0.20, 0.16, 0.16,
        0.11, 0.09, 0.20, 0.10, 0.08, 0.04, 0.04, 0.23, 0.19]

    fig, axes = scatter_and_kde(
        values=[agb_values, rgb_values],
        uncertainties=[agb_uncertainties, rgb_uncertainties],
        title="Sodium abundances in RGB stars of NGC 288",
        xlabel="Sodium abundance [Na/H]",
        ylabel=["Star number", "Probability density"],
        legend_labels=["AGB", "RGB"])

    assert len(axes) == 2
| 34.758621
| 77
| 0.53373
| 727
| 4,032
| 2.884457
| 0.174691
| 0.018598
| 0.024797
| 0.048641
| 0.831187
| 0.771578
| 0.732475
| 0.719123
| 0.719123
| 0.719123
| 0
| 0.249834
| 0.251488
| 4,032
| 115
| 78
| 35.06087
| 0.444997
| 0.010665
| 0
| 0.675
| 0
| 0
| 0.096839
| 0.015554
| 0
| 0
| 0
| 0
| 0.1
| 1
| 0.075
| false
| 0
| 0.0625
| 0
| 0.1375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c18a8d6533415393ef43e981358d63f791711f25
| 178
|
py
|
Python
|
selene/api/__init__.py
|
KalinkinaMaria/selene
|
859e1102c85740b52af8d0f08dd6b6490b4bd2ff
|
[
"MIT"
] | null | null | null |
selene/api/__init__.py
|
KalinkinaMaria/selene
|
859e1102c85740b52af8d0f08dd6b6490b4bd2ff
|
[
"MIT"
] | 1
|
2021-06-02T04:21:17.000Z
|
2021-06-02T04:21:17.000Z
|
selene/api/__init__.py
|
vkarpenko/selene
|
4776357430c940be38f38be9981006dd156f9730
|
[
"MIT"
] | null | null | null |
from selene import config, browser, browsers
from selene.support.jquery_style_selectors import s, ss
from selene.support import by
from selene.support.conditions import be, have
| 35.6
| 55
| 0.837079
| 27
| 178
| 5.444444
| 0.592593
| 0.272109
| 0.346939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11236
| 178
| 4
| 56
| 44.5
| 0.93038
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
c1d004f6223a8592517685b422d9b579431df979
| 54
|
py
|
Python
|
__init__.py
|
Hiddenstorm/DiscAPI
|
172c535b99f0888a575e2c5b95a5c42333862057
|
[
"MIT"
] | 1
|
2022-02-21T21:56:49.000Z
|
2022-02-21T21:56:49.000Z
|
__init__.py
|
Hiddenstorm/DiscAPI
|
172c535b99f0888a575e2c5b95a5c42333862057
|
[
"MIT"
] | null | null | null |
__init__.py
|
Hiddenstorm/DiscAPI
|
172c535b99f0888a575e2c5b95a5c42333862057
|
[
"MIT"
] | null | null | null |
from .DiscAPI import Client
from .DiscAPI import Guild
| 27
| 27
| 0.833333
| 8
| 54
| 5.625
| 0.625
| 0.488889
| 0.755556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12963
| 54
| 2
| 28
| 27
| 0.957447
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
c1fe825a8c2d1e7b9eec6628c87038caa46341b1
| 9
|
py
|
Python
|
python/testData/psi/QualifiedTarget.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 331
|
2020-05-27T21:54:25.000Z
|
2022-03-28T04:26:25.000Z
|
python/testData/psi/QualifiedTarget.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 187
|
2020-05-24T00:45:12.000Z
|
2022-03-30T20:22:05.000Z
|
python/testData/psi/QualifiedTarget.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 50
|
2020-09-07T06:13:11.000Z
|
2022-03-30T19:38:29.000Z
|
a.b = 23
| 4.5
| 8
| 0.444444
| 3
| 9
| 1.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0.333333
| 9
| 1
| 9
| 9
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e707054fbe855d1ddb61f486f3b512f224905251
| 15,310
|
py
|
Python
|
raiden_contracts/tests/fixtures/channel_test_values.py
|
marcosmartinez7/lumino-contracts
|
700d6cb6b4c90d0173b3d206238fd31a02dcb9bd
|
[
"MIT"
] | 3
|
2019-06-12T14:50:12.000Z
|
2020-12-25T07:25:23.000Z
|
raiden_contracts/tests/fixtures/channel_test_values.py
|
marcosmartinez7/lumino-contracts
|
700d6cb6b4c90d0173b3d206238fd31a02dcb9bd
|
[
"MIT"
] | 2
|
2019-12-08T21:06:56.000Z
|
2021-01-21T02:44:58.000Z
|
raiden_contracts/tests/fixtures/channel_test_values.py
|
marcosmartinez7/lumino-contracts
|
700d6cb6b4c90d0173b3d206238fd31a02dcb9bd
|
[
"MIT"
] | 5
|
2019-06-12T14:02:07.000Z
|
2020-12-10T11:21:37.000Z
|
from raiden_contracts.tests.utils import MAX_UINT256, ChannelValues, LockedAmounts
# We must cover the edge cases documented in
# https://github.com/raiden-network/raiden-contracts/issues/188
# The scope is to make sure that if someone uses an old balance proof, this cannot be used as
# an attack to steal tokens.
# For invalid balance proofs (created and signed with an unofficial Raiden client),
# we cannot determine and guarantee correctness. There are specific constraints that the
# Raiden client must enforce that guarantee correctness.
# For all valid last balance proofs provided we have manually constructed a suite of old balance
# proof pairs that could be used for attacks (documented edge cases).
# We will test that using old balance proofs does not result in cheating, therefore the attacker
# (participant who provides the old balance proof) must not receive more tokens than in the case
# where he provides a `valid last` balance proof.
# Settlement test matrix.  Each entry is a dict with:
#   "valid_last": the pair of correct final balance proofs (participant1, participant2)
#   "old_last":   older participant1 proofs that participant2 might submit instead
#   "last_old":   older participant2 proofs that participant1 might submit instead
channel_settle_test_values = [
    {
        "valid_last": (
            ChannelValues(
                deposit=35,
                withdrawn=5,
                transferred=20020,
                locked_amounts=LockedAmounts(claimable_locked=3, unclaimable_locked=1),
            ),
            ChannelValues(
                deposit=40,
                withdrawn=10,
                transferred=20030,
                locked_amounts=LockedAmounts(claimable_locked=4, unclaimable_locked=2),
            ),
        ),
        # participant2 provides a valid but old balance proof of participant1
        "old_last": [
            # participant2 does not send participant1's balance proof
            ChannelValues(deposit=35, withdrawn=5, transferred=0, locked_amounts=LockedAmounts()),
            # participant2 provides an old participant1 balance proof with a smaller
            # transferred amount
            ChannelValues(
                deposit=35,
                withdrawn=5,
                transferred=10000,
                locked_amounts=LockedAmounts(claimable_locked=3, unclaimable_locked=1),
            ),
            # participant2 provides an old participant1 balance proof with a smaller
            # claimable locked amount
            ChannelValues(
                deposit=35,
                withdrawn=5,
                transferred=20020,
                locked_amounts=LockedAmounts(claimable_locked=0, unclaimable_locked=1),
            ),
            # participant2 provides an old participant1 balance proof with a smaller
            # unclaimable locked amount
            ChannelValues(
                deposit=35,
                withdrawn=5,
                transferred=20020,
                locked_amounts=LockedAmounts(claimable_locked=3, unclaimable_locked=0),
            ),
            # participant2 provides an old participant1 balance proof with a smaller transferred
            # & claimable locked amount
            ChannelValues(
                deposit=35,
                withdrawn=5,
                transferred=10000,
                locked_amounts=LockedAmounts(claimable_locked=0, unclaimable_locked=1),
            ),
            # participant2 provides an old participant1 balance proof with all values smaller
            ChannelValues(
                deposit=35,
                withdrawn=5,
                transferred=10000,
                locked_amounts=LockedAmounts(claimable_locked=0, unclaimable_locked=0),
            ),
            # participant2 provides an old participant1 balance proof, but with the same
            # transferred + claimable_locked
            # This should have the same final participant balances as the valid last balance proofs
            # locked amount cannot be bigger than the available deposit at that time
            # 18 is the maximum locked amount that can happen with valid balance proofs
            ChannelValues(
                deposit=35,
                withdrawn=5,
                transferred=20006,
                locked_amounts=LockedAmounts(claimable_locked=17, unclaimable_locked=1),
            ),
            # participant2 provides an old participant1 balance proof, with a higher
            # unclaimable locked amount can happen if expired transfers are removed
            # from the merkle tree
            ChannelValues(
                deposit=35,
                withdrawn=5,
                transferred=20020,
                locked_amounts=LockedAmounts(claimable_locked=3, unclaimable_locked=12),
            ),
            # participant2 provides an old participant1 balance proof with a higher
            # claimable locked amount, but lower transferred + claimable_locked
            # A higher claimable locked amount can happen even if the locked tokens are
            # eventually claimed off-chain (become transferred amount).
            # This is because we can register secrets on-chain at any point in time.
            ChannelValues(
                deposit=35,
                withdrawn=5,
                transferred=10020,
                locked_amounts=LockedAmounts(claimable_locked=17, unclaimable_locked=1),
            ),
            # participant2 provides an old participant1 balance proof with a higher
            # unclaimable locked amount and a higher claimable locked amount but lower
            # transferred + claimable_locked
            ChannelValues(
                deposit=35,
                withdrawn=5,
                transferred=10020,
                locked_amounts=LockedAmounts(claimable_locked=17, unclaimable_locked=10),
            ),
        ],
        # participant1 provides a valid but old participant2 balance proof
        # these examples must maintain the same order of calculating the balances
        # imposed by transferred2 + locked2 >= transferred1 + locked1
        "last_old": [
            # participant1 provides an old participant2 balance proof with a smaller
            # transferred amount
            ChannelValues(
                deposit=40,
                withdrawn=10,
                transferred=20020,
                locked_amounts=LockedAmounts(claimable_locked=4, unclaimable_locked=2),
            ),
            # participant1 provides an old participant2 balance proof with a smaller
            # claimable locked amount
            ChannelValues(
                deposit=40,
                withdrawn=10,
                transferred=20030,
                locked_amounts=LockedAmounts(claimable_locked=0, unclaimable_locked=2),
            ),
            # participant1 provides an old participant2 balance proof with a smaller
            # unclaimable locked amount
            ChannelValues(
                deposit=40,
                withdrawn=10,
                transferred=20030,
                locked_amounts=LockedAmounts(claimable_locked=4, unclaimable_locked=0),
            ),
            # participant1 provides an old participant2 balance proof with a smaller transferred
            # & claimable locked amount
            ChannelValues(
                deposit=40,
                withdrawn=10,
                transferred=20022,
                locked_amounts=LockedAmounts(claimable_locked=0, unclaimable_locked=2),
            ),
            # participant1 provides an old participant2 balance proof with all values smaller
            ChannelValues(
                deposit=40,
                withdrawn=10,
                transferred=20024,
                locked_amounts=LockedAmounts(claimable_locked=0, unclaimable_locked=0),
            ),
            # participant1 provides an old participant2 balance proof, but with the same
            # transferred + claimable_locked
            # This should have the same final participant balances as the valid last balance proofs
            ChannelValues(
                deposit=40,
                withdrawn=10,
                transferred=19994,
                locked_amounts=LockedAmounts(claimable_locked=40, unclaimable_locked=2),
            ),
            # participant1 provides an old participant2 balance proof, with a higher
            # unclaimable locked amount
            # can happen if expired transfers are removed from the merkle tree
            ChannelValues(
                deposit=40,
                withdrawn=10,
                transferred=20030,
                locked_amounts=LockedAmounts(claimable_locked=4, unclaimable_locked=20),
            ),
            # participant1 provides an old participant2 balance proof with a higher
            # claimable locked amount,
            # but lower transferred + claimable_locked
            # A higher claimable locked amount can happen even if the locked tokens are
            # eventually claimed off-chain (become transferred amount).
            # This is because we can register secrets on-chain at any point in time.
            ChannelValues(
                deposit=40,
                withdrawn=10,
                transferred=19990,
                locked_amounts=LockedAmounts(claimable_locked=40, unclaimable_locked=2),
            ),
            # participant1 provides an old participant2 balance proof with a higher
            # unclaimable locked amount and a higher claimable locked amount but lower
            # transferred + claimable_locked
            ChannelValues(
                deposit=40,
                withdrawn=10,
                transferred=19990,
                locked_amounts=LockedAmounts(claimable_locked=40, unclaimable_locked=10),
            ),
        ],
    },
    {
        # neither participants provide balance proofs
        "valid_last": (
            ChannelValues(deposit=40, withdrawn=10, transferred=0),
            ChannelValues(deposit=35, withdrawn=5, transferred=0),
        )
    },
    {
        # both balance proofs provided are valid
        "valid_last": (
            ChannelValues(
                deposit=35,
                withdrawn=5,
                transferred=20,
                locked_amounts=LockedAmounts(claimable_locked=4, unclaimable_locked=0),
            ),
            ChannelValues(
                deposit=40,
                withdrawn=10,
                transferred=30,
                locked_amounts=LockedAmounts(claimable_locked=4, unclaimable_locked=2),
            ),
        )
    },
    {
        # Participants have withdrawn all their tokens already
        "valid_last": (
            ChannelValues(deposit=5, withdrawn=15, transferred=20),
            ChannelValues(deposit=20, withdrawn=10, transferred=30),
        )
    },
    {
        # Participants have withdrawn all their finalized transfer tokens except locked,
        "valid_last": (
            ChannelValues(
                deposit=5,
                withdrawn=5,
                transferred=20,
                locked_amounts=LockedAmounts(claimable_locked=4, unclaimable_locked=1),
            ),
            ChannelValues(
                deposit=25,
                withdrawn=5,
                transferred=30,
                locked_amounts=LockedAmounts(claimable_locked=2, unclaimable_locked=3),
            ),
        )
    },
]
# Pairs of (participant1, participant2) balance proofs that a correct Raiden
# client would never produce; settlement with these must not let either side
# steal tokens (overflows, oversized locked amounts, etc.).
channel_settle_invalid_test_values = [
    (
        # bigger locked amounts than what remains in the contract after settlement
        ChannelValues(
            deposit=35,
            withdrawn=5,
            transferred=20020,
            locked_amounts=LockedAmounts(claimable_locked=30000000, unclaimable_locked=10000000),
        ),
        ChannelValues(
            deposit=40,
            withdrawn=10,
            transferred=20030,
            locked_amounts=LockedAmounts(claimable_locked=10000000, unclaimable_locked=40000000),
        ),
    ),
    # participant2 does not provide a balance proof + locked amount too big
    (
        ChannelValues(
            deposit=35,
            withdrawn=5,
            transferred=0,
            locked_amounts=LockedAmounts(claimable_locked=0, unclaimable_locked=0),
        ),
        ChannelValues(
            deposit=40,
            withdrawn=10,
            transferred=30,
            locked_amounts=LockedAmounts(claimable_locked=4, unclaimable_locked=2),
        ),
    ),
    # Participants have withdrawn all their finalized transfer tokens already,
    # only locked tokens left
    (
        ChannelValues(
            deposit=5,
            withdrawn=10,
            transferred=20,
            locked_amounts=LockedAmounts(claimable_locked=4, unclaimable_locked=1),
        ),
        ChannelValues(
            deposit=20,
            withdrawn=5,
            transferred=30,
            locked_amounts=LockedAmounts(claimable_locked=2, unclaimable_locked=3),
        ),
    ),
    (
        ChannelValues(deposit=5, withdrawn=5, transferred=20),
        ChannelValues(deposit=10, withdrawn=10, transferred=30),
    ),
    (
        ChannelValues(
            deposit=5,
            withdrawn=5,
            transferred=20,
            locked_amounts=LockedAmounts(claimable_locked=1, unclaimable_locked=3),
        ),
        ChannelValues(
            deposit=10,
            withdrawn=10,
            transferred=30,
            locked_amounts=LockedAmounts(claimable_locked=2, unclaimable_locked=4),
        ),
    ),
    # overflow on transferred amounts
    (
        ChannelValues(
            deposit=35,
            withdrawn=5,
            transferred=MAX_UINT256 - 15,
            locked_amounts=LockedAmounts(claimable_locked=3, unclaimable_locked=1),
        ),
        ChannelValues(
            deposit=40,
            withdrawn=10,
            transferred=MAX_UINT256 - 5,
            locked_amounts=LockedAmounts(claimable_locked=5, unclaimable_locked=1),
        ),
    ),
    # overflow on transferred amount
    (
        ChannelValues(
            deposit=35,
            withdrawn=5,
            transferred=0,
            locked_amounts=LockedAmounts(claimable_locked=4, unclaimable_locked=0),
        ),
        ChannelValues(
            deposit=40,
            withdrawn=10,
            transferred=MAX_UINT256 - 5,
            locked_amounts=LockedAmounts(claimable_locked=0, unclaimable_locked=6),
        ),
    ),
    # overflow on transferred amount
    (
        ChannelValues(
            deposit=40,
            withdrawn=10,
            transferred=0,
            locked_amounts=LockedAmounts(claimable_locked=6, unclaimable_locked=0),
        ),
        ChannelValues(
            deposit=35,
            withdrawn=5,
            transferred=MAX_UINT256 - 15,
            locked_amounts=LockedAmounts(claimable_locked=1, unclaimable_locked=3),
        ),
    ),
    # overflow on transferred amount
    (
        ChannelValues(
            deposit=35,
            withdrawn=5,
            transferred=20020,
            locked_amounts=LockedAmounts(claimable_locked=200000, unclaimable_locked=200),
        ),
        ChannelValues(deposit=40, withdrawn=10, transferred=MAX_UINT256 - 5),
    ),
    # overflow on transferred amount, overflow on netted transfer + deposit
    (
        ChannelValues(
            deposit=35,
            withdrawn=5,
            transferred=20,
            locked_amounts=LockedAmounts(claimable_locked=200, unclaimable_locked=200000),
        ),
        ChannelValues(deposit=40, withdrawn=10, transferred=MAX_UINT256 - 5),
    ),
]
| 40.183727
| 99
| 0.591182
| 1,429
| 15,310
| 6.229531
| 0.142057
| 0.094361
| 0.119748
| 0.157268
| 0.800045
| 0.793642
| 0.774657
| 0.746686
| 0.717479
| 0.684228
| 0
| 0.052763
| 0.346375
| 15,310
| 380
| 100
| 40.289474
| 0.836814
| 0.302874
| 0
| 0.805369
| 0
| 0
| 0.006231
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.003356
| 0
| 0.003356
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e73ed25eb52a90c0527f857bdddfbb88c84a3e63
| 219
|
py
|
Python
|
rasa_nlu/extractors/__init__.py
|
dharampal/rasa_nlu
|
202b9041393a3f0e5667e3a33e18c661bd695232
|
[
"Apache-2.0"
] | 1
|
2019-06-12T08:21:32.000Z
|
2019-06-12T08:21:32.000Z
|
rasa_nlu/extractors/__init__.py
|
dharampal/rasa_nlu
|
202b9041393a3f0e5667e3a33e18c661bd695232
|
[
"Apache-2.0"
] | null | null | null |
rasa_nlu/extractors/__init__.py
|
dharampal/rasa_nlu
|
202b9041393a3f0e5667e3a33e18c661bd695232
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import object
class EntityExtractor(object):
    """Marker base class for rasa NLU entity-extractor components.

    Defines no behavior of its own; concrete extractors subclass it.
    """
    pass
| 21.9
| 39
| 0.853881
| 27
| 219
| 6.222222
| 0.518519
| 0.238095
| 0.380952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.13242
| 219
| 9
| 40
| 24.333333
| 0.884211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.142857
| 0.714286
| 0
| 0.857143
| 0.142857
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 7
|
82163a8090773c006ca92acdb063527b642818c0
| 131
|
py
|
Python
|
api/src/application/socrata/errors.py
|
iliaskaras/housing-units
|
2b0d78fcb42629ce7530b2da556ebd550cc7bec8
|
[
"MIT"
] | null | null | null |
api/src/application/socrata/errors.py
|
iliaskaras/housing-units
|
2b0d78fcb42629ce7530b2da556ebd550cc7bec8
|
[
"MIT"
] | null | null | null |
api/src/application/socrata/errors.py
|
iliaskaras/housing-units
|
2b0d78fcb42629ce7530b2da556ebd550cc7bec8
|
[
"MIT"
] | null | null | null |
from application.infrastructure.error.errors import ValidationError
class SocrataDatasetDownloadError(ValidationError):
    """Raised when downloading a dataset from the Socrata API fails."""
    pass
| 21.833333
| 67
| 0.854962
| 11
| 131
| 10.181818
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.099237
| 131
| 5
| 68
| 26.2
| 0.949153
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
68bf06663792e6431427bfd9d213fb42644b4d97
| 1,629
|
py
|
Python
|
kameleon_rks/densities/test_gaussian.py
|
ingmarschuster/kameleon_rks
|
129b8ee4151d310dd7a1bdf454eb58bb80c43435
|
[
"MIT"
] | 3
|
2018-08-26T09:09:29.000Z
|
2021-04-16T06:08:27.000Z
|
kameleon_rks/densities/test_gaussian.py
|
ingmarschuster/kameleon_rks
|
129b8ee4151d310dd7a1bdf454eb58bb80c43435
|
[
"MIT"
] | null | null | null |
kameleon_rks/densities/test_gaussian.py
|
ingmarschuster/kameleon_rks
|
129b8ee4151d310dd7a1bdf454eb58bb80c43435
|
[
"MIT"
] | 1
|
2019-10-24T22:22:39.000Z
|
2019-10-24T22:22:39.000Z
|
from numpy.testing.utils import assert_allclose
from kameleon_rks.densities.gaussian import log_gaussian_pdf_multiple, \
log_gaussian_pdf
import numpy as np
def test_log_gaussian_pdf_multiple_equals_log_gaussian_pdf_looped_full_cov():
    """log_gaussian_pdf_multiple must agree with log_gaussian_pdf applied
    row-by-row when a full (Cholesky-factored) covariance is used.

    Fix: removed the Python-2-only ``print log_pdfs`` debug statement, which
    is a SyntaxError on Python 3 and was stray debug output anyway.
    """
    N = 100
    D = 3
    X = np.random.randn(N, D)
    mu = np.random.randn(D)
    # Cholesky factor of a positive-definite matrix derived from the data
    L_C = np.linalg.cholesky(np.dot(X.T, X) + np.eye(D))
    cov_scaling = 2.

    log_pdfs = log_gaussian_pdf_multiple(X, mu, L_C, is_cholesky=True, cov_scaling=cov_scaling)
    grads = log_gaussian_pdf_multiple(X, mu, L_C, is_cholesky=True, compute_grad=True, cov_scaling=cov_scaling)

    for i, x in enumerate(X):
        log_pdf = log_gaussian_pdf(x, mu, L_C, is_cholesky=True, cov_scaling=cov_scaling)
        grad = log_gaussian_pdf(x, mu, L_C, is_cholesky=True, compute_grad=True, cov_scaling=cov_scaling)

        assert_allclose(log_pdf, log_pdfs[i])
        assert_allclose(grad, grads[i])
def test_log_gaussian_pdf_multiple_equals_log_gaussian_pdf_looped_isotropic_cov():
    """log_gaussian_pdf_multiple must agree with log_gaussian_pdf applied
    row-by-row when the default isotropic covariance is used.

    Fix: removed the Python-2-only ``print log_pdfs`` debug statement, which
    is a SyntaxError on Python 3 and was stray debug output anyway.
    """
    N = 100
    D = 3
    X = np.random.randn(N, D)
    mu = np.random.randn(D)
    cov_scaling = 2.

    log_pdfs = log_gaussian_pdf_multiple(X, mu, cov_scaling=cov_scaling)
    grads = log_gaussian_pdf_multiple(X, mu, compute_grad=True, cov_scaling=cov_scaling)

    for i, x in enumerate(X):
        log_pdf = log_gaussian_pdf(x, mu, cov_scaling=cov_scaling)
        grad = log_gaussian_pdf(x, mu, compute_grad=True, cov_scaling=cov_scaling)

        assert_allclose(log_pdf, log_pdfs[i])
        assert_allclose(grad, grads[i])
| 33.9375
| 111
| 0.70043
| 261
| 1,629
| 4.011494
| 0.195402
| 0.17192
| 0.187202
| 0.152818
| 0.833811
| 0.833811
| 0.833811
| 0.833811
| 0.833811
| 0.833811
| 0
| 0.007734
| 0.206262
| 1,629
| 48
| 112
| 33.9375
| 0.802011
| 0
| 0
| 0.545455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151515
| 0
| null | null | 0
| 0.090909
| null | null | 0.060606
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
68c3b3b6e26f4e54ef6d893d76d9ef56ba50d500
| 49,824
|
py
|
Python
|
server/flaskServer/plotter.py
|
jufabeck2202/projektarbeit-iobased-login
|
61486e8ecef2c66717f1bd0838cba79eed6cd323
|
[
"MIT"
] | 1
|
2020-12-30T18:11:27.000Z
|
2020-12-30T18:11:27.000Z
|
server/flaskServer/plotter.py
|
SiggiSigmann/projektarbeit-iobased-login
|
61486e8ecef2c66717f1bd0838cba79eed6cd323
|
[
"MIT"
] | null | null | null |
server/flaskServer/plotter.py
|
SiggiSigmann/projektarbeit-iobased-login
|
61486e8ecef2c66717f1bd0838cba79eed6cd323
|
[
"MIT"
] | null | null | null |
import io
import random
import sys
import json
import networkx as nx
import datetime
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib import rcParams
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
import dbconnector.dbconnector as dbcon
from subnet import Subnet
###
# create plots for website
###
class Plotter():
    def __init__(self, datadb, subnet):
        """Store the collaborators used by the plotting methods.

        :param datadb: database connector used to fetch measurement data
        :param subnet: Subnet helper object (kept for use by plot methods)
        """
        self.datadb = datadb
        self.sub = subnet
        self.font_size = 6  # tick-label font size applied in create_image
#create diagram corresponding to filename
def create_image(self, image_name, dark = 1):
if dark:
plt.style.use('dark_background')
rcParams.update({'figure.autolayout': True})
else:
plt.style.use('default')
rcParams.update({'figure.autolayout': True})
#create special image
if image_name == "measurements_frequenc.png":
fig = Figure()
fig = self.measurements_frequenc()
plt.close('all')
return fig
#create special image
if image_name == "measurements_amount.png":
fig = Figure()
fig = self.measurements_amount()
plt.close('all')
return fig
matplotlib.rc('xtick', labelsize=self.font_size)
matplotlib.rc('ytick', labelsize=self.font_size)
#0: name (total => all, name => only for this person)
#1: diagramtype
#2: diagramsubtype
#e.g. Total_2_2.png
#split filename
parts = image_name.split('_')
fig_number = int(parts[1])
fig_subplot =int(parts[2])
if len(parts) > 3:
from_date = parts[3]
end = parts[4].split(".")
to_date = end[0]
else:
from_date = self.datadb.get_first_measurement(parts[0])
now = datetime.datetime.now()
to_date = now.strftime("%Y-%m-%d")
#creat plot
fig = Figure()
if(fig_number == 0):
if(fig_subplot == 0):
fig = self.distance_between_measurement(parts[0], from_date, to_date)
elif(fig_subplot == 1):
fig = self.distance_between_measurement_minutes(parts[0], from_date, to_date)
elif(fig_subplot == 2):
fig = self.measurement_during_day(parts[0], from_date, to_date)
elif(fig_subplot == 3):
fig = self.measurement_during_hour(parts[0], from_date, to_date)
else:
fig = self._create_random_figure()
elif(fig_number == 1):
if(fig_subplot == 0):
fig = self.ip_distribution(parts[0], from_date, to_date)
elif(fig_subplot == 1):
fig = self.ip_distribution_trace(parts[0], from_date, to_date)
elif(fig_subplot == 2):
fig = self.isp_distribution(parts[0], from_date, to_date)
elif(fig_subplot == 3):
fig = self.isp_distribution_in_trace(parts[0], from_date, to_date)
else:
fig = self._create_random_figure()
elif(fig_number == 2):
if(fig_subplot== 0):
fig = self.ip_vs_hour(parts[0], from_date, to_date)
elif(fig_subplot == 1):
fig = self.ip_in_trace_vs_hour(parts[0], from_date, to_date)
elif(fig_subplot== 2):
fig = self.isp_vs_time(parts[0], from_date, to_date)
elif(fig_subplot== 3):
fig = self.isp_in_trace_vs_time(parts[0], from_date, to_date)
else:
fig = self._create_random_figure()
elif(fig_number == 3):
if(fig_subplot == 0):
fig = self.ip_change(parts[0], from_date, to_date)
elif(fig_subplot== 1):
fig = self.ip_change_vs_time(parts[0], from_date, to_date)
elif(fig_subplot== 2):
fig = self.ip_change_vs_time_vs_frequency(parts[0], from_date, to_date)
elif(fig_subplot== 3):
fig = self.isp_change(parts[0], from_date, to_date)
elif(fig_subplot == 4):
fig = self.isp_change_graph(parts[0], from_date, to_date, dark)
elif(fig_subplot == 5):
fig = self.isp_change_vs_hour(parts[0], from_date, to_date)
else:
fig = self._create_random_figure()
elif(fig_number == 4):
if(fig_subplot == 0):
fig = self.city_distribution(parts[0], from_date, to_date)
elif(fig_subplot== 1):
fig = self.city_vs_ip(parts[0], from_date, to_date)
elif(fig_subplot== 2):
fig = self.city_change(parts[0], from_date, to_date)
elif(fig_subplot== 3):
fig = self.city_change_vs_time_vs_frequency(parts[0], from_date, to_date)
elif(fig_subplot== 4):
fig = self.city_graph(parts[0], from_date, to_date, dark)
elif(fig_subplot== 5):
fig = self.city_vs_isp(parts[0], from_date, to_date)
else:
fig = self._create_random_figure()
else:
fig = self._create_random_figure()
plt.close('all')
return fig
#create image which shows how many measurements where taken in the last 20 days
def measurements_frequenc(self):
measurements=self.datadb.get_measurements_per_day_last_20()
measurement_per_person = {}
for i in measurements:
if i[0] not in measurement_per_person.keys():
measurement_per_person[i[0]] = [0 for k in range(20)]
if i[1] == 20:
continue
measurement_per_person[i[0]][19-i[1]] += i[2]
#create figure
fig, axis = plt.subplots()
for k in measurement_per_person.keys():
axis.plot(range(len(measurement_per_person[k])), measurement_per_person[k], label=k)
axis.legend(loc="lower left")
#description
#axis.set_title('Time between measurements (hour based)')
axis.set_xlabel('Days sice today')
#axis.set_ylabel('Percent')
#set how many lables where needed and text for it
axis.set_xticks(range(20))
axis.set_xticklabels([i-19 for i in range(20)])
return fig
#diagram shows how many measurements everon had in the last 20 days
def measurements_amount(self):
measurements=self.datadb.get_measurements_per_day_last_20()
persodata=self.datadb.get_persons()
measurement_per_person = {}
for i in measurements:
if i[0] not in measurement_per_person.keys():
measurement_per_person[i[0]] = [0 for k in range(20)]
measurement_per_person[i[0]][19-i[1]] += i[2]
aggregated = {}
for person in measurement_per_person.keys():
aggregated[person] = [0 for k in range(20)]
amount = 0
for p in persodata["persons"]:
if p["name"] == person:
amount = int(p["number"])
aggregated[person][19] = amount
for i in range(len(aggregated[person])-1):
aggregated[person][18-i] = aggregated[person][19-i] - measurement_per_person[person][19-i]
#create figure
fig, axis = plt.subplots()
for k in aggregated.keys():
axis.plot(range(len(aggregated[k])), aggregated[k], label=k)
axis.legend(loc="upper left")
#description
#axis.set_title('Time between measurements (hour based)')
axis.set_xlabel('Days sice today')
#axis.set_ylabel('Percent')
#set how many lables where needed and text for it
axis.set_xticks(range(20))
axis.set_xticklabels([i-19 for i in range(20)])
return fig
#create rondom plot
def _create_random_figure(self, person="total", dark=1):
fig, axis = plt.subplots()
xs = range(100)
ys = [random.randint(1, 50) for x in xs]
#axis.set_title('Smarts')
axis.set_xlabel('random')
axis.set_ylabel('random')
axis.plot(xs, ys)
return fig
#create plot that shows time between measurements
def distance_between_measurement(self, person, from_date, to_date):
#get timestamps from db
timestamps = self.datadb.get_person_timestamps(person, from_date, to_date)
#init count array
total_count=[0 for i in range(24)]
#calculate difference between two timestamps and count
for i in range(0,len(timestamps)-1):
t1 = int(timestamps[i][1].strftime("%H"))
t2 = int(timestamps[i+1][1].strftime("%H"))
idx = abs(t2-t1)
total_count[idx] = total_count[idx]+1
#calc percentage per entry
values=[0.0 for i in range(24)]
sum_total = sum(total_count)
#avoide devicion with 0
if sum_total == 0:
values = total_count
else:
#calc percentage
for i in range(len(total_count)):
values[i] = total_count[i] / sum_total
#create label
labels=[i for i in range(24)]
#create figure
fig, axis = plt.subplots()
axis.bar(labels, values)
#description
#axis.set_title('Time between measurements (hour based)')
axis.set_xlabel('Hours between measurements')
axis.set_ylabel('Percent')
#set how many lables where needed and text for it
axis.set_xticks(labels)
axis.set_xticklabels(labels)
return fig
# distance between measurements in minutes when they are less than one hour apart
def distance_between_measurement_minutes(self, person, from_date, to_date):
#get timestamps from db
timestamps = self.datadb.get_person_timestamps(person, from_date, to_date)
#init count array
total_count=[0 for i in range(60)]
#calculate difference between two timestamps and count
for i in range(0,len(timestamps)-1):
t1 = int(timestamps[i][1].strftime("%H"))
t2 = int(timestamps[i+1][1].strftime("%H"))
idx = abs(t2-t1)
#check if distance is 0 ( 0 hours apart)
if idx == 0:
t1 = int(timestamps[i][1].strftime("%M"))
t2 = int(timestamps[i+1][1].strftime("%M"))
idx = abs(t2-t1)
total_count[idx] = total_count[idx]+1
#calc percentage per entry
values=[0.0 for i in range(60)]
sum_total = sum(total_count)
#avoide devicion with 0
if sum_total == 0:
values = total_count
else:
#calc percentage
for i in range(len(total_count)):
values[i] = total_count[i] / sum_total
#create label
labels=[i for i in range(60)]
#create figure
fig, axis = plt.subplots()
axis.bar(labels, values)
#description
#axis.set_title('Time between measurements (hour based)')
axis.set_xlabel('Minutes between measurements')
axis.set_ylabel('Percent')
#set how many lables where needed and text for it
axis.set_xticks(labels)
axis.set_xticklabels(labels)
return fig
#create plot to display how many measurements where made per weekday
def measurement_during_day(self, person, from_date, to_date):
#get timestamps from db
timestamps = self.datadb.get_person_timestamps(person, from_date, to_date)
#init count array
total_count=[0 for i in range(7)]
#count weekdays
for i in range(0,len(timestamps)):
twday = int(timestamps[i][1].strftime("%w"))
total_count[twday] = total_count[twday]+1
#calc percentage per entry
values=[0.0 for i in range(7)]
sum_total = sum(total_count)
#avoide devicion with 0
if sum_total == 0:
values = total_count
else:
#calc percentage
for i in range(len(total_count)):
values[i] = total_count[i] / sum_total
#create label
labels=[i for i in range(7)]
#create figure
fig, axis = plt.subplots()
axis.bar(labels, values)
#description
#axis.set_title('Measurement Day')
#axis.set_xlabel('Week Day')
axis.set_ylabel('Percent')
#set how many lables where needed and text for it
axis.set_xticks(labels)
axis.set_xticklabels(["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"])
return fig
#create a plot which sohows measurement per hour
def measurement_during_hour(self, person, from_date, to_date):
#get timestamps from db
timestamps = self.datadb.get_person_timestamps(person, from_date, to_date)
#init count array
total_count=[0 for i in range(24)]
#calculate difference between two timestamps and count
for i in range(0,len(timestamps)):
t1 = int(timestamps[i][1].strftime("%H"))
idx = t1
total_count[idx] = total_count[idx]+1
#calc percentage per entry
values=[0.0 for i in range(24)]
sum_total = sum(total_count)
#avoide devicion with 0
if sum_total == 0:
values = total_count
else:
#calc percentage
for i in range(len(total_count)):
values[i] = total_count[i] / sum_total
#create label
labels=[i for i in range(24)]
#create figure
fig, axis = plt.subplots()
axis.bar(labels, values)
#description
#axis.set_title('Time between measurements (hour based)')
axis.set_xlabel('Time of day')
axis.set_ylabel('Percent')
#set how many lables where needed and text for it
axis.set_xticks(labels)
axis.set_xticklabels(labels)
return fig
#create diagram which shows ip adresses of the user and how often it was used
def ip_distribution(self, person, from_date, to_date):
ips = self.datadb.get_ip_address_distribution(person, from_date, to_date)
label = []
total = []
#fill array
for i in ips:
label.append(i[0])
total.append(i[1])
#calc percentage per entry
values=[0.0 for i in range(len(total))]
sum_total = sum(total)
#avoide devicion with 0
if sum_total == 0:
values = total
else:
#calc percentage
for i in range(len(total)):
values[i] = total[i] / sum_total
#check if to big
if(len(values) > 20):
label=label[:20]
values=values[:20]
#create figure
fig, axis = plt.subplots()
axis.barh(range(len(label)), values)
#description
#axis.set_title('IP Addresses form user')
axis.set_xlabel('Percent')
#axis.set_ylabel('Addresses')
#set how many lables where needed and text for it
axis.set_yticks(range(len(label)))
axis.set_yticklabels(label)
return fig
#create plot which shows ip adresses in trace and how often it was used
def ip_distribution_trace(self, person, from_date, to_date):
trace_ip = self.datadb.get_ip_address_in_trace_distribution(person, from_date, to_date)
own_ip = self.datadb.get_ip_address_distribution(person, from_date, to_date)
#create list of device ip's to filter them out of trace
ips = []
for i in own_ip:
if i[0] == '-' : continue
ips.append(i[0])
label = []
total = []
for i in trace_ip:
if i[0] in ips: continue
if i[0] == '-': continue
label.append(i[0])
total.append(i[1])
#calc percentage per entry
values=[0.0 for i in range(len(total))]
sum_total = sum(total)
#avoide devicion with 0
if sum_total == 0:
values = total
else:
#calc percentage
for i in range(len(total)):
values[i] = total[i] / sum_total
#check if to big
if(len(values) > 20):
label=label[:20]
values=values[:20]
#create figure
fig, axis = plt.subplots()
axis.barh(range(len(label)), values)
#description
#axis.set_title('IP-Addresses in trace')
axis.set_xlabel('Percent')
#axis.set_ylabel('Addresses')
#set how many lables where needed and text for it
axis.set_yticks(range(len(label)))
axis.set_yticklabels(label)
return fig
#create diagram which shows distribution of ISP of the end device
def isp_distribution(self, person, from_date, to_date):
timestamps = self.datadb.get_ip_address_distribution(person, from_date, to_date)
#store which ip was used and how often
labels_old = []
size_old = []
for i in timestamps:
if i[0] == '-': continue
labels_old.append(i[0])
size_old.append(i[1])
#get isp of ip and sum up how often it was used
label = []
size = []
for i in range(len(labels_old)):
owner = self.sub.find_Ownder(labels_old[i])
if owner not in label:
label.append(owner)
size.append(size_old[i])
else:
idx = label.index(owner)
size[idx] += size_old[i]
#calc percentage per entry
values=[0.0 for i in range(len(size))]
sum_total = sum(size)
#avoide devicion with 0
if sum_total == 0:
values = size
else:
#calc percentage
for i in range(len(size)):
values[i] = size[i] / sum_total
#check if to big
if(len(values) > 20):
label=label[:20]
values=values[:20]
#create figure
fig, axis = plt.subplots()
axis.barh(range(len(label)), values)
#description
#axis.set_title('ISP\'s of IP-Addresses')
axis.set_xlabel('Percent')
#set how many lables where needed and text for it
axis.set_yticks(range(len(label)))
axis.set_yticklabels(label)
return fig
#create diagram which shows distribution of ISP of the trace addresses
def isp_distribution_in_trace(self, person, from_date, to_date):
timestamps = self.datadb.get_ip_address_in_trace_distribution(person, from_date, to_date)
own_ip = self.datadb.get_ip_address_distribution(person, from_date, to_date)
#create list of device ip's to filter them out of trace
ips = []
for i in own_ip:
if i[0] == '-' : continue
ips.append(i[0])
#store which ip was used and how often
labels_old = []
size_old = []
for i in timestamps:
if i[0] in ips: continue
if i[0] == '-': continue
labels_old.append(i[0])
size_old.append(i[1])
#get isp of ip and sum up how often it was used
label = []
size = []
for i in range(len(labels_old)):
owner = self.sub.find_Ownder(labels_old[i])
if owner not in label:
label.append(owner)
size.append(size_old[i])
else:
idx = label.index(owner)
size[idx] += size_old[i]
#calc percentage per entry
values=[0.0 for i in range(len(size))]
sum_total = sum(size)
#avoide devicion with 0
if sum_total == 0:
values = size
else:
#calc percentage
for i in range(len(size)):
values[i] = size[i] / sum_total
#check if to big
if(len(values) > 20):
label=label[:20]
values=values[:20]
#create figure
fig, axis = plt.subplots()
axis.barh(range(len(label)), values)
#description
#axis.set_title('ISP\'s of IP-Addresses in Trace')
axis.set_xlabel('Percent')
#set how many lables where needed and text for it
axis.set_yticks(range(len(label)))
axis.set_yticklabels(label)
return fig
#create ip vs time scatter
def ip_vs_hour(self, person, from_date, to_date):
ips = self.datadb.get_ip_and_time(person, from_date, to_date)
label = []
x = [0 for i in range(len(ips))]
y = [0 for i in range(len(ips))]
#calculate x,y coordinates for dots
for i in ips:
if i[0] not in label:
label.append(i[0])
time = int(i[1].strftime("%H"))
x.append(time)
y.append(label.index(i[0]))
#create figure
fig, axis = plt.subplots()
axis.scatter(x,y)
#description
#axis.set_title('ISP\'s of IP-Addresses in Trace')
axis.set_xlabel('Time of day')
#set how many lables where needed and text for it
axis.set_yticks(range(len(label)))
axis.set_yticklabels(label)
axis.set_xticks(range(24))
axis.set_xticklabels(range(24))
return fig
#create ip in trace vs time
def ip_in_trace_vs_hour(self, person, from_date, to_date):
timestamps = self.datadb.get_ip_and_time_trace(person, from_date, to_date)
own_ip = self.datadb.get_ip_address_distribution(person, from_date, to_date)
#create list of device ip's to filter them out of trace
ips = []
for i in own_ip:
if i[0] == '-' : continue
ips.append(i[0])
label = []
x = []
y = []
#calculate x,y coordinates for dots
for i in timestamps:
if i[0] in ips: continue
if i[0] == '-' : continue
if i[0] not in label:
label.append(i[0])
time = int(i[1].strftime("%H"))
x.append(time)
y.append(label.index(i[0]))
#create figure
fig, axis = plt.subplots()
axis.scatter(x,y)
#description
#axis.set_title('ISP\'s of IP-Addresses in Trace')
axis.set_xlabel('Time of day')
#set how many lables where needed and text for it
axis.set_yticks(range(len(label)))
axis.set_yticklabels(label)
axis.set_xticks(range(24))
axis.set_xticklabels(range(24))
return fig
#create ISP vs time graph
def isp_vs_time(self, person, from_date, to_date):
timestamps = self.datadb.get_ip_and_time(person, from_date, to_date)
label = []
x = []
y = []
#calculate x,y coordinates for dots
for i in timestamps:
isp = self.sub.find_Ownder(i[0])
#check if ISP already exists in label value
if isp not in label:
label.append(isp)
time = int(i[1].strftime("%H"))
x.append(time)
y.append(label.index(isp))
#create figure
fig, axis = plt.subplots()
axis.scatter(x,y)
#description
#axis.set_title('ISP\'s of IP-Addresses in Trace')
axis.set_xlabel('Time of day')
#set how many lables where needed and text for it
axis.set_yticks(range(len(label)))
axis.set_yticklabels(label)
axis.set_xticks(range(24))
axis.set_xticklabels(range(24))
return fig
#create ISP vs time graph
def isp_in_trace_vs_time(self, person, from_date, to_date):
timestamps = self.datadb.get_ip_and_time_trace(person, from_date, to_date)
own_ip = self.datadb.get_ip_address_distribution(person, from_date, to_date)
#create list of device ip's to filter them out of trace
ips = []
for i in own_ip:
if i[0] == '-': continue
ips.append(i[0])
label = []
x = []
y = []
#calculate x,y coordinates for dots
for i in timestamps:
if i[0] in ips: continue
if i[0] == '-': continue
isp = self.sub.find_Ownder(i[0])
if isp not in label:
label.append(isp)
time = int(i[1].strftime("%H"))
x.append(time)
y.append(label.index(isp))
#create figure
fig, axis = plt.subplots()
axis.scatter(x,y)
#description
#axis.set_title('ISP\'s of IP-Addresses in Trace')
axis.set_xlabel('Time of day')
#set how many lables where needed and text for it
axis.set_yticks(range(len(label)))
axis.set_yticklabels(label)
axis.set_xticks(range(24))
axis.set_xticklabels(range(24))
return fig
#creates graph which shows amount of direct change in ip adresses
def ip_change(self, person, from_date, to_date):
ips = self.datadb.get_ip_sorted_by_time(person, from_date, to_date)
labels = []
values_total = []
#count changes
for i in range(len(ips)-1):
#create label
label = ""
if ips[i][0] == ips[i+1][0]: continue
if ips[i][0] < ips[i+1][0]:
label = ips[i][0] + "<->"+ ips[i+1][0]
else:
label = ips[i+1][0] + "<->"+ ips[i][0]
#check if label exists
if label not in labels:
labels.append(label)
values_total.append(0)
idx = labels.index(label)
values_total[idx] += 1
#calc percentage per entry
values=[0.0 for i in range(len(values_total))]
sum_total = sum(values_total)
#avoide devicion with 0
if sum_total == 0:
values = values_total
else:
#calc percentage
for i in range(len(values_total)):
values[i] = values_total[i] / sum_total
#check if to big
if(len(values) > 20):
labels=labels[:20]
values=values[:20]
#create figure
fig, axis = plt.subplots()
axis.barh(range(len(labels)), values)
#description
#axis.set_title('IP-Addresses in trace')
axis.set_xlabel('Percent')
#axis.set_ylabel('Addresses')
#set how many lables where needed and text for it
axis.set_yticks(range(len(labels)))
axis.set_yticklabels(labels)
return fig
#graph shows when a change occurred
def ip_change_vs_time(self, person, from_date, to_date):
ips = self.datadb.get_ip_and_time(person, from_date, to_date)
labels = []
x = [0 for i in range(len(ips))]
y = [0 for i in range(len(ips))]
#create edge [["from", "to"], ...]
for i in range(len(ips)-1):
#create label
label = ""
ip1 = ips[i][0]
ip2 = ips[i+1][0]
if ip1 ==ip2: continue
if ip1 < ip2:
label = ip1 + "<->"+ ip2
else:
label = ip2 + "<->"+ ip1
time = int(ips[i][1].strftime("%H"))
#add label
if label not in labels:
labels.append(label)
x.append(time)
y.append(labels.index(label))
#create figure
fig, axis = plt.subplots()
axis.scatter(x,y)
#description
#axis.set_title('ISP\'s of IP-Addresses in Trace')
axis.set_xlabel('Time of day')
#set how many lables where needed and text for it
axis.set_yticks(range(len(labels)))
axis.set_yticklabels(labels)
axis.set_xticks(range(24))
axis.set_xticklabels(range(24))
return fig
#graph shows when and how often a change occurred
def ip_change_vs_time_vs_frequency(self, person, from_date, to_date):
ips = self.datadb.get_ip_and_time(person, from_date, to_date)
labels = []
unique = []
x = []
y = []
count = []
for i in range(len(ips)-1):
#create label
label = ""
ip1 = ips[i][0]
ip2 = ips[i+1][0]
if ip1 == ip2: continue
if ip1 < ip2:
label = ip1 + "<->"+ ip2
else:
label = ip2 + "<->"+ ip1
time = int(ips[i][1].strftime("%H"))
#create label with time to count amount of unique changes
label1 = label+str(time)
#add label to display
if label not in labels:
labels.append(label)
#add label to count amount of unique changes
if label1 not in unique:
unique.append(label1)
x.append(time)
y.append(labels.index(label))
count.append(0)
count[unique.index(label1)] += 1
#create array which contains only unique values of the count array
#to make color legend
color_label =[]
for i in count:
if i not in color_label:
color_label.append(i)
#create figure
fig, axis = plt.subplots()
scatter = axis.scatter(x,y, c = count)
#description
#axis.set_title('ISP\'s of IP-Addresses in Trace')
axis.set_xlabel('Time of day')
#set how many lables where needed and text for it
axis.set_yticks(range(len(labels)))
axis.set_yticklabels(labels)
axis.set_xticks(range(24))
axis.set_xticklabels(range(24))
axis.legend(*scatter.legend_elements(), loc="lower left", title="Amount")
return fig
#creates graph which shows amount of direct change in isp
def isp_change(self, person, from_date, to_date):
ips = self.datadb.get_ip_sorted_by_time(person, from_date, to_date)
labels = []
values = []
#count changes
for i in range(len(ips)-1):
#create label
label = ""
ip1 = self.sub.find_Ownder(ips[i][0])
ip2 = self.sub.find_Ownder(ips[i+1][0])
if ip1 ==ip2: continue
if ip1 < ip2:
label = ip1 + "<->"+ ip2
else:
label = ip2 + "<->"+ ip1
#check if label exists
if label not in labels:
labels.append(label)
values.append(0)
idx = labels.index(label)
values[idx] += 1
#make label multiline
for i in range(len(labels)):
label = labels[i].split("<->")
labels[i] = label[0] + "\n<->\n" +label[1]
#create figure
fig, axis = plt.subplots()
axis.barh(range(len(labels)), values)
#description
#axis.set_title('IP-Addresses in trace')
axis.set_xlabel('Total')
#axis.set_ylabel('Addresses')
#set how many lables where needed and text for it
axis.set_yticks(range(len(labels)))
axis.set_yticklabels(labels)
return fig
#create graph which shows change in ISP visualy
    def isp_change_graph(self, person, from_date, to_date, dark=1):
        """Draw the observed ISP changes as a directed graph.

        Each ISP is a node; a pair of opposing edges is added between two ISPs
        the first time a direct change between them is seen. `dark` selects a
        dark colour scheme (1, the default) or matplotlib's default colours.
        """
        ips = self.datadb.get_ip_sorted_by_time(person, from_date, to_date)
        labels = []
        values = []
        #create edge [["from", "to"], ...]
        for i in range(len(ips)-1):
            #create label
            label = ""
            # resolve both addresses of the consecutive pair to their ISPs
            ip1 = self.sub.find_Ownder(ips[i][0])
            ip2 = self.sub.find_Ownder(ips[i+1][0])
            if ip1 ==ip2: continue
            # canonical, order-independent label used only for de-duplication
            if ip1 < ip2:
                label = ip1 + "<->"+ ip2
            else:
                label = ip2 + "<->"+ ip1
            #add edge (both directions) only the first time this pair is seen
            if label not in labels:
                labels.append(label)
                values.append([ip1, ip2])
                values.append([ip2, ip1])
        #create graph
        G = nx.DiGraph()
        G.add_edges_from(values)
        #create figure
        fig, axis = plt.subplots()
        pos = nx.spring_layout(G)
        if dark == 1:
            # dark theme: cyan nodes, yellow edges, white labels on black
            rcParams.update({'figure.autolayout': True})
            nx.draw_networkx_nodes(G, pos, node_color=["cyan" for i in range(len(pos))], ax=axis)
            nx.draw(G,pos, edge_color=["yellow" for i in range(len(pos))] , ax=axis)
            nx.draw_networkx_labels(G, pos, font_color="white", ax=axis, font_size=self.font_size)
            axis.set_facecolor('black')
            fig.set_facecolor('black')
        else:
            # light theme: networkx default colours
            rcParams.update({'figure.autolayout': True})
            nx.draw_networkx_nodes(G, pos, ax=axis)
            nx.draw(G,pos, ax=axis)
            nx.draw_networkx_labels(G, pos, ax=axis, font_size=self.font_size)
        return fig
#create graph which shows when a chang in ISP occurred
def isp_change_vs_hour(self, person, from_date, to_date):
ips = self.datadb.get_ip_and_time_sorted(person, from_date, to_date)
labels = []
x = [0 for i in range(len(ips))]
y = [0 for i in range(len(ips))]
#create edge [["from", "to"], ...]
idx = 0
for i in range(len(ips)-1):
#create label
label = ""
ip1 = self.sub.find_Ownder(ips[i][0])
ip2 = self.sub.find_Ownder(ips[i+1][0])
if ip1 ==ip2: continue
if ip1 < ip2:
label = ip1 + "<->"+ ip2
else:
label = ip2 + "<->"+ ip1
time = int(ips[i][1].strftime("%H"))
#add label
if label not in labels:
labels.append(label)
x[idx] = time
y[idx] = labels.index(label)
idx += 1
#make label multiline
for i in range(len(labels)):
label = labels[i].split("<->")
labels[i] = label[0] + "\n<->\n" +label[1]
#create figure
fig, axis = plt.subplots()
axis.scatter(x,y)
#description
#axis.set_title('ISP\'s of IP-Addresses in Trace')
axis.set_xlabel('Time of day')
#set how many lables where needed and text for it
axis.set_yticks(range(len(labels)))
axis.set_yticklabels(labels)
axis.set_xticks(range(24))
axis.set_xticklabels(range(24))
return fig
#show how ofte a user is at a spesific city
def city_distribution(self, person, from_date, to_date):
cities = self.datadb.get_city_distribution(person, from_date, to_date)
label = []
total = []
#fill array
for i in cities:
if i[0] == '-' : continue
label.append(i[0])
total.append(i[1])
#calc percentage per entry
values=[0.0 for i in range(len(total))]
sum_total = sum(total)
#avoide devicion with 0
if sum_total == 0:
values = total
else:
#calc percentage
for i in range(len(total)):
values[i] = total[i] / sum_total
#check if to big
if(len(values) > 20):
label=label[:20]
values=values[:20]
#create figure
fig, axis = plt.subplots()
axis.barh(range(len(label)), values)
#description
#axis.set_title('IP Addresses form user')
axis.set_xlabel('Percent')
#axis.set_ylabel('Addresses')
#set how many lables where needed and text for it
axis.set_yticks(range(len(label)))
axis.set_yticklabels(label)
return fig
#shows which ip was used at which city
def city_vs_ip(self, person, from_date, to_date):
ip_cities = self.datadb.get_ip_and_city(person, from_date, to_date)
label_ip = []
label_city = []
x = [0 for i in range(len(ip_cities))]
y = [0 for i in range(len(ip_cities))]
#calculate x,y coordinates for dots
for i in ip_cities:
if i[0] == '-' : continue
if i[1] == '-' : continue
if i[0] not in label_ip:
label_ip.append(i[0])
if i[1] not in label_city:
label_city.append(i[1])
x.append(label_city.index(i[1]))
y.append(label_ip.index(i[0]))
#create figure
fig, axis = plt.subplots()
axis.scatter(x,y)
#description
#axis.set_title('ISP\'s of IP-Addresses in Trace')
#axis.set_xlabel('City')
#set how many lables where needed and text for it
axis.set_yticks(range(len(label_ip)))
axis.set_yticklabels(label_ip)
axis.set_xticks(range(len(label_city)))
axis.set_xticklabels(label_city)
return fig
#show chang in city
def city_change(self, person, from_date, to_date):
cities = self.datadb.get_city_sorted(person, from_date, to_date)
labels = []
values_total = []
#count changes
for i in range(len(cities)-1):
if cities[i][0] == '-' : continue
if cities[i+1][0] == '-' : continue
#create label
label = ""
if cities[i][0] == cities[i+1][0]: continue
if cities[i][0] < cities[i+1][0]:
label = cities[i][0] + "<->"+ cities[i+1][0]
else:
label = cities[i+1][0] + "<->"+ cities[i][0]
#check if label exists
if label not in labels:
labels.append(label)
values_total.append(0)
idx = labels.index(label)
values_total[idx] += 1
#calc percentage per entry
values=[0.0 for i in range(len(values_total))]
sum_total = sum(values_total)
#avoide devicion with 0
if sum_total == 0:
values = values_total
else:
#calc percentage
for i in range(len(values_total)):
values[i] = values_total[i] / sum_total
#check if to big
if(len(values) > 20):
label=label[:20]
values=values[:20]
#create figure
fig, axis = plt.subplots()
axis.barh(range(len(labels)), values)
#description
#axis.set_title('IP-Addresses in trace')
axis.set_xlabel('Percent')
#axis.set_ylabel('Addresses')
#set how many lables where needed and text for it
axis.set_yticks(range(len(labels)))
axis.set_yticklabels(labels)
return fig
#show how often the city was changed an when
def city_change_vs_time_vs_frequency(self, person, from_date, to_date):
cities = self.datadb.get_city_time(person, from_date, to_date)
labels = []
unique = []
x = []
y = []
count = []
for i in range(len(cities)-1):
if cities[i][0] == '-' : continue
if cities[i+1][0] == '-' : continue
#create label
label = ""
city1 = cities[i][0]
city2 = cities[i+1][0]
if city1 == city2: continue
if city1 < city2:
label = city1 + "<->"+ city2
else:
label = city2 + "<->"+ city1
time = int(cities[i][1].strftime("%H"))
#create label with time to count amount of unique changes
label1 = label+str(time)
#add label to display
if label not in labels:
labels.append(label)
#add label to count amount of unique changes
if label1 not in unique:
unique.append(label1)
x.append(time)
y.append(labels.index(label))
count.append(0)
count[unique.index(label1)] += 1
#create array which contains only unique values of the count array
#to make color legend
color_label =[]
for i in count:
if i not in color_label:
color_label.append(i)
#create figure
fig, axis = plt.subplots()
scatter = axis.scatter(x,y, c = count)
#description
#axis.set_title('ISP\'s of IP-Addresses in Trace')
axis.set_xlabel('Time of day')
#set how many lables where needed and text for it
axis.set_yticks(range(len(labels)))
axis.set_yticklabels(labels)
axis.set_xticks(range(24))
axis.set_xticklabels(range(24))
axis.legend(*scatter.legend_elements(), loc="lower left", title="Amount")
return fig
#show city graph
    def city_graph(self, person, from_date, to_date, dark=1):
        """Draw the observed city-to-city changes as a directed graph.

        Each city is a node; a pair of opposing edges is added between two
        cities the first time a direct change between them is seen. `dark`
        selects a dark colour scheme (1, the default) or matplotlib defaults.
        """
        cities = self.datadb.get_city_time(person, from_date, to_date)
        labels = []
        values = []
        #create edge [["from", "to"], ...]
        for i in range(len(cities)-1):
            #create label
            label = ""
            ip1 = cities[i][0]
            ip2 = cities[i+1][0]
            # '-' marks an unknown city and is skipped
            if ip1 == '-' : continue
            if ip2 == '-' : continue
            if ip1 ==ip2: continue
            # canonical, order-independent label used only for de-duplication
            if ip1 < ip2:
                label = ip1 + "<->"+ ip2
            else:
                label = ip2 + "<->"+ ip1
            #add edge (both directions) only the first time this pair is seen
            if label not in labels:
                labels.append(label)
                values.append([ip1, ip2])
                values.append([ip2, ip1])
        #create graph
        G = nx.DiGraph()
        G.add_edges_from(values)
        #create figure
        fig, axis = plt.subplots()
        pos = nx.spring_layout(G)
        if dark == 1:
            # dark theme: cyan nodes, yellow edges, white labels on black
            rcParams.update({'figure.autolayout': True})
            nx.draw_networkx_nodes(G, pos, node_color=["cyan" for i in range(len(pos))], ax=axis)
            nx.draw(G,pos, edge_color=["yellow" for i in range(len(pos))] , ax=axis)
            nx.draw_networkx_labels(G, pos, font_color="white", ax=axis, font_size=self.font_size)
            axis.set_facecolor('black')
            fig.set_facecolor('black')
        else:
            # light theme: networkx default colours
            rcParams.update({'figure.autolayout': True})
            nx.draw_networkx_nodes(G, pos, ax=axis)
            nx.draw(G,pos, ax=axis)
            nx.draw_networkx_labels(G, pos, ax=axis, font_size=self.font_size)
        return fig
#show which isp was used in which city
def city_vs_isp(self, person, from_date, to_date):
ip_cities = self.datadb.get_ip_and_city(person, from_date, to_date)
label_ip = []
label_city = []
x = [0 for i in range(len(ip_cities))]
y = [0 for i in range(len(ip_cities))]
#calculate x,y coordinates for dots
idx = 0
for i in ip_cities:
if i[0] == '-' : continue
if i[1] == '-' : continue
isp = self.sub.find_Ownder(i[0])
if isp not in label_ip:
label_ip.append(isp)
if i[1] not in label_city:
label_city.append(i[1])
x[idx] = label_city.index(i[1])
y[idx] = label_ip.index(isp)
idx += 1
#create figure
fig, axis = plt.subplots()
axis.scatter(x,y)
#description
#axis.set_title('ISP\'s of IP-Addresses in Trace')
#axis.set_xlabel('City')
#set how many lables where needed and text for it
axis.set_yticks(range(len(label_ip)))
axis.set_yticklabels(label_ip)
axis.set_xticks(range(len(label_city)))
axis.set_xticklabels(label_city)
return fig
#get json which descripes possible images and description for the images
def get_diagram_json(self, user, from_date, to_date):
json_str = \
'{"categories":['+\
'{"name": "Measurement", "id": "measurement", "images": ['+\
'{"url": "/image/'+user+'_0_0_'+from_date+'_'+to_date+'.png", "alt":"Distance Hour", "description":"Shows how frequently measurements were taken. e.g. 1 and 0.6 means, 60% of the measurements were taken one hour apart."} '+\
',{"url": "/image/'+user+'_0_1_'+from_date+'_'+to_date+'.png", "alt":"Distance Minutes", "description":"Shows how frequently in Minutes when they are less than one hour apart."} '+\
',{"url": "/image/'+user+'_0_2_'+from_date+'_'+to_date+'.png", "alt":"Day", "description":"Shows how many measurements were taken per day of the week."} '+\
',{"url": "/image/'+user+'_0_3_'+from_date+'_'+to_date+'.png", "alt":"Time", "description":"Shows at which time of the day the reqest was send."} '+\
']}'+\
',{"name": "Address distribution", "id": "address_distribution", "images": ['+\
'{"url": "/image/'+user+'_1_0_'+from_date+'_'+to_date+'.png", "alt":"IP Addresses distribution", "description":"Shows distribution of IP-End-Addresses of the user\'s device."}'+\
',{"url": "/image/'+user+'_1_1_'+from_date+'_'+to_date+'.png", "alt":"IP Addresses distribution in trace", "description":"Shows different IP-Addresses of the route to the user, captured by trace."}'+\
',{"url": "/image/'+user+'_1_2_'+from_date+'_'+to_date+'.png", "alt":"ISP distribution", "description":"Shows ISP of the IP-End-Addresses of the user\'s device."}'+\
',{"url": "/image/'+user+'_1_3_'+from_date+'_'+to_date+'.png", "alt":"ISP distribution in trace", "description":"Shows ISP of the IP-Addresses in the trace of the route to the user, captured by trace."}'+\
']}'+\
',{"name": "Address / Time", "id": "address_Time", "images": ['+\
'{"url": "/image/'+user+'_2_0_'+from_date+'_'+to_date+'.png", "alt":"IP / Hour", "description":"Shows which IP-Address was used at which time"}'+\
',{"url": "/image/'+user+'_2_1_'+from_date+'_'+to_date+'.png", "alt":"IP in trace / Hour", "description":"Shows which IP-Address in trace was used at which time"}'+\
',{"url": "/image/'+user+'_2_2_'+from_date+'_'+to_date+'.png", "alt":"ISP / Hour", "description":"Shows which ISP was used at which time"}'+\
',{"url": "/image/'+user+'_2_3_'+from_date+'_'+to_date+'.png", "alt":"ISP in trace / Hour", "description":"Shows which ISP in trace was used at which time"}'+\
']}'+\
',{"name": "Changes in Address", "id": "changes_in_address", "images": ['+\
'{"url": "/image/'+user+'_3_0_'+from_date+'_'+to_date+'.png", "alt":"IP Address changes", "description":"Shows how often a change within IP Adresses occurred"}'+\
',{"url": "/image/'+user+'_3_1_'+from_date+'_'+to_date+'.png", "alt":"IP Address changes / Hour ", "description":"Shows how often a change within IP Adresses occurred and when"}'+\
',{"url": "/image/'+user+'_3_2_'+from_date+'_'+to_date+'.png", "alt":"IP Address changes / Hour / Frequency", "description":"Shows frequency of changes in IP Address"}'+\
',{"url": "/image/'+user+'_3_3_'+from_date+'_'+to_date+'.png", "alt":"ISP changes", "description":"Shows how often change within ISP occurred"}'+\
',{"url": "/image/'+user+'_3_4_'+from_date+'_'+to_date+'.png", "alt":"ISP changes graph", "description":"Shows change in ISP"}'+\
',{"url": "/image/'+user+'_3_5_'+from_date+'_'+to_date+'.png", "alt":"ISP changes / Hour", "description":"Shows when a chang in ISP occurred"}'+\
']}'+\
',{"name": "Geographical", "id": "geographical", "images": ['+\
'{"url": "/image/'+user+'_4_0_'+from_date+'_'+to_date+'.png", "alt":"City distribution", "description":"Show distribution of the Cities visited"} '+\
',{"url": "/image/'+user+'_4_1_'+from_date+'_'+to_date+'.png", "alt":"City / IP", "description":"Shows which IP was used at which City"} '+\
',{"url": "/image/'+user+'_4_2_'+from_date+'_'+to_date+'.png", "alt":"City change", "description":"Shows change in city and how often in occurred"} '+\
',{"url": "/image/'+user+'_4_3_'+from_date+'_'+to_date+'.png", "alt":"City change / Time / Frequency", "description":"Shows change when it occred and how often"} '+\
',{"url": "/image/'+user+'_4_4_'+from_date+'_'+to_date+'.png", "alt":"City graph", "description":"Show change in Cities"} '+\
',{"url": "/image/'+user+'_4_5_'+from_date+'_'+to_date+'.png", "alt":"City / ISP", "description":"Show which ISP was used in which City"} '+\
']}'+\
']}'
return json.loads(json_str)
#create compare json from the get_diagram_json method
#create compare json from the get_diagram_json method
def get_compare_json(self, user1, user2, from_date, to_date):
    """Build a side-by-side comparison JSON for two users.

    Starts from ``get_diagram_json`` for ``user1`` and, for every image
    entry, adds a ``url1`` key pointing at the equivalent image for
    ``user2`` (same diagram indices and date range).

    :param user1: primary user whose diagram JSON is taken as the base
    :param user2: user whose image URLs are added under ``url1``
    :param from_date: start of the date range embedded in the image names
    :param to_date: end of the date range embedded in the image names
    :return: dict of the form ``{"categories": [{"name": ..., "images": [...]}]}``
    """
    base = self.get_diagram_json(user1, from_date, to_date)
    # Every url produced by get_diagram_json starts with this prefix;
    # swapping the prefix (rather than splitting on "_") keeps the
    # remainder intact even if user1 itself contains underscores.
    prefix = "/image/" + user1 + "_"
    categories = []
    for cat in base['categories']:
        images = []
        for img in cat['images']:
            img['url1'] = "/image/" + user2 + "_" + img['url'][len(prefix):]
            images.append(img)
        # NOTE: the category "id" key is intentionally not carried over,
        # matching the original behaviour (only name + images are kept).
        categories.append({"name": cat["name"], "images": images})
    # BUG FIX: previously "categories" was assigned onto the *last*
    # category dict, so the returned object leaked that category's
    # "name" and "images" keys at the top level.
    return {"categories": categories}
| 33.574124
| 244
| 0.548912
| 6,456
| 49,824
| 4.085502
| 0.0522
| 0.035297
| 0.039051
| 0.054671
| 0.838224
| 0.816879
| 0.794093
| 0.762436
| 0.739536
| 0.719366
| 0
| 0.018338
| 0.330182
| 49,824
| 1,483
| 245
| 33.596763
| 0.772001
| 0.151895
| 0
| 0.754135
| 0
| 0.019846
| 0.099314
| 0.002191
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034179
| false
| 0
| 0.015436
| 0
| 0.085998
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
68f608ea5ec32d3e2bc7572167f2de3aec5a48a8
| 11,820
|
py
|
Python
|
tests/test_layer.py
|
ttomasz/tilekiln
|
687d92fee2744d2ec828f3a2e15cc5dc3143c24c
|
[
"0BSD"
] | 25
|
2020-03-15T00:52:37.000Z
|
2022-01-20T01:00:48.000Z
|
tests/test_layer.py
|
ttomasz/tilekiln
|
687d92fee2744d2ec828f3a2e15cc5dc3143c24c
|
[
"0BSD"
] | 17
|
2020-04-02T07:03:56.000Z
|
2021-08-22T13:46:54.000Z
|
tests/test_layer.py
|
ttomasz/tilekiln
|
687d92fee2744d2ec828f3a2e15cc5dc3143c24c
|
[
"0BSD"
] | 4
|
2020-04-06T06:46:11.000Z
|
2021-06-07T18:38:32.000Z
|
from unittest import TestCase
from unittest.mock import Mock
from tilekiln.layer import Layer
from tilekiln.definition import Definition
from tilekiln.database import Database
from fs.memoryfs import MemoryFS
class TestLayer(TestCase):
    """Unit tests for tilekiln's Layer: equality semantics, geometry_type
    exposure, per-zoom definition lookup, field metadata, and tile
    rendering delegation to the database."""

    def test_equals(self):
        """Layer equality covers id, fields, geometry_type (order does not
        matter), zoom ranges and the *contents* of the referenced SQL
        files; the SQL file *names* themselves do not participate."""
        fs1 = MemoryFS()
        fs1.writetext('foo.sql', 'select 1')
        fs2 = MemoryFS()
        # fs2's foo.sql differs from fs1's foo.sql, but fs2's bar.sql
        # has the same contents as fs1's foo.sql.
        fs2.writetext('foo.sql', 'select 2')
        fs2.writetext('bar.sql', 'select 1')
        # Identical definitions on the same filesystem compare equal.
        self.assertEqual(Layer("water",
                               {"fields": {"water": "type of water"},
                                "geometry_type": ["polygon"],
                                "description": "Waterbody and ocean areas",
                                "sql": [{"minzoom": 0, "maxzoom": 4,
                                         "file": "foo.sql"}]
                                }, fs1),
                         Layer("water",
                               {"fields": {"water": "type of water"},
                                "geometry_type": ["polygon"],
                                "description": "Waterbody and ocean areas",
                                "sql": [{"minzoom": 0, "maxzoom": 4,
                                         "file": "foo.sql"}]
                                }, fs1))
        # Only file contents should matter
        self.assertEqual(Layer("water",
                               {"fields": {"water": "type of water"},
                                "description": "Waterbody and ocean areas",
                                "sql": [{"minzoom": 0, "maxzoom": 4,
                                         "file": "foo.sql"}]
                                }, fs1),
                         Layer("water",
                               {"fields": {"water": "type of water"},
                                "description": "Waterbody and ocean areas",
                                "sql": [{"minzoom": 0, "maxzoom": 4,
                                         "file": "bar.sql"}]
                                }, fs2))
        # Order of geometry_type doesn't matter
        self.assertEqual(Layer("water",
                               {"fields": {"water": "type of water"},
                                "geometry_type": ["point", "polygon"],
                                "description": "Waterbody and ocean areas",
                                "sql": [{"minzoom": 0, "maxzoom": 4,
                                         "file": "foo.sql"}]
                                }, fs1),
                         Layer("water",
                               {"fields": {"water": "type of water"},
                                "geometry_type": ["polygon", "point"],
                                "description": "Waterbody and ocean areas",
                                "sql": [{"minzoom": 0, "maxzoom": 4,
                                         "file": "foo.sql"}]
                                }, fs1))
        # id
        # Different layer ids ("land" vs "water") break equality.
        self.assertFalse(Layer("land",
                               {"fields": {"water": "type of water"},
                                "description": "Waterbody and ocean areas",
                                "sql": [{"minzoom": 0, "maxzoom": 4,
                                         "file": "foo.sql"}]
                                }, fs1) ==
                         Layer("water",
                               {"fields": {"water": "type of water"},
                                "description": "Waterbody and ocean areas",
                                "sql": [{"minzoom": 0, "maxzoom": 4,
                                         "file": "foo.sql"}]
                                }, fs1))
        # fields
        # Different field *value* breaks equality.
        self.assertFalse(Layer("water",
                               {"fields": {"water": "type of land"},
                                "description": "Waterbody and ocean areas",
                                "sql": [{"minzoom": 0, "maxzoom": 4,
                                         "file": "foo.sql"}]
                                }, fs1) ==
                         Layer("water",
                               {"fields": {"water": "type of water"},
                                "description": "Waterbody and ocean areas",
                                "sql": [{"minzoom": 0, "maxzoom": 4,
                                         "file": "foo.sql"}]
                                }, fs1))
        # Different field *key* breaks equality.
        self.assertFalse(Layer("water",
                               {"fields": {"land": "type of water"},
                                "description": "Waterbody and ocean areas",
                                "sql": [{"minzoom": 0, "maxzoom": 4,
                                         "file": "foo.sql"}]
                                }, fs1) ==
                         Layer("water",
                               {"fields": {"water": "type of water"},
                                "description": "Waterbody and ocean areas",
                                "sql": [{"minzoom": 0, "maxzoom": 4,
                                         "file": "foo.sql"}]
                                }, fs1))
        # geometry_type
        # Disjoint geometry types break equality.
        self.assertFalse(Layer("water",
                               {"fields": {"water": "type of water"},
                                "description": "Waterbody and ocean areas",
                                "geometry_type": ["polygon"],
                                "sql": [{"minzoom": 0, "maxzoom": 4,
                                         "file": "foo.sql"}]
                                }, fs1) ==
                         Layer("water",
                               {"fields": {"water": "type of water"},
                                "description": "Waterbody and ocean areas",
                                "geometry_type": ["point"],
                                "sql": [{"minzoom": 0, "maxzoom": 4,
                                         "file": "foo.sql"}]
                                }, fs1))
        # A strict superset of geometry types is still unequal.
        self.assertFalse(Layer("water",
                               {"fields": {"water": "type of water"},
                                "description": "Waterbody and ocean areas",
                                "geometry_type": ["polygon", "point"],
                                "sql": [{"minzoom": 0, "maxzoom": 4,
                                         "file": "foo.sql"}]
                                }, fs1) ==
                         Layer("water",
                               {"fields": {"water": "type of water"},
                                "description": "Waterbody and ocean areas",
                                "geometry_type": ["point"],
                                "sql": [{"minzoom": 0, "maxzoom": 4,
                                         "file": "foo.sql"}]
                                }, fs1))
        # zooms
        # Different maxzoom breaks equality.
        self.assertFalse(Layer("water",
                               {"fields": {"water": "type of water"},
                                "description": "Waterbody and ocean areas",
                                "sql": [{"minzoom": 0, "maxzoom": 2,
                                         "file": "foo.sql"}]
                                }, fs1) ==
                         Layer("water",
                               {"fields": {"water": "type of water"},
                                "description": "Waterbody and ocean areas",
                                "sql": [{"minzoom": 0, "maxzoom": 4,
                                         "file": "foo.sql"}]
                                }, fs1))
        # Different minzoom breaks equality.
        self.assertFalse(Layer("water",
                               {"fields": {"water": "type of water"},
                                "description": "Waterbody and ocean areas",
                                "sql": [{"minzoom": 2, "maxzoom": 4,
                                         "file": "foo.sql"}]
                                }, fs1) ==
                         Layer("water",
                               {"fields": {"water": "type of water"},
                                "description": "Waterbody and ocean areas",
                                "sql": [{"minzoom": 0, "maxzoom": 4,
                                         "file": "foo.sql"}]
                                }, fs1))
        # sql
        # Same file name, different contents (fs1 vs fs2) breaks equality.
        self.assertFalse(Layer("water",
                               {"fields": {"water": "type of water"},
                                "description": "Waterbody and ocean areas",
                                "sql": [{"minzoom": 0, "maxzoom": 4,
                                         "file": "foo.sql"}]
                                }, fs1) ==
                         Layer("water",
                               {"fields": {"water": "type of water"},
                                "description": "Waterbody and ocean areas",
                                "sql": [{"minzoom": 0, "maxzoom": 4,
                                         "file": "foo.sql"}]
                                }, fs2))

    def test_geometry_type(self):
        """geometry_type from the config list is exposed as a set."""
        fs = MemoryFS()
        fs.writetext('1.sql', 'select 1')
        layer = Layer("water",
                      {"geometry_type": ["polygon"],
                       "description": "Waterbody and ocean areas",
                       "sql": [{"minzoom": 0, "maxzoom": 4, "file": "1.sql"}]
                       }, fs)
        self.assertEqual(layer.geometry_type, set(["polygon"]))
        layer = Layer("water",
                      {"geometry_type": ["polygon", "point"],
                       "description": "Waterbody and ocean areas",
                       "sql": [{"minzoom": 0, "maxzoom": 4, "file": "1.sql"}]
                       }, fs)
        self.assertEqual(layer.geometry_type, set(["polygon", "point"]))

    def test_definition_for_zoom(self):
        """Each zoom resolves to the Definition whose [minzoom, maxzoom]
        range contains it; zooms past the last range resolve to None."""
        fs = MemoryFS()
        fs.writetext('1.sql', 'select 1')
        fs.writetext('2.sql', 'select 2')
        layer = Layer("water",
                      {"fields": {},
                       "description": "Waterbody and ocean areas",
                       "sql": [{"minzoom": 0, "maxzoom": 4, "file": "1.sql"},
                               {"minzoom": 5, "maxzoom": 8, "file": "2.sql"}]
                       }, fs)
        # Zooms 0..4 map to the first SQL entry (maxzoom is inclusive).
        for i in range(0, 5):
            self.assertEqual(layer.definition_for_zoom(i),
                             Definition("water", "select 1", 0, 4, None, None))
        # Zooms 5..8 map to the second SQL entry.
        for i in range(5, 9):
            self.assertEqual(layer.definition_for_zoom(i),
                             Definition("water", "select 2", 5, 8, None, None))
        # NOTE(review): the second argument to assertIsNone is the failure
        # message, so the trailing None here is redundant (but harmless).
        self.assertIsNone(layer.definition_for_zoom(9), None)

    def test_fields(self):
        """Field metadata from the config is exposed via layer.fields."""
        fs = MemoryFS()
        fs.writetext('1.sql', 'select 1')
        layer = Layer("water",
                      {"fields": {"water": "type of water"},
                       "description": "Waterbody and ocean areas",
                       "sql": [{"minzoom": 0, "maxzoom": 4, "file": "1.sql"}]
                       }, fs)
        self.assertEqual(layer.fields["water"], "type of water")

    def test_render_tile(self):
        """render_tile delegates to Database.generate_tilelayer with the
        Definition matching the tile's zoom and the tile coordinates."""
        fs = MemoryFS()
        fs.writetext('1.sql', 'select 1')
        fs.writetext('2.sql', 'select 2')
        layer = Layer("water",
                      {"fields": {},
                       "description": "Waterbody and ocean areas",
                       "sql": [{"minzoom": 0, "maxzoom": 4, "file": "1.sql"},
                               {"minzoom": 5, "maxzoom": 8, "file": "2.sql"}]
                       }, fs)
        db = Database(Mock())
        # Stub out the database call so only the delegation is exercised.
        db.generate_tilelayer = Mock(return_value=b'foo')
        layer.render_tile((0, 0, 0), db)
        db.generate_tilelayer.assert_called_with(layer.definition_for_zoom(0),
                                                 (0, 0, 0))
| 49.045643
| 79
| 0.350508
| 869
| 11,820
| 4.724971
| 0.088608
| 0.070628
| 0.151242
| 0.184121
| 0.818802
| 0.813444
| 0.811008
| 0.811008
| 0.800779
| 0.800779
| 0
| 0.021608
| 0.510575
| 11,820
| 240
| 80
| 49.25
| 0.688159
| 0.008799
| 0
| 0.813397
| 0
| 0
| 0.244704
| 0
| 0
| 0
| 0
| 0
| 0.086124
| 1
| 0.023923
| false
| 0
| 0.028708
| 0
| 0.057416
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
ec504b30d86a0a358037081e0cefb86fe41b74b0
| 109
|
py
|
Python
|
python/module/calc.py
|
wjiec/packages
|
4ccaf8f717265a1f8a9af533f9a998b935efb32a
|
[
"MIT"
] | null | null | null |
python/module/calc.py
|
wjiec/packages
|
4ccaf8f717265a1f8a9af533f9a998b935efb32a
|
[
"MIT"
] | 1
|
2016-09-15T07:06:15.000Z
|
2016-09-15T07:06:15.000Z
|
python/module/calc.py
|
wjiec/packages
|
4ccaf8f717265a1f8a9af533f9a998b935efb32a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python35
# Demonstrates calling a function through its full dotted path:
# package (calc) -> module (calc) -> function (add).
import calc.calc

result = calc.calc.add(-2, 100)
print(result)
| 13.625
| 29
| 0.715596
| 18
| 109
| 4.333333
| 0.666667
| 0.410256
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.061224
| 0.100917
| 109
| 7
| 30
| 15.571429
| 0.734694
| 0.495413
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.