Schema (one table row per source file). Scalar columns:

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |

Each quality signal then contributes two columns. The `<signal>_quality_signal` columns come first and hold the measured values (all float64, except `qsc_code_num_words_quality_signal`, which is int64, and `qsc_codepython_cate_var_zero_quality_signal`, which is bool). They are followed by the bare `<signal>` columns, which hold 0/1 flags (all int64, except `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`, which are typed null). In column order, the signals prefixed `qsc_code_` are:

num_words, num_chars, mean_word_length, frac_words_unique, frac_chars_top_2grams, frac_chars_top_3grams, frac_chars_top_4grams, frac_chars_dupe_5grams, frac_chars_dupe_6grams, frac_chars_dupe_7grams, frac_chars_dupe_8grams, frac_chars_dupe_9grams, frac_chars_dupe_10grams, frac_chars_replacement_symbols, frac_chars_digital, frac_chars_whitespace, size_file_byte, num_lines, num_chars_line_max, num_chars_line_mean, frac_chars_alphabet, frac_chars_comments, cate_xml_start, frac_lines_dupe_lines, cate_autogen, frac_lines_long_string, frac_chars_string_length, frac_chars_long_word_length, frac_lines_string_concat, cate_encoded_data, frac_chars_hex_words, frac_lines_prompt_comments, frac_lines_assert;

followed by the Python-specific signals prefixed `qsc_codepython_`: cate_ast, frac_lines_func_ratio, cate_var_zero, frac_lines_pass, frac_lines_import, frac_lines_simplefunc, score_lines_no_logic, frac_lines_print. The final two columns are `effective` (string) and `hits` (int64). In the record tables below, "Value" is the `*_quality_signal` column, "Flag" is the bare column, and `python:` rows are the `qsc_codepython_*` signals.
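Several of these signals can be read directly off their names. The sketch below is an illustrative reconstruction, not the pipeline's actual code; it recomputes a few of the character- and word-level fractions for the one-line file in Row 1 below and reproduces its reported values (8 words, mean word length 4.5, 0.625 unique, whitespace 0.068182, alphanum 0.909091), under the assumption that "words" are alphabetic runs and "alphanumeric" includes underscores:

```python
# Illustrative recomputation of a few quality signals (definitions
# inferred from the column names, not taken from the original pipeline).
import re

content = "from .augment_and_mix import augment_and_mix"

words = re.findall(r"[A-Za-z]+", content)            # 8 alphabetic runs
num_words = len(words)                               # 8
mean_word_length = sum(map(len, words)) / num_words  # 4.5
frac_words_unique = len(set(words)) / num_words      # 0.625
frac_chars_whitespace = sum(c.isspace() for c in content) / len(content)  # ~0.068182
# alphanum_fraction evidently counts "word" characters (letters, digits, _):
alphanum_fraction = len(re.findall(r"\w", content)) / len(content)        # ~0.909091
```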
**Row 1: src/data/augmix/__init__.py (Julienbeaulieu/kaggle-computer-vision-competition)**

| Field | Value |
|---|---|
| hexsha | 0d517a25ae90933b6e2d861c6bbb5b1cddd46394 |
| size | 44 |
| ext / lang | py / Python |
| repo_path (stars = issues = forks) | src/data/augmix/__init__.py |
| repo_name | Julienbeaulieu/kaggle-computer-vision-competition |
| repo_head_hexsha | 7bc6bcb8b85d81ff1544040c403e356c0a3c8060 |
| licenses | ["MIT"] |
| max_stars_count | 14 (2020-12-07T22:24:17.000Z to 2022-03-30T05:11:55.000Z) |
| max_issues_count | null (issue event datetimes null) |
| max_forks_count | 4 (2020-02-22T17:54:23.000Z to 2022-01-31T06:41:11.000Z) |
| avg_line_length | 44 |
| max_line_length | 44 |
| alphanum_fraction | 0.909091 |

`content`:

```python
from .augment_and_mix import augment_and_mix
```

Quality signals:

| Signal | Value | Flag |
|---|---|---|
| num_words | 8 | 1 |
| num_chars | 44 | 1 |
| mean_word_length | 4.5 | 0 |
| frac_words_unique | 0.625 | null |
| frac_chars_top_2grams | 0.555556 | 1 |
| frac_chars_top_3grams | 0.722222 | 1 |
| frac_chars_top_4grams | 0 | 0 |
| frac_chars_dupe_5grams through dupe_10grams | 0 | 0 |
| frac_chars_replacement_symbols | 0 | 0 |
| frac_chars_digital | 0 | 0 |
| frac_chars_whitespace | 0.068182 | 0 |
| size_file_byte | 44 | 0 |
| num_lines | 1 | 1 |
| num_chars_line_max | 44 | 0 |
| num_chars_line_mean | 44 | 0 |
| frac_chars_alphabet | 0.878049 | 0 |
| frac_chars_comments | 0 | 0 |
| cate_xml_start | 0 | 0 |
| frac_lines_dupe_lines | 0 | 0 |
| cate_autogen | 0 | 0 |
| frac_lines_long_string | 0 | 0 |
| frac_chars_string_length | 0 | 0 |
| frac_chars_long_word_length | 0 | 0 |
| frac_lines_string_concat | 0 | null |
| cate_encoded_data | 0 | 0 |
| frac_chars_hex_words | 0 | 0 |
| frac_lines_prompt_comments | 0 | 0 |
| frac_lines_assert | 0 | 0 |
| python: cate_ast | 1 | 0 |
| python: frac_lines_func_ratio | 0 | 0 |
| python: cate_var_zero | true | 1 |
| python: frac_lines_pass | 0 | 0 |
| python: frac_lines_import | 1 | 1 |
| python: frac_lines_simplefunc | 0 | 0 |
| python: score_lines_no_logic | 1 | 1 |
| python: frac_lines_print | 0 | 0 |

effective = 0, hits = 8
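Row 1's entire content is a single re-export, a common `__init__.py` idiom that lifts a symbol from a submodule to the package level. A minimal usage sketch (the import root is an assumption; it depends on how `src/` sits on the path):

```python
# With the re-export in src/data/augmix/__init__.py, callers import the
# function from the package rather than reaching into the submodule:
from data.augmix import augment_and_mix

# Equivalent without the re-export:
# from data.augmix.augment_and_mix import augment_and_mix
```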
**Row 2: tests/test_mongobar.py (ingkebil/mongobar)**

| Field | Value |
|---|---|
| hexsha | b4db151f423ac09d9ebd6b2e1f98dba6aebaea1f |
| size | 26,629 |
| ext / lang | py / Python |
| repo_path (stars = issues = forks) | tests/test_mongobar.py |
| repo_name | ingkebil/mongobar |
| repo_head_hexsha | aded858a8280bd5ac34f2e3733d77d0b95bf5c4d |
| licenses | ["MIT"] |
| max_stars_count | 49 (2017-09-01T20:01:11.000Z to 2021-05-12T15:14:17.000Z) |
| max_issues_count | 1 (2017-10-27T09:25:42.000Z to 2017-11-30T21:40:35.000Z) |
| max_forks_count | 1 (2020-07-23T12:46:18.000Z to 2020-07-23T12:46:18.000Z) |
| avg_line_length | 36.378415 |
| max_line_length | 112 |
| alphanum_fraction | 0.612903 |

`content`:

```python
import sys; sys.path.append("../") # noqa
import unittest
import logging
import os
import subprocess
import datetime
import pymongo
from unittest import mock
import mongobar
# Mocked Mongo Client
class MockedMongoDocument(mock.Mock):
pass
class MockedMongoCollection(mock.Mock):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._mocked_documents = [
MockedMongoDocument(),
MockedMongoDocument(),
MockedMongoDocument()
]
def count(self):
return len(self._mocked_documents)
class MockedMongoDatabase(mock.Mock):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._mocked_collections = {
"c1": MockedMongoCollection(),
"c2": MockedMongoCollection(),
"c3": MockedMongoCollection(),
}
def __getitem__(self, key):
return self._mocked_collections[key]
def collection_names(self):
return [key for key in self._mocked_collections.keys()]
class MockedMongoClient(mock.Mock):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._mocked_databases = {
"local": MockedMongoDatabase(),
"d1": MockedMongoDatabase(),
"d2": MockedMongoDatabase(),
"d3": MockedMongoDatabase(),
}
def __getitem__(self, key):
return self._mocked_databases[key]
def database_names(self):
return [key for key in self._mocked_databases.keys()]
# Mocked Backup Metadata
MOCKED_BACKUP_METADATA_1_DB = {
"host": "localhost",
"port": 27017,
"date": datetime.datetime.today().isoformat(),
"databases": [{
"name": "d1",
"collections": [{
"name": "c1",
"document_count": 1
},{
"name": "c2",
"document_count": 1
},
{
"name": "c3",
"document_count": 1
}]
}]
}
MOCKED_BACKUP_METADATA_3_DBS = {
"host": "localhost",
"port": 27017,
"date": datetime.datetime.today().isoformat(),
"databases": [{
"name": "d1",
"collections": [{
"name": "c1",
"document_count": 1
},{
"name": "c2",
"document_count": 1
},
{
"name": "c3",
"document_count": 1
}]
},{
"name": "d2",
"collections": [{
"name": "c1",
"document_count": 1
},{
"name": "c2",
"document_count": 1
},
{
"name": "c3",
"document_count": 1
}]
},{
"name": "d3",
"collections": [{
"name": "c1",
"document_count": 1
},{
"name": "c2",
"document_count": 1
},
{
"name": "c3",
"document_count": 1
}]
}]
}
# Test Mongobar Package
@mock.patch("mongobar.mongobar.pymongo.MongoClient", new_callable=MockedMongoClient)
class TestMongobar(unittest.TestCase):
@classmethod
def setUpClass(cls):
logging.getLogger("mongobar").addHandler(logging.NullHandler())
# generate_backup_name
@mock.patch("mongobar.mongobar.pkgutil.get_data", side_effect=[b"foo", b"bar"])
def test__generate_backup_name(self, *args):
name = mongobar.Mongobar().generate_backup_name()
self.assertEqual(name, "foo-bar")
args[0].assert_called()
# create_pymongo_client
def test__create_pymongo_client__default_connection(self, mongoclient):
m = mongobar.Mongobar()
m.create_pymongo_client()
mongoclient.assert_called_with(
host="localhost",
port=27017
)
def test__create_pymongo_client__custom_connection(self, mongoclient):
m = mongobar.Mongobar()
m.config.add({
"connections": {
"custom": {
"host": "custom",
"port": 27017
}
}
})
m.config.connection = "custom"
m.create_pymongo_client()
mongoclient.assert_called_with(
host="custom",
port=27017
)
def test__create_pymongo_client__auth_options(self, mongoclient):
m = mongobar.Mongobar()
m.config.add({
"connections": {
"default": {
"host": "localhost",
"port": 27017,
"username": "user",
"password": "pass",
"authdb": "authdb"
}
}
})
m.create_pymongo_client()
mongoclient.assert_called_with(
host="localhost",
port=27017,
username="user",
password="pass",
authSource="authdb"
)
def test__create_pymongo_client__connection_error(self, mongoclient):
mongoclient.side_effect = pymongo.errors.PyMongoError()
m = mongobar.Mongobar()
m.config.add({
"connections": {
"default": {
"host": "localhost",
"port": 27017,
"username": "user",
"password": "pass",
"authdb": "authdb"
}
}
})
with self.assertRaises(mongobar.exceptions.ServerConnectionError):
m.create_pymongo_client()
# generate_metadata
def test__generate_metadata__databases_arg(self, mongoclient):
m = mongobar.Mongobar()
metadata = m.generate_metadata(databases=["d1", "d2", "d3"])
self.assertIn("host", metadata)
self.assertIn("port", metadata)
self.assertIn("date", metadata)
self.assertIn("databases", metadata)
for database in metadata["databases"]:
self.assertIn("name", database)
self.assertIn("collections", database)
def test__generate_metadata__databases_arg__remove_local(self, mongoclient):
m = mongobar.Mongobar()
metadata = m.generate_metadata(databases=["d1", "d2", "d3", "local"])
self.assertNotIn("local", metadata["databases"])
# write_metadata
@mock.patch("builtins.open", new_callable=mock.mock_open)
@mock.patch("mongobar.mongobar.json.dump")
def test__write_metadata(self, dump, open_, mongoclient):
m = mongobar.Mongobar()
m.write_metadata("name", {"key": "value"})
path = os.path.join(
m.config.connection_dir,
"name",
"metadata.json"
)
open_.assert_called_with(path, "w+")
file_handle = open_()
dump.assert_called_with({"key": "value"}, file_handle)
# read_metadata
@mock.patch("builtins.open", new_callable=mock.mock_open)
@mock.patch("mongobar.mongobar.json.loads")
def test__read_metadata(self, loads, open_, mongoclient):
m = mongobar.Mongobar()
m.read_metadata("name")
path = os.path.join(
m.config.connection_dir,
"name",
"metadata.json"
)
open_.assert_called_with(path, "r")
file_handle = open_()
file_handle.read.assert_called()
loads.assert_called_with("")
@mock.patch("builtins.open", side_effect=FileNotFoundError())
@mock.patch("mongobar.mongobar.json.loads")
def test__read_metadata__file_not_found(self, loads, open_, mongoclient):
m = mongobar.Mongobar()
self.assertEqual(m.read_metadata("name"), {
"host": "localhost",
"port": 27017,
"date": "0001-01-01T00:00:00.0000",
"databases": [],
"message": "Metadata not found"
})
# backup
@mock.patch("mongobar.mongobar.os.path.exists", return_value=True)
@mock.patch("mongobar.Mongobar.generate_backup_name", return_value="foo-bar")
@mock.patch("mongobar.mongobar.get_directories", return_value=[])
@mock.patch("mongobar.mongobar.create_directory", return_value=True)
@mock.patch("mongobar.Mongobar.generate_metadata", return_value=MOCKED_BACKUP_METADATA_1_DB)
@mock.patch("mongobar.Mongobar.write_metadata")
@mock.patch("mongobar.mongobar.subprocess.check_output")
def test__backup(self, check_output, *args):
m = mongobar.Mongobar()
m.backup()
directory = os.path.join(m.config.connection_dir, "foo-bar")
self.assertIn(
mock.call([
"mongodump",
"--host", "localhost",
"--port", "27017",
"--db", "d1",
"--out", directory,
"--quiet",
"--gzip"
]),
check_output.call_args_list
)
@mock.patch("mongobar.mongobar.os.path.exists", side_effect=[False, True])
@mock.patch("mongobar.Mongobar.generate_backup_name", return_value="foo-bar")
@mock.patch("mongobar.mongobar.get_directories", return_value=[])
@mock.patch("mongobar.mongobar.create_directory", return_value=True)
@mock.patch("mongobar.Mongobar.generate_metadata", return_value=MOCKED_BACKUP_METADATA_1_DB)
@mock.patch("mongobar.Mongobar.write_metadata")
@mock.patch("mongobar.mongobar.subprocess.check_output")
def test__backup__create_root_directory(self, *args):
m = mongobar.Mongobar()
m.backup()
self.assertIn(mock.call(m.config.root), args[3].call_args_list)
@mock.patch("mongobar.mongobar.os.path.exists", side_effect=[True, False])
@mock.patch("mongobar.Mongobar.generate_backup_name", return_value="foo-bar")
@mock.patch("mongobar.mongobar.get_directories", return_value=[])
@mock.patch("mongobar.mongobar.create_directory", return_value=True)
@mock.patch("mongobar.Mongobar.generate_metadata", return_value=MOCKED_BACKUP_METADATA_1_DB)
@mock.patch("mongobar.Mongobar.write_metadata")
@mock.patch("mongobar.mongobar.subprocess.check_output")
def test__backup__create_host_directory(self, *args):
m = mongobar.Mongobar()
m.backup()
self.assertIn(mock.call(m.config.connection_dir), args[3].call_args_list)
@mock.patch("mongobar.mongobar.os.path.exists", return_value=False)
@mock.patch("mongobar.Mongobar.generate_backup_name", return_value="foo-bar")
@mock.patch("mongobar.mongobar.get_directories", return_value=[])
@mock.patch("mongobar.mongobar.create_directory", return_value=True)
@mock.patch("mongobar.Mongobar.generate_metadata", return_value=MOCKED_BACKUP_METADATA_1_DB)
@mock.patch("mongobar.Mongobar.write_metadata")
@mock.patch("mongobar.mongobar.subprocess.check_output")
def test__backup__message_arg(self, *args):
m = mongobar.Mongobar()
m.backup(message="foo")
# extract metadata arg passed to self.write_metadata
write_metadata_mock = args[1]
write_metadata_calls = write_metadata_mock.call_args_list[0]
write_metadata_args = write_metadata_calls[0]
metadata = write_metadata_args[1]
self.assertEqual(metadata["message"], "foo")
@mock.patch("mongobar.mongobar.os.path.exists", return_value=True)
@mock.patch("mongobar.Mongobar.generate_backup_name", return_value="foo-bar")
@mock.patch("mongobar.mongobar.get_directories", return_value=[])
@mock.patch("mongobar.mongobar.create_directory", return_value=True)
@mock.patch("mongobar.Mongobar.generate_metadata", return_value=MOCKED_BACKUP_METADATA_1_DB)
@mock.patch("mongobar.Mongobar.write_metadata")
@mock.patch("mongobar.mongobar.subprocess.check_output")
def test__backup__auth_args(self, *args):
m = mongobar.Mongobar()
m.config.add({
"connections": {
"default": {
"host": "localhost",
"port": 27017,
"username": "user",
"password": "pass",
"authdb": "authdb"
}
}
})
m.backup()
self.assertIn(
mock.call([
"mongodump",
"--host", "localhost",
"--port", "27017",
"-u", "user",
"-p", "pass",
"--authenticationDatabase", "authdb",
"--db", "d1",
"--out", os.path.join(m.config.connection_dir, "foo-bar"),
"--quiet",
"--gzip"
]),
args[0].call_args_list
)
@mock.patch("mongobar.mongobar.os.path.exists", return_value=True)
@mock.patch("mongobar.Mongobar.generate_backup_name", return_value="foo-bar")
@mock.patch("mongobar.mongobar.get_directories", return_value=[])
@mock.patch("mongobar.mongobar.create_directory", return_value=True)
@mock.patch("mongobar.Mongobar.generate_metadata", return_value=MOCKED_BACKUP_METADATA_1_DB)
@mock.patch("mongobar.Mongobar.write_metadata")
@mock.patch("mongobar.mongobar.subprocess.check_output")
def test__backup__db_does_not_exist__command_called(self, check_output, *args):
m = mongobar.Mongobar()
m.backup(databases=["foobar"])
self.assertIn(
mock.call([
"mongodump",
"--host", "localhost",
"--port", "27017",
"--db", "foobar",
"--out", os.path.join(m.config.connection_dir, "foo-bar"),
"--quiet",
"--gzip"
]),
check_output.call_args_list
)
@mock.patch("mongobar.mongobar.os.path.exists", return_value=True)
@mock.patch("mongobar.Mongobar.generate_backup_name", return_value="foo-bar")
@mock.patch("mongobar.mongobar.get_directories", return_value=[])
@mock.patch("mongobar.mongobar.create_directory", return_value=True)
@mock.patch("mongobar.Mongobar.generate_metadata", return_value=MOCKED_BACKUP_METADATA_1_DB)
@mock.patch("mongobar.Mongobar.write_metadata")
@mock.patch("mongobar.mongobar.subprocess.check_output", side_effect=[subprocess.CalledProcessError(1, "")])
def test__backup__db_arg__raises_CalledProcessError(self, check_output, *args):
m = mongobar.Mongobar()
with self.assertRaises(mongobar.exceptions.CommandError):
m.backup()
@mock.patch("mongobar.mongobar.os.path.exists", return_value=True)
@mock.patch("mongobar.Mongobar.generate_backup_name", return_value="foo-bar")
@mock.patch("mongobar.mongobar.get_directories", return_value=[])
@mock.patch("mongobar.mongobar.create_directory", return_value=True)
@mock.patch("mongobar.Mongobar.generate_metadata", return_value=MOCKED_BACKUP_METADATA_1_DB)
@mock.patch("mongobar.Mongobar.write_metadata")
@mock.patch("mongobar.mongobar.subprocess.check_output")
def test__backup__collection_arg(self, check_output, *args):
m = mongobar.Mongobar()
m.backup(databases=["d1"], collections=["c1"])
self.assertIn(
mock.call([
"mongodump",
"--host", "localhost",
"--port", "27017",
"--db", "d1",
"--collection", "c1",
"--out", os.path.join(m.config.connection_dir, "foo-bar"),
"--quiet",
"--gzip"
]),
check_output.call_args_list
)
@mock.patch("mongobar.mongobar.os.path.exists", return_value=True)
@mock.patch("mongobar.Mongobar.generate_backup_name", return_value="foo-bar")
@mock.patch("mongobar.mongobar.get_directories", return_value=[])
@mock.patch("mongobar.mongobar.create_directory", return_value=True)
@mock.patch("mongobar.Mongobar.generate_metadata", return_value=MOCKED_BACKUP_METADATA_1_DB)
@mock.patch("mongobar.Mongobar.write_metadata")
@mock.patch("mongobar.mongobar.subprocess.check_output")
def test__backup__collection_does_not_exist__command_called(self, check_output, *args):
m = mongobar.Mongobar()
m.backup(databases=["d1"], collections=["foobar"])
self.assertIn(
mock.call([
"mongodump",
"--host", "localhost",
"--port", "27017",
"--db", "d1",
"--collection", "foobar",
"--out", os.path.join(m.config.connection_dir, "foo-bar"),
"--quiet",
"--gzip"
]),
check_output.call_args_list
)
@mock.patch("mongobar.mongobar.os.path.exists", return_value=True)
@mock.patch("mongobar.Mongobar.generate_backup_name", return_value="foo-bar")
@mock.patch("mongobar.mongobar.get_directories", return_value=[])
@mock.patch("mongobar.mongobar.create_directory", return_value=True)
@mock.patch("mongobar.Mongobar.generate_metadata", return_value=MOCKED_BACKUP_METADATA_1_DB)
@mock.patch("mongobar.Mongobar.write_metadata")
@mock.patch("mongobar.mongobar.subprocess.check_output", side_effect=[subprocess.CalledProcessError(1, "")])
def test__backup__collection_arg__raises_CalledProcessError(self, check_output, *args):
m = mongobar.Mongobar()
with self.assertRaises(mongobar.exceptions.CommandError):
m.backup(collections=["foobar"])
# restore
@mock.patch("mongobar.Mongobar.backup")
@mock.patch("mongobar.Mongobar.read_metadata", return_value=MOCKED_BACKUP_METADATA_3_DBS)
@mock.patch("mongobar.mongobar.subprocess.check_output")
@mock.patch("mongobar.mongobar.os.path.exists", return_value=False)
def test__restore__raises_BackupNotFoundError(self, *args):
m = mongobar.Mongobar()
with self.assertRaises(mongobar.exceptions.BackupNotFoundError):
m.restore("foobar")
@mock.patch("mongobar.Mongobar.backup")
@mock.patch("mongobar.Mongobar.read_metadata", return_value=MOCKED_BACKUP_METADATA_3_DBS)
@mock.patch("mongobar.mongobar.subprocess.check_output")
@mock.patch("mongobar.mongobar.os.path.exists")
def test__restore(self, *args):
m = mongobar.Mongobar()
m.restore("d1")
directory = os.path.join(m.config.connection_dir, "d1")
self.assertIn(
mock.call([
"mongorestore",
"--host", "localhost",
"--port", "27017",
"--nsInclude", "d1.*",
"--drop",
"--dir", directory,
"--gzip"
]),
args[1].call_args_list
)
@mock.patch("mongobar.Mongobar.backup")
@mock.patch("mongobar.Mongobar.read_metadata", return_value=MOCKED_BACKUP_METADATA_3_DBS)
@mock.patch("mongobar.mongobar.subprocess.check_output")
@mock.patch("mongobar.mongobar.os.path.exists")
def test__restore__databases_arg__raises_DatabaseNotFoundInBackupError(self, *args):
m = mongobar.Mongobar()
with self.assertRaises(mongobar.exceptions.DatabaseNotFoundInBackupError):
m.restore("backup", databases=["foobar"])
@mock.patch("mongobar.Mongobar.backup")
@mock.patch("mongobar.Mongobar.read_metadata", return_value=MOCKED_BACKUP_METADATA_3_DBS)
@mock.patch("mongobar.mongobar.subprocess.check_output")
@mock.patch("mongobar.mongobar.os.path.exists")
def test__restore__collections_arg__raises_CollectionNotFoundInBackupError(self, *args):
m = mongobar.Mongobar()
with self.assertRaises(mongobar.exceptions.CollectionNotFoundInBackupError):
m.restore("backup", collections=["foobar"])
@mock.patch("mongobar.Mongobar.backup")
@mock.patch("mongobar.Mongobar.read_metadata", return_value=MOCKED_BACKUP_METADATA_3_DBS)
@mock.patch("mongobar.mongobar.subprocess.check_output")
@mock.patch("mongobar.mongobar.os.path.exists")
def test__restore__destination_databases_arg__raises_DestinationDatabasesLengthError(self, *args):
m = mongobar.Mongobar()
with self.assertRaises(mongobar.exceptions.DestinationDatabasesLengthError):
m.restore("backup", destination_databases=["foobar"])
@mock.patch("mongobar.Mongobar.backup")
@mock.patch("mongobar.Mongobar.read_metadata", return_value=MOCKED_BACKUP_METADATA_3_DBS)
@mock.patch("mongobar.mongobar.subprocess.check_output")
@mock.patch("mongobar.mongobar.os.path.exists")
def test__restore__authentication_options(self, *args):
m = mongobar.Mongobar()
m.config.add({
"connections": {
"default": {
"host": "localhost",
"port": 27017,
"username": "user",
"password": "pass",
"authdb": "authdb"
}
}
})
m.restore("backup")
directory = os.path.join(m.config.connection_dir, "backup")
self.assertIn(
mock.call([
"mongorestore",
"--host", "localhost",
"--port", "27017",
"-u", "user",
"-p", "pass",
"--authenticationDatabase", "authdb",
"--nsInclude", "d1.*",
"--drop",
"--dir", directory,
"--gzip"
]),
args[1].call_args_list
)
@mock.patch("mongobar.Mongobar.backup")
@mock.patch("mongobar.Mongobar.read_metadata", return_value=MOCKED_BACKUP_METADATA_3_DBS)
@mock.patch("mongobar.mongobar.subprocess.check_output")
@mock.patch("mongobar.mongobar.os.path.exists")
def test__restore__destination_databases_arg(self, *args):
m = mongobar.Mongobar()
m.restore("backup", databases=["d1"], destination_databases=["destination"])
directory = os.path.expanduser("~/.mongobar_backups/localhost:27017/backup/d1")
args[1].assert_called_with([
"mongorestore",
"--host", "localhost",
"--port", "27017",
"--db", "destination",
"--nsInclude", "d1.*",
"--drop",
"--dir", directory,
"--gzip"
])
@mock.patch("mongobar.Mongobar.backup")
@mock.patch("mongobar.Mongobar.read_metadata", return_value=MOCKED_BACKUP_METADATA_3_DBS)
@mock.patch("mongobar.mongobar.subprocess.check_output", side_effect=[subprocess.CalledProcessError(1, "")])
@mock.patch("mongobar.mongobar.os.path.exists")
def test__restore__raises_CommandError(self, *args):
m = mongobar.Mongobar()
with self.assertRaises(mongobar.exceptions.CommandError):
m.restore("backup")
@mock.patch("mongobar.Mongobar.backup")
@mock.patch("mongobar.Mongobar.read_metadata", return_value=MOCKED_BACKUP_METADATA_3_DBS)
@mock.patch("mongobar.mongobar.subprocess.check_output")
@mock.patch("mongobar.mongobar.os.path.exists")
def test__restore__collection_arg(self, *args):
m = mongobar.Mongobar()
m.restore("backup", collections=["c1"])
self.assertIn(
mock.call([
"mongorestore",
"--host", "localhost",
"--port", "27017",
"--nsInclude", "d1.c1",
"--drop",
"--dir", os.path.join(m.config.connection_dir, "backup"),
"--gzip"
]),
args[1].call_args_list
)
@mock.patch("mongobar.Mongobar.backup")
@mock.patch("mongobar.Mongobar.read_metadata", return_value=MOCKED_BACKUP_METADATA_3_DBS)
@mock.patch("mongobar.mongobar.subprocess.check_output", side_effect=[subprocess.CalledProcessError(1, "")])
@mock.patch("mongobar.mongobar.os.path.exists")
def test__restore__collection_arg__raises_CommandError(self, *args):
m = mongobar.Mongobar()
with self.assertRaises(mongobar.exceptions.CommandError):
m.restore("backup", collections=["c1"])
# get connection directories
@mock.patch("mongobar.mongobar.os.path.exists", return_value=False)
@mock.patch("mongobar.mongobar.get_directories", return_value=["d1", "d2"])
def test__get_hosts__directory_does_not_exist(self, *args):
m = mongobar.Mongobar()
self.assertEqual(m.get_connection_directories(), [])
@mock.patch("mongobar.mongobar.os.path.exists", return_value=True)
@mock.patch("mongobar.mongobar.get_directories", return_value=["d1", "d2"])
def test__get_connection_directories__return_names(self, *args):
m = mongobar.Mongobar()
m.get_connection_directories()
args[0].assert_called_with(m.config.root)
@mock.patch("mongobar.mongobar.os.path.exists", return_value=True)
@mock.patch("mongobar.mongobar.get_directories", side_effect=[["host"],["db"]])
def test__get_connection_directories__return_names_and_counts(self, *args):
m = mongobar.Mongobar()
m.get_connection_directories(count=True)
self.assertEqual(
args[0].call_args_list,
[
mock.call(m.config.root),
mock.call(os.path.join(m.config.root, "host"))
]
)
# list backups
@mock.patch("mongobar.utils.os.path.exists", return_value=True)
@mock.patch("mongobar.mongobar.get_directories", return_value=["d1", "d2"])
def test__get_backups(self, *args):
m = mongobar.Mongobar()
m.get_backups()
args[0].assert_called_with(m.config.connection_dir)
@mock.patch("mongobar.utils.os.path.exists", return_value=False)
@mock.patch("mongobar.mongobar.create_directory")
@mock.patch("mongobar.mongobar.get_directories", return_value=["d1", "d2"])
def test__get_backups__directory_does_not_exist__return_empty_list(self, *args):
m = mongobar.Mongobar()
self.assertEqual(m.get_backups(), [])
# remove backup
@mock.patch("mongobar.mongobar.shutil.rmtree")
@mock.patch("mongobar.mongobar.os.path.exists", return_value=True)
def test__remove_backup(self, *args):
m = mongobar.Mongobar()
m.remove_backup("foo")
backup_directory = m.config.connection_dir
args[1].assert_called_with(os.path.join(backup_directory, "foo"))
@mock.patch("mongobar.mongobar.os.path.exists", return_value=False)
@mock.patch("mongobar.mongobar.shutil.rmtree")
def test__remove_backup__raises_BackupNotFoundError(self, *args):
m = mongobar.Mongobar()
with self.assertRaises(mongobar.exceptions.BackupNotFoundError):
m.remove_backup("foo")
```

Quality signals:

| Signal | Value | Flag |
|---|---|---|
| num_words | 2,755 | 0 |
| num_chars | 26,629 | 0 |
| mean_word_length | 5.67078 | 0 |
| frac_words_unique | 0.067877 | null |
| frac_chars_top_2grams | 0.168982 | 0 |
| frac_chars_top_3grams | 0.141458 | 0 |
| frac_chars_top_4grams | 0.204826 | 1 |
| frac_chars_dupe_5grams | 0.819753 | 1 |
| frac_chars_dupe_6grams | 0.79735 | 1 |
| frac_chars_dupe_7grams | 0.778916 | 1 |
| frac_chars_dupe_8grams | 0.749344 | 1 |
| frac_chars_dupe_9grams | 0.742623 | 1 |
| frac_chars_dupe_10grams | 0.692633 | 1 |
| frac_chars_replacement_symbols | 0 | 0 |
| frac_chars_digital | 0.011572 | 0 |
| frac_chars_whitespace | 0.247137 | 0 |
| size_file_byte | 26,629 | 0 |
| num_lines | 731 | 0 |
| num_chars_line_max | 113 | 0 |
| num_chars_line_mean | 36.428181 | 0 |
| frac_chars_alphabet | 0.767708 | 0 |
| frac_chars_comments | 0.010477 | 0 |
| cate_xml_start | 0 | 0 |
| frac_lines_dupe_lines | 0.656198 | 0 |
| cate_autogen | 0 | 0 |
| frac_lines_long_string | 0 | 0 |
| frac_chars_string_length | 0.244104 | 0 |
| frac_chars_long_word_length | 0.170129 | 0 |
| frac_lines_string_concat | 0 | null |
| cate_encoded_data | 0 | 0 |
| frac_chars_hex_words | 0 | 0 |
| frac_lines_prompt_comments | 0 | 0 |
| frac_lines_assert | 0.076033 | 0 |
| python: cate_ast | 1 | 0 |
| python: frac_lines_func_ratio | 0.076033 | 0 |
| python: cate_var_zero | false | 0 |
| python: frac_lines_pass | 0.013223 | 0 |
| python: frac_lines_import | 0.014876 | 0 |
| python: frac_lines_simplefunc | 0.008264 | 0 |
| python: score_lines_no_logic | 0.107438 | 0 |
| python: frac_lines_print | 0 | 0 |

effective = 0, hits = 7
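One detail worth noting in Row 2's test file: stacked `@mock.patch` decorators are applied bottom-up, so the mock created by the decorator closest to the function arrives as the first positional argument. That is why `test__backup` receives `check_output` first even though `subprocess.check_output` is patched last in the decorator list. A minimal standalone demonstration:

```python
# Demonstrates unittest.mock's decorator ordering: the decorator nearest
# the function body supplies the first mock argument.
import unittest
from unittest import mock

class Demo(unittest.TestCase):
    @mock.patch("os.path.exists", return_value=True)   # applied second -> second arg
    @mock.patch("os.path.isdir", return_value=False)   # nearest the function -> first arg
    def test_patch_order(self, isdir, exists):
        import os
        self.assertFalse(os.path.isdir("x"))
        self.assertTrue(os.path.exists("x"))
        isdir.assert_called_once_with("x")
        exists.assert_called_once_with("x")

if __name__ == "__main__":
    unittest.main()
```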
**Row 3: hyperdopamine/agents/utils.py (Bartiik/action-hypergraph-networks)**

| Field | Value |
|---|---|
| hexsha | 2ee13db393f8c7ccde4df43f898e125a2589acdc |
| size | 19,581 |
| ext / lang | py / Python |
| repo_path (stars = issues = forks) | hyperdopamine/agents/utils.py |
| repo_name | Bartiik/action-hypergraph-networks |
| repo_head_hexsha | 3d4dfddfacaaea36667fe752b0cb7f22608d75ee |
| licenses | ["MIT"] |
| max_stars_count | 11 (2021-03-08T04:11:10.000Z to 2022-01-31T13:53:34.000Z) |
| max_issues_count | 1 (2021-04-13T02:32:19.000Z to 2021-06-20T09:42:23.000Z) |
| max_forks_count | 5 (2021-07-07T18:32:41.000Z to 2022-03-31T19:24:27.000Z) |
| avg_line_length | 48.229064 |
| max_line_length | 111 |
| alphanum_fraction | 0.598437 |

`content`:

```python
import itertools
import math
import numpy as np
def ceil_rounder(x, base=5):
return base * math.ceil(x / base)
def create_sum_order_1_map(num_branches, num_sub_actions_per_branch):
n = num_branches
m = num_sub_actions_per_branch
sub_actions = []
for _ in range(n):
sub_actions.append(np.arange(0, m))
composite_actions = list(itertools.product(*sub_actions))
composite_actions = [list(comp_tup) for comp_tup in composite_actions]
all_map_matrices_from_branches_to_composite = []
for composite_a in composite_actions:
a = np.array(composite_a)
b = np.zeros((n, m))
b[np.arange(a.size), a] = 1
b = b.flatten()
all_map_matrices_from_branches_to_composite.append(list(b))
return all_map_matrices_from_branches_to_composite
def create_sum_order_2_map(num_branches, num_sub_actions_per_branch):
n = num_branches
m = num_sub_actions_per_branch
num_branch_pairs = int(n*(n-1)/2)
sub_actions = []
for branch_id in range(n):
sub_actions.append(np.arange(branch_id*m, (branch_id+1)*m))
composite_actions = list(itertools.product(*sub_actions))
branch_pairs = []
all_edges = []
for i in range(n):
for j in range(i+1,n):
branch_pairs.append([i,j])
branch_pair_sub_actions = [sub_actions[i], sub_actions[j]]
branch_pair_composite_sub_actions = \
list(itertools.product(*branch_pair_sub_actions))
all_edges.append(branch_pair_composite_sub_actions)
assert len(branch_pairs) == len(all_edges)
all_edge_indices_per_composite_action = []
for composite_a in composite_actions:
edge_id_per_branch_pair_for_composite_a = []
for branch_pair_ids, branch_pair_composite_sub_actions in zip(
reversed(branch_pairs), reversed(all_edges)):
for edge_id, edge_sub_actions in enumerate(
branch_pair_composite_sub_actions):
if edge_sub_actions == (composite_a[branch_pair_ids[0]],
composite_a[branch_pair_ids[1]]):
edge_id_per_branch_pair_for_composite_a.append(edge_id)
break
edge_id_per_branch_pair_for_composite_a = \
list(reversed(edge_id_per_branch_pair_for_composite_a))
all_edge_indices_per_composite_action.append(
edge_id_per_branch_pair_for_composite_a)
assert len(all_edge_indices_per_composite_action) == \
len(composite_actions) == m**n
all_map_matrices_from_edges_to_composite = []
for composite_a, edge_id_per_branch_pair_for_composite_a in zip(
composite_actions, all_edge_indices_per_composite_action):
a = np.array(edge_id_per_branch_pair_for_composite_a)
b = np.zeros((num_branch_pairs, m**2))
b[np.arange(a.size), a] = 1
b = b.flatten()
all_map_matrices_from_edges_to_composite.append(list(b))
return all_map_matrices_from_edges_to_composite
def create_sum_order_3_map(num_branches, num_sub_actions_per_branch):
n = num_branches
m = num_sub_actions_per_branch
num_branch_triplets = int(n*(n-1)*(n-2)/6)
sub_actions = []
for branch_id in range(n):
sub_actions.append(np.arange(branch_id*m, (branch_id+1)*m))
composite_actions = list(itertools.product(*sub_actions))
branch_triplets = []
all_triplets = []
for i in range(n):
for j in range(i+1,n):
for k in range(j+1,n):
branch_triplets.append([i,j,k])
branch_triplet_sub_actions = [sub_actions[i],
sub_actions[j],
sub_actions[k]]
branch_triplet_composite_sub_actions = \
list(itertools.product(*branch_triplet_sub_actions))
all_triplets.append(branch_triplet_composite_sub_actions)
assert len(branch_triplets) == len(all_triplets)
all_triplet_indices_per_composite_action = []
for composite_a in composite_actions:
edge_id_per_branch_triplet_for_composite_a = []
for branch_triplet_ids, branch_triplet_composite_sub_actions in zip(
reversed(branch_triplets), reversed(all_triplets)):
for edge_id, edge_sub_actions in enumerate(
branch_triplet_composite_sub_actions):
if edge_sub_actions == (composite_a[branch_triplet_ids[0]],
composite_a[branch_triplet_ids[1]],
composite_a[branch_triplet_ids[2]]):
edge_id_per_branch_triplet_for_composite_a.append(edge_id)
break
edge_id_per_branch_triplet_for_composite_a = list(
reversed(edge_id_per_branch_triplet_for_composite_a))
all_triplet_indices_per_composite_action.append(
edge_id_per_branch_triplet_for_composite_a)
assert len(all_triplet_indices_per_composite_action) == \
len(composite_actions) == m**n
all_map_matrices_from_triplets_to_composite = []
for composite_a, edge_id_per_branch_triplet_for_composite_a in zip(
composite_actions, all_triplet_indices_per_composite_action):
a = np.array(edge_id_per_branch_triplet_for_composite_a)
b = np.zeros((num_branch_triplets, m**3))
b[np.arange(a.size), a] = 1
b = b.flatten()
all_map_matrices_from_triplets_to_composite.append(list(b))
return all_map_matrices_from_triplets_to_composite
def create_general_order_1_map(num_branches, num_sub_actions_per_branch):
n = num_branches
m = num_sub_actions_per_branch
sub_actions = []
for _ in range(n):
sub_actions.append(np.arange(0, m))
composite_actions = list(itertools.product(*sub_actions))
composite_actions = [list(comp_tup) for comp_tup in composite_actions]
all_map_matrices_from_branches_to_composite = []
for composite_a in composite_actions:
a = np.array(composite_a)
b = np.zeros((n, m), np.float32)
b[np.arange(a.size), a] = 1
all_map_matrices_from_branches_to_composite.append(list(b))
return all_map_matrices_from_branches_to_composite
def create_general_order_2_map(num_branches, num_sub_actions_per_branch):
n = num_branches
m = num_sub_actions_per_branch
num_branch_pairs = int(n*(n-1)/2)
sub_actions = []
for branch_id in range(n):
sub_actions.append(np.arange(branch_id*m, (branch_id+1)*m))
composite_actions = list(itertools.product(*sub_actions))
branch_pairs = []
all_edges = []
for i in range(n):
for j in range(i+1,n):
branch_pairs.append([i,j])
branch_pair_sub_actions = [sub_actions[i], sub_actions[j]]
branch_pair_composite_sub_actions = list(
itertools.product(*branch_pair_sub_actions))
all_edges.append(branch_pair_composite_sub_actions)
assert len(branch_pairs) == len(all_edges)
all_edge_indices_per_composite_action = []
for composite_a in composite_actions:
edge_id_per_branch_pair_for_composite_a = []
for branch_pair_ids, branch_pair_composite_sub_actions in zip(
reversed(branch_pairs), reversed(all_edges)):
for edge_id, edge_sub_actions in enumerate(
branch_pair_composite_sub_actions):
if edge_sub_actions == (composite_a[branch_pair_ids[0]],
composite_a[branch_pair_ids[1]]):
edge_id_per_branch_pair_for_composite_a.append(edge_id)
break
edge_id_per_branch_pair_for_composite_a = list(
reversed(edge_id_per_branch_pair_for_composite_a))
all_edge_indices_per_composite_action.append(
edge_id_per_branch_pair_for_composite_a)
assert len(all_edge_indices_per_composite_action) == \
len(composite_actions) == m**n
all_map_matrices_from_edges_to_composite = []
for composite_a, edge_id_per_branch_pair_for_composite_a in zip(
composite_actions, all_edge_indices_per_composite_action):
a = np.array(edge_id_per_branch_pair_for_composite_a)
b = np.zeros((num_branch_pairs, m**2), np.float32)
b[np.arange(a.size), a] = 1
all_map_matrices_from_edges_to_composite.append(list(b))
return all_map_matrices_from_edges_to_composite
def create_general_order_3_map(num_branches, num_sub_actions_per_branch):
n = num_branches
m = num_sub_actions_per_branch
num_branch_triplets = int(n*(n-1)*(n-2)/6)
sub_actions = []
for branch_id in range(n):
sub_actions.append(np.arange(branch_id*m, (branch_id+1)*m))
composite_actions = list(itertools.product(*sub_actions))
branch_triplets = []
all_triplets = []
for i in range(n):
for j in range(i+1,n):
for k in range(j+1,n):
branch_triplets.append([i,j,k])
branch_triplet_sub_actions = [sub_actions[i],
sub_actions[j],
sub_actions[k]]
branch_triplet_composite_sub_actions = \
list(itertools.product(*branch_triplet_sub_actions))
all_triplets.append(branch_triplet_composite_sub_actions)
assert len(branch_triplets) == len(all_triplets)
all_triplet_indices_per_composite_action = []
for composite_a in composite_actions:
edge_id_per_branch_triplet_for_composite_a = []
for branch_triplet_ids, branch_triplet_composite_sub_actions in zip(
reversed(branch_triplets), reversed(all_triplets)):
for edge_id, edge_sub_actions in enumerate(
branch_triplet_composite_sub_actions):
if edge_sub_actions == (composite_a[branch_triplet_ids[0]],
composite_a[branch_triplet_ids[1]],
composite_a[branch_triplet_ids[2]]):
edge_id_per_branch_triplet_for_composite_a.append(edge_id)
break
edge_id_per_branch_triplet_for_composite_a = list(
reversed(edge_id_per_branch_triplet_for_composite_a))
all_triplet_indices_per_composite_action.append(
edge_id_per_branch_triplet_for_composite_a)
assert len(all_triplet_indices_per_composite_action) == \
len(composite_actions) == m**n
all_map_matrices_from_triplets_to_composite = []
for composite_a, edge_id_per_branch_triplet_for_composite_a in zip(
composite_actions, all_triplet_indices_per_composite_action):
a = np.array(edge_id_per_branch_triplet_for_composite_a)
b = np.zeros((num_branch_triplets, m**3), np.float32)
b[np.arange(a.size), a] = 1
all_map_matrices_from_triplets_to_composite.append(list(b))
return all_map_matrices_from_triplets_to_composite
SUM_ORDER_1_MAP = \
[[1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0],
[1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0],
[1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0],
[0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0],
[0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0]]
SUM_ORDER_2_MAP = \
[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]]
GENERAL_ORDER_1_MAP = \
[[np.array([1., 0., 0.], dtype=np.float32),
np.array([1., 0., 0.], dtype=np.float32),
np.array([1., 0.], dtype=np.float32)],
[np.array([1., 0., 0.], dtype=np.float32),
np.array([1., 0., 0.], dtype=np.float32),
np.array([0., 1.], dtype=np.float32)],
[np.array([1., 0., 0.], dtype=np.float32),
np.array([0., 1., 0.], dtype=np.float32),
np.array([1., 0.], dtype=np.float32)],
[np.array([1., 0., 0.], dtype=np.float32),
np.array([0., 1., 0.], dtype=np.float32),
np.array([0., 1.], dtype=np.float32)],
[np.array([1., 0., 0.], dtype=np.float32),
np.array([0., 0., 1.], dtype=np.float32),
np.array([1., 0.], dtype=np.float32)],
[np.array([1., 0., 0.], dtype=np.float32),
np.array([0., 0., 1.], dtype=np.float32),
np.array([0., 1.], dtype=np.float32)],
[np.array([0., 1., 0.], dtype=np.float32),
np.array([1., 0., 0.], dtype=np.float32),
np.array([1., 0.], dtype=np.float32)],
[np.array([0., 1., 0.], dtype=np.float32),
np.array([1., 0., 0.], dtype=np.float32),
np.array([0., 1.], dtype=np.float32)],
[np.array([0., 1., 0.], dtype=np.float32),
np.array([0., 1., 0.], dtype=np.float32),
np.array([1., 0.], dtype=np.float32)],
[np.array([0., 1., 0.], dtype=np.float32),
np.array([0., 1., 0.], dtype=np.float32),
np.array([0., 1.], dtype=np.float32)],
[np.array([0., 1., 0.], dtype=np.float32),
np.array([0., 0., 1.], dtype=np.float32),
np.array([1., 0.], dtype=np.float32)],
[np.array([0., 1., 0.], dtype=np.float32),
np.array([0., 0., 1.], dtype=np.float32),
np.array([0., 1.], dtype=np.float32)],
[np.array([0., 0., 1.], dtype=np.float32),
np.array([1., 0., 0.], dtype=np.float32),
np.array([1., 0.], dtype=np.float32)],
[np.array([0., 0., 1.], dtype=np.float32),
np.array([1., 0., 0.], dtype=np.float32),
np.array([0., 1.], dtype=np.float32)],
[np.array([0., 0., 1.], dtype=np.float32),
np.array([0., 1., 0.], dtype=np.float32),
np.array([1., 0.], dtype=np.float32)],
[np.array([0., 0., 1.], dtype=np.float32),
np.array([0., 1., 0.], dtype=np.float32),
np.array([0., 1.], dtype=np.float32)],
[np.array([0., 0., 1.], dtype=np.float32),
np.array([0., 0., 1.], dtype=np.float32),
np.array([1., 0.], dtype=np.float32)],
[np.array([0., 0., 1.], dtype=np.float32),
np.array([0., 0., 1.], dtype=np.float32),
np.array([0., 1.], dtype=np.float32)]]
GENERAL_ORDER_2_MAP = \
[[np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32),
np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32),
np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)],
[np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32),
np.array([0.0, 1.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32),
np.array([0.0, 1.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)],
[np.array([0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32),
np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32),
np.array([0.0, 0.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float32)],
[np.array([0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32),
np.array([0.0, 1.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32),
np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0], dtype=np.float32)],
[np.array([0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32),
np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32),
np.array([0.0, 0.0, 0.0, 0.0, 1.0, 0.0], dtype=np.float32)],
[np.array([0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32),
np.array([0.0, 1.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32),
np.array([0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=np.float32)],
[np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32),
np.array([0.0, 0.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float32),
np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)],
[np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32),
np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0], dtype=np.float32),
np.array([0.0, 1.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)],
[np.array([0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32),
np.array([0.0, 0.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float32),
np.array([0.0, 0.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float32)],
[np.array([0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32),
np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0], dtype=np.float32),
np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0], dtype=np.float32)],
[np.array([0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float32),
np.array([0.0, 0.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float32),
np.array([0.0, 0.0, 0.0, 0.0, 1.0, 0.0], dtype=np.float32)],
[np.array([0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float32),
np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0], dtype=np.float32),
np.array([0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=np.float32)],
[np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0], dtype=np.float32),
np.array([0.0, 0.0, 0.0, 0.0, 1.0, 0.0], dtype=np.float32),
np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)],
[np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0], dtype=np.float32),
np.array([0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=np.float32),
np.array([0.0, 1.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)],
[np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], dtype=np.float32),
np.array([0.0, 0.0, 0.0, 0.0, 1.0, 0.0], dtype=np.float32),
np.array([0.0, 0.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float32)],
[np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], dtype=np.float32),
np.array([0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=np.float32),
np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0], dtype=np.float32)],
[np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=np.float32),
np.array([0.0, 0.0, 0.0, 0.0, 1.0, 0.0], dtype=np.float32),
np.array([0.0, 0.0, 0.0, 0.0, 1.0, 0.0], dtype=np.float32)],
[np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=np.float32),
np.array([0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=np.float32),
np.array([0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=np.float32)]]
```

Quality signals:

| Signal | Value | Flag |
|---|---|---|
| num_words | 4,019 | 0 |
| num_chars | 19,581 | 0 |
| mean_word_length | 2.7263 | 0 |
| frac_words_unique | 0.018413 | null |
| frac_chars_top_2grams | 0.265401 | 1 |
| frac_chars_top_3grams | 0.339235 | 1 |
| frac_chars_top_4grams | 0.385507 | 1 |
| frac_chars_dupe_5grams | 0.987314 | 1 |
| frac_chars_dupe_6grams | 0.987314 | 1 |
| frac_chars_dupe_7grams | 0.983481 | 1 |
| frac_chars_dupe_8grams | 0.982933 | 1 |
| frac_chars_dupe_9grams | 0.982933 | 1 |
| frac_chars_dupe_10grams | 0.982933 | 1 |
| frac_chars_replacement_symbols | 0 | 0 |
| frac_chars_digital | 0.141302 | 0 |
| frac_chars_whitespace | 0.197998 | 0 |
| size_file_byte | 19,581 | 0 |
| num_lines | 405 | 0 |
| num_chars_line_max | 112 | 0 |
| num_chars_line_mean | 48.348148 | 0 |
| frac_chars_alphabet | 0.556419 | 0 |
| frac_chars_comments | 0 | 0 |
| cate_xml_start | 0 | 0 |
| frac_lines_dupe_lines | 0.808743 | 1 |
| cate_autogen | 0 | 0 |
| frac_lines_long_string | 0 | 0 |
| frac_chars_string_length | 0 | 0 |
| frac_chars_long_word_length | 0 | 0 |
| frac_lines_string_concat | 0 | null |
| cate_encoded_data | 0 | 0 |
| frac_chars_hex_words | 0 | 0 |
| frac_lines_prompt_comments | 0 | 0 |
| frac_lines_assert | 0.021858 | 0 |
| python: cate_ast | 1 | 0 |
| python: frac_lines_func_ratio | 0.019126 | 0 |
| python: cate_var_zero | false | 0 |
| python: frac_lines_pass | 0 | 0 |
| python: frac_lines_import | 0.008197 | 0 |
| python: frac_lines_simplefunc | 0.002732 | 0 |
| python: score_lines_no_logic | 0.046448 | 0 |
| python: frac_lines_print | 0 | 0 |

effective = 0, hits = 10
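For intuition about the map builders in Row 3's file: `create_sum_order_1_map(n, m)` enumerates all m**n composite actions (the Cartesian product of per-branch sub-actions) and encodes each as the flattened one-hot matrix of its per-branch choices. The hard-coded `SUM_ORDER_1_MAP` constant follows the same encoding for three branches of sizes 3, 3, and 2 (18 rows of length 8), a mixed layout the uniform-m function does not itself produce. A small worked example, assuming the module is importable at its repository path:

```python
# Worked example: n=2 branches, m=3 sub-actions per branch.
from hyperdopamine.agents.utils import create_sum_order_1_map  # assumed import path

rows = create_sum_order_1_map(num_branches=2, num_sub_actions_per_branch=3)
assert len(rows) == 3 ** 2                  # one row per composite action
assert all(len(r) == 2 * 3 for r in rows)   # each row is a flattened (n, m) one-hot matrix
# Composite action (1, 2): branch 0 picks sub-action 1, branch 1 picks sub-action 2.
assert rows[1 * 3 + 2] == [0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
```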
**Row 4: src/triage/component/results_schema/alembic/versions/b4d7569d31cb_aequitas.py (josephbajor/triage_NN)**

| Field | Value |
|---|---|
| hexsha | 2ee92b859a18ab3d982b3252b892472c6de88b62 |
| size | 11,882 |
| ext / lang | py / Python |
| repo_path (stars = issues = forks) | src/triage/component/results_schema/alembic/versions/b4d7569d31cb_aequitas.py |
| repo_name | josephbajor/triage_NN |
| repo_head_hexsha | cbaee6e5a06e597c91fec372717d89a2b5f34fa5 |
| licenses | ["MIT"] |
| max_stars_count | 160 (2017-06-13T09:59:59.000Z to 2022-03-21T22:00:35.000Z) |
| max_issues_count | 803 (2016-10-21T19:44:02.000Z to 2022-03-29T00:02:33.000Z) |
| max_forks_count | 59 (2017-01-31T22:10:22.000Z to 2022-03-19T12:35:03.000Z) |
| avg_line_length | 62.867725 |
| max_line_length | 168 |
| alphanum_fraction | 0.712675 |

`content`:

```python
"""aequitas
Revision ID: b4d7569d31cb
Revises: 609c7cc51794
Create Date: 2019-05-07 11:56:03.814097
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b4d7569d31cb'
down_revision = '609c7cc51794'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('aequitas',
sa.Column('model_id', sa.Integer(), nullable=False),
sa.Column('subset_hash', sa.String(), nullable=False),
sa.Column('tie_breaker', sa.String(), nullable=False),
sa.Column('evaluation_start_time', sa.DateTime(), nullable=False),
sa.Column('evaluation_end_time', sa.DateTime(), nullable=False),
sa.Column('matrix_uuid', sa.Text(), nullable=True),
sa.Column('parameter', sa.String(), nullable=False),
sa.Column('attribute_name', sa.String(), nullable=False),
sa.Column('attribute_value', sa.String(), nullable=False),
sa.Column('total_entities', sa.Integer(), nullable=True),
sa.Column('group_label_pos', sa.Integer(), nullable=True),
sa.Column('group_label_neg', sa.Integer(), nullable=True),
sa.Column('group_size', sa.Integer(), nullable=True),
sa.Column('group_size_pct', sa.Numeric(), nullable=True),
sa.Column('prev', sa.Numeric(), nullable=True),
sa.Column('pp', sa.Integer(), nullable=True),
sa.Column('pn', sa.Integer(), nullable=True),
sa.Column('fp', sa.Integer(), nullable=True),
sa.Column('fn', sa.Integer(), nullable=True),
sa.Column('tn', sa.Integer(), nullable=True),
sa.Column('tp', sa.Integer(), nullable=True),
sa.Column('ppr', sa.Numeric(), nullable=True),
sa.Column('pprev', sa.Numeric(), nullable=True),
sa.Column('tpr', sa.Numeric(), nullable=True),
sa.Column('tnr', sa.Numeric(), nullable=True),
sa.Column('for', sa.Numeric(), nullable=True),
sa.Column('fdr', sa.Numeric(), nullable=True),
sa.Column('fpr', sa.Numeric(), nullable=True),
sa.Column('fnr', sa.Numeric(), nullable=True),
sa.Column('npv', sa.Numeric(), nullable=True),
sa.Column('precision', sa.Numeric(), nullable=True),
sa.Column('ppr_disparity', sa.Numeric(), nullable=True),
sa.Column('ppr_ref_group_value', sa.String(), nullable=True),
sa.Column('pprev_disparity', sa.Numeric(), nullable=True),
sa.Column('pprev_ref_group_value', sa.String(), nullable=True),
sa.Column('precision_disparity', sa.Numeric(), nullable=True),
sa.Column('precision_ref_group_value', sa.String(), nullable=True),
sa.Column('fdr_disparity', sa.Numeric(), nullable=True),
sa.Column('fdr_ref_group_value', sa.String(), nullable=True),
sa.Column('for_disparity', sa.Numeric(), nullable=True),
sa.Column('for_ref_group_value', sa.String(), nullable=True),
sa.Column('fpr_disparity', sa.Numeric(), nullable=True),
sa.Column('fpr_ref_group_value', sa.String(), nullable=True),
sa.Column('fnr_disparity', sa.Numeric(), nullable=True),
sa.Column('fnr_ref_group_value', sa.String(), nullable=True),
sa.Column('tpr_disparity', sa.Numeric(), nullable=True),
sa.Column('tpr_ref_group_value', sa.String(), nullable=True),
sa.Column('tnr_disparity', sa.Numeric(), nullable=True),
sa.Column('tnr_ref_group_value', sa.String(), nullable=True),
sa.Column('npv_disparity', sa.Numeric(), nullable=True),
sa.Column('npv_ref_group_value', sa.String(), nullable=True),
sa.Column('Statistical_Parity', sa.Boolean(), nullable=True),
sa.Column('Impact_Parity', sa.Boolean(), nullable=True),
sa.Column('FDR_Parity', sa.Boolean(), nullable=True),
sa.Column('FPR_Parity', sa.Boolean(), nullable=True),
sa.Column('FOR_Parity', sa.Boolean(), nullable=True),
sa.Column('FNR_Parity', sa.Boolean(), nullable=True),
sa.Column('TypeI_Parity', sa.Boolean(), nullable=True),
sa.Column('TypeII_Parity', sa.Boolean(), nullable=True),
sa.Column('Equalized_Odds', sa.Boolean(), nullable=True),
sa.Column('Unsupervised_Fairness', sa.Boolean(), nullable=True),
sa.Column('Supervised_Fairness', sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(['matrix_uuid'], ['model_metadata.matrices.matrix_uuid'], ),
sa.ForeignKeyConstraint(['model_id'], ['model_metadata.models.model_id'], ),
sa.PrimaryKeyConstraint('model_id', 'subset_hash', 'tie_breaker', 'evaluation_start_time', 'evaluation_end_time', 'parameter', 'attribute_name', 'attribute_value'),
schema='test_results'
)
op.create_index(op.f('ix_test_results_aequitas_attribute_name'), 'aequitas', ['attribute_name'], unique=False, schema='test_results')
op.create_index(op.f('ix_test_results_aequitas_attribute_value'), 'aequitas', ['attribute_value'], unique=False, schema='test_results')
op.create_index(op.f('ix_test_results_aequitas_evaluation_end_time'), 'aequitas', ['evaluation_end_time'], unique=False, schema='test_results')
op.create_index(op.f('ix_test_results_aequitas_evaluation_start_time'), 'aequitas', ['evaluation_start_time'], unique=False, schema='test_results')
op.create_index(op.f('ix_test_results_aequitas_model_id'), 'aequitas', ['model_id'], unique=False, schema='test_results')
op.create_index(op.f('ix_test_results_aequitas_parameter'), 'aequitas', ['parameter'], unique=False, schema='test_results')
op.create_table('aequitas',
sa.Column('model_id', sa.Integer(), nullable=False),
sa.Column('subset_hash', sa.String(), nullable=False),
sa.Column('tie_breaker', sa.String(), nullable=False),
sa.Column('evaluation_start_time', sa.DateTime(), nullable=False),
sa.Column('evaluation_end_time', sa.DateTime(), nullable=False),
sa.Column('matrix_uuid', sa.Text(), nullable=True),
sa.Column('parameter', sa.String(), nullable=False),
sa.Column('attribute_name', sa.String(), nullable=False),
sa.Column('attribute_value', sa.String(), nullable=False),
sa.Column('total_entities', sa.Integer(), nullable=True),
sa.Column('group_label_pos', sa.Integer(), nullable=True),
sa.Column('group_label_neg', sa.Integer(), nullable=True),
sa.Column('group_size', sa.Integer(), nullable=True),
sa.Column('group_size_pct', sa.Numeric(), nullable=True),
sa.Column('prev', sa.Numeric(), nullable=True),
sa.Column('pp', sa.Integer(), nullable=True),
sa.Column('pn', sa.Integer(), nullable=True),
sa.Column('fp', sa.Integer(), nullable=True),
sa.Column('fn', sa.Integer(), nullable=True),
sa.Column('tn', sa.Integer(), nullable=True),
sa.Column('tp', sa.Integer(), nullable=True),
sa.Column('ppr', sa.Numeric(), nullable=True),
sa.Column('pprev', sa.Numeric(), nullable=True),
sa.Column('tpr', sa.Numeric(), nullable=True),
sa.Column('tnr', sa.Numeric(), nullable=True),
sa.Column('for', sa.Numeric(), nullable=True),
sa.Column('fdr', sa.Numeric(), nullable=True),
sa.Column('fpr', sa.Numeric(), nullable=True),
sa.Column('fnr', sa.Numeric(), nullable=True),
sa.Column('npv', sa.Numeric(), nullable=True),
sa.Column('precision', sa.Numeric(), nullable=True),
sa.Column('ppr_disparity', sa.Numeric(), nullable=True),
sa.Column('ppr_ref_group_value', sa.String(), nullable=True),
sa.Column('pprev_disparity', sa.Numeric(), nullable=True),
sa.Column('pprev_ref_group_value', sa.String(), nullable=True),
sa.Column('precision_disparity', sa.Numeric(), nullable=True),
sa.Column('precision_ref_group_value', sa.String(), nullable=True),
sa.Column('fdr_disparity', sa.Numeric(), nullable=True),
sa.Column('fdr_ref_group_value', sa.String(), nullable=True),
sa.Column('for_disparity', sa.Numeric(), nullable=True),
sa.Column('for_ref_group_value', sa.String(), nullable=True),
sa.Column('fpr_disparity', sa.Numeric(), nullable=True),
sa.Column('fpr_ref_group_value', sa.String(), nullable=True),
sa.Column('fnr_disparity', sa.Numeric(), nullable=True),
sa.Column('fnr_ref_group_value', sa.String(), nullable=True),
sa.Column('tpr_disparity', sa.Numeric(), nullable=True),
sa.Column('tpr_ref_group_value', sa.String(), nullable=True),
sa.Column('tnr_disparity', sa.Numeric(), nullable=True),
sa.Column('tnr_ref_group_value', sa.String(), nullable=True),
sa.Column('npv_disparity', sa.Numeric(), nullable=True),
sa.Column('npv_ref_group_value', sa.String(), nullable=True),
sa.Column('Statistical_Parity', sa.Boolean(), nullable=True),
sa.Column('Impact_Parity', sa.Boolean(), nullable=True),
sa.Column('FDR_Parity', sa.Boolean(), nullable=True),
sa.Column('FPR_Parity', sa.Boolean(), nullable=True),
sa.Column('FOR_Parity', sa.Boolean(), nullable=True),
sa.Column('FNR_Parity', sa.Boolean(), nullable=True),
sa.Column('TypeI_Parity', sa.Boolean(), nullable=True),
sa.Column('TypeII_Parity', sa.Boolean(), nullable=True),
sa.Column('Equalized_Odds', sa.Boolean(), nullable=True),
sa.Column('Unsupervised_Fairness', sa.Boolean(), nullable=True),
sa.Column('Supervised_Fairness', sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(['matrix_uuid'], ['model_metadata.matrices.matrix_uuid'], ),
sa.ForeignKeyConstraint(['model_id'], ['model_metadata.models.model_id'], ),
sa.PrimaryKeyConstraint('model_id', 'subset_hash', 'tie_breaker', 'evaluation_start_time', 'evaluation_end_time', 'parameter', 'attribute_name', 'attribute_value'),
schema='train_results'
)
op.create_index(op.f('ix_train_results_aequitas_attribute_name'), 'aequitas', ['attribute_name'], unique=False, schema='train_results')
op.create_index(op.f('ix_train_results_aequitas_attribute_value'), 'aequitas', ['attribute_value'], unique=False, schema='train_results')
op.create_index(op.f('ix_train_results_aequitas_evaluation_end_time'), 'aequitas', ['evaluation_end_time'], unique=False, schema='train_results')
op.create_index(op.f('ix_train_results_aequitas_evaluation_start_time'), 'aequitas', ['evaluation_start_time'], unique=False, schema='train_results')
op.create_index(op.f('ix_train_results_aequitas_model_id'), 'aequitas', ['model_id'], unique=False, schema='train_results')
op.create_index(op.f('ix_train_results_aequitas_parameter'), 'aequitas', ['parameter'], unique=False, schema='train_results')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_train_results_aequitas_parameter'), table_name='aequitas', schema='train_results')
op.drop_index(op.f('ix_train_results_aequitas_model_id'), table_name='aequitas', schema='train_results')
op.drop_index(op.f('ix_train_results_aequitas_evaluation_start_time'), table_name='aequitas', schema='train_results')
op.drop_index(op.f('ix_train_results_aequitas_evaluation_end_time'), table_name='aequitas', schema='train_results')
op.drop_index(op.f('ix_train_results_aequitas_attribute_value'), table_name='aequitas', schema='train_results')
op.drop_index(op.f('ix_train_results_aequitas_attribute_name'), table_name='aequitas', schema='train_results')
op.drop_table('aequitas', schema='train_results')
op.drop_index(op.f('ix_test_results_aequitas_parameter'), table_name='aequitas', schema='test_results')
op.drop_index(op.f('ix_test_results_aequitas_model_id'), table_name='aequitas', schema='test_results')
op.drop_index(op.f('ix_test_results_aequitas_evaluation_start_time'), table_name='aequitas', schema='test_results')
op.drop_index(op.f('ix_test_results_aequitas_evaluation_end_time'), table_name='aequitas', schema='test_results')
op.drop_index(op.f('ix_test_results_aequitas_attribute_value'), table_name='aequitas', schema='test_results')
op.drop_index(op.f('ix_test_results_aequitas_attribute_name'), table_name='aequitas', schema='test_results')
op.drop_table('aequitas', schema='test_results')
# ### end Alembic commands ###
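For reference, the upgrade()/downgrade() pair above is driven from the Alembic command line; a minimal sketch of the two usual invocations (standard Alembic CLI, nothing project-specific assumed):

# Apply this migration and everything before it:
#     alembic upgrade head
# Roll back exactly one revision (runs downgrade() above):
#     alembic downgrade -1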
avg_line_length: 62.867725 | max_line_length: 168 | alphanum_fraction: 0.712675 | (remaining per-file quality-signal columns omitted)
hexsha: 2efe8e172a6509efd1ce4bee813cdd5eadbcff46 | size: 6,282 | ext: py | lang: Python
path: backend/unpp_api/apps/partner/roles.py | repo: unicef/un-partner-portal @ 73afa193a5f6d626928cae0025c72a17f0ef8f61 | licenses: ["Apache-2.0"]
stars: 6 (2017-11-21T10:00:44.000Z to 2022-02-12T16:51:48.000Z) | issues: 995 (2017-07-31T02:08:36.000Z to 2022-03-08T22:44:03.000Z) | forks: 1 (2021-07-21T10:45:15.000Z to 2021-07-21T10:45:15.000Z)
from enum import unique, auto
from partner.permissions import PartnerPermission
from common.enums import AutoNameEnum
@unique
class PartnerRole(AutoNameEnum):
"""
Editing names here WILL break roles saved in DB
"""
ADMIN = auto()
EDITOR = auto()
READER = auto()
@classmethod
def get_choices(cls):
return [(role.name, ROLE_LABELS[role]) for role in cls]
ROLE_LABELS = {
PartnerRole.ADMIN: 'Administrator',
PartnerRole.EDITOR: 'Editor',
PartnerRole.READER: 'Reader',
}
# HQ Roles have different permission scopes than ordinary roles
PARTNER_ROLE_PERMISSIONS = {
True: { # International CSO HQ (old name INGO HQ)
PartnerRole.ADMIN: frozenset([
PartnerPermission.REGISTER,
PartnerPermission.CREATE_COUNTRY_OFFICE,
PartnerPermission.CFEI_VIEW,
PartnerPermission.VIEW_DASHBOARD,
PartnerPermission.MANAGE_USERS,
PartnerPermission.CFEI_PINNING,
PartnerPermission.CFEI_ANSWER_SELECTION,
PartnerPermission.CFEI_SEND_CLARIFICATION_REQUEST,
PartnerPermission.UCN_VIEW,
PartnerPermission.CFEI_SUBMIT_CONCEPT_NOTE,
PartnerPermission.UCN_DRAFT,
PartnerPermission.RECEIVE_NOTIFICATIONS,
PartnerPermission.UCN_EDIT,
PartnerPermission.UCN_SUBMIT,
PartnerPermission.UCN_DELETE,
PartnerPermission.EDIT_PROFILE,
PartnerPermission.EDIT_HQ_PROFILE,
PartnerPermission.DSR_VIEW,
PartnerPermission.DSR_ANSWER,
]),
PartnerRole.EDITOR: frozenset([
PartnerPermission.CREATE_COUNTRY_OFFICE,
PartnerPermission.CFEI_VIEW,
PartnerPermission.VIEW_DASHBOARD,
PartnerPermission.CFEI_PINNING,
PartnerPermission.CFEI_ANSWER_SELECTION,
PartnerPermission.CFEI_SEND_CLARIFICATION_REQUEST,
PartnerPermission.UCN_VIEW,
PartnerPermission.UCN_DRAFT,
PartnerPermission.UCN_EDIT,
PartnerPermission.RECEIVE_NOTIFICATIONS,
PartnerPermission.CFEI_SUBMIT_CONCEPT_NOTE,
PartnerPermission.UCN_SUBMIT,
PartnerPermission.UCN_DELETE,
PartnerPermission.EDIT_PROFILE,
PartnerPermission.EDIT_HQ_PROFILE,
PartnerPermission.DSR_VIEW,
PartnerPermission.DSR_ANSWER,
]),
PartnerRole.READER: frozenset([
PartnerPermission.VIEW_DASHBOARD,
PartnerPermission.CFEI_VIEW,
PartnerPermission.UCN_VIEW,
PartnerPermission.DSR_VIEW,
]),
},
False: { # International CSO (old name INGO) Country Profile
PartnerRole.ADMIN: frozenset([
PartnerPermission.VIEW_DASHBOARD,
PartnerPermission.CFEI_VIEW,
PartnerPermission.MANAGE_USERS,
PartnerPermission.CFEI_PINNING,
PartnerPermission.CFEI_ANSWER_SELECTION,
PartnerPermission.CFEI_SEND_CLARIFICATION_REQUEST,
PartnerPermission.UCN_VIEW,
PartnerPermission.UCN_DRAFT,
PartnerPermission.RECEIVE_NOTIFICATIONS,
PartnerPermission.UCN_EDIT,
PartnerPermission.UCN_SUBMIT,
PartnerPermission.CFEI_SUBMIT_CONCEPT_NOTE,
PartnerPermission.UCN_DELETE,
PartnerPermission.EDIT_PROFILE,
PartnerPermission.DSR_VIEW,
PartnerPermission.DSR_ANSWER,
]),
PartnerRole.EDITOR: frozenset([
PartnerPermission.VIEW_DASHBOARD,
PartnerPermission.CFEI_VIEW,
PartnerPermission.CFEI_PINNING,
PartnerPermission.CFEI_ANSWER_SELECTION,
PartnerPermission.CFEI_SEND_CLARIFICATION_REQUEST,
PartnerPermission.UCN_VIEW,
PartnerPermission.UCN_DRAFT,
PartnerPermission.UCN_EDIT,
PartnerPermission.RECEIVE_NOTIFICATIONS,
PartnerPermission.UCN_SUBMIT,
PartnerPermission.CFEI_SUBMIT_CONCEPT_NOTE,
PartnerPermission.UCN_DELETE,
PartnerPermission.EDIT_PROFILE,
PartnerPermission.DSR_VIEW,
PartnerPermission.DSR_ANSWER,
]),
PartnerRole.READER: frozenset([
PartnerPermission.VIEW_DASHBOARD,
PartnerPermission.CFEI_VIEW,
PartnerPermission.UCN_VIEW,
PartnerPermission.DSR_VIEW,
]),
},
None: { # NGO
PartnerRole.ADMIN: frozenset([
PartnerPermission.REGISTER,
PartnerPermission.VIEW_DASHBOARD,
PartnerPermission.MANAGE_USERS,
PartnerPermission.CFEI_VIEW,
PartnerPermission.CFEI_PINNING,
PartnerPermission.CFEI_ANSWER_SELECTION,
PartnerPermission.CFEI_SEND_CLARIFICATION_REQUEST,
PartnerPermission.UCN_VIEW,
PartnerPermission.UCN_DRAFT,
PartnerPermission.RECEIVE_NOTIFICATIONS,
PartnerPermission.CFEI_SUBMIT_CONCEPT_NOTE,
PartnerPermission.UCN_EDIT,
PartnerPermission.UCN_SUBMIT,
PartnerPermission.UCN_DELETE,
PartnerPermission.EDIT_PROFILE,
PartnerPermission.DSR_VIEW,
PartnerPermission.DSR_ANSWER,
]),
PartnerRole.EDITOR: frozenset([
PartnerPermission.VIEW_DASHBOARD,
PartnerPermission.CFEI_VIEW,
PartnerPermission.CFEI_PINNING,
PartnerPermission.CFEI_ANSWER_SELECTION,
PartnerPermission.CFEI_SEND_CLARIFICATION_REQUEST,
PartnerPermission.UCN_VIEW,
PartnerPermission.UCN_DRAFT,
PartnerPermission.UCN_EDIT,
PartnerPermission.CFEI_SUBMIT_CONCEPT_NOTE,
PartnerPermission.RECEIVE_NOTIFICATIONS,
PartnerPermission.UCN_SUBMIT,
PartnerPermission.UCN_DELETE,
PartnerPermission.EDIT_PROFILE,
PartnerPermission.DSR_VIEW,
PartnerPermission.DSR_ANSWER,
]),
PartnerRole.READER: frozenset([
PartnerPermission.VIEW_DASHBOARD,
PartnerPermission.CFEI_VIEW,
PartnerPermission.UCN_VIEW,
PartnerPermission.DSR_VIEW,
]),
}
}
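A minimal usage sketch for the role/permission tables above; the has_permission helper is hypothetical, added only to illustrate the lookup (PARTNER_ROLE_PERMISSIONS is keyed by HQ status: True = HQ profile, False = country profile, None = NGO):

def has_permission(is_hq, role, permission):
    # Membership test against the frozensets defined above.
    return permission in PARTNER_ROLE_PERMISSIONS[is_hq][role]

# Readers can view CFEIs but cannot manage users:
assert has_permission(None, PartnerRole.READER, PartnerPermission.CFEI_VIEW)
assert not has_permission(None, PartnerRole.READER, PartnerPermission.MANAGE_USERS)
# get_choices() yields (name, label) pairs in declaration order:
assert PartnerRole.get_choices()[0] == ('ADMIN', 'Administrator')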
avg_line_length: 37.843373 | max_line_length: 65 | alphanum_fraction: 0.656638 | (remaining per-file quality-signal columns omitted)
hexsha: 2c073a6a83b4c85aa2289b0ae0515e969186bc18 | size: 1,274 | ext: py | lang: Python
path: model/params.py | repo: ns50677/ntua-slp-semeval2018 @ 8343e7fd5b12efb16d47fcc4f80c7e23bd949905 | licenses: ["MIT"]
stars: null | issues: null | forks: null
"""
Model Configurations
"""
TASK3_A = {
"name": "TASK3_A",
"token_type": "word",
"batch_train": 64,
"batch_eval": 64,
"epochs": 50,
"embeddings_file": "ntua_twitter_affect_310",
"embed_dim": 310,
"embed_finetune": False,
"embed_noise": 0.05,
"embed_dropout": 0.1,
"encoder_dropout": 0.2,
"encoder_size": 150,
"encoder_layers": 2,
"encoder_bidirectional": True,
"attention": True,
"attention_layers": 1,
"attention_context": False,
"attention_activation": "tanh",
"attention_dropout": 0.0,
"base": 0.7,
"patience": 10,
"weight_decay": 0.0,
"clip_norm": 1,
}
TASK3_B = {
"name": "TASK3_B",
"token_type": "word",
"batch_train": 32,
"batch_eval": 32,
"epochs": 50,
"embeddings_file": "ntua_twitter_affect_310",
"embed_dim": 310,
"embed_finetune": False,
"embed_noise": 0.2,
"embed_dropout": 0.1,
"encoder_dropout": 0.2,
"encoder_size": 150,
"encoder_layers": 2,
"encoder_bidirectional": True,
"attention": True,
"attention_layers": 1,
"attention_context": False,
"attention_activation": "tanh",
"attention_dropout": 0.0,
"base": 0.3,
"patience": 10,
"weight_decay": 0.0,
"clip_norm": 1,
}
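A short sketch of how configuration dicts like these are typically consumed; the CONFIGS registry and get_config helper below are hypothetical, not part of the original file:

CONFIGS = {"TASK3_A": TASK3_A, "TASK3_B": TASK3_B}

def get_config(name):
    # Fail fast with the list of known configs rather than a bare KeyError.
    try:
        return CONFIGS[name]
    except KeyError:
        raise ValueError("unknown config %r, expected one of %s" % (name, sorted(CONFIGS)))

conf = get_config("TASK3_B")
assert conf["batch_train"] == 32 and conf["embed_noise"] == 0.2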
avg_line_length: 21.965517 | max_line_length: 49 | alphanum_fraction: 0.594976 | (remaining per-file quality-signal columns omitted)
hexsha: 2c1aace0d57a75d513d2aa0749be6bd748d48614 | size: 28,692 | ext: py | lang: Python
path: h3/defs/chdt.py | repo: holy-crust/reclaimer @ 0aa693da3866ce7999c68d5f71f31a9c932cdb2c | licenses: ["MIT"]
stars: null | issues: null | forks: null
############# Credits and version info #############
# Definition generated from Assembly XML tag def
# Date generated: 2018/12/03 04:56
#
# revision: 1 author: Assembly
# Generated plugin from scratch.
# revision: 2 author: -DeToX-
# Fixed up the layout, mapped out some values..
# revision: 3 author: DarkShallFall
# More unknowns and types.
# revision: 4 author: DarkShallFall
# Plugin getting close to complete. A little more effort and we will be there.
# revision: 5 author: Lord Zedd
# Overhauled.
# revision: 6 author: Moses_of_Egypt
# Cleaned up and converted to SuPyr definition
#
####################################################
from ..common_descs import *
from .objs.tag import *
from supyr_struct.defs.tag_def import TagDef
chdt_hud_widget_animation_data_animation_1_function = (
"default",
"use_input",
"use_range_input",
"zero",
)
chdt_hud_widget_placement_data_anchor = (
"top_left",
"top_right",
"bottom_right",
"bottom_left",
"center",
"top_edge",
"grenade_a",
"grenade_b",
"grenade_c",
"grenade_d",
"scoreboard_friendly",
"scoreboard_enemy",
"health_and_shield",
"bottom_edge",
"unknown_0",
"equipment",
"unknown_1",
"depreciated_0",
"depreciated_1",
"depreciated_2",
"depreciated_3",
"depreciated_4",
"unknown_2",
"gametype",
"unknown_3",
"state_right",
"state_left",
"state_center",
"unknown_4",
"gametype_friendly",
"gametype_enemy",
"metagame_top",
"metagame_player_1",
"metagame_player_2",
"metagame_player_3",
"metagame_player_4",
"theater",
)
chdt_hud_widget_render_data_input = (
"zero",
"one",
"time",
"fade",
"unit_shield_current",
"unit_shield",
"clip_ammo_fraction",
"total_ammo_fraction",
"heat_fraction",
"battery_fraction",
"pickup",
"unit_autoaimed",
"grenade",
"grenade_fraction",
"charge_fraction",
"friendly_score",
"enemy_score",
"score_to_win",
"arming_fraction",
"unknown_0",
"unit_1x_overshield_current",
"unit_1x_overshield",
"unit_2x_overshield_current",
"unit_2x_overshield",
"unit_3x_overshield_current",
"unit_3x_overshield",
"aim_yaw",
"aim_pitch",
"target_distance",
"target_elevation",
"editor_budget",
"editor_budget_cost",
"film_total_time",
"film_current_time",
"unknown_1",
"film_timeline_fraction_1",
"film_timeline_fraction_2",
"unknown_2",
"unknown_3",
"metagame_time",
"metagame_score_transient",
"metagame_score_player_1",
"metagame_score_player_2",
"metagame_score_player_3",
"metagame_score_player_4",
"metagame_modifier",
"unknown_4",
"sensor_range",
"netdebug_latency",
"netdebug_latency_quality",
"netdebug_host_quality",
"netdebug_local_quality",
"metagame_score_negative",
)
chdt_hud_widget_render_data_output_color_a = (
"local_a",
"local_b",
"local_c",
"local_d",
"unknown_4",
"unknown_5",
"scoreboard_friendly",
"scoreboard_enemy",
"arming_team",
"metagame_player_1",
"metagame_player_2",
"metagame_player_3",
"metagame_player_4",
"unknown_13",
"global_dynamic_0",
"global_dynamic_1",
"global_dynamic_2",
"global_dynamic_3",
"global_dynamic_4",
"global_dynamic_5",
"global_dynamic_6",
"global_dynamic_7",
"global_dynamic_8",
"global_dynamic_9",
"global_dynamic_10",
"global_dynamic_11",
"global_dynamic_12",
"global_dynamic_13",
"global_dynamic_14",
"global_dynamic_15",
"global_dynamic_16",
"global_dynamic_17",
"global_dynamic_18",
"global_dynamic_19",
"global_dynamic_20",
"global_dynamic_21",
"global_dynamic_22",
"global_dynamic_23",
"global_dynamic_24",
"global_dynamic_25",
"global_dynamic_26",
"global_dynamic_27",
)
chdt_hud_widget_render_data_output_scalar_a = (
"input",
"range_input",
"local_a",
"local_b",
"local_c",
"local_d",
"unknown_6",
"unknown_7",
)
chdt_hud_widget_render_data_shader_index = (
"simple",
"meter",
"text_simple",
"meter_shield",
"meter_gradient",
"crosshair",
"directional_damage",
"solid",
"sensor",
"meter_single_color",
"navpoint",
"medal",
"texture_cam",
"cortana_screen",
"cortana_camera",
"cortana_offscreen",
"cortana_screen_final",
"meter_chapter",
"meter_double_gradient",
"meter_radial_gradient",
"turbulence",
"emblem",
"cortana_composite",
"directional_damage_apply",
"really_simple",
)
chdt_hud_widget_special_hud_type = (
"unspecial",
"ammo",
"crosshair_and_scope",
"unit_shield_meter",
"grenades",
"gametype",
"motion_sensor",
"spike_grenade",
"firebomb_grenade",
)
chdt_hud_widget_text_widget_font = (
"conduit_18_0",
"fixedsys_9_0",
"fixedsys_9_1",
"conduit_16_0",
"conduit_32_0",
"conduit_32_1",
"conduit_23",
"larabie_10",
"conduit_18_1",
"conduit_16_1",
"pragmata_14",
)
chdt_hud_widget_state_data = Struct("state_data",
Bool16("_1_engine",
("capture_the_flag", 1 << 4),
"slayer",
"oddball",
"king_of_the_hill",
"juggernaut",
"territories",
"assault",
"vip",
"infection",
("editor", 1 << 14),
"theater",
),
Bool16("_2",
"biped_1",
"biped_2",
"biped_3",
),
Bool16("_3",
"offense",
"defense",
"free_for_all",
("talking_disabled", 1 << 6),
"tap_to_talk",
"talking_enabled",
"not_talking",
"talking",
),
Bool16("_4_resolution", *unknown_flags_16),
Bool16("_5_scoreboard",
"has_friends",
"has_enemies",
"has_variant_name",
"someone_is_talking",
"is_arming",
"time_enabled",
"friends_have_x",
"enemies_have_x",
"friends_are_x",
"enemies_are_x",
"x_is_down",
"summary_enabled",
"netdebug",
),
Bool16("_6",
"texture_cam_enabled",
"autoaim",
("training_prompt", 1 << 4),
"objective_prompt",
),
Bool16("_7_editor",
"editor_inactive",
"editor_active",
"editor_holding",
"editor_not_allowed",
"is_editor_biped",
),
Bool16("_8",
"motion_tracker_10m",
"motion_tracker_25m",
"motion_tracker_75m",
"motion_tracker_150m",
("metagame_player_2_exists", 1 << 6),
("metagame_player_3_exists", 1 << 8),
("metagame_player_4_exists", 1 << 10),
("metagame_score_added", 1 << 12),
("metagame_score_removed", 1 << 14),
),
Bool16("_9",
"pickup_grenades",
),
Bool16("_10",
"binoculars_enabled",
"unit_is_zoomed_level_1",
"unit_is_zoomed_level_2",
),
Bool16("_11",
"primary_weapon",
"secondary_weapon",
),
Bool16("_12",
"motion_tracker_enabled",
("selected_frag_grenades", 1 << 2),
"selected_plasma_grenades",
"selected_spike_grenades",
"selected_fire_grenades",
("has_1x_overshield", 1 << 12),
"has_2x_overshield",
"has_3x_overshield",
"has_shields",
),
Bool16("_13",
("pickup_ammo", 1 << 1),
),
Bool16("_14",
"primary_weapon",
"secondary_weapon",
"backpack",
),
Bool16("_15",
"not_autoaim",
"autoaim_friendly",
"autoaim_enemy",
"autoaim_headshot",
("plasma_locked_on", 1 << 7),
),
Bool16("_16",
("missile_locked", 1 << 1),
"missile_locking",
),
Bool16("_17",
("has_frag_grenades", 1 << 2),
"has_plasma_grenades",
"has_spike_grenades",
"has_fire_grenades",
),
Bool16("_18_ammo",
"clip_warning",
"ammo_warning",
("low_battery_1", 1 << 4),
"low_battery_2",
"overheated",
),
Bool16("_19",
"binoculars_enabled",
"unit_is_zoomed_level_1",
"unit_is_zoomed_level_2",
),
SInt16("unknown", VISIBLE=False),
ENDIAN=">", SIZE=40
)
chdt_hud_widget_placement_data = Struct("placement_data",
SEnum16("anchor", *chdt_hud_widget_placement_data_anchor),
SInt16("unknown", VISIBLE=False),
QStruct("mirror_offset", INCLUDE=xy_float),
QStruct("offset", INCLUDE=xy_float),
QStruct("scale", INCLUDE=xy_float),
ENDIAN=">", SIZE=28
)
chdt_hud_widget_animation_data = Struct("animation_data",
Bool16("animation_1_flags",
"reverse_frames",
),
SEnum16("animation_1_function", *chdt_hud_widget_animation_data_animation_1_function),
h3_dependency("animation_1"),
Bool16("animation_2_flags",
"reverse_frames",
),
SEnum16("animation_2_function", *chdt_hud_widget_animation_data_animation_1_function),
h3_dependency("animation_2"),
Bool16("animation_3_flags",
"reverse_frames",
),
SEnum16("animation_3_function", *chdt_hud_widget_animation_data_animation_1_function),
h3_dependency("animation_3"),
Bool16("animation_4_flags",
"reverse_frames",
),
SEnum16("animation_4_function", *chdt_hud_widget_animation_data_animation_1_function),
h3_dependency("animation_4"),
Bool16("animation_5_flags",
"reverse_frames",
),
SEnum16("animation_5_function", *chdt_hud_widget_animation_data_animation_1_function),
h3_dependency("animation_5"),
Bool16("animation_6_flags",
"reverse_frames",
),
SEnum16("animation_6_function", *chdt_hud_widget_animation_data_animation_1_function),
h3_dependency("animation_6"),
ENDIAN=">", SIZE=120
)
chdt_hud_widget_render_data = Struct("render_data",
SEnum16("shader_index", *chdt_hud_widget_render_data_shader_index),
SInt16("unknown", VISIBLE=False),
SEnum16("input", *chdt_hud_widget_render_data_input),
SEnum16("range_input", *chdt_hud_widget_render_data_input),
color_argb_uint32("local_color_a"),
color_argb_uint32("local_color_b"),
color_argb_uint32("local_color_c"),
color_argb_uint32("local_color_d"),
Float("local_scalar_a"),
Float("local_scalar_b"),
Float("local_scalar_c"),
Float("local_scalar_d"),
SEnum16("output_color_a", *chdt_hud_widget_render_data_output_color_a),
SEnum16("output_color_b", *chdt_hud_widget_render_data_output_color_a),
SEnum16("output_color_c", *chdt_hud_widget_render_data_output_color_a),
SEnum16("output_color_d", *chdt_hud_widget_render_data_output_color_a),
SEnum16("output_color_e", *chdt_hud_widget_render_data_output_color_a),
SEnum16("output_color_f", *chdt_hud_widget_render_data_output_color_a),
SEnum16("output_scalar_a", *chdt_hud_widget_render_data_output_scalar_a),
SEnum16("output_scalar_b", *chdt_hud_widget_render_data_output_scalar_a),
SEnum16("output_scalar_c", *chdt_hud_widget_render_data_output_scalar_a),
SEnum16("output_scalar_d", *chdt_hud_widget_render_data_output_scalar_a),
SEnum16("output_scalar_e", *chdt_hud_widget_render_data_output_scalar_a),
SEnum16("output_scalar_f", *chdt_hud_widget_render_data_output_scalar_a),
ENDIAN=">", SIZE=64
)
chdt_hud_widget_bitmap_widget_state_data = Struct("state_data",
Bool16("_1_engine",
("capture_the_flag", 1 << 4),
"slayer",
"oddball",
"king_of_the_hill",
"juggernaut",
"territories",
"assault",
"vip",
"infection",
("editor", 1 << 14),
"theater",
),
Bool16("_2",
"biped_1",
"biped_2",
"biped_3",
),
Bool16("_3",
"offense",
"defense",
"free_for_all",
("talking_disabled", 1 << 6),
"tap_to_talk",
"talking_enabled",
"not_talking",
"talking",
),
Bool16("_4_resolution", *unknown_flags_16),
Bool16("_5_scoreboard",
"has_friends",
"has_enemies",
"has_variant_name",
"someone_is_talking",
"is_arming",
"time_enabled",
"friends_have_x",
"enemies_have_x",
"friends_are_x",
"enemies_are_x",
"x_is_down",
"summary_enabled",
"netdebug",
),
Bool16("_6",
"texture_cam_enabled",
"autoaim",
("training_prompt", 1 << 4),
"objective_prompt",
),
Bool16("_7_editor",
"editor_inactive",
"editor_active",
"editor_holding",
"editor_not_allowed",
"is_editor_biped",
),
Bool16("_8",
"motion_tracker_10m",
"motion_tracker_25m",
"motion_tracker_75m",
"motion_tracker_150m",
("metagame_player_2_exists", 1 << 6),
("metagame_player_3_exists", 1 << 8),
("metagame_player_4_exists", 1 << 10),
("metagame_score_added", 1 << 12),
("metagame_score_removed", 1 << 14),
),
Bool16("_9",
"pickup_grenades",
),
Bool16("_10",
"binoculars_enabled",
"unit_is_zoomed_level_1",
"unit_is_zoomed_level_2",
),
Bool16("_11",
"primary_weapon",
"secondary_weapon",
),
Bool16("_12",
"motion_tracker_enabled",
("selected_frag_grenades", 1 << 2),
"selected_plasma_grenades",
"selected_spike_grenades",
"selected_fire_grenades",
("has_1x_overshield", 1 << 12),
"has_2x_overshield",
"has_3x_overshield",
"has_shields",
),
Bool16("_13",
("pickup_ammo", 1 << 1),
),
Bool16("_14",
"primary_weapon",
"secondary_weapon",
"backpack",
),
Bool16("_15",
"not_autoaim",
"autoaim_friendly",
"autoaim_enemy",
"autoaim_headshot",
("plasma_locked_on", 1 << 7),
),
Bool16("_16",
("missile_locked", 1 << 1),
"missile_locking",
),
Bool16("_17",
("has_frag_grenades", 1 << 2),
"has_plasma_grenades",
"has_spike_grenades",
"has_fire_grenades",
),
Bool16("_18_ammo",
"clip_warning",
"ammo_warning",
("low_battery_1", 1 << 4),
"low_battery_2",
"overheated",
),
Bool16("_19",
"binoculars_enabled",
"unit_is_zoomed_level_1",
"unit_is_zoomed_level_2",
),
SInt16("unknown", VISIBLE=False),
ENDIAN=">", SIZE=40
)
chdt_hud_widget_bitmap_widget_placement_data = Struct("placement_data",
SEnum16("anchor", *chdt_hud_widget_placement_data_anchor),
SInt16("unknown", VISIBLE=False),
QStruct("mirror_offset", INCLUDE=xy_float),
QStruct("offset", INCLUDE=xy_float),
QStruct("scale", INCLUDE=xy_float),
ENDIAN=">", SIZE=28
)
chdt_hud_widget_bitmap_widget_animation_data = Struct("animation_data",
Bool16("animation_1_flags",
"reverse_frames",
),
SEnum16("animation_1_function", *chdt_hud_widget_animation_data_animation_1_function),
h3_dependency("animation_1"),
Bool16("animation_2_flags",
"reverse_frames",
),
SEnum16("animation_2_function", *chdt_hud_widget_animation_data_animation_1_function),
h3_dependency("animation_2"),
Bool16("animation_3_flags",
"reverse_frames",
),
SEnum16("animation_3_function", *chdt_hud_widget_animation_data_animation_1_function),
h3_dependency("animation_3"),
Bool16("animation_4_flags",
"reverse_frames",
),
SEnum16("animation_4_function", *chdt_hud_widget_animation_data_animation_1_function),
h3_dependency("animation_4"),
Bool16("animation_5_flags",
"reverse_frames",
),
SEnum16("animation_5_function", *chdt_hud_widget_animation_data_animation_1_function),
h3_dependency("animation_5"),
Bool16("animation_6_flags",
"reverse_frames",
),
SEnum16("animation_6_function", *chdt_hud_widget_animation_data_animation_1_function),
h3_dependency("animation_6"),
ENDIAN=">", SIZE=120
)
chdt_hud_widget_bitmap_widget_render_data = Struct("render_data",
SEnum16("shader_index", *chdt_hud_widget_render_data_shader_index),
SInt16("unknown", VISIBLE=False),
SEnum16("input", *chdt_hud_widget_render_data_input),
SEnum16("range_input", *chdt_hud_widget_render_data_input),
color_argb_uint32("local_color_a"),
color_argb_uint32("local_color_b"),
color_argb_uint32("local_color_c"),
color_argb_uint32("local_color_d"),
Float("local_scalar_a"),
Float("local_scalar_b"),
Float("local_scalar_c"),
Float("local_scalar_d"),
SEnum16("output_color_a", *chdt_hud_widget_render_data_output_color_a),
SEnum16("output_color_b", *chdt_hud_widget_render_data_output_color_a),
SEnum16("output_color_c", *chdt_hud_widget_render_data_output_color_a),
SEnum16("output_color_d", *chdt_hud_widget_render_data_output_color_a),
SEnum16("output_color_e", *chdt_hud_widget_render_data_output_color_a),
SEnum16("output_color_f", *chdt_hud_widget_render_data_output_color_a),
SEnum16("output_scalar_a", *chdt_hud_widget_render_data_output_scalar_a),
SEnum16("output_scalar_b", *chdt_hud_widget_render_data_output_scalar_a),
SEnum16("output_scalar_c", *chdt_hud_widget_render_data_output_scalar_a),
SEnum16("output_scalar_d", *chdt_hud_widget_render_data_output_scalar_a),
SEnum16("output_scalar_e", *chdt_hud_widget_render_data_output_scalar_a),
SEnum16("output_scalar_f", *chdt_hud_widget_render_data_output_scalar_a),
ENDIAN=">", SIZE=64
)
chdt_hud_widget_bitmap_widget = Struct("bitmap_widget",
h3_string_id("name"),
SEnum16("special_hud_type", *chdt_hud_widget_special_hud_type),
UInt8("unknown_0", VISIBLE=False),
UInt8("unknown_1", VISIBLE=False),
h3_reflexive("state_data", chdt_hud_widget_bitmap_widget_state_data),
h3_reflexive("placement_data", chdt_hud_widget_bitmap_widget_placement_data),
h3_reflexive("animation_data", chdt_hud_widget_bitmap_widget_animation_data),
h3_reflexive("render_data", chdt_hud_widget_bitmap_widget_render_data),
SInt32("widget_index"),
Bool16("flags",
"mirror_horizontally",
"mirror_vertically",
"stretch_edges",
"enable_texture_cam",
"looping",
("player_1_emblem", 1 << 6),
"player_2_emblem",
"player_3_emblem",
"player_4_emblem",
),
SInt16("unknown_2", VISIBLE=False),
h3_dependency("bitmap"),
UInt8("bitmap_sprite_index"),
UInt8("unknown_3", VISIBLE=False),
UInt8("unknown_4", VISIBLE=False),
UInt8("unknown_5", VISIBLE=False),
ENDIAN=">", SIZE=84
)
chdt_hud_widget_text_widget_state_data = Struct("state_data",
Bool16("_1_engine",
("capture_the_flag", 1 << 4),
"slayer",
"oddball",
"king_of_the_hill",
"juggernaut",
"territories",
"assault",
"vip",
"infection",
("editor", 1 << 14),
"theater",
),
Bool16("_2",
"biped_1",
"biped_2",
"biped_3",
),
Bool16("_3",
"offense",
"defense",
"free_for_all",
("talking_disabled", 1 << 6),
"tap_to_talk",
"talking_enabled",
"not_talking",
"talking",
),
Bool16("_4_resolution", *unknown_flags_16),
Bool16("_5_scoreboard",
"has_friends",
"has_enemies",
"has_variant_name",
"someone_is_talking",
"is_arming",
"time_enabled",
"friends_have_x",
"enemies_have_x",
"friends_are_x",
"enemies_are_x",
"x_is_down",
"summary_enabled",
"netdebug",
),
Bool16("_6",
"texture_cam_enabled",
"autoaim",
("training_prompt", 1 << 4),
"objective_prompt",
),
Bool16("_7_editor",
"editor_inactive",
"editor_active",
"editor_holding",
"editor_not_allowed",
"is_editor_biped",
),
Bool16("_8",
"motion_tracker_10m",
"motion_tracker_25m",
"motion_tracker_75m",
"motion_tracker_150m",
("metagame_player_2_exists", 1 << 6),
("metagame_player_3_exists", 1 << 8),
("metagame_player_4_exists", 1 << 10),
("metagame_score_added", 1 << 12),
("metagame_score_removed", 1 << 14),
),
Bool16("_9",
"pickup_grenades",
),
Bool16("_10",
"binoculars_enabled",
"unit_is_zoomed_level_1",
"unit_is_zoomed_level_2",
),
Bool16("_11",
"primary_weapon",
"secondary_weapon",
),
Bool16("_12",
"motion_tracker_enabled",
("selected_frag_grenades", 1 << 2),
"selected_plasma_grenades",
"selected_spike_grenades",
"selected_fire_grenades",
("has_1x_overshield", 1 << 12),
"has_2x_overshield",
"has_3x_overshield",
"has_shields",
),
Bool16("_13",
("pickup_ammo", 1 << 1),
),
Bool16("_14",
"primary_weapon",
"secondary_weapon",
"backpack",
),
Bool16("_15",
"not_autoaim",
"autoaim_friendly",
"autoaim_enemy",
"autoaim_headshot",
("plasma_locked_on", 1 << 7),
),
Bool16("_16",
("missile_locked", 1 << 1),
"missile_locking",
),
Bool16("_17",
("has_frag_grenades", 1 << 2),
"has_plasma_grenades",
"has_spike_grenades",
"has_fire_grenades",
),
Bool16("_18_ammo",
"clip_warning",
"ammo_warning",
("low_battery_1", 1 << 4),
"low_battery_2",
"overheated",
),
Bool16("_19",
"binoculars_enabled",
"unit_is_zoomed_level_1",
"unit_is_zoomed_level_2",
),
SInt16("unknown", VISIBLE=False),
ENDIAN=">", SIZE=40
)
chdt_hud_widget_text_widget_placement_data = Struct("placement_data",
SEnum16("anchor", *chdt_hud_widget_placement_data_anchor),
SInt16("unknown", VISIBLE=False),
QStruct("mirror_offset", INCLUDE=xy_float),
QStruct("offset", INCLUDE=xy_float),
QStruct("scale", INCLUDE=xy_float),
ENDIAN=">", SIZE=28
)
chdt_hud_widget_text_widget_animation_data = Struct("animation_data",
Bool16("animation_1_flags",
"reverse_frames",
),
SEnum16("animation_1_function", *chdt_hud_widget_animation_data_animation_1_function),
h3_dependency("animation_1"),
Bool16("animation_2_flags",
"reverse_frames",
),
SEnum16("animation_2_function", *chdt_hud_widget_animation_data_animation_1_function),
h3_dependency("animation_2"),
Bool16("animation_3_flags",
"reverse_frames",
),
SEnum16("animation_3_function", *chdt_hud_widget_animation_data_animation_1_function),
h3_dependency("animation_3"),
Bool16("animation_4_flags",
"reverse_frames",
),
SEnum16("animation_4_function", *chdt_hud_widget_animation_data_animation_1_function),
h3_dependency("animation_4"),
Bool16("animation_5_flags",
"reverse_frames",
),
SEnum16("animation_5_function", *chdt_hud_widget_animation_data_animation_1_function),
h3_dependency("animation_5"),
Bool16("animation_6_flags",
"reverse_frames",
),
SEnum16("animation_6_function", *chdt_hud_widget_animation_data_animation_1_function),
h3_dependency("animation_6"),
ENDIAN=">", SIZE=120
)
chdt_hud_widget_text_widget_render_data = Struct("render_data",
SEnum16("shader_index", *chdt_hud_widget_render_data_shader_index),
SInt16("unknown", VISIBLE=False),
SEnum16("input", *chdt_hud_widget_render_data_input),
SEnum16("range_input", *chdt_hud_widget_render_data_input),
color_argb_uint32("local_color_a"),
color_argb_uint32("local_color_b"),
color_argb_uint32("local_color_c"),
color_argb_uint32("local_color_d"),
Float("local_scalar_a"),
Float("local_scalar_b"),
Float("local_scalar_c"),
Float("local_scalar_d"),
SEnum16("output_color_a", *chdt_hud_widget_render_data_output_color_a),
SEnum16("output_color_b", *chdt_hud_widget_render_data_output_color_a),
SEnum16("output_color_c", *chdt_hud_widget_render_data_output_color_a),
SEnum16("output_color_d", *chdt_hud_widget_render_data_output_color_a),
SEnum16("output_color_e", *chdt_hud_widget_render_data_output_color_a),
SEnum16("output_color_f", *chdt_hud_widget_render_data_output_color_a),
SEnum16("output_scalar_a", *chdt_hud_widget_render_data_output_scalar_a),
SEnum16("output_scalar_b", *chdt_hud_widget_render_data_output_scalar_a),
SEnum16("output_scalar_c", *chdt_hud_widget_render_data_output_scalar_a),
SEnum16("output_scalar_d", *chdt_hud_widget_render_data_output_scalar_a),
SEnum16("output_scalar_e", *chdt_hud_widget_render_data_output_scalar_a),
SEnum16("output_scalar_f", *chdt_hud_widget_render_data_output_scalar_a),
ENDIAN=">", SIZE=64
)
chdt_hud_widget_text_widget = Struct("text_widget",
h3_string_id("name"),
SEnum16("special_hud_type", *chdt_hud_widget_special_hud_type),
UInt8("unknown_0", VISIBLE=False),
UInt8("unknown_1", VISIBLE=False),
h3_reflexive("state_data", chdt_hud_widget_text_widget_state_data),
h3_reflexive("placement_data", chdt_hud_widget_text_widget_placement_data),
h3_reflexive("animation_data", chdt_hud_widget_text_widget_animation_data),
h3_reflexive("render_data", chdt_hud_widget_text_widget_render_data),
SInt32("widget_index"),
Bool16("flags",
"string_is_a_number",
"force_2_digit",
"force_3_digit",
"prefix_0",
"m_suffix",
"hundredths_decimal",
"thousandths_decimal",
"hundred_thousandths_decimal",
"only_a_number",
"x_suffix",
"in_brackets",
"time_format_s_ms",
"time_format_h_m_s",
"money_format",
"prefix_1",
),
SEnum16("font", *chdt_hud_widget_text_widget_font),
h3_string_id("string"),
ENDIAN=">", SIZE=68
)
chdt_hud_widget = Struct("hud_widget",
h3_string_id("name"),
SEnum16("special_hud_type", *chdt_hud_widget_special_hud_type),
UInt8("unknown_0", VISIBLE=False),
UInt8("unknown_1", VISIBLE=False),
h3_reflexive("state_data", chdt_hud_widget_state_data),
h3_reflexive("placement_data", chdt_hud_widget_placement_data),
h3_reflexive("animation_data", chdt_hud_widget_animation_data),
h3_reflexive("render_data", chdt_hud_widget_render_data),
h3_reflexive("bitmap_widgets", chdt_hud_widget_bitmap_widget),
h3_reflexive("text_widgets", chdt_hud_widget_text_widget),
ENDIAN=">", SIZE=80
)
chdt_body = Struct("tagdata",
h3_reflexive("hud_widgets", chdt_hud_widget),
SInt32("low_clip_cutoff"),
SInt32("low_ammo_cutoff"),
SInt32("age_cutoff"),
ENDIAN=">", SIZE=24
)
def get():
return chdt_def
chdt_def = TagDef("chdt",
h3_blam_header('chdt'),
chdt_body,
ext=".%s" % h3_tag_class_fcc_to_ext["chdt"], endian=">", tag_cls=H3Tag
)
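The state_data, placement_data, animation_data and render_data Structs above are repeated verbatim for the plain, bitmap and text widget variants. A hedged deduplication sketch, assuming supyr_struct descriptors should not be shared across parents and so are rebuilt per call (shown for placement_data only; the helper name is mine):

def make_placement_data():
    # Build a fresh Struct per widget variant, identical to the three copies above.
    return Struct("placement_data",
        SEnum16("anchor", *chdt_hud_widget_placement_data_anchor),
        SInt16("unknown", VISIBLE=False),
        QStruct("mirror_offset", INCLUDE=xy_float),
        QStruct("offset", INCLUDE=xy_float),
        QStruct("scale", INCLUDE=xy_float),
        ENDIAN=">", SIZE=28
        )

# e.g.: chdt_hud_widget_bitmap_widget_placement_data = make_placement_data()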
avg_line_length: 29.949896 | max_line_length: 91 | alphanum_fraction: 0.613133 | (remaining per-file quality-signal columns omitted)
hexsha: 258bbc3fa190285614f20a994d4a2cbebe08ad1a | size: 172 | ext: py | lang: Python
path: Computer science/Programming languages/Python/Basics/Simple programs/Program with Numbers/Last digit of a number/last_digit_of_a_number.py | repo: chanchanchong/PYTHON-TRACK-IN-HYPERSKILL @ 462fe08ff4a2b183fd45a0235ab1ec7a788bd54c | licenses: ["MIT"]
stars: null | issues: null | forks: null
# Write a program that reads an integer and outputs its last digit.
# print(int(input()) % 10)
# another solution
print(input()[-1])
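One caveat on the commented-out modulo solution: Python's % always returns a non-negative remainder, so for negative input it does not yield the last digit (-17 % 10 == 3, not 7); wrapping with abs() fixes that, while the string-slicing solution already handles the minus sign:

# Modulo variant that is also correct for negative integers:
# print(abs(int(input())) % 10)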
avg_line_length: 21.5 | max_line_length: 67 | alphanum_fraction: 0.697674 | (remaining per-file quality-signal columns omitted)
hexsha: 25ff22ef9f2f0ef59dee1a215a95b0159ced2618 | size: 4,232 | ext: py | lang: Python
path: sana_pchr/migrations/0002_auto_20160106_0638.py | repo: SanaMobile/sana.pchr.oss-web @ 2b2fd75a1730f1743e28b4499bb1ba76fa100970 | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: 2 (2018-06-07T21:54:08.000Z to 2018-07-11T20:40:19.000Z)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from sana_pchr.models.fields import DefaultFuncs
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('sana_pchr', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='device',
name='lastSynchronized',
field=models.DateTimeField(default=DefaultFuncs.getNow),
preserve_default=True,
),
migrations.AddField(
model_name='clinic',
name='deleted',
field=models.DateTimeField(default=DefaultFuncs.far_future()),
preserve_default=True,
),
migrations.AddField(
model_name='clinic_physician',
name='deleted',
field=models.DateTimeField(default=DefaultFuncs.far_future()),
preserve_default=True,
),
migrations.AddField(
model_name='device',
name='deleted',
field=models.DateTimeField(default=DefaultFuncs.far_future()),
preserve_default=True,
),
migrations.AddField(
model_name='encounter',
name='deleted',
field=models.DateTimeField(default=DefaultFuncs.far_future()),
preserve_default=True,
),
migrations.AddField(
model_name='encountercategory',
name='deleted',
field=models.DateTimeField(default=DefaultFuncs.far_future()),
preserve_default=True,
),
migrations.AddField(
model_name='patient',
name='deleted',
field=models.DateTimeField(default=DefaultFuncs.far_future()),
preserve_default=True,
),
migrations.AddField(
model_name='patient_physician',
name='deleted',
field=models.DateTimeField(default=DefaultFuncs.far_future()),
preserve_default=True,
),
migrations.AddField(
model_name='physician',
name='deleted',
field=models.DateTimeField(default=DefaultFuncs.far_future()),
preserve_default=True,
),
migrations.AddField(
model_name='record',
name='deleted',
field=models.DateTimeField(default=DefaultFuncs.far_future()),
preserve_default=True,
),
migrations.AddField(
model_name='recordcategory',
name='deleted',
field=models.DateTimeField(default=DefaultFuncs.far_future()),
preserve_default=True,
),
migrations.AddField(
model_name='test',
name='deleted',
field=models.DateTimeField(default=DefaultFuncs.far_future()),
preserve_default=True,
),
migrations.AddField(
model_name='testcategory',
name='deleted',
field=models.DateTimeField(default=DefaultFuncs.far_future()),
preserve_default=True,
),
migrations.AddField(
model_name='visit',
name='deleted',
field=models.DateTimeField(default=DefaultFuncs.far_future()),
preserve_default=True,
),
migrations.AddField(
model_name='visitcategory',
name='deleted',
field=models.DateTimeField(default=DefaultFuncs.far_future()),
preserve_default=True,
),
migrations.AlterField(
model_name='patient_physician',
name='uuid',
field=models.CharField(primary_key=True, max_length=36, serialize=False),
preserve_default=True,
),
migrations.AlterField(
model_name='clinic_physician',
name='uuid',
field=models.CharField(primary_key=True, max_length=36, serialize=False),
preserve_default=True,
),
]
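A detail worth noting in the migration above: default=DefaultFuncs.getNow passes a callable that Django invokes for each new row, whereas default=DefaultFuncs.far_future() is called once when the migration module is imported, so every row shares that single constant timestamp, which is exactly what you want for a far-future sentinel. A standalone sketch of the distinction, independent of this project's models:

from datetime import datetime

def far_future():
    return datetime(9999, 1, 1)

shared_sentinel = far_future()   # evaluated once at import; the same constant for all rows
per_row_default = datetime.now   # a callable; evaluated again for each new row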
avg_line_length: 35.864407 | max_line_length: 85 | alphanum_fraction: 0.547259 | (remaining per-file quality-signal columns omitted)
hexsha: d31e77f10a788d8cc890acf0cbcea964ded2c44b | size: 222 | ext: py | lang: Python
path: alice_images/__init__.py | repo: AlekseevAV/alice-images @ 28021320d9e6d48e7ef1f878e039b70002382fbc | licenses: ["MIT"]
stars: null | issues: null | forks: null
from .alice_images_api import upload_image, uploaded_images_list, delete_uploaded_image, check_free_space
__all__ = [
'upload_image',
'uploaded_images_list',
'delete_uploaded_image',
'check_free_space',
]
avg_line_length: 24.666667 | max_line_length: 105 | alphanum_fraction: 0.77027 | (remaining per-file quality-signal columns omitted)
hexsha: d32a6ae971be6b76e598e2b26b41f00ef75f6450 | size: 6,752 | ext: py | lang: Python
path: tests/esp_sim/test_esp_sim.py | repo: CMargreitter/Icolos @ fd7b664ce177df875fefa910dc4d5c574b521cb3 | licenses: ["Apache-2.0"]
stars: 11 (2022-01-30T14:36:13.000Z to 2022-03-22T09:40:57.000Z) | issues: 2 (2022-03-23T07:56:49.000Z to 2022-03-24T12:01:42.000Z) | forks: 8 (2022-01-28T10:32:31.000Z to 2022-03-22T09:40:59.000Z)
import unittest
from icolos.core.workflow_steps.calculation.electrostatics.esp_sim import StepEspSim
from icolos.utils.enums.step_enums import StepBaseEnum
from tests.tests_paths import export_unit_test_env_vars
_SBE = StepBaseEnum
class Test_EspSim(unittest.TestCase):
@classmethod
def setUpClass(cls):
export_unit_test_env_vars()
def setUp(self):
pass
@classmethod
def tearDownClass(cls):
pass
def test_esp_sim_resp_charges(self):
step_conf = {
_SBE.STEPID: "01_esp_sim",
_SBE.STEP_TYPE: _SBE.STEP_ESP_SIM,
_SBE.EXEC: {
_SBE.EXEC_PARALLELIZATION: {
_SBE.EXEC_PARALLELIZATION_CORES: 8,
_SBE.EXEC_PARALLELIZATION_MAXLENSUBLIST: 1,
},
_SBE.EXEC_FAILUREPOLICY: {_SBE.EXEC_FAILUREPOLICY_NTRIES: 3},
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ARGUMENTS: {_SBE.SETTINGS_ARGUMENTS_PARAMETERS: {}},
_SBE.SETTINGS_ADDITIONAL: {
"ref_smiles": "Nc1ncnc(c12)n(CCCC)c(n2)Cc3cccc(c3)OC",
"charge_method": "resp",
},
},
_SBE.INPUT: {
_SBE.INPUT_COMPOUNDS: [
{
_SBE.INPUT_SOURCE: "Nc1ncnc(c12)n(CCCC)c(n2)Cc3cc(OC)c(OC)c(c3)OC",
_SBE.INPUT_SOURCE_TYPE: _SBE.INPUT_SOURCE_TYPE_STRING,
}
]
},
}
step_esp_sim = StepEspSim(**step_conf)
step_esp_sim.generate_input()
step_esp_sim.execute()
esp_sim_score = [0.604]
shape_sim_score = [0.624]
for i in range(len(esp_sim_score)):
self.assertEqual(
round(
float(
step_esp_sim.data.compounds[i]
.get_enumerations()[0]
.get_conformers()[0]
.get_molecule()
.GetProp("esp_sim")
),
ndigits=3,
),
esp_sim_score[i],
)
self.assertEqual(
round(
float(
step_esp_sim.data.compounds[i]
.get_enumerations()[0]
.get_conformers()[0]
.get_molecule()
.GetProp("shape_sim")
),
ndigits=3,
),
shape_sim_score[i],
)
def test_esp_sim_gasteiger_charges(self):
step_conf = {
_SBE.STEPID: "01_esp_sim",
_SBE.STEP_TYPE: _SBE.STEP_ESP_SIM,
_SBE.EXEC: {
_SBE.EXEC_PARALLELIZATION: {
_SBE.EXEC_PARALLELIZATION_CORES: 8,
_SBE.EXEC_PARALLELIZATION_MAXLENSUBLIST: 1,
},
_SBE.EXEC_FAILUREPOLICY: {_SBE.EXEC_FAILUREPOLICY_NTRIES: 3},
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ADDITIONAL: {
"ref_smiles": "C(C(C(=O)O)O)O",
"charge_method": "gasteiger",
}
},
_SBE.INPUT: {
_SBE.INPUT_COMPOUNDS: [
{
_SBE.INPUT_SOURCE: "C1=CC=C(C=C1)C(C(=O)O)O",
_SBE.INPUT_SOURCE_TYPE: _SBE.INPUT_SOURCE_TYPE_STRING,
}
]
},
}
step_esp_sim = StepEspSim(**step_conf)
step_esp_sim.generate_input()
step_esp_sim.execute()
esp_sim_score = [0.533]
shape_sim_score = [0.422]
for i in range(len(esp_sim_score)):
self.assertEqual(
round(
float(
step_esp_sim.data.compounds[i]
.get_enumerations()[0]
.get_conformers()[0]
.get_molecule()
.GetProp("esp_sim")
),
ndigits=3,
),
esp_sim_score[i],
)
self.assertEqual(
round(
float(
step_esp_sim.data.compounds[i]
.get_enumerations()[0]
.get_conformers()[0]
.get_molecule()
.GetProp("shape_sim")
),
ndigits=3,
),
shape_sim_score[i],
)
def test_esp_sim_am1bcc_charges(self):
step_conf = {
_SBE.STEPID: "01_esp_sim",
_SBE.STEP_TYPE: _SBE.STEP_ESP_SIM,
_SBE.EXEC: {
_SBE.EXEC_PARALLELIZATION: {
_SBE.EXEC_PARALLELIZATION_CORES: 8,
_SBE.EXEC_PARALLELIZATION_MAXLENSUBLIST: 1,
},
_SBE.EXEC_FAILUREPOLICY: {_SBE.EXEC_FAILUREPOLICY_NTRIES: 3},
},
_SBE.SETTINGS: {
_SBE.SETTINGS_ADDITIONAL: {
"ref_smiles": "C(C(C(=O)O)O)O",
"charge_method": "am1-bcc",
}
},
_SBE.INPUT: {
_SBE.INPUT_COMPOUNDS: [
{
_SBE.INPUT_SOURCE: "C1=CC=C(C=C1)C(C(=O)O)O",
_SBE.INPUT_SOURCE_TYPE: _SBE.INPUT_SOURCE_TYPE_STRING,
}
]
},
}
step_esp_sim = StepEspSim(**step_conf)
step_esp_sim.generate_input()
step_esp_sim.execute()
esp_sim_score = [0.474]
shape_sim_score = [0.422]
for i in range(len(esp_sim_score)):
self.assertEqual(
round(
float(
step_esp_sim.data.compounds[i]
.get_enumerations()[0]
.get_conformers()[0]
.get_molecule()
.GetProp("esp_sim")
),
ndigits=3,
),
esp_sim_score[i],
)
self.assertEqual(
round(
float(
step_esp_sim.data.compounds[i]
.get_enumerations()[0]
.get_conformers()[0]
.get_molecule()
.GetProp("shape_sim")
),
ndigits=3,
),
shape_sim_score[i],
)
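The repeated round()/assertEqual pattern above can also be written with unittest's assertAlmostEqual, which rounds the difference to the given number of places; a sketch, inside a test method, where conformer is hypothetical shorthand for the chained accessor used in these tests:

# conformer = step_esp_sim.data.compounds[i].get_enumerations()[0].get_conformers()[0]
# self.assertAlmostEqual(
#     float(conformer.get_molecule().GetProp("esp_sim")),
#     esp_sim_score[i],
#     places=3,
# )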
avg_line_length: 32.461538 | max_line_length: 91 | alphanum_fraction: 0.432761 | (remaining per-file quality-signal columns omitted)
hexsha: d35f382f94e389c06c0b594f086d716c586e11e5 | size: 167 | ext: py | lang: Python
path: allennlp/sanity_checks/__init__.py | repo: jbrry/allennlp @ d906175d953bebcc177567ec0157220c3bd1b9ad | licenses: ["Apache-2.0"]
stars: 2 (2022-01-02T12:15:21.000Z to 2022-01-02T12:15:23.000Z) | issues: 56 (2020-03-14T21:10:07.000Z to 2022-03-28T13:04:57.000Z) | forks: 3 (2020-09-22T17:35:53.000Z to 2022-02-08T01:03:03.000Z)
from allennlp.sanity_checks.verification_base import VerificationBase
from allennlp.sanity_checks.normalization_bias_verification import NormalizationBiasVerification
avg_line_length: 55.666667 | max_line_length: 96 | alphanum_fraction: 0.928144 | (remaining per-file quality-signal columns omitted)
hexsha: d364fa878df7e1fb23bc770d455858673fa78f56 | size: 163 | ext: py | lang: Python
path: pcrnet/data_utils/__init__.py | repo: asafmanor/pcrnet_pytorch @ e9f34e918f5582e7c0969481a96036dd5fb046cd | licenses: ["MIT"]
stars: 48 (2020-01-06T07:21:13.000Z to 2022-02-19T10:34:50.000Z) | issues: 11 (2020-03-21T11:49:21.000Z to 2021-02-06T09:31:30.000Z) | forks: 12 (2020-01-12T10:18:53.000Z to 2022-01-11T07:48:26.000Z)
from .dataloaders import ModelNet40Data
from .dataloaders import RegistrationData
from .dataloaders import download_modelnet40, deg_to_rad, create_random_transform
avg_line_length: 54.333333 | max_line_length: 81 | alphanum_fraction: 0.889571 | (remaining per-file quality-signal columns omitted)
hexsha: d372ff2b1543d2428289561d3aae48be0146b7b1 | size: 6,376 | ext: py | lang: Python
path: microraiden/test/test_close_all_channels.py | repo: andrevmatos/microraiden @ 2d51e78afaf3c0a8ddab87e59a5260c0064cdbdd | licenses: ["MIT"]
stars: 417 (2017-09-19T19:06:23.000Z to 2021-11-28T05:39:23.000Z) | issues: 259 (2017-09-19T20:42:57.000Z to 2020-11-18T01:31:41.000Z) | forks: 126 (2017-09-19T17:11:39.000Z to 2020-12-17T17:05:27.000Z)
import pytest
from eth_utils import (
encode_hex,
)
from ethereum.tester import TransactionFailed
from web3 import Web3
from web3.exceptions import BadFunctionCallOutput
from microraiden import Client
from microraiden.channel_manager import ChannelManager
from microraiden.close_all_channels import close_open_channels
def test_close_simple(
client: Client,
channel_manager: ChannelManager,
web3: Web3,
wait_for_blocks
):
sender = client.context.address
receiver = channel_manager.receiver
channel = client.open_channel(receiver, 10)
wait_for_blocks(channel_manager.n_confirmations + 1)
channel_manager.register_payment(sender, channel.block, 2,
encode_hex(channel.create_transfer(2)))
channel_manager.stop() # don't update state from this point on
channel_manager.join()
state = channel_manager.state
tx_count_before = web3.eth.getTransactionCount(receiver)
close_open_channels(
channel_manager.private_key,
state,
channel_manager.channel_manager_contract,
wait=lambda: wait_for_blocks(1)
)
tx_count_after = web3.eth.getTransactionCount(receiver)
assert tx_count_after == tx_count_before + 1
with pytest.raises((BadFunctionCallOutput, TransactionFailed)):
channel_id = (channel.sender, channel.receiver, channel.block)
channel_manager.channel_manager_contract.call().getChannelInfo(*channel_id)
wait_for_blocks(1)
def test_close_topup(
client: Client,
channel_manager: ChannelManager,
web3: Web3,
wait_for_blocks
):
sender = client.context.address
receiver = channel_manager.receiver
channel = client.open_channel(receiver, 10)
wait_for_blocks(channel_manager.n_confirmations + 1)
channel.topup(5)
wait_for_blocks(channel_manager.n_confirmations + 1)
channel_manager.register_payment(sender, channel.block, 12,
encode_hex(channel.create_transfer(12)))
channel_manager.stop() # don't update state from this point on
channel_manager.join()
state = channel_manager.state
tx_count_before = web3.eth.getTransactionCount(receiver)
close_open_channels(
channel_manager.private_key,
state,
channel_manager.channel_manager_contract,
wait=lambda: wait_for_blocks(1)
)
tx_count_after = web3.eth.getTransactionCount(receiver)
assert tx_count_after == tx_count_before + 1
with pytest.raises((BadFunctionCallOutput, TransactionFailed)):
channel_id = (channel.sender, channel.receiver, channel.block)
channel_manager.channel_manager_contract.call().getChannelInfo(*channel_id)
wait_for_blocks(1)
def test_close_valid_close(
client: Client,
channel_manager: ChannelManager,
web3: Web3,
wait_for_blocks
):
sender = client.context.address
receiver = channel_manager.receiver
channel = client.open_channel(receiver, 10)
wait_for_blocks(channel_manager.n_confirmations + 1)
channel_manager.register_payment(sender, channel.block, 2,
encode_hex(channel.create_transfer(2)))
channel.close()
channel_manager.stop() # don't update state from this point on
channel_manager.join()
state = channel_manager.state
tx_count_before = web3.eth.getTransactionCount(receiver)
close_open_channels(
channel_manager.private_key,
state,
channel_manager.channel_manager_contract,
wait=lambda: wait_for_blocks(1)
)
tx_count_after = web3.eth.getTransactionCount(receiver)
assert tx_count_after == tx_count_before + 1
with pytest.raises((BadFunctionCallOutput, TransactionFailed)):
channel_id = (channel.sender, channel.receiver, channel.block)
channel_manager.channel_manager_contract.call().getChannelInfo(*channel_id)
wait_for_blocks(1)
def test_close_invalid_close(
client: Client,
channel_manager: ChannelManager,
web3: Web3,
wait_for_blocks
):
sender = client.context.address
receiver = channel_manager.receiver
channel = client.open_channel(receiver, 10)
wait_for_blocks(channel_manager.n_confirmations + 1)
channel_manager.register_payment(sender, channel.block, 2,
encode_hex(channel.create_transfer(2)))
# cheat
channel.update_balance(0)
channel.create_transfer(1)
channel.close()
channel_manager.stop() # don't update state from this point on
channel_manager.join()
state = channel_manager.state
tx_count_before = web3.eth.getTransactionCount(receiver)
close_open_channels(
channel_manager.private_key,
state,
channel_manager.channel_manager_contract,
wait=lambda: wait_for_blocks(1)
)
tx_count_after = web3.eth.getTransactionCount(receiver)
assert tx_count_after == tx_count_before + 1
with pytest.raises((BadFunctionCallOutput, TransactionFailed)):
channel_id = (channel.sender, channel.receiver, channel.block)
channel_manager.channel_manager_contract.call().getChannelInfo(*channel_id)
wait_for_blocks(1)
def test_close_settled(
client: Client,
channel_manager: ChannelManager,
web3: Web3,
wait_for_blocks
):
sender = client.context.address
receiver = channel_manager.receiver
channel = client.open_channel(receiver, 10)
wait_for_blocks(channel_manager.n_confirmations + 1)
channel_manager.register_payment(sender, channel.block, 2,
encode_hex(channel.create_transfer(2)))
receiver_sig = channel_manager.sign_close(sender, channel.block, 2)
channel.close_cooperatively(receiver_sig)
wait_for_blocks(channel_manager.n_confirmations + 1)
channel_manager.stop() # don't update state from this point on
channel_manager.join()
state = channel_manager.state
tx_count_before = web3.eth.getTransactionCount(receiver)
close_open_channels(
channel_manager.private_key,
state,
channel_manager.channel_manager_contract,
wait=lambda: wait_for_blocks(1)
)
tx_count_after = web3.eth.getTransactionCount(receiver)
assert tx_count_after == tx_count_before
wait_for_blocks(1)
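The five tests above repeat an identical stop/close/assert tail; a hedged refactoring sketch that factors it into one helper (the helper name and the expected_txs parameter are mine, everything else uses names from the module above; the settled-channel case would pass expected_txs=0):

def close_and_assert(channel_manager, web3, wait_for_blocks, receiver, expected_txs=1):
    channel_manager.stop()  # don't update state from this point on
    channel_manager.join()
    tx_count_before = web3.eth.getTransactionCount(receiver)
    close_open_channels(
        channel_manager.private_key,
        channel_manager.state,
        channel_manager.channel_manager_contract,
        wait=lambda: wait_for_blocks(1)
    )
    assert web3.eth.getTransactionCount(receiver) == tx_count_before + expected_txs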
avg_line_length: 33.914894 | max_line_length: 83 | alphanum_fraction: 0.715182 | (remaining per-file quality-signal columns omitted)
hexsha: d387b8fbeec1a1bdb3f5d64a91819421954312ee | size: 89,052 | ext: py | lang: Python
path: pirates/makeapirate/ClothingGlobals.py | repo: itsyaboyrocket/pirates @ 6ca1e7d571c670b0d976f65e608235707b5737e3 | licenses: ["BSD-3-Clause"]
stars: 3 (2021-02-25T06:38:13.000Z to 2022-03-22T07:00:15.000Z) | issues: null | forks: 1 (2021-02-25T06:38:17.000Z to 2021-02-25T06:38:17.000Z)
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.makeapirate.ClothingGlobals
from pandac.PandaModules import VBase4
from pirates.piratesbase import PLocalizer
from pirates.piratesbase import PiratesGlobals
from pirates.ai import HolidayGlobals
from pirates.inventory import ItemGlobals
import random
from pirates.inventory.ItemConstants import DYE_COLORS
HAT = 0
SHIRT = 1
VEST = 2
COAT = 3
PANT = 4
BELT = 5
SOCK = 6
SHOE = 7
DYE_COLOR_LEVEL = {0: [0, 1, 2, 3, 4], 5: [5, 6, 7, 8, 9], 10: [10, 11, 12, 13, 14], 15: [15, 16, 17, 18, 19], 20: [20, 21, 22, 23, 24], 25: [25, 26, 27, 28, 29, 30, 31]}
UNDERWEAR = {'m': {SHIRT: (0, 0, 0), PANT: (2, 2, 0)}, 'f': {SHIRT: (0, 0, 0), PANT: (1, 2, 0)}}
CLOTHING_NUMBER = {'HAT': HAT, 'SHIRT': SHIRT, 'VEST': VEST, 'COAT': COAT, 'BELT': BELT, 'PANT': PANT, 'SHOE': SHOE}
CLOTHING_STRING = {HAT: 'HAT', SHIRT: 'SHIRT', VEST: 'VEST', COAT: 'COAT', BELT: 'BELT', PANT: 'PANT', SHOE: 'SHOE'}
CLOTHING_NAMES = {0: {'MALE': {0: 'None', 1: 'Captain', 2: 'Tricorn', 3: 'Navy', 4: 'EITC', 5: 'Admiral', 6: 'Bandana Full', 7: 'Bandana Regular', 8: 'Beanie', 9: 'Barbossa', 10: 'French', 11: 'Spanish', 12: 'French 1', 13: 'French 2', 14: 'French 3', 15: 'Spanish 1', 16: 'Spanish 2', 17: 'Spanish 3', 18: 'Land 1', 19: 'Land 2', 20: 'Land 3', 21: 'Holiday', 22: 'Party 1', 23: 'Party 2', 24: 'GM'}, 'FEMALE': {0: 'None', 1: 'Dress', 2: 'Redcoat', 3: 'Navy w/ Feather', 4: 'Worker', 5: 'Bandana Full', 6: 'Bandana Regular', 7: 'French', 8: 'Spanish', 9: 'French 1', 10: 'French 2', 11: 'French 3', 12: 'Spanish 1', 13: 'Spanish 2', 14: 'Spanish 3', 15: 'Land 1', 16: 'Land 2', 17: 'Land 3', 18: 'Holiday', 19: 'Party 1', 20: 'Party 2', 21: 'GM', 22: 'Tricorn', 23: 'Beanie'}}, 1: {'MALE': {0: 'None', 1: 'Tanktop', 2: 'Sleeveless', 3: 'Short Sleeve Round', 4: 'Short Sleeve V-Neck Closed', 5: 'Short Sleeve V-Neck Open', 6: 'Long Sleeve Lowcut Puffy', 7: 'Long Sleeve V-Neck Closed', 8: 'Long Sleeve V-Neck Open', 9: 'Apron', 10: 'Dealer', 11: 'Long Sleeve Puffy', 12: 'Long Sleeve High Neck Puffy'}, 'FEMALE': {0: 'Short Sleeve', 1: 'Short Sleeve Puffy', 2: 'Long Sleeve Puffy', 3: 'Long Sleeve Lowcut', 4: 'Long Sleeve Collar', 5: 'Long Sleeve Tall Collar', 6: 'Dress'}}, 2: {'MALE': {0: 'None', 1: 'Open', 2: 'Closed', 3: 'Long Closed'}, 'FEMALE': {0: 'None', 1: 'Closed', 2: 'Lowcut', 3: 'Corset High', 4: 'Corset Low', 5: 'Navy'}}, 3: {'MALE': {0: 'None', 1: 'Long', 2: 'Short', 3: 'Navy', 4: 'EITC'}, 'FEMALE': {0: 'None', 1: 'Long', 2: 'Short', 3: 'Navy', 3: 'EITC'}}, 4: {'MALE': {0: 'Long Tucked', 1: 'Long Untucked', 2: 'Shorts', 3: 'Short Pants', 4: 'Navy', 5: 'EITC', 6: 'Apron'}, 'FEMALE': {0: 'Short Pants', 1: 'Shorts', 2: 'Skirt', 3: 'Gypsy Dress', 4: 'Shopkeeper Dress', 5: 'Navy'}}, 5: {'MALE': {0: 'None', 1: 'Sash', 2: 'Sash', 3: 'Strap w/ Oval Buckle', 4: 'Strap w/ Oval Buckle', 5: 'Strap w/ Square Buckle', 6: 'Strap w/ Oval Buckle', 7: 'Strap w/ Oval Buckle', 8: 'Strap w/ Oval Buckle', 9: 'Sash', 10: 'Sash', 11: 'Sash', 12: 'Sash', 13: 'Strap w/ Oval Buckle', 14: 'Strap w/ Oval Buckle', 15: 'Strap w/ Square Buckle', 16: 'Strap w/ Square Buckle', 17: 'Sash', 18: 'Strap w/ Square Buckle', 19: 'Strap w/ Square Buckle'}, 'FEMALE': {0: 'None', 1: 'Sash', 2: 'Sash', 3: 'Sasg', 4: 'Sash', 5: 'Strap w/ Square Buckle', 6: 'Strap w/ Square Buckle', 7: 'Strap w/ Square Buckle', 8: 'Strap w/ Square Buckle', 9: 'Strap w/ Square Buckle', 10: 'Strap w/ Square Buckle', 11: 'Sash', 12: 'Sash', 13: 'Strap w/ Square Buckle', 14: 'Strap w/ Square Buckle', 15: 'Sash', 16: 'Strap w/ Square Buckle', 17: 'Strap w/ Square Buckle', 18: 'Sash'}}, 7: {'MALE': {0: 'None', 1: 'Tall', 2: 'Medium', 3: 'Navy', 4: 'India', 5: 'None'}, 'FEMALE': {0: 'None', 1: 'Short', 2: 'Medium', 3: 'Knee High', 4: 'Tall', 5: 'Navy'}}}
SELECTION_CHOICES = {'DEFAULT': {'MALE': {'FACE': [0, 1, 2, 3], 'HAIR': [0, 1, 2, 5, 6, 9, 11, 12], 'BEARD': [0, 1, 2, 3, 4, 5, 6, 8, 9], 'MUSTACHE': [0, 1, 2, 3], 'HAT': {0: [0]}, 'SHIRT': {1: [0, 1, 2], 4: [1, 2, 3]}, 'VEST': {0: [0], 1: [0, 1, 2]}, 'COAT': {0: [0]}, 'PANT': {0: [1, 2], 1: [0]}, 'BELT': {0: [0], 1: [0], 3: [0], 5: [0]}, 'SHOE': {0: [0], 1: [0, 1, 2]}}, 'FEMALE': {'FACE': [0, 1, 2, 3], 'HAIR': [0, 2, 3, 5, 8, 9, 10, 11, 13, 14, 16], 'HAT': {0: [0]}, 'SHIRT': {0: [0], 1: [1], 2: [0], 3: [2]}, 'VEST': {0: [0], 1: [0, 1, 2, 3]}, 'COAT': {0: [0]}, 'PANT': {0: [0, 1], 2: [0]}, 'BELT': {0: [0], 1: [0], 5: [0], 6: [0]}, 'SHOE': {0: [0], 1: [0], 2: [0], 3: [0]}}}, 'NPC': {'MALE': {'FACE': [0, 1, 2, 3, 4, 5, 6], 'HAIR': [0, 1, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13], 'HAT': {0: [0], 1: [0], 2: [0, 1, 2, 3, 4, 5], 3: [0], 4: [0], 5: [0], 6: [0, 1, 2, 3, 4, 5, 6], 7: [0, 1, 2, 3, 4], 8: [0, 1, 2, 3], 9: [0, 1, 2, 3, 4, 5, 6], 10: [0], 11: [0], 12: [0, 1, 2], 13: [0, 1], 14: [0, 1], 15: [0, 1, 2], 16: [0, 1, 2, 3], 17: [0, 1, 2, 3], 18: [0, 1, 2], 19: [0, 1], 20: [0, 1], 21: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 22: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 23: [0, 1, 2, 3, 4], 24: [0, 1, 2, 3, 4, 5]}, 'SHIRT': {0: [0], 1: [0, 1, 2, 3, 4], 2: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], 3: [0, 1, 2, 3, 4, 5, 6, 7, 8], 4: [0, 1, 2, 3, 4, 5, 6], 5: [0, 1, 2], 6: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 7: [0, 1, 2, 3, 4, 5, 6, 7], 8: [0, 1, 2], 9: [0, 1, 2], 10: [0], 11: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], 12: [0]}, 'VEST': {0: [0], 1: [0, 1, 2, 3, 4, 5, 6, 7, 8], 2: [0, 1, 2, 3, 4], 3: [0, 1, 2, 3, 4, 5, 6]}, 'COAT': {0: [0], 1: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], 2: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 3: [0], 4: [0]}, 'PANT': {0: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], 1: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27], 2: [0, 1, 2, 3, 4], 3: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 4: [0], 5: [0], 6: [0, 1, 2]}, 'BELT': {0: [0], 1: [0], 2: [0], 3: [0], 4: [0], 5: [0], 6: [0], 7: [0], 8: [0], 9: [0], 10: [0], 11: [0], 12: [0], 13: [0], 14: [0], 15: [0], 16: [0], 17: [0], 18: [0], 19: [0]}, 'SHOE': {0: [0], 1: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 2: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 3: [0, 1, 2, 4, 5], 4: [0], 5: [0, 1]}}, 'FEMALE': {'FACE': [0, 1, 2, 3, 4], 'HAIR': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19], 'HAT': {0: [0], 1: [0, 1, 2, 3, 4, 5, 6], 2: [0], 3: [0], 4: [0], 5: [0, 1, 2, 3, 4, 5, 6, 7], 6: [0, 1, 2, 3, 4], 7: [0, 1], 8: [0], 9: [0, 1, 2], 10: [0, 1], 11: [0, 1], 12: [0, 1, 2], 13: [0, 1, 2, 3], 14: [0, 1, 2, 3], 15: [0, 1, 2], 16: [0, 1], 17: [0, 1], 18: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 19: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 20: [0, 1, 2, 3, 4], 21: [0, 1, 2, 3, 4, 5], 22: [0], 23: [0, 1, 2, 3, 4]}, 'SHIRT': {0: [0, 1, 2, 3, 4, 5, 6], 1: [0, 1, 2, 3, 4, 5, 6, 7], 2: [0, 1, 2, 3, 4, 5, 6], 3: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 4: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], 5: [0, 1, 2, 3, 4, 5, 6, 7], 6: [0, 1, 2, 3]}, 'VEST': {0: [0], 1: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 2: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], 3: [0, 1, 2, 4, 5, 6], 4: [0, 1, 2, 3, 4, 5]}, 'COAT': {0: [0], 1: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], 2: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], 3: [0], 4: [0]}, 'PANT': {0: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], 1: [0, 1, 2, 3, 4, 6, 7, 8, 9, 
10, 11, 12, 13, 14, 15, 16, 17, 18, 19], 2: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18], 3: [0, 1], 4: [0, 1], 5: [0]}, 'BELT': {0: [0], 1: [0], 2: [0], 3: [0], 4: [0], 5: [0], 6: [0], 7: [0], 8: [0], 9: [0], 10: [0], 11: [0], 12: [0], 13: [0], 14: [0], 15: [0], 16: [0], 17: [0], 18: [0]}, 'SHOE': {0: [0], 1: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], 2: [0, 1, 2, 3, 4, 5, 6, 7, 8], 3: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 4: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 5: [0]}}}}
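# Illustrative sketch, not part of the original module: SELECTION_CHOICES maps
# a choice set ('DEFAULT' for new players, 'NPC') and gender to the allowed
# asset ids; for clothing slots each style id maps to its legal texture
# indices. The hypothetical pickRandomClothing() below shows the assumed
# indexing; the `random` import at the top of the module suggests this kind
# of use. It only applies to the dict-valued clothing slots ('HAT', 'SHIRT',
# etc.), not the list-valued 'FACE'/'HAIR'/'BEARD'/'MUSTACHE' entries.
def pickRandomClothing(gender='MALE', setName='NPC', slot='SHIRT'):
    # Pick a random style for the slot, then a random texture legal for it.
    choices = SELECTION_CHOICES[setName][gender][slot]
    styleId = random.choice(list(choices.keys()))
    textureId = random.choice(choices[styleId])
    return (styleId, textureId)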
textures = {'MALE': {'HAT': [[['hat_none', VBase4(1.0, 1.0, 1.0, 1.0)]], [['PM_hat_captain_leather', VBase4(36 / 255.0, 26 / 255.0, 9 / 255.0, 1.0)], ['PM_hat_captain_baron', VBase4(36 / 255.0, 26 / 255.0, 9 / 255.0, 1.0)], ['PM_hat_captain_prince', VBase4(36 / 255.0, 26 / 255.0, 9 / 255.0, 1.0)], ['PM_hat_captain_privateer', VBase4(36 / 255.0, 26 / 255.0, 9 / 255.0, 1.0)]], [['hat_tricorn_brown', VBase4(43 / 255.0, 48 / 255.0, 62 / 255.0, 1.0)], ['hat_tricorn_orange', VBase4(125 / 255.0, 59 / 255.0, 37 / 255.0, 1.0)], ['hat_tricorn_black_skull', VBase4(33 / 255.0, 37 / 255.0, 36 / 255.0, 1.0)], ['hat_tricorn_navy_goldtrim', VBase4(32 / 255.0, 51 / 255.0, 78 / 255.0, 1.0)], ['hat_tricorn_valentines', VBase4(132 / 255.0, 51 / 255.0, 51 / 255.0, 1.0)], ['hat_tricorn_mardiGras', VBase4(132 / 255.0, 51 / 255.0, 51 / 255.0, 1.0)]], [['hat_navy', VBase4(63 / 255.0, 63 / 255.0, 63 / 255.0, 1.0)], ['hat_navy_diplomat', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_navy_seaserpent', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_eitc', VBase4(42 / 255.0, 42 / 255.0, 42 / 255.0, 1.0)]], [['hat_admiral', VBase4(49 / 255.0, 49 / 255.0, 49 / 255.0, 1.0)]], [['hat_bandana_plain', VBase4(149 / 255.0, 149 / 255.0, 149 / 255.0, 1.0)], ['hat_bandana_full_blue', VBase4(192 / 255.0, 192 / 255.0, 192 / 255.0, 1.0)], ['hat_bandana_full_skullcrossbones', VBase4(47 / 255.0, 47 / 255.0, 47 / 255.0, 1.0)], ['hat_bandanna_full_blue_patches', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_bandanna_full_blue_zigzag', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_bandana_full_polkadot_basic_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['pir_t_clo_upt_bandana_thanks08', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_bandana_china', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_bandana_wildfire', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_bandana_plain', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_bandana_full_blue', VBase4(192 / 255.0, 192 / 255.0, 192 / 255.0, 1.0)], ['hat_bandana_full_skullcrossbones', VBase4(47 / 255.0, 47 / 255.0, 47 / 255.0, 1.0)], ['hat_bandanna_full_blue_patches', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_bandanna_full_blue_zigzag', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_beanie_plain', VBase4(96 / 255.0, 91 / 255.0, 82 / 255.0, 1.0)], ['hat_beanie_black_crossbones', VBase4(12 / 255.0, 10 / 255.0, 11 / 255.0, 1.0)], ['hat_beanie_blue_skull', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_beanie_greensilk', VBase4(0 / 255.0, 128 / 255.0, 0 / 255.0, 1.0)], ['hat_beanie_brown_beads', VBase4(0 / 255.0, 128 / 255.0, 0 / 255.0, 1.0)], ['hat_beanie_bountyhunter', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_beanie_corsair', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_barbossa+hat_barbossa_feather', VBase4(43 / 255.0, 48 / 255.0, 62 / 255.0, 1.0)], ['hat_barb_style_brown+hat_barb_style_brown_feather', VBase4(78 / 255.0, 64 / 255.0, 55 / 255.0, 1.0)], ['hat_barossa_style_hat_blue_knit_band+hat_barossa_style_hat_blue_knit_band_feather', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_barossa_style_hat_brown_buttons+hat_barossa_style_hat_brown_buttons_feather', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_barossa_style_hat_brown_purple_feather+hat_barossa_style_hat_brown_purple_feather_feather', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], 
['hat_barbossa+hat_barbossa_advanced_outfit_feather', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_barbossa_intermediate_outfit+hat_barbossa_intermediate_outfit_feather', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_french+hat_french_feather', VBase4(32 / 255.0, 60 / 255.0, 25 / 255.0, 1.0)], ['hat_tricorn_assassin+hat_feather_assassin', VBase4(32 / 255.0, 60 / 255.0, 25 / 255.0, 1.0)], ['hat_tricorn_peacock+hat_feather_peacock', VBase4(32 / 255.0, 60 / 255.0, 25 / 255.0, 1.0)], ['hat_tricorn_scourge+hat_feather_scourge', VBase4(32 / 255.0, 60 / 255.0, 25 / 255.0, 1.0)]], [['hat_barbossa+hat_spanish_feather', VBase4(75 / 255.0, 50 / 255.0, 25 / 255.0, 1.0)], ['hat_spanish_zombie+hat_feather_zombie', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_french_1_blue_goldtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_french_1_dkgreen_goldtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_french_1_violet_goldtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_french_2_black_goldtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_french_2_brown_leather', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_french_3_black_goldtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_french_3_navyblue_goldtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_spanish_1_black', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_spanish_1_brown', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_spanish_1_red', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_spanish_2_bronze', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_spanish_2_steel', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_spanish_2_steel_embossed', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_spanish_2_steel_rusted', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_spanish_3_black_redband', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_spanish_3_brown_leather', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_spanish_3_burgundy_black', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_spanish_3_grey_brownband', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_land_1_black_blueband', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_land_1_brown_leather', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_land_1_straw', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_land_2_black_goldtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_land_2_blue_red_goldtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_land_3_steel', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_land_3_steel_goldinlay', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_holiday_blue', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_holiday_blue_white_stripes', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_holiday_green', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_holiday_orange', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_holiday_red', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_holiday_red_white', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_holiday_red_white_stripes', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_holiday_violet', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_holiday_white', VBase4(255 / 255.0, 255 / 
255.0, 255 / 255.0, 1.0)], ['hat_holiday_yellow', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_holiday_Xmas', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_party_1_blue_red', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_party_1_green_orange', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_party_1_lightblue_pink', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_party_1_orange_green', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_party_1_pink_lightblue', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_party_1_purple_yellow', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_party_1_red_blue', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_party_1_red_yellow', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_party_1_yellow_purple', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_party_1_yellow_red', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_party_2_black_goldtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_party_2_blue_goldtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_party_2_brown_blackband_buckle', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_party_2_green_goldtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_party_2_StPatricks', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_gm_black_skullcrossbones', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_gm_dkgreen_skullcrossbones', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_gm_gold_black_skullcrossbones', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_gm_red_black_skullcrossbones', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_gm_red_dkgreen_skullcrossbones', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_gm_rose_skullcrossbones', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_mushroom', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]]], 'SHIRT': [[['PM_none', VBase4(1.0, 1.0, 1.0, 1.0)]], [['PM_shirt_tanktop_sweatstained', VBase4(180 / 255.0, 178 / 255.0, 178 / 255.0, 1.0)], ['PM_shirt_tanktop_stripes', VBase4(179 / 255.0, 164 / 255.0, 147 / 255.0, 1.0)], ['PM_shirt_tanktop_plain', VBase4(228 / 255.0, 227 / 255.0, 227 / 255.0, 1.0)], ['PM_shirt_tanktop_buttons', VBase4(192 / 255.0, 165 / 255.0, 154 / 255.0, 1.0)], ['PM_shirt_tanktop_suspenders', VBase4(218 / 255.0, 200 / 255.0, 174 / 255.0, 1.0)], ['PM_shirt_tanktop_scourge', VBase4(218 / 255.0, 200 / 255.0, 174 / 255.0, 1.0)], ['PM_shirt_tanktop_seaserpent', VBase4(218 / 255.0, 200 / 255.0, 174 / 255.0, 1.0)]], [['PM_shirt_nosleeves_stripe', VBase4(145 / 255.0, 123 / 255.0, 94 / 255.0, 1.0)], ['PM_shirt_nosleeves_ties', VBase4(193 / 255.0, 200 / 255.0, 201 / 255.0, 1.0)], ['PM_shirt_nosleeves_leatherfront', VBase4(169 / 255.0, 177 / 255.0, 185 / 255.0, 1.0)], ['PM_shirt_nosleeves_centerseam', VBase4(234 / 255.0, 233 / 255.0, 211 / 255.0, 1.0)], ['PM_shirt_nosleeves_bluethreebutton', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_nosleeves_palegreen', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_nosleeves_purplesidebuckle', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_nosleeves_salmon', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_nosleeves_flax_brown', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_nosleeves_silk_blue', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], 
['PM_shirt_nosleeves_silk_red', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_nosleeves_silk_white', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_nosleeves_advanced_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['PM_shirt_short_round_frontlacing', VBase4(79 / 255.0, 85 / 255.0, 90 / 255.0, 1.0)], ['PM_shirt_short_round_frontbuttons', VBase4(70 / 255.0, 51 / 255.0, 38 / 255.0, 1.0)], ['PM_shirt_short_round_stripes', VBase4(131 / 255.0, 126 / 255.0, 137 / 255.0, 1.0)], ['PM_shirt_short_round_leather_cloth', VBase4(227 / 255.0, 194 / 255.0, 132 / 255.0, 1.0)], ['PM_shirt_short_round_blue_whitecollar', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_short_round_cloth_black', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_short_round_cloth_caramel', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_short_round_darkbrown_buckle', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_short_round_greengold_whitecollar', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['PM_shirt_shared_cloth_metal_buttons', VBase4(169 / 255.0, 170 / 255.0, 169 / 255.0, 1.0)], ['PM_shirt_shared_cloth_plain1', VBase4(172 / 255.0, 172 / 255.0, 172 / 255.0, 1.0)], ['PM_shirt_shared_cloth_plain2', VBase4(162 / 255.0, 164 / 255.0, 162 / 255.0, 1.0)], ['PM_shirt_shared_cloth_browncollar', VBase4(175 / 255.0, 162 / 255.0, 144 / 255.0, 1.0)], ['PM_shirt_shared_cloth_fabricwaistband', VBase4(110 / 255.0, 110 / 255.0, 98 / 255.0, 1.0)], ['PM_shirt_shared_cloth_leatherwaistband', VBase4(123 / 255.0, 85 / 255.0, 80 / 255.0, 1.0)], ['PM_shirt_shared_cloth_yellowcollar', VBase4(116 / 255.0, 161 / 255.0, 158 / 255.0, 1.0)]], [['PM_shirt_shared_cloth_metal_buttons', VBase4(169 / 255.0, 170 / 255.0, 169 / 255.0, 1.0)], ['PM_shirt_shared_cloth_plain1', VBase4(172 / 255.0, 172 / 255.0, 172 / 255.0, 1.0)], ['PM_shirt_shared_cloth_plain2', VBase4(162 / 255.0, 164 / 255.0, 162 / 255.0, 1.0)], ['PM_shirt_shared_cloth_browncollar', VBase4(175 / 255.0, 162 / 255.0, 144 / 255.0, 1.0)], ['PM_shirt_shared_cloth_fabricwaistband', VBase4(110 / 255.0, 110 / 255.0, 98 / 255.0, 1.0)], ['PM_shirt_shared_cloth_leatherwaistband', VBase4(123 / 255.0, 85 / 255.0, 80 / 255.0, 1.0)], ['PM_shirt_shared_cloth_yellowcollar', VBase4(116 / 255.0, 161 / 255.0, 158 / 255.0, 1.0)]], [['PM_shirt_long_sleeve_puffy_ClothWithTies', VBase4(170 / 255.0, 161 / 255.0, 142 / 255.0, 1.0)], ['PM_shirt_long_sleeve_puffy_cloth_brown_leather', VBase4(154 / 255.0, 146 / 255.0, 132 / 255.0, 1.0)], ['PM_shirt_long_sleeve_puffy_plain', VBase4(210 / 255.0, 216 / 255.0, 220 / 255.0, 1.0)], ['PM_shirt_long_sleeve_puffy_center_tie', VBase4(207 / 255.0, 192 / 255.0, 161 / 255.0, 1.0)], ['PM_shirt_long_sleeve_puffy_cream_orangevest', VBase4(95 / 255.0, 47 / 255.0, 17 / 255.0, 1.0)], ['PM_shirt_long_sleeve_puffy_white_brownpillowvest', VBase4(77 / 255.0, 48 / 255.0, 27 / 255.0, 1.0)], ['PM_shirt_long_sleeve_puffy_white_brownvest', VBase4(36 / 255.0, 26 / 255.0, 20 / 255.0, 1.0)], ['PM_shirt_long_sleeve_puffy_white_redvest', VBase4(89 / 255.0, 21 / 255.0, 30 / 255.0, 1.0)], ['PM_shirt_long_sleeve_puffy_intermediate_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_long_sleeve_puffy_white_redvest_goldtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['PM_shirt_shared_cloth_metal_buttons', VBase4(169 / 255.0, 170 / 255.0, 169 / 255.0, 1.0)], ['PM_shirt_shared_cloth_plain1', VBase4(172 / 255.0, 172 / 255.0, 172 / 255.0, 1.0)], 
['PM_shirt_shared_cloth_plain2', VBase4(162 / 255.0, 164 / 255.0, 162 / 255.0, 1.0)], ['PM_shirt_shared_cloth_browncollar', VBase4(175 / 255.0, 162 / 255.0, 144 / 255.0, 1.0)], ['PM_shirt_shared_cloth_fabricwaistband', VBase4(110 / 255.0, 110 / 255.0, 98 / 255.0, 1.0)], ['PM_shirt_shared_cloth_leatherwaistband', VBase4(123 / 255.0, 85 / 255.0, 80 / 255.0, 1.0)], ['PM_shirt_shared_cloth_yellowcollar', VBase4(116 / 255.0, 161 / 255.0, 158 / 255.0, 1.0)], ['PM_shirt_long_sleeve_puffy_tan_basic_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_shared_cloth_zombie', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['PM_shirt_shared_cloth_metal_buttons', VBase4(169 / 255.0, 170 / 255.0, 169 / 255.0, 1.0)], ['PM_shirt_shared_cloth_plain1', VBase4(172 / 255.0, 172 / 255.0, 172 / 255.0, 1.0)], ['PM_shirt_shared_cloth_plain2', VBase4(162 / 255.0, 164 / 255.0, 162 / 255.0, 1.0)]], [['PM_shirt_apron', VBase4(82 / 255.0, 88 / 255.0, 93 / 255.0, 1.0)], ['PM_shirt_apron_black', VBase4(82 / 255.0, 88 / 255.0, 93 / 255.0, 1.0)], ['PM_shirt_apron_black', VBase4(82 / 255.0, 88 / 255.0, 93 / 255.0, 1.0)]], [['PM_shirt_shared_cloth_dealer', VBase4(162 / 255.0, 164 / 255.0, 162 / 255.0, 1.0)]], [['PM_shirt_long_sleeve_puffy_cincodemayo', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_long_sleeve_puffy_halloween', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_long_sleeve_puffy_thanksgiving', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_long_sleeve_puffy_guyfawkes', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_long_sleeve_puffy_valentines', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_long_sleeve_puffy_winterholiday', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_long_sleeve_puffy_caribbeanday', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_long_sleeve_puffy_carnival', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_long_sleeve_puffy_chinesenewyear', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_long_sleeve_puffy_firstfall', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_long_sleeve_puffy_newyearseve', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_long_sleeve_puffy_stpatricks', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_long_sleeve_puffy_summersolstice', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_long_sleeve_puffy_wintersolstice', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_long_sleeve_puffy_firstspring', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_long_sleeve_puffy_mardiGras', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_long_sleeve_puffy_Xmas', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_long_sleeve_puffy_corsair', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['PM_shirt_long_sleeve_highneck_plain', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_long_sleeve_highneck_assassin', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_long_sleeve_highneck_baron', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_long_sleeve_highneck_peacock', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_long_sleeve_highneck_prince', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]]], 'VEST': [[['PM_none', VBase4(1.0, 1.0, 1.0, 1.0)]], [['PM_vest_open_leather_silk', VBase4(172 / 255.0, 108 / 255.0, 60 / 255.0, 1.0)], 
['PM_vest_open_PatchworkDark', VBase4(104 / 255.0, 112 / 255.0, 107 / 255.0, 1.0)], ['PM_vest_open_belts', VBase4(96 / 255.0, 75 / 255.0, 53 / 255.0, 1.0)], ['PM_vest_open_clasp', VBase4(91 / 255.0, 109 / 255.0, 109 / 255.0, 1.0)], ['PM_vest_open_buttons', VBase4(70 / 255.0, 98 / 255.0, 108 / 255.0, 1.0)], ['PM_vest_open_blue_silverbuttons', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_vest_open_brown_blacklapel', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_vest_open_brown_redscarf', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_vest_open_green_blacklapel', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_vest_open_bountyhunter', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['PM_vest_closed_silk_stripe_lapel', VBase4(118 / 255.0, 101 / 255.0, 73 / 255.0, 1.0)], ['PM_vest_closed_clasp', VBase4(151 / 255.0, 127 / 255.0, 101 / 255.0, 1.0)], ['PM_vest_closed_lapel', VBase4(187 / 255.0, 158 / 255.0, 108 / 255.0, 1.0)], ['PM_vest_closed_leathertop', VBase4(198 / 255.0, 190 / 255.0, 168 / 255.0, 1.0)], ['PM_vest_closed_stripe', VBase4(174 / 255.0, 163 / 255.0, 163 / 255.0, 1.0)], ['PM_vest_closed_advanced_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shirt_shared_cloth_zombie', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_vest_closed_privateer', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['PM_vest_long_closed_a', VBase4(145 / 255.0, 141 / 255.0, 130 / 255.0, 1.0)], ['PM_vest_long_closed_brown_whitecollar', VBase4(71 / 255.0, 37 / 255.0, 3 / 255.0, 1.0)], ['PM_vest_long_closed_rust', VBase4(76 / 255.0, 30 / 255.0, 14 / 255.0, 1.0)], ['PM_vest_long_closed_white_ropebelt', VBase4(92 / 255.0, 91 / 255.0, 79 / 255.0, 1.0)], ['PM_vest_long_closed_yellowgreen_stripes', VBase4(110 / 255.0, 93 / 255.0, 39 / 255.0, 1.0)], ['PM_vest_long_closed_intermediate_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_vest_long_closed_blackgold', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_vest_long_closed_wildfire', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]]], 'COAT': [[['PM_none', VBase4(1.0, 1.0, 1.0, 1.0)]], [['PM_coat_long_braidsandfloralpattern', VBase4(91 / 255.0, 76 / 255.0, 59 / 255.0, 1.0)], ['PM_coat_long_braids_embroidery', VBase4(67 / 255.0, 61 / 255.0, 41 / 255.0, 1.0)], ['PM_coat_long_cloth_lighttrim', VBase4(143 / 255.0, 144 / 255.0, 164 / 255.0, 1.0)], ['PM_coat_long_darktrim_backties', VBase4(93 / 255.0, 79 / 255.0, 53 / 255.0, 1.0)], ['PM_coat_long_fabric_leatherbelt', VBase4(32 / 255.0, 44 / 255.0, 27 / 255.0, 1.0)], ['PM_coat_long_french', VBase4(41 / 255.0, 36 / 255.0, 38 / 255.0, 1.0)], ['PM_coat_long_leather', VBase4(43 / 255.0, 28 / 255.0, 15 / 255.0, 1.0)], ['PM_coat_long_afro', VBase4(86 / 255.0, 74 / 255.0, 41 / 255.0, 1.0)], ['PM_coat_long_taupe', VBase4(64 / 255.0, 54 / 255.0, 49 / 255.0, 1.0)], ['PM_coat_long_brown', VBase4(69 / 255.0, 42 / 255.0, 21 / 255.0, 1.0)], ['PM_coat_long_blue_yellowtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_coat_long_gold_blackbuttons', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_coat_long_green_yellowtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_coat_long_red_yellowtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_coat_long_advanced_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_coat_long_blackgold', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_coat_long_royal', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], 
['PM_coat_long_assassin', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_coat_long_baron', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_coat_long_prince', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_coat_long_privateer', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_coat_long_scourge', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_coat_long_wildfire', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_coat_long_zombie', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['PM_coat_short_blackwithstitching', VBase4(22 / 255.0, 23 / 255.0, 25 / 255.0, 1.0)], ['PM_coat_short_cloth_darkleather_goldtrim', VBase4(85 / 255.0, 86 / 255.0, 60 / 255.0, 1.0)], ['PM_coat_short_dark_stringtiesback', VBase4(59 / 255.0, 61 / 255.0, 63 / 255.0, 1.0)], ['PM_coat_short_red_blackleathertrim', VBase4(130 / 255.0, 31 / 255.0, 27 / 255.0, 1.0)], ['PM_coat_short_wool_brownleathertrim', VBase4(117 / 255.0, 104 / 255.0, 77 / 255.0, 1.0)], ['PM_coat_short_yellow_blacklapel', VBase4(121 / 255.0, 88 / 255.0, 40 / 255.0, 1.0)], ['PM_coat_short_purple_blackcollar', VBase4(79 / 255.0, 48 / 255.0, 58 / 255.0, 1.0)], ['PM_coat_short_blue_goldtrim', VBase4(33 / 255.0, 51 / 255.0, 59 / 255.0, 1.0)], ['PM_coat_short_black_checkerboard', VBase4(33 / 255.0, 51 / 255.0, 59 / 255.0, 1.0)], ['PM_coat_short_brown_stripes', VBase4(33 / 255.0, 51 / 255.0, 59 / 255.0, 1.0)], ['PM_coat_short_seaserpent', VBase4(33 / 255.0, 51 / 255.0, 59 / 255.0, 1.0)]], [['PM_navy', VBase4(148 / 255.0, 29 / 255.0, 29 / 255.0, 1.0)]], [['PM_eitc', VBase4(31 / 255.0, 33 / 255.0, 31 / 255.0, 1.0)], ['PM_coat_closed_china', VBase4(31 / 255.0, 33 / 255.0, 31 / 255.0, 1.0)], ['PM_coat_closed_diplomat', VBase4(31 / 255.0, 33 / 255.0, 31 / 255.0, 1.0)]]], 'PANT': [[['PM_pant_long_pants_tucked_LeatherGoldButtons', VBase4(83 / 255.0, 70 / 255.0, 53 / 255.0, 1.0)], ['PM_pant_long_pants_tucked_leathergoldbuttons_nopatch', VBase4(131 / 255.0, 106 / 255.0, 71 / 255.0, 1.0)], ['PM_pant_long_pants_tucked_cotton_leathersidepocket', VBase4(154 / 255.0, 164 / 255.0, 170 / 255.0, 1.0)], ['PM_pant_long_pants_tucked_leather_buttonfront', VBase4(63 / 255.0, 63 / 255.0, 63 / 255.0, 1.0)], ['PM_pant_long_pants_tucked_cloth_leatherstripes', VBase4(79 / 255.0, 79 / 255.0, 79 / 255.0, 1.0)], ['PM_pant_long_pants_tucked_leather_miniknives', VBase4(79 / 255.0, 79 / 255.0, 79 / 255.0, 1.0)], ['PM_pant_long_pants_tucked_black_yellowtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_tucked_blue_stripes', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_tucked_brown_sidebuttons', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_tucked_greygreen', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_tucked_red_sidebones', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_tucked_red_yellowstripes', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_tucked_advanced_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_tucked_bluesidetrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_tucked_violet_yellowstripes', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_tucked_StPatricks', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_tucked_valentines', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_tucked_assassin', VBase4(255 / 255.0, 255 / 
255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_tucked_baron', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_tucked_bountyhunter', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_tucked_china', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_tucked_diplomat', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_tucked_peacock', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_tucked_prince', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_tucked_privateer', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_tucked_seaserpent', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_tucked_zombie', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['PM_pant_long_pants_untucked_plain3', VBase4(138 / 255.0, 138 / 255.0, 138 / 255.0, 1.0)], ['PM_pant_long_pants_untucked_celticbuttons', VBase4(183 / 255.0, 165 / 255.0, 178 / 255.0, 1.0)], ['PM_pant_long_pants_untucked_twotone', VBase4(186 / 255.0, 182 / 255.0, 187 / 255.0, 1.0)], ['PM_pant_long_pants_untucked_onetone', VBase4(178 / 255.0, 177 / 255.0, 179 / 255.0, 1.0)], ['PM_pant_long_pants_untucked_leatherpocket_trim', VBase4(116 / 255.0, 101 / 255.0, 70 / 255.0, 1.0)], ['PM_pant_long_pants_untucked_leather_skullsnaps_suede', VBase4(213 / 255.0, 186 / 255.0, 140 / 255.0, 1.0)], ['PM_pant_long_pants_untucked_leather_skullsnaps_no_cuff', VBase4(218 / 255.0, 191 / 255.0, 145 / 255.0, 1.0)], ['PM_pant_long_pants_untucked_leather_skullsnaps_no_stripe', VBase4(218 / 255.0, 191 / 255.0, 145 / 255.0, 1.0)], ['PM_pant_long_pants_untucked_plain1', VBase4(137 / 255.0, 124 / 255.0, 97 / 255.0, 1.0)], ['PM_pant_long_pants_untucked_plain2', VBase4(61 / 255.0, 66 / 255.0, 64 / 255.0, 1.0)], ['PM_pant_long_pants_untucked_leather_plain', VBase4(131 / 255.0, 117 / 255.0, 107 / 255.0, 1.0)], ['zomb_long_pants_untucked', VBase4(144 / 255.0, 135 / 255.0, 111 / 255.0, 1.0)], ['PM_pant_long_pants_untucked_bluegreensash', VBase4(44 / 255.0, 66 / 255.0, 64 / 255.0, 1.0)], ['PM_pant_long_pants_untucked_greenbronzesash', VBase4(41 / 255.0, 37 / 255.0, 16 / 255.0, 1.0)], ['PM_pant_long_pants_untucked_blue_basic_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_untucked_intermediate_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_untucked_blackgold', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_untucked_brownpatches', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_untucked_chaps', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_untucked_greenembroidery', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_untucked_greensilk', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_untucked_Xmas', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_untucked_mardiGras', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_untucked_white_sidenet', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_untucked_white_greenstripes', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_untucked_tan_yellowtop', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_untucked_tan_sidestitch', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_untucked_blue_goldtrim', 
VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_untucked_corsair', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_untucked_scourge', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_pant_long_pants_untucked_wildfire', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['PM_pant_shorts_threesidebuttons', VBase4(94 / 255.0, 92 / 255.0, 69 / 255.0, 1.0)], ['PM_pant_shorts_3ties', VBase4(184 / 255.0, 160 / 255.0, 107 / 255.0, 1.0)], ['PM_pant_shorts_1buttonflap', VBase4(224 / 255.0, 213 / 255.0, 205 / 255.0, 1.0)], ['PM_pant_shorts_3buckle', VBase4(122 / 255.0, 122 / 255.0, 99 / 255.0, 1.0)], ['PM_pant_shorts_browncloth', VBase4(52 / 255.0, 30 / 255.0, 9 / 255.0, 1.0)]], [['PM_pant_short_pants_twotonewithsash', VBase4(92 / 255.0, 108 / 255.0, 126 / 255.0, 1.0)], ['PM_pant_short_pants_sidepocket', VBase4(126 / 255.0, 109 / 255.0, 97 / 255.0, 1.0)], ['PM_pant_short_pants_simplecanvas', VBase4(190 / 255.0, 177 / 255.0, 149 / 255.0, 1.0)], ['PM_pant_short_pants_sideleather', VBase4(203 / 255.0, 184 / 255.0, 163 / 255.0, 1.0)], ['PM_pant_short_pants_blue_white_top', VBase4(33 / 255.0, 45 / 255.0, 84 / 255.0, 1.0)], ['PM_pant_short_pants_brown_cloth', VBase4(52 / 255.0, 30 / 255.0, 9 / 255.0, 1.0)], ['PM_pant_short_pants_light_brown', VBase4(125 / 255.0, 87 / 255.0, 43 / 255.0, 1.0)], ['PM_pant_short_pants_rust', VBase4(77 / 255.0, 36 / 255.0, 18 / 255.0, 1.0)], ['PM_pant_short_pants_slate', VBase4(55 / 255.0, 71 / 255.0, 79 / 255.0, 1.0)], ['PM_pant_short_pants_light_brown_enhance', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['PM_navy', VBase4(156 / 255.0, 145 / 255.0, 132 / 255.0, 1.0)]], [['PM_eitc', VBase4(31 / 255.0, 33 / 255.0, 31 / 255.0, 1.0)]], [['PM_pant_apron', VBase4(145 / 255.0, 130 / 255.0, 102 / 255.0, 1.0)], ['PM_pant_apron_black', VBase4(145 / 255.0, 130 / 255.0, 102 / 255.0, 1.0)], ['PM_pant_apron_black', VBase4(145 / 255.0, 130 / 255.0, 102 / 255.0, 1.0)]]], 'BELT': [[['PM_none', VBase4(1.0, 1.0, 1.0, 1.0)]], [['PM_belt_sash_plain', VBase4(195 / 255.0, 193 / 255.0, 188 / 255.0, 1.0)]], [['PM_belt_sash_celticbuckle', VBase4(108 / 255.0, 97 / 255.0, 93 / 255.0, 1.0)]], [['PM_belt_strap_oval+PM_belt_buckle_oval', VBase4(65 / 255.0, 43 / 255.0, 1 / 255.0, 1.0)]], [['PM_belt_strap_LeatherBrown+PM_belt_buckle_SkullGold', VBase4(40 / 255.0, 30 / 255.0, 20 / 255.0, 1.0)]], [['PM_belt_strap_black+PM_belt_buckle_square', VBase4(24 / 255.0, 10 / 255.0, 2 / 255.0, 1.0)]], [['PM_belt_strap_blackleather_01+PM_belt_buckle_goldskull_01', VBase4(23 / 255.0, 23 / 255.0, 24 / 255.0, 1.0)]], [['PM_belt_strap_brownleather_01+PM_belt_buckle_ovalgold_01', VBase4(41 / 255.0, 29 / 255.0, 14 / 255.0, 1.0)]], [['PM_belt_strap_blackleather_01+PM_belt_buckle_ovalgold_02', VBase4(23 / 255.0, 23 / 255.0, 24 / 255.0, 1.0)]], [['PM_belt_sash_bluegold', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['PM_belt_sash_goldtassel', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['PM_belt_sash_pink', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['PM_belt_sash_red', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['PM_belt_strap_oval_gold_brownleather+PM_belt_buckle_oval_gold_brownleather', VBase4(23 / 255.0, 23 / 255.0, 24 / 255.0, 1.0)]], [['PM_belt_strap_oval_goldskull_blackleather+PM_belt_buckle_oval_goldskull_blackleather', VBase4(23 / 255.0, 23 / 255.0, 24 / 255.0, 1.0)]], [['PM_belt_strap_square_sculpture_blackbutton+PM_belt_buckle_square_sculpture_blackbutton', VBase4(24 / 255.0, 10 / 255.0, 2 / 255.0, 
1.0)]], [['PM_belt_strap_square_silver_blueleather+PM_belt_buckle_square_silver_blueleather', VBase4(24 / 255.0, 10 / 255.0, 2 / 255.0, 1.0)]], [['PM_belt_sash_red_basic_outfit+PM_belt_sash_red_basic_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['PM_belt_strap_square_advanced_outfit+PM_belt_buckle_square_advanced_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['PM_belt_strap_square_advanced_outfit+PM_belt_buckle_square_intermediate_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['PM_belt_sash_assassin', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['PM_belt_sash_bountyhunter', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['PM_belt_sash_corsair', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['PM_belt_sash_peacock', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['PM_belt_sash_prince', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['PM_belt_strap_privateer+PM_belt_buckle_square_privateer', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['PM_belt_strap_scourge+PM_belt_buckle_square_scourge', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['PM_belt_strap_seaserpent+PM_belt_buckle_square_seaserpent', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['PM_belt_strap_zombie+PM_belt_buckle_square_zombie', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]]], 'SHOE': [[['PM_none', VBase4(1.0, 1.0, 1.0, 1.0)]], [['PM_shoe_tall_boots_TanWithFlap', VBase4(40 / 255.0, 32 / 255.0, 24 / 255.0, 1.0)], ['PM_shoe_tall_boots_2buckle', VBase4(17 / 255.0, 16 / 255.0, 14 / 255.0, 1.0)], ['PM_shoe_tall_boots_lace', VBase4(60 / 255.0, 47 / 255.0, 33 / 255.0, 1.0)], ['PM_shoe_tall_boots_leatherlower', VBase4(35 / 255.0, 27 / 255.0, 24 / 255.0, 1.0)], ['PM_shoe_tall_boots_black_furtop', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shoe_tall_boots_blue_straps', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shoe_tall_boots_brown_furtop', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shoe_tall_boots_brown_laces', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shoe_tall_boots_advanced_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shoe_tall_boots_intermediate_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shoe_tall_boots_blue_furtop', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shoe_tall_boots_emerald', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shoe_tall_boots_royal', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shoe_tall_boots_spurs', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shoe_tall_boots_StPatricks', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shoe_tall_boots_valentines', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shoe_medium_boot_china', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shoe_medium_boot_peacock', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shoe_medium_boot_seaserpent', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['PM_shoe_medium_boots_laced', VBase4(5 / 255.0, 5 / 255.0, 5 / 255.0, 1.0)], ['PM_shoe_medium_boots_buckle', VBase4(36 / 255.0, 34 / 255.0, 31 / 255.0, 1.0)], ['PM_shoe_medium_boots_lacefront', VBase4(35 / 255.0, 29 / 255.0, 24 / 255.0, 1.0)], ['PM_shoe_medium_boots_plain', VBase4(36 / 255.0, 32 / 255.0, 27 / 255.0, 1.0)], ['PM_shoe_medium_boots_brown', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shoe_medium_boots_brown_greentops', VBase4(255 / 
255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shoe_medium_boots_light_brown', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shoe_medium_boots_blue_basic_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shoe_medium_boots_Xmas', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shoe_medium_boots_mardiGras', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shoe_medium_boots_blue', VBase4(0 / 255.0, 0 / 255.0, 255 / 255.0, 1.0)], ['PM_shoe_short_boot_bountyhunter', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['PM_shoe_navy', VBase4(63 / 255.0, 58 / 255.0, 48 / 255.0, 1.0)], ['PM_shoe_navy_buckle', VBase4(63 / 255.0, 58 / 255.0, 48 / 255.0, 1.0)], ['PM_shoe_navy_flap', VBase4(63 / 255.0, 58 / 255.0, 48 / 255.0, 1.0)], ['PM_shoe_navy_lace', VBase4(63 / 255.0, 58 / 255.0, 48 / 255.0, 1.0)], ['PM_shoe_navy_singlestrap', VBase4(63 / 255.0, 58 / 255.0, 48 / 255.0, 1.0)], ['PM_shoe_navy_diplomat', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shoe_navy_prince', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['PM_eitc', VBase4(16 / 255.0, 16 / 255.0, 16 / 255.0, 1.0)], ['PM_shoe_eitc_boots_assassin', VBase4(16 / 255.0, 16 / 255.0, 16 / 255.0, 1.0)], ['PM_shoe_eitc_boots_baron', VBase4(16 / 255.0, 16 / 255.0, 16 / 255.0, 1.0)]], [['PM_shoe_cuff_boots_goldtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shoe_cuff_boots_redtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shoe_cuff_boots_corsair', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shoe_cuff_boots_privateer', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shoe_cuff_boots_scourge', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shoe_cuff_boots_wildfire', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['PM_shoe_cuff_boots_zombie', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]]]}, 'FEMALE': {'HAT': [[['hat_none', VBase4(1.0, 1.0, 1.0, 1.0)]], [['FP_hat_dress_base+FP_hat_dress_feather', VBase4(118 / 255.0, 104 / 255.0, 70 / 255.0, 1.0)], ['FP_hat_dress_blue_purplefeather+FP_hat_dress_blue_purplefeather_feather', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_hat_dress_green_stringband+FP_hat_dress_green_stringband_feather', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_hat_dress_pink_bluefeather+FP_hat_dress_pink_bluefeather_feather', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_hat_dress_purple_butterfly+FP_hat_dress_purple_butterfly_feather', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_hat_dress_advanced_outfit+FP_hat_dress_advanced_outfit_feather', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_hat_dress_intermediate_outfit+FP_hat_dress_intermediate_outfit_feather', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_hat_dress_privateer+FP_hat_feather_privateer', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_navy', VBase4(21 / 255.0, 20 / 255.0, 23 / 255.0, 1.0)], ['hat_navy_diplomat', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_navy_seaserpent', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['FP_navy_hat+FP_hat_dress_feather', VBase4(122 / 255.0, 100 / 255.0, 65 / 255.0, 1.0)], ['FP_hat_featherhat_baroness+FP_hat_featherhat_feather_baroness', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_hat_feather_hat_prince+FP_hat_feather_prince', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['FP_hat_worker', VBase4(162 / 255.0, 162 / 255.0, 162 / 255.0, 1.0)]], 
[['hat_bandana_plain', VBase4(192 / 255.0, 192 / 255.0, 192 / 255.0, 1.0)], ['hat_bandana_full_blue', VBase4(111 / 255.0, 148 / 255.0, 148 / 255.0, 1.0)], ['hat_bandana_full_skullcrossbones', VBase4(29 / 255.0, 29 / 255.0, 29 / 255.0, 1.0)], ['hat_bandana_full_purple', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_bandana_full_redstripes', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_bandana_full_polkadot_basic_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['pir_t_clo_upt_bandana_thanks08', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_bandana_redsilk', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_bandana_china', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_bandana_wildfire', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_bandana_plain', VBase4(192 / 255.0, 192 / 255.0, 192 / 255.0, 1.0)], ['hat_bandana_full_blue', VBase4(111 / 255.0, 148 / 255.0, 148 / 255.0, 1.0)], ['hat_bandana_full_skullcrossbones', VBase4(29 / 255.0, 29 / 255.0, 29 / 255.0, 1.0)], ['hat_bandana_full_purple', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_bandana_full_redstripes', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_french+hat_french_feather', VBase4(32 / 255.0, 60 / 255.0, 25 / 255.0, 1.0)], ['hat_tricorn_mardiGras+hat_french_feather_mardiGras', VBase4(32 / 255.0, 60 / 255.0, 25 / 255.0, 1.0)], ['hat_tricorn_assassin+hat_feather_assassin', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_tricorn_peacock+hat_feather_peacock', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_tricorn_scourge+hat_feather_scourge', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_barbossa+hat_spanish_feather', VBase4(75 / 255.0, 50 / 255.0, 25 / 255.0, 1.0)], ['hat_spanish_zombie+hat_feather_zombie', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_french_1_blue_goldtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_french_1_dkgreen_goldtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_french_1_violet_goldtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_french_2_black_goldtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_french_2_brown_leather', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_french_3_black_goldtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_french_3_navyblue_goldtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_spanish_1_black', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_spanish_1_brown', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_spanish_1_red', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_spanish_2_bronze', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_spanish_2_steel', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_spanish_2_steel_embossed', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_spanish_2_steel_rusted', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_spanish_3_black_redband', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_spanish_3_brown_leather', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_spanish_3_burgundy_black', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_spanish_3_grey_brownband', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_land_1_black_blueband', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_land_1_brown_leather', VBase4(255 / 255.0, 255 / 255.0, 
255 / 255.0, 1.0)], ['hat_land_1_straw', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_land_2_black_goldtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_land_2_blue_red_goldtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_land_3_steel', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_land_3_steel_goldinlay', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_holiday_blue', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_holiday_blue_white_stripes', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_holiday_green', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_holiday_orange', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_holiday_red', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_holiday_red_white', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_holiday_red_white_stripes', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_holiday_violet', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_holiday_white', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_holiday_yellow', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_holiday_Xmas', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_party_1_blue_red', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_party_1_green_orange', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_party_1_lightblue_pink', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_party_1_orange_green', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_party_1_pink_lightblue', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_party_1_purple_yellow', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_party_1_red_blue', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_party_1_red_yellow', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_party_1_yellow_purple', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_party_1_yellow_red', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_party_2_black_goldtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_party_2_blue_goldtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_party_2_brown_blackband_buckle', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_party_2_green_goldtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_party_2_StPatricks', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_gm_black_skullcrossbones', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_gm_dkgreen_skullcrossbones', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_gm_gold_black_skullcrossbones', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_gm_red_black_skullcrossbones', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_gm_red_dkgreen_skullcrossbones', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_gm_rose_skullcrossbones', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_tricorn_valentines', VBase4(60 / 255.0, 25 / 255.0, 25 / 255.0, 1.0)]], [['hat_beanie_plain', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_beanie_black_crossbones', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_beanie_blue_skull', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_beanie_greensilk', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_beanie_brown_beads', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_beanie_corsair', 
VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['hat_beanie_bountyhunter', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['hat_mushroom', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]]], 'SHIRT': [[['FP_shirt_short_sleeve_stitch', VBase4(181 / 255.0, 180 / 255.0, 168 / 255.0, 1.0)], ['FP_shirt_short_sleeve_3button', VBase4(171 / 255.0, 112 / 255.0, 94 / 255.0, 1.0)], ['FP_shirt_short_sleeve_collar', VBase4(152 / 255.0, 164 / 255.0, 144 / 255.0, 1.0)], ['FP_shirt_short_sleeve_ties', VBase4(142 / 255.0, 134 / 255.0, 150 / 255.0, 1.0)], ['FP_shirt_short_sleeve_bluelace', VBase4(80 / 255.0, 101 / 255.0, 111 / 255.0, 1.0)], ['FP_shirt_short_sleeve_pinkwhite', VBase4(113 / 255.0, 85 / 255.0, 100 / 255.0, 1.0)], ['FP_shirt_short_sleeve_yellowgold', VBase4(150 / 255.0, 131 / 255.0, 91 / 255.0, 1.0)], ['FP_shirt_short_sleeve_scourge', VBase4(150 / 255.0, 131 / 255.0, 91 / 255.0, 1.0)], ['FP_shirt_short_sleeve_seaserpent', VBase4(150 / 255.0, 131 / 255.0, 91 / 255.0, 1.0)]], [['FP_shirt_short_sleeve_puffy_smFrontLacing', VBase4(195 / 255.0, 205 / 255.0, 174 / 255.0, 1.0)], ['FP_shirt_short_sleeve_puffy_2ties', VBase4(224 / 255.0, 207 / 255.0, 182 / 255.0, 1.0)], ['FP_shirt_short_sleeve_puffy_3button', VBase4(151 / 255.0, 153 / 255.0, 135 / 255.0, 1.0)], ['FP_shirt_short_sleeve_puffy_front_bow', VBase4(157 / 255.0, 106 / 255.0, 110 / 255.0, 1.0)], ['FP_shirt_short_sleeve_puffy_lightgreen', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shirt_short_sleeve_puffy_powderblue', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shirt_short_sleeve_puffy_red_creamtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shirt_short_sleeve_puffy_red_goldtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['FP_shirt_long_sleeve_puffy_collarbuttons', VBase4(104 / 255.0, 100 / 255.0, 83 / 255.0, 1.0)], ['FP_shirt_long_sleeve_puffy_broach', VBase4(192 / 255.0, 146 / 255.0, 140 / 255.0, 1.0)], ['FP_shirt_long_sleeve_puffy_front_tie', VBase4(173 / 255.0, 181 / 255.0, 198 / 255.0, 1.0)], ['FP_shirt_long_sleeve_puffy_stitch', VBase4(95 / 255.0, 103 / 255.0, 94 / 255.0, 1.0)], ['FP_shirt_long_sleeve_puffy_blue_whitecuffs', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shirt_long_sleeve_puffy_olivegreen', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shirt_long_sleeve_puffy_purple_whitecuffs', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['FP_shirt_long_sleeve_lowcut_leather_corset_ruffles', VBase4(211 / 255.0, 194 / 255.0, 165 / 255.0, 1.0)], ['FP_shirt_long_sleeve_lowcut_3button', VBase4(103 / 255.0, 106 / 255.0, 93 / 255.0, 1.0)], ['FP_shirt_long_sleeve_lowcut_ruffles', VBase4(208 / 255.0, 192 / 255.0, 161 / 255.0, 1.0)], ['FP_shirt_long_sleeve_lowcut_ties', VBase4(201 / 255.0, 179 / 255.0, 148 / 255.0, 1.0)], ['FP_shirt_long_sleeve_lowcut_brown_greensleeves', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shirt_long_sleeve_lowcut_pink_whitecollar', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shirt_long_sleeve_lowcut_white_greysleeves', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shirt_long_sleeve_lowcut_tan_basic_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shirt_long_sleeve_lowcut_intermediate_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shirt_long_sleeve_lowcut_advanced_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shirt_long_sleeve_lowcut_assassin', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], 
['FP_shirt_long_sleeve_lowcut_corsair', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['FP_shirt_long_sleeve_collar_lacesleeve', VBase4(170 / 255.0, 166 / 255.0, 177 / 255.0, 1.0)], ['FP_shirt_long_sleeve_collar_collarbuttons', VBase4(81 / 255.0, 93 / 255.0, 78 / 255.0, 1.0)], ['FP_shirt_long_sleeve_collar_largestripes', VBase4(107 / 255.0, 65 / 255.0, 64 / 255.0, 1.0)], ['FP_shirt_long_sleeve_collar_stitches', VBase4(89 / 255.0, 96 / 255.0, 94 / 255.0, 1.0)], ['FP_shirt_long_sleeve_collar_white_brownvest', VBase4(86 / 255.0, 57 / 255.0, 34 / 255.0, 1.0)], ['FP_shirt_long_sleeve_collar_white_greenvest', VBase4(54 / 255.0, 56 / 255.0, 56 / 255.0, 1.0)], ['FP_shirt_long_sleeve_collar_white_redvest', VBase4(74 / 255.0, 26 / 255.0, 35 / 255.0, 1.0)], ['FP_shirt_long_sleeve_collar_caribbeanday', VBase4(74 / 255.0, 26 / 255.0, 35 / 255.0, 1.0)], ['FP_shirt_long_sleeve_collar_cincodemayo', VBase4(74 / 255.0, 26 / 255.0, 35 / 255.0, 1.0)], ['FP_shirt_long_sleeve_collar_guyfawkes', VBase4(74 / 255.0, 26 / 255.0, 35 / 255.0, 1.0)], ['FP_shirt_long_sleeve_collar_halloween', VBase4(74 / 255.0, 26 / 255.0, 35 / 255.0, 1.0)], ['FP_shirt_long_sleeve_collar_summersolstice', VBase4(74 / 255.0, 26 / 255.0, 35 / 255.0, 1.0)], ['FP_shirt_long_sleeve_collar_thanksgiving', VBase4(74 / 255.0, 26 / 255.0, 35 / 255.0, 1.0)], ['FP_shirt_long_sleeve_collar_winterholiday', VBase4(74 / 255.0, 26 / 255.0, 35 / 255.0, 1.0)], ['FP_shirt_long_sleeve_collar_carnival', VBase4(74 / 255.0, 26 / 255.0, 35 / 255.0, 1.0)], ['FP_shirt_long_sleeve_collar_chinesenewyear', VBase4(74 / 255.0, 26 / 255.0, 35 / 255.0, 1.0)], ['FP_shirt_long_sleeve_collar_valentines', VBase4(74 / 255.0, 26 / 255.0, 35 / 255.0, 1.0)], ['FP_shirt_long_sleeve_collar_firstfall', VBase4(74 / 255.0, 26 / 255.0, 35 / 255.0, 1.0)], ['FP_shirt_long_sleeve_collar_firstspring', VBase4(74 / 255.0, 26 / 255.0, 35 / 255.0, 1.0)], ['FP_shirt_long_sleeve_collar_newyearseve', VBase4(74 / 255.0, 26 / 255.0, 35 / 255.0, 1.0)], ['FP_shirt_long_sleeve_collar_stpatricks', VBase4(74 / 255.0, 26 / 255.0, 35 / 255.0, 1.0)], ['FP_shirt_long_sleeve_collar_wintersolstice', VBase4(74 / 255.0, 26 / 255.0, 35 / 255.0, 1.0)], ['FP_shirt_long_sleeve_collar_Xmas', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shirt_long_sleeve_collar_mardiGras', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shirt_long_sleeve_collar_diplomat', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shirt_long_sleeve_collar_prince', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['FP_shirt_long_sleeve_tall_collar_leather_vest_fleur', VBase4(174 / 255.0, 162 / 255.0, 132 / 255.0, 1.0)], ['FP_shirt_long_sleeve_tall_collar_buttons', VBase4(187 / 255.0, 179 / 255.0, 156 / 255.0, 1.0)], ['FP_shirt_long_sleeve_tall_collar_stitch', VBase4(149 / 255.0, 138 / 255.0, 113 / 255.0, 1.0)], ['FP_shirt_long_sleeve_tall_collar_ties', VBase4(237 / 255.0, 228 / 255.0, 203 / 255.0, 1.0)], ['FP_shirt_long_sleeve_tall_collar_green', VBase4(97 / 255.0, 115 / 255.0, 39 / 255.0, 1.0)], ['FP_shirt_long_sleeve_tall_collar_lightblue', VBase4(93 / 255.0, 116 / 255.0, 125 / 255.0, 1.0)], ['FP_shirt_long_sleeve_tall_collar_purplewhite', VBase4(137 / 255.0, 121 / 255.0, 156 / 255.0, 1.0)], ['FP_shirt_long_sleeve_tall_collar_whiteruff', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shirt_long_sleeve_tall_baroness', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['FP_gypsy_dress_a', VBase4(151 / 255.0, 85 / 255.0, 23 / 255.0, 1.0)], ['FP_gypsy_dress_b', VBase4(86 / 255.0, 43 
/ 255.0, 29 / 255.0, 1.0)], ['FP_bartender_dress_a', VBase4(79 / 255.0, 89 / 255.0, 115 / 255.0, 1.0)], ['FP_shopkeeps_dress_a', VBase4(79 / 255.0, 89 / 255.0, 115 / 255.0, 1.0)]]], 'VEST': [[['FP_none', VBase4(1.0, 1.0, 1.0, 1.0)]], [['FP_vest_closed_clothtwobutton', VBase4(169 / 255.0, 176 / 255.0, 180 / 255.0, 1.0)], ['FP_vest_closed_plain', VBase4(188 / 255.0, 191 / 255.0, 165 / 255.0, 1.0)], ['FP_vest_closed_stripes', VBase4(162 / 255.0, 170 / 255.0, 175 / 255.0, 1.0)], ['FP_vest_closed_ties', VBase4(178 / 255.0, 141 / 255.0, 108 / 255.0, 1.0)], ['FP_vest_closed_browngold', VBase4(80 / 255.0, 35 / 255.0, 27 / 255.0, 1.0)], ['FP_vest_closed_brownpurple', VBase4(96 / 255.0, 42 / 255.0, 50 / 255.0, 1.0)], ['FP_vest_closed_lightgreen', VBase4(93 / 255.0, 91 / 255.0, 57 / 255.0, 1.0)], ['FP_vest_closed_redblack', VBase4(23 / 255.0, 15 / 255.0, 15 / 255.0, 1.0)], ['FP_vest_closed_whiteblue', VBase4(53 / 255.0, 63 / 255.0, 70 / 255.0, 1.0)], ['FP_vest_closed_yellowgreen', VBase4(104 / 255.0, 78 / 255.0, 26 / 255.0, 1.0)], ['FP_vest_closed_advanced_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['FP_vest_lowcut_clothtwobutton', VBase4(159 / 255.0, 170 / 255.0, 182 / 255.0, 1.0)], ['FP_vest_lowcut_onebutton', VBase4(212 / 255.0, 169 / 255.0, 123 / 255.0, 1.0)], ['FP_vest_lowcut_stripes', VBase4(143 / 255.0, 136 / 255.0, 92 / 255.0, 1.0)], ['FP_vest_lowcut_ties', VBase4(155 / 255.0, 61 / 255.0, 51 / 255.0, 1.0)], ['FP_vest_lowcut_bluegold', VBase4(51 / 255.0, 79 / 255.0, 89 / 255.0, 1.0)], ['FP_vest_lowcut_browngold', VBase4(103 / 255.0, 47 / 255.0, 28 / 255.0, 1.0)], ['FP_vest_lowcut_greenyellow', VBase4(41 / 255.0, 79 / 255.0, 49 / 255.0, 1.0)], ['FP_vest_lowcut_lightyellow', VBase4(116 / 255.0, 116 / 255.0, 55 / 255.0, 1.0)], ['FP_vest_lowcut_purplegold', VBase4(64 / 255.0, 45 / 255.0, 90 / 255.0, 1.0)], ['FP_vest_lowcut_redblack', VBase4(76 / 255.0, 6 / 255.0, 7 / 255.0, 1.0)], ['FP_vest_lowcut_intermediate_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_vest_lowcut_brownpillowvest', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_vest_lowcut_darkbluegold', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_vest_lowcut_redbrown', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_vest_low_cut_prince', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['FP_corset_high_LeatherStraps', VBase4(82 / 255.0, 82 / 255.0, 82 / 255.0, 1.0)], ['FP_corset_high_FrillyLacy', VBase4(127 / 255.0, 69 / 255.0, 63 / 255.0, 1.0)], ['FP_corset_high_SimpleCanvas', VBase4(118 / 255.0, 106 / 255.0, 61 / 255.0, 1.0)], ['zomb_corset_low_fourlaces', VBase4(121 / 255.0, 124 / 255.0, 103 / 255.0, 1.0)], ['FP_corset_high_bluegray', VBase4(67 / 255.0, 78 / 255.0, 84 / 255.0, 1.0)], ['FP_corset_high_lightblue', VBase4(96 / 255.0, 112 / 255.0, 117 / 255.0, 1.0)], ['FP_corset_high_yellow', VBase4(126 / 255.0, 124 / 255.0, 83 / 255.0, 1.0)], ['FP_corset_high_peacock', VBase4(126 / 255.0, 124 / 255.0, 83 / 255.0, 1.0)], ['FP_corset_high_zombie', VBase4(126 / 255.0, 124 / 255.0, 83 / 255.0, 1.0)]], [['FP_corset_low_fourlaces', VBase4(142 / 255.0, 78 / 255.0, 18 / 255.0, 1.0)], ['FP_corset_low_print', VBase4(110 / 255.0, 130 / 255.0, 150 / 255.0, 1.0)], ['FP_corset_low_ribs', VBase4(243 / 255.0, 224 / 255.0, 186 / 255.0, 1.0)], ['FP_corset_low_blue_whitecross', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_corset_low_green_goldbuttons', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_corset_low_white_redvest', VBase4(255 / 255.0, 255 / 255.0, 
255 / 255.0, 1.0)], ['FP_corset_low_bountyhunter', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_corset_low_privateer', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_corset_low_wildfire', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['FP_navy', VBase4(183 / 255.0, 177 / 255.0, 165 / 255.0, 1.0)]]], 'COAT': [[['FP_none', VBase4(1.0, 1.0, 1.0, 1.0)]], [['FP_coat_long_patchwork', VBase4(189 / 255.0, 178 / 255.0, 145 / 255.0, 1.0)], ['FP_coat_long_2button', VBase4(179 / 255.0, 155 / 255.0, 130 / 255.0, 1.0)], ['FP_coat_long_3buttonstripes', VBase4(85 / 255.0, 94 / 255.0, 97 / 255.0, 1.0)], ['FP_coat_long_pockets', VBase4(126 / 255.0, 81 / 255.0, 70 / 255.0, 1.0)], ['FP_coat_long_browngold', VBase4(64 / 255.0, 51 / 255.0, 27 / 255.0, 1.0)], ['FP_coat_long_black_whitesleeves', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_coat_long_blue_white_collar', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_coat_long_red_whitesleeves', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_coat_long_purple', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_coat_long_advanced_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_coat_long_goldtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_coat_long_purple_enhance_goldtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_coat_long_redgold_whitesleeves', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_coat_long_privateer', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_coat_long_scourge', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_coat_short_seaserpent', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_coat_long_zombie', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['FP_coat_short_crocodileskin', VBase4(104 / 255.0, 102 / 255.0, 68 / 255.0, 1.0)], ['FP_coat_short_buttons', VBase4(83 / 255.0, 81 / 255.0, 77 / 255.0, 1.0)], ['FP_coat_short_pockets', VBase4(134 / 255.0, 110 / 255.0, 80 / 255.0, 1.0)], ['FP_coat_short_stripes', VBase4(153 / 255.0, 131 / 255.0, 95 / 255.0, 1.0)], ['FP_coat_short_bluegold', VBase4(40 / 255.0, 45 / 255.0, 56 / 255.0, 1.0)], ['FP_coat_short_blue_black_trim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_coat_short_gold_black_trim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_coat_short_grey_gold_buttons', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_coat_short_white_gold_filagree', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_coat_short_assassin', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_coat_short_baroness', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_coat_short_diplomat', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_coat_short_prince', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['FP_navy', VBase4(148 / 255.0, 29 / 255.0, 29 / 255.0, 1.0)]], [['PM_eitc', VBase4(148 / 255.0, 29 / 255.0, 29 / 255.0, 1.0)], ['FP_coat_closed_china', VBase4(148 / 255.0, 29 / 255.0, 29 / 255.0, 1.0)]]], 'PANT': [[['FP_pant_short_pants_patchwork', VBase4(109 / 255.0, 119 / 255.0, 114 / 255.0, 1.0)], ['FP_pant_short_pants_4buttonflap', VBase4(88 / 255.0, 76 / 255.0, 60 / 255.0, 1.0)], ['FP_pant_short_pants_frontties', VBase4(54 / 255.0, 58 / 255.0, 58 / 255.0, 1.0)], ['FP_pant_short_pants_largesidestripe', VBase4(81 / 255.0, 65 / 255.0, 66 / 255.0, 1.0)], ['FP_pant_short_pants_stitch', VBase4(116 / 255.0, 110 / 255.0, 89 / 255.0, 1.0)], ['FP_pant_short_pants_striped', VBase4(151 / 255.0, 
133 / 255.0, 106 / 255.0, 1.0)], ['FP_pant_short_pants_red', VBase4(90 / 255.0, 27 / 255.0, 27 / 255.0, 1.0)], ['FP_pant_short_pants_blue_goldbuttons', VBase4(22 / 255.0, 43 / 255.0, 58 / 255.0, 1.0)], ['FP_pant_short_pants_brightred', VBase4(92 / 255.0, 13 / 255.0, 12 / 255.0, 1.0)], ['FP_pant_short_pants_brown', VBase4(58 / 255.0, 53 / 255.0, 39 / 255.0, 1.0)], ['FP_pant_short_pants_green_goldbuttons', VBase4(48 / 255.0, 74 / 255.0, 32 / 255.0, 1.0)], ['FP_pant_short_pants_greenstripes', VBase4(23 / 255.0, 44 / 255.0, 43 / 255.0, 1.0)], ['FP_pant_short_pants_purple', VBase4(43 / 255.0, 29 / 255.0, 42 / 255.0, 1.0)], ['FP_pant_short_pants_blue_basic_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_short_pants_advanced_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_short_pants_intermediate_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_short_pants_goldtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_short_pants_assassin', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_short_pants_baroness', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_short_pants_china', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_short_pants_diplomat', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_short_pants_privateer', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_short_pants_scourge', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_short_pants_seaserpent', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_short_pants_zombie', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['FP_pant_shorts_patchwork', VBase4(53 / 255.0, 44 / 255.0, 30 / 255.0, 1.0)], ['FP_pant_shorts_fronttie', VBase4(69 / 255.0, 73 / 255.0, 77 / 255.0, 1.0)], ['FP_pant_shorts_lightcloth', VBase4(110 / 255.0, 94 / 255.0, 82 / 255.0, 1.0)], ['FP_pant_shorts_sidebuttons', VBase4(56 / 255.0, 59 / 255.0, 39 / 255.0, 1.0)], ['FP_pant_shorts_sideties', VBase4(78 / 255.0, 57 / 255.0, 51 / 255.0, 1.0)], ['zomb_pant_shorts_sidebuttons', VBase4(144 / 255.0, 135 / 255.0, 111 / 255.0, 1.0)], ['FP_pant_shorts_green_sidebutton', VBase4(73 / 255.0, 80 / 255.0, 45 / 255.0, 1.0)], ['FP_pant_shorts_blue_stripes', VBase4(44 / 255.0, 59 / 255.0, 70 / 255.0, 1.0)], ['FP_pant_shorts_brownsilver', VBase4(71 / 255.0, 54 / 255.0, 43 / 255.0, 1.0)], ['FP_pant_shorts_pinkgold', VBase4(96 / 255.0, 49 / 255.0, 53 / 255.0, 1.0)], ['FP_pant_shorts_purplegold', VBase4(69 / 255.0, 55 / 255.0, 99 / 255.0, 1.0)], ['FP_pant_shorts_redblack', VBase4(33 / 255.0, 37 / 255.0, 41 / 255.0, 1.0)], ['FP_pant_shorts_redgold', VBase4(117 / 255.0, 20 / 255.0, 20 / 255.0, 1.0)], ['FP_pant_shorts_blackredstrips', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_shorts_brown_silverbutton', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_shorts_brownsilver_enhance', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_shorts_green_tealtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_shorts_pinkgoldtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_shorts_redsilk', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_shorts_mardiGras', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_shorts_bountyhunter', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_shorts_corsair', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['FP_pant_skirt_tan', VBase4(176 / 255.0, 165 / 
255.0, 128 / 255.0, 1.0)], ['FP_pant_skirt_patchwork', VBase4(107 / 255.0, 103 / 255.0, 67 / 255.0, 1.0)], ['FP_pant_skirt_layered', VBase4(110 / 255.0, 63 / 255.0, 51 / 255.0, 1.0)], ['FP_pant_skirt_leathertrim', VBase4(151 / 255.0, 138 / 255.0, 99 / 255.0, 1.0)], ['FP_pant_skirt_slip', VBase4(187 / 255.0, 179 / 255.0, 160 / 255.0, 1.0)], ['FP_pant_skirt_plain', VBase4(115 / 255.0, 132 / 255.0, 137 / 255.0, 1.0)], ['FP_pant_skirt_print', VBase4(100 / 255.0, 123 / 255.0, 110 / 255.0, 1.0)], ['FP_pant_skirt_red', VBase4(94 / 255.0, 28 / 255.0, 26 / 255.0, 1.0)], ['FP_pant_skirt_brown', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_skirt_green', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_skirt_lightblue', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_skirt_pink', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_skirt_red_whitebelt', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_skirt_yellow', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_skirt_greenembroidery', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_skirt_greenpurple', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_skirt_StPatricks', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_skirt_Xmas', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_skirt_red_whitebelt_valentines', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_skirt_peacock', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_skirt_prince', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_pant_skirt_wildfire', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['FP_gypsy_dress_a', VBase4(151 / 255.0, 85 / 255.0, 23 / 255.0, 1.0)], ['FP_gypsy_dress_b', VBase4(86 / 255.0, 43 / 255.0, 29 / 255.0, 1.0)]], [['FP_bartender_dress_a', VBase4(79 / 255.0, 89 / 255.0, 115 / 255.0, 1.0)], ['FP_shopkeeps_dress_a', VBase4(79 / 255.0, 89 / 255.0, 115 / 255.0, 1.0)]], [['FP_navy', VBase4(230 / 255.0, 230 / 255.0, 230 / 255.0, 1.0)]]], 'BELT': [[['FP_none', VBase4(1.0, 1.0, 1.0, 1.0)]], [['FP_belt_sash_goldbuckle', VBase4(97 / 255.0, 90 / 255.0, 85 / 255.0, 1.0)]], [['FP_belt_sash_pattern', VBase4(46 / 255.0, 48 / 255.0, 17 / 255.0, 1.0)]], [['FP_belt_sash_tassles', VBase4(58 / 255.0, 42 / 255.0, 26 / 255.0, 1.0)]], [['FP_belt_sash_goldbuckle', VBase4(97 / 255.0, 90 / 255.0, 85 / 255.0, 1.0)]], [['FP_belt_strap_black+FP_belt_buckle_square_dark', VBase4(24 / 255.0, 10 / 255.0, 2 / 255.0, 1.0)]], [['FP_belt_strap_RivetsSkullBuckle+FP_belt_buckle_square', VBase4(31 / 255.0, 23 / 255.0, 13 / 255.0, 1.0)]], [['FP_belt_strap_cloth+FP_belt_buckle_corners', VBase4(35 / 255.0, 39 / 255.0, 4 / 255.0, 1.0)]], [['FP_belt_strap_studs+FP_belt_buckle_circles', VBase4(41 / 255.0, 35 / 255.0, 19 / 255.0, 1.0)]], [['FP_belt_strap_ties+FP_belt_buckle_pattern', VBase4(49 / 255.0, 33 / 255.0, 12 / 255.0, 1.0)]], [['FP_belt_strap_weave+FP_belt_buckle_weave', VBase4(52 / 255.0, 43 / 255.0, 27 / 255.0, 1.0)]], [['FP_belt_sash_blue_belt', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['FP_belt_sash_red_furtrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['FP_belt_strap_square_brown_silvertrim+FP_belt_buckle_square_brown_silvertrim', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['FP_belt_strap_square_gold_design+FP_belt_buckle_square_gold_design', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['FP_belt_sash_red_basic_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 
1.0)]], [['FP_belt_strap_square_advanced_outfit+FP_belt_buckle_square_advanced_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['FP_belt_strap_square_intermediate_outfit+FP_belt_buckle_square_intermediate_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['FP_belt_sash_mardiGras', VBase4(255 / 255.0, 0 / 255.0, 0 / 255.0, 1.0)]], [['FP_belt_sash_assassin', VBase4(255 / 255.0, 0 / 255.0, 0 / 255.0, 1.0)]], [['FP_belt_sash_bountyhunter', VBase4(255 / 255.0, 0 / 255.0, 0 / 255.0, 1.0)]], [['FP_belt_sash_corsair', VBase4(255 / 255.0, 0 / 255.0, 0 / 255.0, 1.0)]], [['FP_belt_sash_peacock', VBase4(255 / 255.0, 0 / 255.0, 0 / 255.0, 1.0)]], [['FP_belt_strap_privateer+FP_belt_buckle_square_privateer', VBase4(255 / 255.0, 0 / 255.0, 0 / 255.0, 1.0)]], [['FP_belt_strap_scourge+FP_belt_buckle_square_scourge', VBase4(255 / 255.0, 0 / 255.0, 0 / 255.0, 1.0)]], [['FP_belt_strap_seaserpent+FP_belt_buckle_square_seaserpent', VBase4(255 / 255.0, 0 / 255.0, 0 / 255.0, 1.0)]], [['FP_belt_sash_wildfire', VBase4(255 / 255.0, 0 / 255.0, 0 / 255.0, 1.0)]], [['FP_belt_strap_zombie+FP_belt_buckle_square_zombie', VBase4(255 / 255.0, 0 / 255.0, 0 / 255.0, 1.0)]]], 'SHOE': [[['FP_none', VBase4(1.0, 1.0, 1.0, 1.0)]], [['FP_shoe_short_boots_celticpattern', VBase4(32 / 255.0, 28 / 255.0, 23 / 255.0, 1.0)], ['FP_shoe_short_boots_3buckle', VBase4(33 / 255.0, 27 / 255.0, 11 / 255.0, 1.0)], ['FP_shoe_short_boots_plain', VBase4(34 / 255.0, 25 / 255.0, 18 / 255.0, 1.0)], ['FP_shoe_short_boots_weave', VBase4(34 / 255.0, 31 / 255.0, 20 / 255.0, 1.0)], ['FP_shoe_short_boots_black_torntop', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_short_boots_brown_sidebuttons', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_short_boots_brown_sidelaces', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_short_boots_brown_stitching', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_short_boots_blue_basic_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_short_boots_advanced_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_short_boots_roundbuckle', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_short_boots_Xmas', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_short_boots_valentines', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_short_boots_mardiGras', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_short_diplomat', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_short_prince', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['FP_shoe_medium_boots_BuckleSkullSole', VBase4(23 / 255.0, 14 / 255.0, 5 / 255.0, 1.0)], ['FP_shoe_medium_boots_studs', VBase4(23 / 255.0, 23 / 255.0, 20 / 255.0, 1.0)], ['FP_shoe_medium_boots_ties', VBase4(9 / 255.0, 8 / 255.0, 7 / 255.0, 1.0)], ['FP_shoe_medium_boots_weavebuckle', VBase4(18 / 255.0, 12 / 255.0, 1 / 255.0, 1.0)], ['FP_shoe_medium_boots_black-topstitch', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_medium_boots_brown-sidestitch', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_medium_boots_orange', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_medium_boots_purple', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_medium_boots_greenpurple', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_medium_boots_baroness', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_medium_boots_privateer', VBase4(255 / 
255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['FP_shoe_knee_high_boots_brown', VBase4(20 / 255.0, 17 / 255.0, 14 / 255.0, 1.0)], ['FP_shoe_knee_high_boots_smallchains', VBase4(5 / 255.0, 5 / 255.0, 9 / 255.0, 1.0)], ['FP_shoe_knee_high_boots_buckle', VBase4(17 / 255.0, 15 / 255.0, 12 / 255.0, 1.0)], ['FP_shoe_knee_high_boots_plain', VBase4(47 / 255.0, 35 / 255.0, 14 / 255.0, 1.0)], ['FP_shoe_knee_high_boots_ties', VBase4(36 / 255.0, 24 / 255.0, 8 / 255.0, 1.0)], ['FP_shoe_knee_high_boots_blue', VBase4(30 / 255.0, 44 / 255.0, 56 / 255.0, 1.0)], ['FP_shoe_knee_high_boots_burgundy', VBase4(54 / 255.0, 16 / 255.0, 21 / 255.0, 1.0)], ['FP_shoe_knee_high_boots_lightgreen', VBase4(72 / 255.0, 82 / 255.0, 51 / 255.0, 1.0)], ['FP_shoe_knee_high_boots_tan', VBase4(87 / 255.0, 70 / 255.0, 37 / 255.0, 1.0)], ['FP_shoe_knee_high_boots_goldbuttons', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_knee_high_boots_StPatricks', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_knee_high_boots_assassin', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_knee_high_boots_peacock', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_knee_high_boots_scourge', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_knee_high_boots_wildfire', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_knee_high_boots_zombie', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['FP_shoe_tall_boots_celticstraps', VBase4(17 / 255.0, 16 / 255.0, 11 / 255.0, 1.0)], ['FP_shoe_tall_boots_1buckle', VBase4(25 / 255.0, 22 / 255.0, 17 / 255.0, 1.0)], ['FP_shoe_tall_boots_plain', VBase4(60 / 255.0, 42 / 255.0, 16 / 255.0, 1.0)], ['FP_shoe_tall_boots_weave', VBase4(30 / 255.0, 17 / 255.0, 14 / 255.0, 1.0)], ['FP_shoe_tall_boots_blue_stitches', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_tall_boots_red_anklebelts', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_tall_boots_teal_stitches', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_tall_boots_intermediate_outfit', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_tall_boots_silverstraps', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_tall_boots_violet_stitches', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_tall_boots_bountyhunter', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_tall_boots_china', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_tall_boots_corsair', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)], ['FP_shoe_tall_boots_seaserpent', VBase4(255 / 255.0, 255 / 255.0, 255 / 255.0, 1.0)]], [['FP_navy', VBase4(63 / 255.0, 58 / 255.0, 48 / 255.0, 1.0)]]]}}
navy_coat_geoms = [3, 4]
navy_pant_geoms = [4, 5]
shopkeep_pant_geoms = [6]
quickConfirmSet = set()
for gender in textures.keys():
    if gender == 'MALE':
        genderName = 'm'
    elif gender == 'FEMALE':
        genderName = 'f'
    clothing = textures[gender]
    for clothingType in clothing.keys():
        models = clothing[clothingType]
        for i in xrange(len(models)):
            for j in xrange(len(models[i])):
                quickConfirmSet.add((genderName, clothingType, i, j))
def getRandomClothingColor(level, pick=True):
    possibleColors = [0]
    for levelKey in DYE_COLOR_LEVEL:
        if level >= levelKey:
            possibleColors += DYE_COLOR_LEVEL[levelKey]
    if random.choice([0, 0, 1]):
        return 0
    else:
        return random.choice(possibleColors)
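# Note on the branch above: random.choice([0, 0, 1]) is truthy one time in
# three, so the function returns the undyed color (0) outright with
# probability 1/3; otherwise it draws uniformly from possibleColors, whose
# first entry is also 0, further biasing the result toward "no dye".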
TYPE_INDEX = 0
def getItemType(itemId):
    item = getItemById(itemId)
    if item:
        return item[TYPE_INDEX]
    else:
        return -1
ClothingTypeNames = {HAT: PLocalizer.Hat, SHIRT: PLocalizer.Shirt, VEST: PLocalizer.Vest, COAT: PLocalizer.Coat, PANT: PLocalizer.Pants, BELT: PLocalizer.Belt, SOCK: PLocalizer.Belt, SHOE: PLocalizer.Shoes}
def getItemTypeName(itemId):
    itemType = getItemType(itemId)
    return ClothingTypeNames.get(itemType, None)

def getClothingTypeName(typeId):
    return ClothingTypeNames.get(typeId, '')
BASIC_OUTFIT_PART_A = 0
BASIC_OUTFIT_PART_B = 1
BASIC_OUTFIT_PART_C = 2
BASIC_OUTFIT_PART_D = 3
BASIC_OUTFIT_PART_E = 4
INTERMEDIATE_OUTFIT_PART_A = 5
INTERMEDIATE_OUTFIT_PART_B = 6
INTERMEDIATE_OUTFIT_PART_C = 7
INTERMEDIATE_OUTFIT_PART_D = 8
INTERMEDIATE_OUTFIT_PART_E = 9
INTERMEDIATE_OUTFIT_PART_F = 10
ADVANCED_OUTFIT_PART_A = 11
ADVANCED_OUTFIT_PART_B = 12
ADVANCED_OUTFIT_PART_C = 13
ADVANCED_OUTFIT_PART_D = 14
ADVANCED_OUTFIT_PART_E = 15
ADVANCED_OUTFIT_PART_F = 16
ADVANCED_OUTFIT_PART_G = 17
VALENTINES_SHIRT = 18
POKER_BONUS_HAT = 19
questDrops = {BASIC_OUTFIT_PART_A: {'m': [ItemGlobals.RECRUIT_BANDANA, 0], 'f': [ItemGlobals.RECRUIT_BANDANA, 0]}, BASIC_OUTFIT_PART_B: {'m': [ItemGlobals.RECRUIT_LONG_SLEEVE, 0], 'f': [ItemGlobals.RECRUIT_TOP, 0]}, BASIC_OUTFIT_PART_C: {'m': [ItemGlobals.RECRUIT_TROUSERS, 0], 'f': [ItemGlobals.RECRUIT_CAPRIS, 0]}, BASIC_OUTFIT_PART_D: {'m': [ItemGlobals.RECRUIT_SASH, 0], 'f': [ItemGlobals.RECRUIT_SASH, 0]}, BASIC_OUTFIT_PART_E: {'m': [ItemGlobals.RECRUIT_BOOTS, 0], 'f': [ItemGlobals.RECRUIT_SHORT_BOOTS, 0]}, INTERMEDIATE_OUTFIT_PART_A: {'m': [ItemGlobals.TRAVELERS_OSTRICH_HAT, 0], 'f': [ItemGlobals.TRAVELERS_CAVALRY_HAT, 0]}, INTERMEDIATE_OUTFIT_PART_B: {'m': [ItemGlobals.TRAVELERS_PUFFY_SHIRT, 0], 'f': [ItemGlobals.TRAVELERS_TOP, 0]}, INTERMEDIATE_OUTFIT_PART_C: {'m': [ItemGlobals.TRAVELERS_VEST, 0], 'f': [ItemGlobals.TRAVELERS_LOOSE_VEST, 0]}, INTERMEDIATE_OUTFIT_PART_D: {'m': [ItemGlobals.TRAVELERS_TROUSERS, 0], 'f': [ItemGlobals.TRAVELERS_CAPRIS, 0]}, INTERMEDIATE_OUTFIT_PART_E: {'m': [ItemGlobals.SQUARE_TRAVELERS_BELT, 0], 'f': [ItemGlobals.TRAVELERS_BELT, 0]}, INTERMEDIATE_OUTFIT_PART_F: {'m': [ItemGlobals.TRAVELERS_BOOTS, 0], 'f': [ItemGlobals.TRAVELERS_TALL_BOOTS, 0]}, ADVANCED_OUTFIT_PART_A: {'m': [ItemGlobals.ADVENTURE_OSTRICH_HAT, 0], 'f': [ItemGlobals.ADVENTURE_CAVALRY_HAT, 0]}, ADVANCED_OUTFIT_PART_B: {'m': [ItemGlobals.ADVANCED_TANK, 0], 'f': [ItemGlobals.ADVENTURE_TOP, 0]}, ADVANCED_OUTFIT_PART_C: {'m': [ItemGlobals.OPEN_ADVENTURE_VEST, 0], 'f': [ItemGlobals.ADVENTURE_VEST, 0]}, ADVANCED_OUTFIT_PART_D: {'m': [ItemGlobals.ADVENTURE_LONG_COAT, 0], 'f': [ItemGlobals.ADVENTURE_RIDING_COAT, 0]}, ADVANCED_OUTFIT_PART_E: {'m': [ItemGlobals.ADVENTURE_BREECHES, 0], 'f': [ItemGlobals.ADVENTURE_CAPRIS, 0]}, ADVANCED_OUTFIT_PART_F: {'m': [ItemGlobals.SQUARE_ADVENTURE_BELT, 0], 'f': [ItemGlobals.ADVENTURE_BELT, 0]}, ADVANCED_OUTFIT_PART_G: {'m': [ItemGlobals.ADVENTURE_BOOTS, 0], 'f': [ItemGlobals.ADVENTURE_SHORT_BOOTS, 0]}, VALENTINES_SHIRT: {'m': [ItemGlobals.VALENTINES_SHIRT, 0], 'f': [ItemGlobals.VALENTINES_BLOUSE, 0]}, POKER_BONUS_HAT: {'m': [ItemGlobals.MAGENTA_OSTRICH_HAT, 0], 'f': [ItemGlobals.PURPLE_CAVALRY_HAT, 0]}}
quest_items = [
ItemGlobals.RECRUIT_BANDANA, ItemGlobals.RECRUIT_LONG_SLEEVE, ItemGlobals.RECRUIT_TOP, ItemGlobals.RECRUIT_TROUSERS, ItemGlobals.RECRUIT_CAPRIS, ItemGlobals.RECRUIT_SASH, ItemGlobals.RECRUIT_BOOTS, ItemGlobals.RECRUIT_SHORT_BOOTS, ItemGlobals.TRAVELERS_OSTRICH_HAT, ItemGlobals.TRAVELERS_CAVALRY_HAT, ItemGlobals.TRAVELERS_PUFFY_SHIRT, ItemGlobals.TRAVELERS_TOP, ItemGlobals.TRAVELERS_VEST, ItemGlobals.TRAVELERS_LOOSE_VEST, ItemGlobals.TRAVELERS_TROUSERS, ItemGlobals.TRAVELERS_CAPRIS, ItemGlobals.SQUARE_TRAVELERS_BELT, ItemGlobals.TRAVELERS_BELT, ItemGlobals.TRAVELERS_BOOTS, ItemGlobals.TRAVELERS_TALL_BOOTS, ItemGlobals.ADVENTURE_OSTRICH_HAT, ItemGlobals.ADVENTURE_CAVALRY_HAT, ItemGlobals.ADVANCED_TANK, ItemGlobals.ADVENTURE_TOP, ItemGlobals.OPEN_ADVENTURE_VEST, ItemGlobals.ADVENTURE_VEST, ItemGlobals.ADVENTURE_LONG_COAT, ItemGlobals.ADVENTURE_RIDING_COAT, ItemGlobals.ADVENTURE_BREECHES, ItemGlobals.ADVENTURE_CAPRIS, ItemGlobals.SQUARE_ADVENTURE_BELT, ItemGlobals.ADVENTURE_BELT, ItemGlobals.ADVENTURE_BOOTS, ItemGlobals.ADVENTURE_SHORT_BOOTS, ItemGlobals.VALENTINES_SHIRT, ItemGlobals.VALENTINES_BLOUSE, ItemGlobals.MAGENTA_OSTRICH_HAT, ItemGlobals.PURPLE_CAVALRY_HAT]
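# quest_items is the flat list of the item ids referenced by questDrops; as a
# sanity-check sketch (not part of the original source):
#   assert set(quest_items) == set(pair[0] for drop in questDrops.values() for pair in drop.values())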
def subtypeFromId(gen, tpNum, stNum):
    section = CLOTHING_NAMES[tpNum][gen][stNum]
    return section

def texFromId(gen, tpNum, stNum, texNum):
    texture = textures[gen][CLOTHING_STRING[tpNum]][stNum][texNum][0]
    return texture

def getLastModel(gen, type):
    return len(textures[gen][type]) - 1

def getLastTexture(gen, type, model):
    return len(textures[gen][type][model]) - 1

def doesTextureExist(gen, type, modelNum, texNum):
    try:
        model = textures[gen][CLOTHING_STRING[type]][modelNum]
    except:
        return False
    if not model:
        return False
    try:
        texture = model[texNum]
    except:
        return False
    if not texture:
        return False
    return True
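# A minimal usage sketch (index values hypothetical):
#   doesTextureExist('m', SHIRT, 0, 2)   # True when model 0 carries a third texture
#   doesTextureExist('m', SHIRT, 0, 99)  # False: texNum is out of range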
def isInMaP(id, gender, type, subT, tex):
    if subT in SELECTION_CHOICES['DEFAULT'][gender][CLOTHING_STRING[type]]:
        if tex in SELECTION_CHOICES['DEFAULT'][gender][CLOTHING_STRING[type]][subT]:
            return True
        else:
            return False
    else:
        return False

def isQuestDrop(id):
    if id in quest_items:
        return True
    else:
        return False
def printList():
    for gender in textures:
        for type in textures[gender]:
            for item in textures[gender][type]:
                for subtype in item:
                    outVal = [int(subtype[1].getX() * 256), int(subtype[1].getY() * 256), int(subtype[1].getZ() * 256)]
                    print str(subtype[0]), str(outVal)

def printList2():
    for gender in textures:
        for type in textures[gender]:
            itemNum = 0
            for item in textures[gender][type]:
                subtypeNum = 0
                for subtype in item:
                    map = False
                    if itemNum in SELECTION_CHOICES['DEFAULT'][gender][type]:
                        if subtypeNum in SELECTION_CHOICES['DEFAULT'][gender][type][itemNum]:
                            map = True
                    print str(subtype[0]) + ';', map
                    subtypeNum = subtypeNum + 1
                itemNum = itemNum + 1
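# printList writes one line per texture entry, pairing the texture name with
# its VBase4 color rescaled to integer RGB, e.g. (values hypothetical):
#   FP_shirt_long_sleeve_collar_stitches [89, 96, 94]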
| 492
| 73,666
| 0.648924
| 17,113
| 89,052
| 3.159469
| 0.045755
| 0.184434
| 0.175557
| 0.166457
| 0.76635
| 0.720445
| 0.706869
| 0.661815
| 0.617815
| 0.587076
| 0
| 0.274918
| 0.137448
| 89,052
| 181
| 73,667
| 492
| 0.428982
| 0.002369
| 0
| 0.2
| 0
| 0
| 0.305875
| 0.255412
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.048276
| null | null | 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6cb07f3856f934f340b6ac169902a27ebd9e9157
| 13,198
|
py
|
Python
|
pysmFISH/counting.py
|
ambrosejcarr/pysmFISH
|
0eb24355f70c0d5c9013a9407fd56f2e1e9ee3cb
|
[
"MIT"
] | 5
|
2018-05-29T23:03:19.000Z
|
2022-02-02T02:04:41.000Z
|
pysmFISH/counting.py
|
ambrosejcarr/pysmFISH
|
0eb24355f70c0d5c9013a9407fd56f2e1e9ee3cb
|
[
"MIT"
] | 3
|
2018-12-18T20:18:38.000Z
|
2019-01-18T22:47:45.000Z
|
pysmFISH/counting.py
|
ambrosejcarr/pysmFISH
|
0eb24355f70c0d5c9013a9407fd56f2e1e9ee3cb
|
[
"MIT"
] | 5
|
2018-08-10T14:54:54.000Z
|
2021-10-09T13:32:08.000Z
|
import pickle
import numpy as np
from skimage import io, img_as_float
from .dots_calling import thr_calculator
from .filtering import smFISH_filtering, nuclei_filtering
def filtering_and_counting(fpath_img_to_filter, filtered_png_img_gene_dirs, filtered_img_gene_dirs,
                           counting_gene_dirs, illumination_correction=False, plane_keep=None,
                           min_distance=5, stringency=0,
                           skip_genes_counting=None, skip_tags_counting=None):
    """
    Function used to clean the images and to count the smFISH dots.
    It is designed to process in parallel all the tmp file images
    stored as numpy arrays after conversion from the microscope format.

    Parameters:
    ------------

    fpath_img_to_filter: str
        path to the file to process.
    filtered_png_img_gene_dirs: list
        list of the paths of the directories where the filtered images
        are saved as pngs.
    filtered_img_gene_dirs: list
        list of the paths of the directories where the filtered images
        are saved as .npy.
    counting_gene_dirs: list
        list of the paths of the directories where the countings of the
        filtered images are saved.
    illumination_correction: bool
        if True the illumination correction is run on the dataset.
    plane_keep: list
        start and end point of the z-planes to keep. Default None
        keeps all the planes (ex. [2, -3]).
    min_distance: int
        minimum distance between dots.
    stringency: int
        stringency used to select the threshold used for counting.
    skip_genes_counting: list
        list of the genes to skip during counting.
    skip_tags_counting: list
        list of the tags inside the gene/staining names that mark an
        image as one that should not be counted.
    """
    # Treat missing skip lists as empty so the membership tests below
    # do not fail when the defaults (None) are used
    skip_genes_counting = skip_genes_counting or []
    skip_tags_counting = skip_tags_counting or []

    # Get info from the file name
    fname_split = fpath_img_to_filter.split('/')[-1].split('_')
    experiment_name = fname_split[0]
    hyb = fname_split[1]
    gene = fname_split[2]
    pos = fname_split[4].split('.')[0]

    # Load the image to process
    img_stack = np.load(fpath_img_to_filter)  # image is np.uint16
    img_stack = img_as_float(img_stack)

    # Remove extra planes. As it is for now, this step is mainly for
    # single-image usage; an automatic excess-plane removal function for
    # large-scale image analysis will be added later.
    if isinstance(plane_keep, list):
        img_stack = img_stack[plane_keep[0]:plane_keep[1], :, :]

    # Filter the image according to the gene
    if gene in skip_genes_counting or [tag for tag in skip_tags_counting if tag in gene]:
        # Remove the background from the nuclei
        img_filtered = nuclei_filtering(img_stack)
        counting_dict = None
    else:
        # Remove the background and enhance the smFISH signal
        img_filtered = smFISH_filtering(img_stack)
        # Count the dots in the whole image
        counting_dict = thr_calculator(img_filtered, min_distance, stringency)

    # Keep a copy of the non-converted image
    img_filtered_original = img_filtered.copy()

    # Convert the image to uint16:
    # clip the values above 1
    img_filtered[img_filtered > 1] = 1
    # scale to the max of the uint16 range
    img_filtered *= np.iinfo(np.uint16).max
    # round and convert to integer
    img_filtered = np.uint16(np.rint(img_filtered))

    # Save the images and the dictionary. This part may be removed from
    # the function if temporary storage is moved to RAM in order to
    # reduce i/o to the shared disk of the cluster.
    # Identify the directories for storing the images and the counting
    img_saving_dir_npy = [saving_dir for saving_dir in filtered_img_gene_dirs
                          if gene in saving_dir.split('/')[-2]][0]
    img_saving_dir_png = [saving_dir for saving_dir in filtered_png_img_gene_dirs
                          if gene in saving_dir.split('/')[-2]][0]

    # Save the images and, if it was performed, the counting
    fname_png = img_saving_dir_png + experiment_name + '_' + hyb + '_' + gene + '_' + 'pos_' + pos + '.png'
    io.imsave(fname_png, img_filtered)
    fname_npy = img_saving_dir_npy + experiment_name + '_' + hyb + '_' + gene + '_' + 'pos_' + pos + '.npy'
    np.save(fname_npy, img_filtered_original, allow_pickle=False)
    if counting_dict:
        # may be missing when the counting is not requested
        counting_saving_dir = [saving_dir for saving_dir in counting_gene_dirs
                               if gene in saving_dir.split('/')[-2]][0]
        fname = counting_saving_dir + experiment_name + '_' + hyb + '_' + gene + '_' + 'pos_' + pos + '.pkl'
        pickle.dump(counting_dict, open(fname, 'wb'))
    return
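# A minimal sketch of the parallel dispatch the docstring above refers to
# (png_dirs, npy_dirs, counting_dirs and fpaths_to_filter are hypothetical
# names; any Pool-style map over file paths would do):
#
#   from functools import partial
#   from multiprocessing import Pool
#
#   worker = partial(filtering_and_counting,
#                    filtered_png_img_gene_dirs=png_dirs,
#                    filtered_img_gene_dirs=npy_dirs,
#                    counting_gene_dirs=counting_dirs,
#                    skip_genes_counting=[],
#                    skip_tags_counting=['DAPI'])
#   with Pool() as pool:
#       pool.map(worker, fpaths_to_filter)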
def filtering_and_counting_experiment(fpath_img_to_filter, filtered_dir_path,
                                      counting_dir_path, exp_name, add_slash, plane_keep=None,
                                      min_distance=5, stringency=0):
    """
    Function to filter and count dots in the images generated from a small
    experiment.

    Parameters:
    ------------

    fpath_img_to_filter: str
        path to the file to process.
    filtered_dir_path: list
        list of the paths of the directories where the filtered images are saved.
    counting_dir_path: list
        list of the paths of the directories where the counting of filtered
        images are stored.
    exp_name: str
        name of the experiment to process.
    add_slash: str
        path separator used to split the file path and to assemble the
        output paths.
    plane_keep: list
        start and end point of the z-planes to keep. Default None
        keeps all the planes (ex. [2, -3]).
    min_distance: int
        minimum distance between dots.
    stringency: int
        stringency used to select the threshold used for counting.
    """
    # Load the image to process
    img_stack = np.load(fpath_img_to_filter)  # image is np.uint16
    img_stack = img_as_float(img_stack)

    # Remove extra planes. As it is for now, this step is mainly for
    # single-image usage; an automatic excess-plane removal function for
    # large-scale image analysis will be added later.
    if plane_keep:
        img_stack = img_stack[plane_keep[0]:plane_keep[1], :, :]

    channel = fpath_img_to_filter.split(add_slash)[-1].split('_')[-3]
    fov = fpath_img_to_filter.split(add_slash)[-1].split('_')[-1].split('.')[0]
    not_counting = ['Nuclei', 'Dapi', 'DAPI']

    # Filter the image according to the channel
    if channel in not_counting or '-IF' in channel or channel == 'polyA':
        # Remove the background from the nuclei
        img_filtered = nuclei_filtering(img_stack)
        counting_dict = None
    else:
        # Remove the background and enhance the smFISH signal
        img_filtered = smFISH_filtering(img_stack)
        # Count the dots in the whole image
        counting_dict = thr_calculator(img_filtered, min_distance, stringency)

    # Convert the image to uint16:
    # clip the values above 1
    img_filtered[img_filtered > 1] = 1
    # scale to the max of the uint16 range
    img_filtered *= np.iinfo(np.uint16).max
    # round and convert to integer
    img_filtered = np.uint16(np.rint(img_filtered))

    fname = fpath_img_to_filter.split(add_slash)[-1][:-4]
    fname_path_png = filtered_dir_path + add_slash + exp_name + '_' + fname + '.png'
    io.imsave(fname_path_png, img_filtered)
    if counting_dict:
        fname_path_pkl = counting_dir_path + add_slash + exp_name + '_' + fname + '.pkl'
        pickle.dump(counting_dict, open(fname_path_pkl, 'wb'))
    return
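# The channel/fov parsing above implies file names whose underscore-separated
# fields end with <channel>_<something>_<fov>.<ext> (layout inferred from the
# negative indices; the example name is hypothetical):
#   exp1_hyb1_Cy5_pos_03.npy  ->  channel='Cy5', fov='03'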
def filtering_and_counting_ill_correction(fpath_img_to_filter, illumination_function,
                                          filtered_png_img_gene_dirs, filtered_img_gene_dirs,
                                          counting_gene_dirs, illumination_correction=False, plane_keep=None,
                                          min_distance=5, stringency=0,
                                          skip_genes_counting=None, skip_tags_counting=None):
    """
    Function used to clean the images and to count the smFISH dots.
    Designed to work in parallel, processing all the tmp file images
    stored as numpy arrays after conversion from the microscope format.

    Parameters:
    ------------

    fpath_img_to_filter: str
        path to the file to process.
    illumination_function: np.array float64
        illumination function.
    filtered_png_img_gene_dirs: list
        list of the paths of the directories where the filtered images
        are saved as pngs.
    filtered_img_gene_dirs: list
        list of the paths of the directories where the filtered images
        are saved as .npy.
    counting_gene_dirs: list
        list of the paths of the directories where the countings of the
        filtered images are saved.
    illumination_correction: bool
        if True the illumination correction is run on the dataset.
    plane_keep: list
        start and end point of the z-planes to keep. Default None
        keeps all the planes (ex. [2, -3]).
    min_distance: int
        minimum distance between dots.
    stringency: int
        stringency used to select the threshold used for counting.
    skip_genes_counting: list
        list of the genes to skip during counting.
    skip_tags_counting: list
        list of the tags inside the gene/staining names that mark an
        image as one that should not be counted.
    """
    # Treat missing skip lists as empty so the membership tests below
    # do not fail when the defaults (None) are used
    skip_genes_counting = skip_genes_counting or []
    skip_tags_counting = skip_tags_counting or []

    # Get info from the file name
    fname_split = fpath_img_to_filter.split('/')[-1].split('_')
    experiment_name = fname_split[0]
    hyb = fname_split[1]
    gene = fname_split[2]
    pos = fname_split[4].split('.')[0]

    # Load the image to process
    img_stack = np.load(fpath_img_to_filter)  # image is np.uint16
    img_stack = img_as_float(img_stack)

    # Remove extra planes. As it is for now, this step is mainly for
    # single-image usage; an automatic excess-plane removal function for
    # large-scale image analysis will be added later.
    if isinstance(plane_keep, list):
        img_stack = img_stack[plane_keep[0]:plane_keep[1], :, :]

    # Correct for illumination
    img_stack = img_stack / illumination_function

    # Filter the image according to the gene
    if gene in skip_genes_counting or [tag for tag in skip_tags_counting if tag in gene]:
        # Remove the background from the nuclei
        img_filtered = nuclei_filtering(img_stack)
        counting_dict = None
    else:
        # Remove the background and enhance the smFISH signal
        img_filtered = smFISH_filtering(img_stack)
        # Count the dots in the whole image
        counting_dict = thr_calculator(img_filtered, min_distance, stringency)

    # Keep a copy of the non-converted image
    img_filtered_original = img_filtered.copy()

    # Convert the image to uint16:
    # clip the values above 1
    img_filtered[img_filtered > 1] = 1
    # scale to the max of the uint16 range
    img_filtered *= np.iinfo(np.uint16).max
    # round and convert to integer
    img_filtered = np.uint16(np.rint(img_filtered))

    # Save the images and the dictionary. This part may be removed from
    # the function if temporary storage is moved to RAM in order to
    # reduce i/o to the shared disk of the cluster.
    # Identify the directories for storing the images and the counting
    img_saving_dir_npy = [saving_dir for saving_dir in filtered_img_gene_dirs
                          if gene in saving_dir.split('/')[-2]][0]
    img_saving_dir_png = [saving_dir for saving_dir in filtered_png_img_gene_dirs
                          if gene in saving_dir.split('/')[-2]][0]

    # Save the images and, if it was performed, the counting
    fname_png = img_saving_dir_png + experiment_name + '_' + hyb + '_' + gene + '_' + 'pos_' + pos + '.png'
    io.imsave(fname_png, img_filtered)
    fname_npy = img_saving_dir_npy + experiment_name + '_' + hyb + '_' + gene + '_' + 'pos_' + pos + '.npy'
    np.save(fname_npy, img_filtered_original, allow_pickle=False)
    if counting_dict:
        # may be missing when the counting is not requested
        counting_saving_dir = [saving_dir for saving_dir in counting_gene_dirs
                               if gene in saving_dir.split('/')[-2]][0]
        fname = counting_saving_dir + experiment_name + '_' + hyb + '_' + gene + '_' + 'pos_' + pos + '.pkl'
        pickle.dump(counting_dict, open(fname, 'wb'))
    return
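# Shape note (an assumption; the docstring only states np.array float64):
# img_stack / illumination_function relies on numpy broadcasting, so a
# (z, y, x) stack divided by a single (y, x) flat-field image corrects every
# z-plane with the same illumination function.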
def counting_only(fpath_img_to_count, counting_gene_dirs, min_distance=5, stringency=0):
    """
    Function used to count the smFISH dots in already filtered images.
    It is designed to process in parallel all the tmp file images
    stored as numpy arrays after conversion from the microscope format.

    Parameters:
    ------------

    fpath_img_to_count: str
        path to the file to process.
    counting_gene_dirs: list
        list of the paths of the directories where the countings of the
        filtered images are saved.
    min_distance: int
        minimum distance between dots.
    stringency: int
        stringency used to select the threshold used for counting.
    """
    # Get info from the file name
    fname_split = fpath_img_to_count.split('/')[-1].split('_')
    experiment_name = fname_split[0]
    hyb = fname_split[1]
    gene = fname_split[2]
    pos = fname_split[4].split('.')[0]

    # Load the image to process
    img = np.load(fpath_img_to_count)  # image is np.uint16
    img = img_as_float(img)

    # Count the dots in the whole image
    counting_dict = thr_calculator(img, min_distance, stringency)
    counting_saving_dir = [saving_dir for saving_dir in counting_gene_dirs
                           if gene in saving_dir.split('/')[-2]][0]
    fname = counting_saving_dir + experiment_name + '_' + hyb + '_' + gene + '_' + 'pos_' + pos + '.pkl'
    pickle.dump(counting_dict, open(fname, 'wb'))
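# All of the name parsing above assumes files laid out as
# <experiment>_<hyb>_<gene>_pos_<pos>.npy (example name hypothetical):
#   EXP20_hyb1_Gad2_pos_7.npy -> experiment='EXP20', hyb='hyb1', gene='Gad2', pos='7'
# which is also the pattern the saving code re-assembles with 'pos_' + pos.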
| 38.144509
| 135
| 0.69109
| 1,934
| 13,198
| 4.489142
| 0.106515
| 0.036282
| 0.020733
| 0.025801
| 0.908431
| 0.899908
| 0.886892
| 0.86927
| 0.861668
| 0.853605
| 0
| 0.010073
| 0.232763
| 13,198
| 346
| 136
| 38.144509
| 0.847324
| 0.452947
| 0
| 0.763636
| 0
| 0
| 0.020247
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036364
| false
| 0
| 0.045455
| 0
| 0.109091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6ccc48fac2341b57cf8953951fce91a5a01c1911
| 7,815
|
py
|
Python
|
personas/migrations/0025_auto_20170415_0924.py
|
Ykharo/tutorial_P3_4
|
3e4e620833e897ce4af386fa2642c8f647ebab62
|
[
"MIT"
] | null | null | null |
personas/migrations/0025_auto_20170415_0924.py
|
Ykharo/tutorial_P3_4
|
3e4e620833e897ce4af386fa2642c8f647ebab62
|
[
"MIT"
] | null | null | null |
personas/migrations/0025_auto_20170415_0924.py
|
Ykharo/tutorial_P3_4
|
3e4e620833e897ce4af386fa2642c8f647ebab62
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2017-04-15 12:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):

    dependencies = [
        ('personas', '0024_ctto_alcancectto'),
    ]

    operations = [
        migrations.CreateModel(
            name='CoordCtto',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.CreateModel(
            name='PersonalCtta',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Nombre', models.CharField(blank=True, max_length=100, null=True)),
                ('Cargo', models.CharField(blank=True, max_length=50, null=True)),
                ('Correo', models.CharField(blank=True, max_length=50, null=True)),
                ('Cel', models.CharField(blank=True, max_length=20, null=True)),
                ('CI', models.CharField(blank=True, max_length=20, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='PersonalProyecto',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Nombre', models.CharField(blank=True, max_length=100, null=True)),
                ('Cargo', models.CharField(blank=True, max_length=50, null=True)),
                ('Correo', models.CharField(blank=True, max_length=50, null=True)),
                ('Cel', models.CharField(blank=True, max_length=20, null=True)),
                ('CI', models.CharField(blank=True, max_length=20, null=True)),
                ('IdArea', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='personas.Area')),
            ],
        ),
        migrations.CreateModel(
            name='Reprentantes',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('IdDuenoCeco', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='personas.Ceco')),
            ],
        ),
        migrations.AddField(
            model_name='ctta',
            name='BcoCtta',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AddField(
            model_name='ctta',
            name='CiudadCtta',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AddField(
            model_name='ctta',
            name='ComunaCtta',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AddField(
            model_name='ctta',
            name='FechDocpersonCtta',
            field=models.DateField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='ctta',
            name='GiroCtta',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AddField(
            model_name='ctta',
            name='NotariapersonCtta',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
        migrations.AddField(
            model_name='ctta',
            name='NotarioCtta',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
        migrations.AddField(
            model_name='ctta',
            name='NumCtaCtta',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AddField(
            model_name='ctto',
            name='Anticipo',
            field=models.DecimalField(blank=True, decimal_places=2, max_digits=21, null=True),
        ),
        migrations.AddField(
            model_name='ctto',
            name='Boleta',
            field=models.DecimalField(blank=True, decimal_places=2, max_digits=21, null=True),
        ),
        migrations.AddField(
            model_name='ctto',
            name='DocOferta',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
        migrations.AddField(
            model_name='ctto',
            name='FechCartaAdj',
            field=models.DateField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='ctto',
            name='FechOferta',
            field=models.DateField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='ctto',
            name='IvaOferta',
            field=models.CharField(blank=True, choices=[('IVA', 'Afecto a IVA'), ('NO_IVA', 'No Afecto a IVA'), ('RET_Legal', 'Retención Legal')], default='IVA', max_length=30, null=True),
        ),
        migrations.AddField(
            model_name='ctto',
            name='LugarCtto',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AddField(
            model_name='ctto',
            name='Modalidad',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AddField(
            model_name='ctto',
            name='MonedaBoleta',
            field=models.CharField(blank=True, choices=[('CLP', 'CLP'), ('USD', 'USD'), ('UF', 'UF'), ('EUR', 'EUR'), ('CAD', 'CAD')], default='CLP', max_length=5, null=True),
        ),
        migrations.AddField(
            model_name='ctto',
            name='RetenCtto',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
        migrations.AddField(
            model_name='ctto',
            name='VigenBoleta',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
        migrations.AddField(
            model_name='mdte',
            name='CiudadMandte',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AddField(
            model_name='mdte',
            name='ComunaMandte',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AddField(
            model_name='mdte',
            name='FechDocpersonMandte',
            field=models.DateField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='mdte',
            name='NotariapersonMandte',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
        migrations.AddField(
            model_name='mdte',
            name='NotarioMandte',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
        migrations.AddField(
            model_name='reprentantes',
            name='IdMandante',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='personas.Mdte'),
        ),
        migrations.AddField(
            model_name='personalctta',
            name='IdCtta',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='personas.Ctta'),
        ),
        migrations.AddField(
            model_name='coordctto',
            name='IdCtto',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='personas.Ctto'),
        ),
        migrations.AddField(
            model_name='coordctto',
            name='IdPersCtta',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='personas.PersonalCtta'),
        ),
        migrations.AddField(
            model_name='coordctto',
            name='IdPersProy',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='personas.PersonalProyecto'),
        ),
    ]
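# Applying this auto-generated migration follows the standard Django
# workflow (command shown for context, not part of the generated file):
#   python manage.py migrate personas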
| 39.469697
| 188
| 0.562892
| 775
| 7,815
| 5.56129
| 0.162581
| 0.070998
| 0.154756
| 0.181671
| 0.785615
| 0.785615
| 0.732947
| 0.732947
| 0.709281
| 0.709281
| 0
| 0.016447
| 0.299808
| 7,815
| 197
| 189
| 39.670051
| 0.771199
| 0.008573
| 0
| 0.721053
| 1
| 0
| 0.105617
| 0.008651
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.015789
| 0
| 0.031579
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6cebdd6a2145b053c53a3f643defe53c9cd00e1d
| 325
|
py
|
Python
|
src/plantuml_adapter/__init__.py
|
rcasteran/jarvis4se
|
17a276c7c2b831ca6efdb279d38624b54c0246e8
|
[
"MIT"
] | 4
|
2022-02-17T15:41:40.000Z
|
2022-03-25T09:00:08.000Z
|
src/plantuml_adapter/__init__.py
|
rcasteran/jarvis4se
|
17a276c7c2b831ca6efdb279d38624b54c0246e8
|
[
"MIT"
] | 13
|
2022-02-17T10:54:13.000Z
|
2022-03-28T08:05:06.000Z
|
src/plantuml_adapter/__init__.py
|
rcasteran/jarvis4se
|
17a276c7c2b831ca6efdb279d38624b54c0246e8
|
[
"MIT"
] | 1
|
2022-03-03T16:42:33.000Z
|
2022-03-03T16:42:33.000Z
|
from .plantuml_adapter import get_function_diagrams
from .plantuml_adapter import get_sequence_diagram
from .plantuml_adapter import get_state_machine_diagram
from .plantuml_adapter import get_url_from_string
from .plantuml_adapter import get_fun_elem_decomposition
from .plantuml_adapter import get_fun_elem_context_diagram
| 46.428571
| 58
| 0.907692
| 47
| 325
| 5.787234
| 0.361702
| 0.264706
| 0.419118
| 0.551471
| 0.720588
| 0.514706
| 0.257353
| 0
| 0
| 0
| 0
| 0
| 0.073846
| 325
| 6
| 59
| 54.166667
| 0.903654
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
9f40ebf5f00ef1a383b5b10bb4bc14b10aa12a55
| 44
|
py
|
Python
|
api/account/views/__init__.py
|
DenerRodrigues/flask-restful-api-example
|
40aa0b5fcdeacf5241063953c478756c85b5811d
|
[
"MIT"
] | 1
|
2019-12-20T00:17:22.000Z
|
2019-12-20T00:17:22.000Z
|
api/account/views/__init__.py
|
DenerRodrigues/flask-restful-api-example
|
40aa0b5fcdeacf5241063953c478756c85b5811d
|
[
"MIT"
] | null | null | null |
api/account/views/__init__.py
|
DenerRodrigues/flask-restful-api-example
|
40aa0b5fcdeacf5241063953c478756c85b5811d
|
[
"MIT"
] | null | null | null |
from . import user_views
from . import urls
| 14.666667
| 24
| 0.772727
| 7
| 44
| 4.714286
| 0.714286
| 0.606061
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 44
| 2
| 25
| 22
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
9f9699bb7ef13f2669bae1b1afaa2cfe7953aad5
| 9,826
|
py
|
Python
|
com/test/testCryptoProcessorMethods.py
|
MikeJohnManiatis/reddivestor
|
10240639bac3d9bb72c7ab00c5226bfe691abfd8
|
[
"Apache-2.0"
] | 2
|
2021-02-15T10:10:53.000Z
|
2021-02-17T16:22:35.000Z
|
com/test/testCryptoProcessorMethods.py
|
MikeJohnManiatis/reddivestor
|
10240639bac3d9bb72c7ab00c5226bfe691abfd8
|
[
"Apache-2.0"
] | 29
|
2021-02-18T03:01:22.000Z
|
2021-05-10T13:08:25.000Z
|
com/test/testCryptoProcessorMethods.py
|
MikeJohnManiatis/reddivestor
|
10240639bac3d9bb72c7ab00c5226bfe691abfd8
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from unittest.mock import MagicMock
from unittest import mock
from unittest.mock import patch
from com.src.Processor import Processor
from com.src.CryptoProcessor import CryptoProcessor
from com.src.network.ApiRequester import ApiRequester
from com.test.testUtil import *
from com.src.persist.MongoDatastore import MongoDatastore
from bs4 import BeautifulSoup
class TestCryptoProcessorMethods(unittest.TestCase):
@mock.patch('com.src.network.ApiRequester.ApiRequester')
@mock.patch('com.src.persist.MongoDatastore.MongoDatastore')
def test_populate_coin_hash(self, mock_api_requester, mock_mongo_datastore):
mock_api_requester.get.return_value= {'data': [{'name': 'Litecoin', 'symbol':'LTC'}, {'name': 'Ethereum', 'symbol':'ETH'}, {'name': 'Chainlink', 'symbol':'LINK'}] }
mock_mongo_datastore.insert.return_value= None
crypto_processor = CryptoProcessor(mock_api_requester, mock_mongo_datastore)
crypto_processor.populate_coin_hash()
self.assertTrue(len(crypto_processor.coin_hash_table) > 1)
@mock.patch('com.src.persist.MongoDatastore.MongoDatastore')
@mock.patch('com.src.network.ApiRequester.ApiRequester')
def test_handle(self, mock_api_requester, mock_mongo_datastore):
mock_api_requester.get.return_value = {'data': [{'name': 'Litecoin', 'symbol':'LTC'}, {'name': 'Ethereum', 'symbol':'ETH'}, {'name': 'Chainlink', 'symbol':'LINK'}, {'name': 'Bitcoin Cash', 'symbol': 'BCH'}] }
crypto_processor = CryptoProcessor(mock_api_requester, mock_mongo_datastore)
crypto_processor.populate_coin_hash()
soup = BeautifulSoup("<html> \
<h3>Ethereum is fantastic.</h3> \
<div> \
<div><p> Insert dummy text here </p> </div> \
</div> \
<h3>I Like $LINK because i like defi</h3> \
<div> \
<div><p> Do you like it too? </p> </div> \
</div> \
<h3>Bitcoin Cash is awesome.. coins</h3> \
<div> \
<div><p> $LTC is great!</p> </div> \
</div> \
</html>", 'lxml')
crypto_processor.handle(soup, "TestSubReddit.com")
self.assertEquals(mock_mongo_datastore.insert.call_count, 4)
@mock.patch('com.src.persist.MongoDatastore.MongoDatastore')
@mock.patch('com.src.network.ApiRequester.ApiRequester')
def test_populate_coin_hash_null(self, mock_api_requester, mock_mongo_datastore):
mock_api_requester.get.return_value = None
crypto_processor = CryptoProcessor(mock_api_requester, mock_mongo_datastore)
crypto_processor.populate_coin_hash()
self.assertTrue(len(crypto_processor.coin_hash_table) == 0)
@mock.patch('com.src.persist.MongoDatastore.MongoDatastore')
@mock.patch('com.src.network.ApiRequester.ApiRequester')
def test_populate_seen_posts(self, mock_api_requester,mock_mongo_datastore):
mock_mongo_datastore.get.return_value = [{'post':'Test!'}]
crypto_processor = CryptoProcessor(mock_api_requester, mock_mongo_datastore)
crypto_processor.populate_seen_post_titles()
self.assertTrue(len(crypto_processor.seen_post_titles) == 1)
@mock.patch('com.src.persist.MongoDatastore.MongoDatastore')
@mock.patch('com.src.network.ApiRequester.ApiRequester')
def test_currently_seen_coins(self, mock_api_requester,mock_mongo_datastore):
crypto_processor = CryptoProcessor(mock_api_requester, mock_mongo_datastore)
mock_mongo_datastore.get.return_value = []
mock_api_requester.get.return_value = {'data': [{'name': 'Cardano', 'symbol':'ADA'}] }
crypto_processor.populate_seen_post_titles()
crypto_processor.populate_coin_hash()
soup = BeautifulSoup("<html> \
<h3>This Cardano coin is fantastic.</h3> \
<div> \
<div><p> Google searches for Cardano breaks new high records, following breaking all-time high price, as retail investors surge towards ADA. The number of Google for ADA hit the roof since the beginning of February. The search interest for ADA increased with predictions around the crypto assets, which projected the value to more than double by March 6th, 2021. ADA has Increased about 600% from beginning of the year till date. </p> </div> \
</div> \
</html>", 'lxml')
crypto_processor.handle(soup, "TestSubReddit.com")
self.assertTrue(mock_mongo_datastore.insert.call_count == 2)
@mock.patch('com.src.persist.MongoDatastore.MongoDatastore')
@mock.patch('com.src.network.ApiRequester.ApiRequester')
def test_populate_seen_posts_2_new_posts(self, mock_api_requester,mock_mongo_datastore):
crypto_processor = CryptoProcessor(mock_api_requester, mock_mongo_datastore)
mock_mongo_datastore.get.return_value = [{'post':'Ethereum is fantastic.'}]
mock_api_requester.get.return_value = {'data': [{'name': 'Litecoin', 'symbol':'LTC'}, {'name': 'Ethereum', 'symbol':'ETH'}, {'name': 'Chainlink', 'symbol':'LINK'}] }
crypto_processor.populate_seen_post_titles()
crypto_processor.populate_coin_hash()
soup = BeautifulSoup("<html> \
<h3>Ethereum is fantastic.</h3> \
<div> \
<div><p> Insert dummy text here </p> </div> \
</div> \
<h3>I Like $LINK because i like defi</h3> \
<div> \
<div><p> Do you like it too? </p> </div> \
</div> \
<h3>I like coins</h3> \
<div> \
<div><p> $LTC is great!</p> </div> \
</div> \
</html>", 'lxml')
crypto_processor.handle(soup, "TestSubReddit.com")
self.assertTrue(mock_mongo_datastore.insert.call_count == 2)
@mock.patch('com.src.persist.MongoDatastore.MongoDatastore')
@mock.patch('com.src.network.ApiRequester.ApiRequester')
def test_populate_seen_posts_0_new_posts(self, mock_api_requester,mock_mongo_datastore):
crypto_processor = CryptoProcessor(mock_api_requester, mock_mongo_datastore)
mock_mongo_datastore.get.return_value = [{'post':'Ethereum is fantastic.'}, {'post': 'I Like LINK because i like defi'}, {'post': ' LTC is great!'}]
mock_api_requester.get.return_value = {'data': [{'name': 'Litecoin', 'symbol':'LTC'}, {'name': 'Ethereum', 'symbol':'ETH'}, {'name': 'Chainlink', 'symbol':'LINK'}] }
crypto_processor.populate_seen_post_titles()
crypto_processor.populate_coin_hash()
soup = BeautifulSoup("<html> \
<h3>Ethereum is fantastic.</h3> \
<div> \
<div><p> Insert dummy text here </p> </div> \
</div> \
<h3>I Like LINK because i like defi</h3> \
<div> \
<div><p> Do you like it too? </p> </div> \
</div> \
<h3>I like coins</h3> \
<div> \
<div><p> LTC is great!</p> </div> \
</div> \
</html>", 'lxml')
crypto_processor.handle(soup, "TestSubReddit.com")
self.assertTrue(mock_mongo_datastore.insert.call_count == 0)
@mock.patch('com.src.persist.MongoDatastore.MongoDatastore')
@mock.patch('com.src.network.ApiRequester.ApiRequester')
def test_populate_seen_posts_NULL_new_posts(self, mock_api_requester,mock_mongo_datastore):
crypto_processor = CryptoProcessor(mock_api_requester, mock_mongo_datastore)
mock_mongo_datastore.get.return_value = None
mock_api_requester.get.return_value = {'data': [{'name': 'Litecoin', 'symbol':'LTC'}, {'name': 'Ethereum', 'symbol':'ETH'}, {'name': 'Chainlink', 'symbol':'LINK'}] }
crypto_processor.populate_seen_post_titles()
crypto_processor.populate_coin_hash()
soup = BeautifulSoup("<html> \
<h3>Ethereum is fantastic.</h3> \
<div> \
<div><p> Insert dummy text here </p> </div> \
</div> \
<h3>I Like $LINK because i like defi</h3> \
<div> \
<div><p> Do you like it too? </p> </div> \
</div> \
<h3>I like coins</h3> \
<div> \
<div><p> $LTC is great!</p> </div> \
</div> \
</html>", 'lxml')
crypto_processor.handle(soup, "TestSubReddit.com")
self.assertEqual(3, mock_mongo_datastore.insert.call_count)
@mock.patch('com.src.persist.MongoDatastore.MongoDatastore')
@mock.patch('com.src.network.ApiRequester.ApiRequester')
def test_money_sign_in_name(self, mock_api_requester, mock_mongo_datastore):
crypto_processor = CryptoProcessor(mock_api_requester, mock_mongo_datastore)
mock_mongo_datastore.get.return_value = None
mock_api_requester.get.return_value = {'data': [{'name': 'Nano', 'symbol':'NANO'}, {'name': 'Ethereum', 'symbol':'ETH'}, {'name': 'ForTube', 'symbol':'FOR'}] }
crypto_processor.populate_seen_post_titles()
crypto_processor.populate_coin_hash()
soup = BeautifulSoup("<html> \
<h3>I think $FOR coin will really be a great coin.</h3> \
<div> \
<div><p> $FOR coin is always rising!</p> </div> \
</div> \
</html>", 'lxml')
crypto_processor.handle(soup, "TestSubReddit.com")
self.assertEqual(mock_mongo_datastore.insert.call_count, 2)
if __name__ == '__main__':
unittest.main()
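The tests above all follow one pattern: patch the MongoDatastore and ApiRequester collaborators, feed the processor a BeautifulSoup document, and assert on insert.call_count. A minimal self-contained sketch of that pattern, with illustrative names that are not part of the project:

import unittest
from unittest import mock

class Notifier:
    # Toy stand-in for a class that persists each title it is given.
    def __init__(self, store):
        self.store = store

    def record(self, titles):
        for title in titles:
            self.store.insert(title)

class NotifierTest(unittest.TestCase):
    def test_insert_called_once_per_title(self):
        store = mock.Mock()  # plays the role of the patched datastore
        Notifier(store).record(["a", "b"])
        self.assertEqual(store.insert.call_count, 2)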
| 56.797688
| 464
| 0.620191
| 1,111
| 9,826
| 5.262826
| 0.137714
| 0.082093
| 0.095434
| 0.046178
| 0.823841
| 0.808791
| 0.791688
| 0.765179
| 0.757996
| 0.757996
| 0
| 0.006486
| 0.246896
| 9,826
| 173
| 465
| 56.797688
| 0.783649
| 0
| 0
| 0.720497
| 0
| 0.006211
| 0.150504
| 0.078763
| 0
| 0
| 0
| 0
| 0.055901
| 1
| 0.055901
| false
| 0
| 0.062112
| 0
| 0.124224
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9fa6f6a569b9477041c88714b1855abeb2b796ba
| 68
|
py
|
Python
|
logger/__init__.py
|
coolmacmaniac/pyutils
|
f5e4089d05159407d21de6d589c535e581dc94cc
|
[
"MIT"
] | null | null | null |
logger/__init__.py
|
coolmacmaniac/pyutils
|
f5e4089d05159407d21de6d589c535e581dc94cc
|
[
"MIT"
] | null | null | null |
logger/__init__.py
|
coolmacmaniac/pyutils
|
f5e4089d05159407d21de6d589c535e581dc94cc
|
[
"MIT"
] | null | null | null |
from .log import log
from .log import logn
from .log import lognone
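This __init__ re-exports the three helpers so callers can import them from the package root rather than from the logger.log submodule. A hedged usage sketch (only the import path is established by the file above; the helpers' behavior is assumed):

# Works because of the re-exports; equivalent to `from logger.log import log`.
from logger import log, logn, lognone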
| 17
| 24
| 0.779412
| 12
| 68
| 4.416667
| 0.416667
| 0.396226
| 0.735849
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 68
| 3
| 25
| 22.666667
| 0.946429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4c8ab73b755df5bec37e782511f5af8fb8198c03
| 41,639
|
py
|
Python
|
google/cloud/vmmigration_v1/services/vm_migration/pagers.py
|
renovate-bot/python-vmmigration
|
80a2cf46a21f516899da818a7aec0f2a67222047
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/vmmigration_v1/services/vm_migration/pagers.py
|
renovate-bot/python-vmmigration
|
80a2cf46a21f516899da818a7aec0f2a67222047
|
[
"Apache-2.0"
] | 10
|
2021-11-18T10:47:48.000Z
|
2022-03-07T15:48:54.000Z
|
google/cloud/vmmigration_v1/services/vm_migration/pagers.py
|
renovate-bot/python-vmmigration
|
80a2cf46a21f516899da818a7aec0f2a67222047
|
[
"Apache-2.0"
] | 1
|
2022-01-29T08:15:02.000Z
|
2022-01-29T08:15:02.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterator,
Awaitable,
Callable,
Iterator,
Optional,
Sequence,
Tuple,
)
from google.cloud.vmmigration_v1.types import vmmigration
class ListSourcesPager:
"""A pager for iterating through ``list_sources`` requests.
This class thinly wraps an initial
:class:`google.cloud.vmmigration_v1.types.ListSourcesResponse` object, and
provides an ``__iter__`` method to iterate through its
``sources`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListSources`` requests and continue to iterate
through the ``sources`` field on the
corresponding responses.
All the usual :class:`google.cloud.vmmigration_v1.types.ListSourcesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., vmmigration.ListSourcesResponse],
request: vmmigration.ListSourcesRequest,
response: vmmigration.ListSourcesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.vmmigration_v1.types.ListSourcesRequest):
The initial request object.
response (google.cloud.vmmigration_v1.types.ListSourcesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = vmmigration.ListSourcesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[vmmigration.ListSourcesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[vmmigration.Source]:
for page in self.pages:
yield from page.sources
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListSourcesAsyncPager:
"""A pager for iterating through ``list_sources`` requests.
This class thinly wraps an initial
:class:`google.cloud.vmmigration_v1.types.ListSourcesResponse` object, and
provides an ``__aiter__`` method to iterate through its
``sources`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListSources`` requests and continue to iterate
through the ``sources`` field on the
corresponding responses.
All the usual :class:`google.cloud.vmmigration_v1.types.ListSourcesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[vmmigration.ListSourcesResponse]],
request: vmmigration.ListSourcesRequest,
response: vmmigration.ListSourcesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.vmmigration_v1.types.ListSourcesRequest):
The initial request object.
response (google.cloud.vmmigration_v1.types.ListSourcesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = vmmigration.ListSourcesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterator[vmmigration.ListSourcesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[vmmigration.Source]:
async def async_generator():
async for page in self.pages:
for response in page.sources:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListUtilizationReportsPager:
"""A pager for iterating through ``list_utilization_reports`` requests.
This class thinly wraps an initial
:class:`google.cloud.vmmigration_v1.types.ListUtilizationReportsResponse` object, and
provides an ``__iter__`` method to iterate through its
``utilization_reports`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListUtilizationReports`` requests and continue to iterate
through the ``utilization_reports`` field on the
corresponding responses.
All the usual :class:`google.cloud.vmmigration_v1.types.ListUtilizationReportsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., vmmigration.ListUtilizationReportsResponse],
request: vmmigration.ListUtilizationReportsRequest,
response: vmmigration.ListUtilizationReportsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.vmmigration_v1.types.ListUtilizationReportsRequest):
The initial request object.
response (google.cloud.vmmigration_v1.types.ListUtilizationReportsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = vmmigration.ListUtilizationReportsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[vmmigration.ListUtilizationReportsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[vmmigration.UtilizationReport]:
for page in self.pages:
yield from page.utilization_reports
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListUtilizationReportsAsyncPager:
"""A pager for iterating through ``list_utilization_reports`` requests.
This class thinly wraps an initial
:class:`google.cloud.vmmigration_v1.types.ListUtilizationReportsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``utilization_reports`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListUtilizationReports`` requests and continue to iterate
through the ``utilization_reports`` field on the
corresponding responses.
All the usual :class:`google.cloud.vmmigration_v1.types.ListUtilizationReportsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[vmmigration.ListUtilizationReportsResponse]],
request: vmmigration.ListUtilizationReportsRequest,
response: vmmigration.ListUtilizationReportsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.vmmigration_v1.types.ListUtilizationReportsRequest):
The initial request object.
response (google.cloud.vmmigration_v1.types.ListUtilizationReportsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = vmmigration.ListUtilizationReportsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterator[vmmigration.ListUtilizationReportsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[vmmigration.UtilizationReport]:
async def async_generator():
async for page in self.pages:
for response in page.utilization_reports:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListDatacenterConnectorsPager:
"""A pager for iterating through ``list_datacenter_connectors`` requests.
This class thinly wraps an initial
:class:`google.cloud.vmmigration_v1.types.ListDatacenterConnectorsResponse` object, and
provides an ``__iter__`` method to iterate through its
``datacenter_connectors`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListDatacenterConnectors`` requests and continue to iterate
through the ``datacenter_connectors`` field on the
corresponding responses.
All the usual :class:`google.cloud.vmmigration_v1.types.ListDatacenterConnectorsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., vmmigration.ListDatacenterConnectorsResponse],
request: vmmigration.ListDatacenterConnectorsRequest,
response: vmmigration.ListDatacenterConnectorsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.vmmigration_v1.types.ListDatacenterConnectorsRequest):
The initial request object.
response (google.cloud.vmmigration_v1.types.ListDatacenterConnectorsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = vmmigration.ListDatacenterConnectorsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[vmmigration.ListDatacenterConnectorsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[vmmigration.DatacenterConnector]:
for page in self.pages:
yield from page.datacenter_connectors
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListDatacenterConnectorsAsyncPager:
"""A pager for iterating through ``list_datacenter_connectors`` requests.
This class thinly wraps an initial
:class:`google.cloud.vmmigration_v1.types.ListDatacenterConnectorsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``datacenter_connectors`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListDatacenterConnectors`` requests and continue to iterate
through the ``datacenter_connectors`` field on the
corresponding responses.
All the usual :class:`google.cloud.vmmigration_v1.types.ListDatacenterConnectorsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[vmmigration.ListDatacenterConnectorsResponse]],
request: vmmigration.ListDatacenterConnectorsRequest,
response: vmmigration.ListDatacenterConnectorsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.vmmigration_v1.types.ListDatacenterConnectorsRequest):
The initial request object.
response (google.cloud.vmmigration_v1.types.ListDatacenterConnectorsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = vmmigration.ListDatacenterConnectorsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(
self,
) -> AsyncIterator[vmmigration.ListDatacenterConnectorsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[vmmigration.DatacenterConnector]:
async def async_generator():
async for page in self.pages:
for response in page.datacenter_connectors:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListMigratingVmsPager:
"""A pager for iterating through ``list_migrating_vms`` requests.
This class thinly wraps an initial
:class:`google.cloud.vmmigration_v1.types.ListMigratingVmsResponse` object, and
provides an ``__iter__`` method to iterate through its
``migrating_vms`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListMigratingVms`` requests and continue to iterate
through the ``migrating_vms`` field on the
corresponding responses.
All the usual :class:`google.cloud.vmmigration_v1.types.ListMigratingVmsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., vmmigration.ListMigratingVmsResponse],
request: vmmigration.ListMigratingVmsRequest,
response: vmmigration.ListMigratingVmsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.vmmigration_v1.types.ListMigratingVmsRequest):
The initial request object.
response (google.cloud.vmmigration_v1.types.ListMigratingVmsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = vmmigration.ListMigratingVmsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[vmmigration.ListMigratingVmsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[vmmigration.MigratingVm]:
for page in self.pages:
yield from page.migrating_vms
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListMigratingVmsAsyncPager:
"""A pager for iterating through ``list_migrating_vms`` requests.
This class thinly wraps an initial
:class:`google.cloud.vmmigration_v1.types.ListMigratingVmsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``migrating_vms`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListMigratingVms`` requests and continue to iterate
through the ``migrating_vms`` field on the
corresponding responses.
All the usual :class:`google.cloud.vmmigration_v1.types.ListMigratingVmsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[vmmigration.ListMigratingVmsResponse]],
request: vmmigration.ListMigratingVmsRequest,
response: vmmigration.ListMigratingVmsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.vmmigration_v1.types.ListMigratingVmsRequest):
The initial request object.
response (google.cloud.vmmigration_v1.types.ListMigratingVmsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = vmmigration.ListMigratingVmsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterator[vmmigration.ListMigratingVmsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[vmmigration.MigratingVm]:
async def async_generator():
async for page in self.pages:
for response in page.migrating_vms:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListCloneJobsPager:
"""A pager for iterating through ``list_clone_jobs`` requests.
This class thinly wraps an initial
:class:`google.cloud.vmmigration_v1.types.ListCloneJobsResponse` object, and
provides an ``__iter__`` method to iterate through its
``clone_jobs`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListCloneJobs`` requests and continue to iterate
through the ``clone_jobs`` field on the
corresponding responses.
All the usual :class:`google.cloud.vmmigration_v1.types.ListCloneJobsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., vmmigration.ListCloneJobsResponse],
request: vmmigration.ListCloneJobsRequest,
response: vmmigration.ListCloneJobsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.vmmigration_v1.types.ListCloneJobsRequest):
The initial request object.
response (google.cloud.vmmigration_v1.types.ListCloneJobsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = vmmigration.ListCloneJobsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[vmmigration.ListCloneJobsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[vmmigration.CloneJob]:
for page in self.pages:
yield from page.clone_jobs
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListCloneJobsAsyncPager:
"""A pager for iterating through ``list_clone_jobs`` requests.
This class thinly wraps an initial
:class:`google.cloud.vmmigration_v1.types.ListCloneJobsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``clone_jobs`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListCloneJobs`` requests and continue to iterate
through the ``clone_jobs`` field on the
corresponding responses.
All the usual :class:`google.cloud.vmmigration_v1.types.ListCloneJobsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[vmmigration.ListCloneJobsResponse]],
request: vmmigration.ListCloneJobsRequest,
response: vmmigration.ListCloneJobsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.vmmigration_v1.types.ListCloneJobsRequest):
The initial request object.
response (google.cloud.vmmigration_v1.types.ListCloneJobsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = vmmigration.ListCloneJobsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterator[vmmigration.ListCloneJobsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[vmmigration.CloneJob]:
async def async_generator():
async for page in self.pages:
for response in page.clone_jobs:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListCutoverJobsPager:
"""A pager for iterating through ``list_cutover_jobs`` requests.
This class thinly wraps an initial
:class:`google.cloud.vmmigration_v1.types.ListCutoverJobsResponse` object, and
provides an ``__iter__`` method to iterate through its
``cutover_jobs`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListCutoverJobs`` requests and continue to iterate
through the ``cutover_jobs`` field on the
corresponding responses.
All the usual :class:`google.cloud.vmmigration_v1.types.ListCutoverJobsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., vmmigration.ListCutoverJobsResponse],
request: vmmigration.ListCutoverJobsRequest,
response: vmmigration.ListCutoverJobsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.vmmigration_v1.types.ListCutoverJobsRequest):
The initial request object.
response (google.cloud.vmmigration_v1.types.ListCutoverJobsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = vmmigration.ListCutoverJobsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[vmmigration.ListCutoverJobsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[vmmigration.CutoverJob]:
for page in self.pages:
yield from page.cutover_jobs
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListCutoverJobsAsyncPager:
"""A pager for iterating through ``list_cutover_jobs`` requests.
This class thinly wraps an initial
:class:`google.cloud.vmmigration_v1.types.ListCutoverJobsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``cutover_jobs`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListCutoverJobs`` requests and continue to iterate
through the ``cutover_jobs`` field on the
corresponding responses.
All the usual :class:`google.cloud.vmmigration_v1.types.ListCutoverJobsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[vmmigration.ListCutoverJobsResponse]],
request: vmmigration.ListCutoverJobsRequest,
response: vmmigration.ListCutoverJobsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.vmmigration_v1.types.ListCutoverJobsRequest):
The initial request object.
response (google.cloud.vmmigration_v1.types.ListCutoverJobsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = vmmigration.ListCutoverJobsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterator[vmmigration.ListCutoverJobsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[vmmigration.CutoverJob]:
async def async_generator():
async for page in self.pages:
for response in page.cutover_jobs:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListGroupsPager:
"""A pager for iterating through ``list_groups`` requests.
This class thinly wraps an initial
:class:`google.cloud.vmmigration_v1.types.ListGroupsResponse` object, and
provides an ``__iter__`` method to iterate through its
``groups`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListGroups`` requests and continue to iterate
through the ``groups`` field on the
corresponding responses.
All the usual :class:`google.cloud.vmmigration_v1.types.ListGroupsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., vmmigration.ListGroupsResponse],
request: vmmigration.ListGroupsRequest,
response: vmmigration.ListGroupsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.vmmigration_v1.types.ListGroupsRequest):
The initial request object.
response (google.cloud.vmmigration_v1.types.ListGroupsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = vmmigration.ListGroupsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[vmmigration.ListGroupsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[vmmigration.Group]:
for page in self.pages:
yield from page.groups
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListGroupsAsyncPager:
"""A pager for iterating through ``list_groups`` requests.
This class thinly wraps an initial
:class:`google.cloud.vmmigration_v1.types.ListGroupsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``groups`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListGroups`` requests and continue to iterate
through the ``groups`` field on the
corresponding responses.
All the usual :class:`google.cloud.vmmigration_v1.types.ListGroupsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[vmmigration.ListGroupsResponse]],
request: vmmigration.ListGroupsRequest,
response: vmmigration.ListGroupsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.vmmigration_v1.types.ListGroupsRequest):
The initial request object.
response (google.cloud.vmmigration_v1.types.ListGroupsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = vmmigration.ListGroupsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterator[vmmigration.ListGroupsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[vmmigration.Group]:
async def async_generator():
async for page in self.pages:
for response in page.groups:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListTargetProjectsPager:
"""A pager for iterating through ``list_target_projects`` requests.
This class thinly wraps an initial
:class:`google.cloud.vmmigration_v1.types.ListTargetProjectsResponse` object, and
provides an ``__iter__`` method to iterate through its
``target_projects`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListTargetProjects`` requests and continue to iterate
through the ``target_projects`` field on the
corresponding responses.
All the usual :class:`google.cloud.vmmigration_v1.types.ListTargetProjectsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., vmmigration.ListTargetProjectsResponse],
request: vmmigration.ListTargetProjectsRequest,
response: vmmigration.ListTargetProjectsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.vmmigration_v1.types.ListTargetProjectsRequest):
The initial request object.
response (google.cloud.vmmigration_v1.types.ListTargetProjectsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = vmmigration.ListTargetProjectsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[vmmigration.ListTargetProjectsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[vmmigration.TargetProject]:
for page in self.pages:
yield from page.target_projects
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListTargetProjectsAsyncPager:
"""A pager for iterating through ``list_target_projects`` requests.
This class thinly wraps an initial
:class:`google.cloud.vmmigration_v1.types.ListTargetProjectsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``target_projects`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListTargetProjects`` requests and continue to iterate
through the ``target_projects`` field on the
corresponding responses.
All the usual :class:`google.cloud.vmmigration_v1.types.ListTargetProjectsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[vmmigration.ListTargetProjectsResponse]],
request: vmmigration.ListTargetProjectsRequest,
response: vmmigration.ListTargetProjectsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.vmmigration_v1.types.ListTargetProjectsRequest):
The initial request object.
response (google.cloud.vmmigration_v1.types.ListTargetProjectsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = vmmigration.ListTargetProjectsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterator[vmmigration.ListTargetProjectsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[vmmigration.TargetProject]:
async def async_generator():
async for page in self.pages:
for response in page.target_projects:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
| 39.505693
| 93
| 0.674392
| 4,400
| 41,639
| 6.16
| 0.05
| 0.056671
| 0.05276
| 0.057556
| 0.952553
| 0.951483
| 0.951483
| 0.951483
| 0.942333
| 0.942333
| 0
| 0.003365
| 0.24345
| 41,639
| 1,053
| 94
| 39.54321
| 0.857025
| 0.458176
| 0
| 0.803838
| 0
| 0
| 0.007879
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153518
| false
| 0
| 0.004264
| 0.06823
| 0.277186
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4cac7362fb37726a88676df6e750e09ed4583216
| 141
|
py
|
Python
|
capstone/django_sql_trace/__init__.py
|
ChefAndy/capstone
|
bac7c44518312c5b64462ea92ecbabdcb5d29bb6
|
[
"MIT"
] | 134
|
2017-07-12T17:03:06.000Z
|
2022-03-27T06:38:29.000Z
|
capstone/django_sql_trace/__init__.py
|
fakegit/capstone
|
57647481de99bbe4af52b6dd5ade1954fba41a2d
|
[
"MIT"
] | 1,362
|
2017-06-22T17:42:49.000Z
|
2022-03-31T15:28:00.000Z
|
capstone/django_sql_trace/__init__.py
|
ChefAndy/capstone
|
bac7c44518312c5b64462ea92ecbabdcb5d29bb6
|
[
"MIT"
] | 38
|
2017-06-22T14:46:23.000Z
|
2022-03-16T05:32:54.000Z
|
import django.db.backends.utils
from .wrapper import TracingDebugWrapper
django.db.backends.utils.CursorDebugWrapper = TracingDebugWrapper
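The module monkey-patches Django's debug cursor class at import time, so every query that would go through CursorDebugWrapper is routed through the tracing subclass instead. TracingDebugWrapper itself lives in .wrapper and is not shown; a hypothetical sketch of what such a wrapper could look like:

import logging

from django.db.backends.utils import CursorDebugWrapper

logger = logging.getLogger(__name__)

class LoggingDebugWrapper(CursorDebugWrapper):
    # Hypothetical stand-in for TracingDebugWrapper: log each statement,
    # then defer to the normal debug-cursor behavior.
    def execute(self, sql, params=None):
        logger.debug("SQL: %s params=%r", sql, params)
        return super().execute(sql, params)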
| 23.5
| 65
| 0.858156
| 15
| 141
| 8.066667
| 0.6
| 0.132231
| 0.264463
| 0.347107
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078014
| 141
| 5
| 66
| 28.2
| 0.930769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
98142c934244910a8e9d1b4383b03ad222fd1ffb
| 18,086
|
py
|
Python
|
code-metrics-dev/gerar_pipeline/tools/__init__.py
|
clodonil/audit-aws-pipeline
|
44a41c63fc84096c2327bf6d34909dff1ca3fdab
|
[
"Apache-2.0"
] | null | null | null |
code-metrics-dev/gerar_pipeline/tools/__init__.py
|
clodonil/audit-aws-pipeline
|
44a41c63fc84096c2327bf6d34909dff1ca3fdab
|
[
"Apache-2.0"
] | null | null | null |
code-metrics-dev/gerar_pipeline/tools/__init__.py
|
clodonil/audit-aws-pipeline
|
44a41c63fc84096c2327bf6d34909dff1ca3fdab
|
[
"Apache-2.0"
] | null | null | null |
def pipeline_success(account, execution_id, pipeline, region, pipeline_id):
tmp = "arn:aws:codepipeline:{0}:{1}:{2}".format(region, account, pipeline)
arn = [tmp]
chamada_api = []
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "region":region, "state": "STARTED", "version": 2.0, "action": "Compilacao", "type": {"owner": "AWS", "category": "Build", "version": "1", "provider": "CodeBuild"}, "stage": "CI"}, "detail-type": "CodePipeline Action Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:50:25Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "version": 2.0, "state": "STARTED", "stage": "CI"}, "detail-type": "CodePipeline Stage Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:50:24Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "region": region, "state": "SUCCEEDED", "version": 2.0, "action": "TestUnit", "type": {"owner": "AWS", "category": "Build", "version": "1", "provider": "CodeBuild"}, "stage": "CI"}, "detail-type": "CodePipeline Action Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:51:27Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "version": 2.0, "state": "SUCCEEDED", "stage": "SourceCode"}, "detail-type": "CodePipeline Stage Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:50:24Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "region": region, "state": "SUCCEEDED", "version": 2.0, "action": "Compilacao", "type": {"owner": "AWS", "category": "Build", "version": "1", "provider": "CodeBuild"}, "stage": "CI"}, "detail-type": "CodePipeline Action Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:51:28Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "version": 2.0, "state": "STARTED", "stage": "Publish"}, "detail-type": "CodePipeline Stage Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:52:32Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "region": region, "state": "SUCCEEDED", "version": 2.0, "action": "Source", "type": {"owner": "AWS", "category": "Source", "version": "1", "provider": "CodeCommit"}, "stage": "SourceCode"}, "detail-type": "CodePipeline Action Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:50:23Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "version": 2.0, "state": "STARTED"}, "detail-type": "CodePipeline Pipeline Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:50:17Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "region": region, "state": "SUCCEEDED", "version": 2.0, "action": "Scan", "type": {"owner": "AWS", "category": "Build", "version": "1", "provider": "CodeBuild"}, "stage": "CI"}, "detail-type": "CodePipeline Action Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:52:32Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "version": 2.0, "state": "STARTED", "stage": "SourceCode"}, "detail-type": "CodePipeline Stage Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:50:17Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "region": region, "state": "STARTED", "version": 2.0, "action": "TestUnit", "type": {"owner": "AWS", "category": "Build", "version": "1", "provider": "CodeBuild"}, "stage": "CI"}, "detail-type": "CodePipeline Action Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:50:24Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "version": 2.0, "state": "SUCCEEDED", "stage": "Publish"}, "detail-type": "CodePipeline Stage Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:53:36Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "version": 2.0, "state": "SUCCEEDED"}, "detail-type": "CodePipeline Pipeline Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:53:36Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"eventVersion": "1.05", "eventID": "28f9c968-9ab7-49b1-ab53-f23d8ca9c986", "eventTime": "2019-07-30T20:50:17Z", "requestParameters": {"name": pipeline, "clientRequestToken": "27da5ffc-79fe-4387-b55c-0e0fbbe108d5"}, "eventType": "AwsApiCall", "responseElements": {"pipelineExecutionId": execution_id}, "awsRegion": region, "eventName": "StartPipelineExecution", "userIdentity": {"userName": "jose@localhost", "principalId": "123123123", "accessKeyId": "324234324", "invokedBy": "signin.amazonaws.com", "sessionContext": {"attributes": {"creationDate": "2019-07-30T20:42:54Z", "mfaAuthenticated": "false"}}, "type": "IAMUser", "arn": "arn:aws:iam::325847872862:user/clodonil.trigo@itau-unibanco.com.br", "accountId": account}, "eventSource": "codepipeline.amazonaws.com", "requestID": "b72b78a0-dacd-4def-a963-79fe6fdc3e8f", "userAgent": "signin.amazonaws.com", "sourceIPAddress": "200.196.153.14"}, "detail-type": "AWS API Call via CloudTrail", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:50:17Z", "id": "f591323b-3cfd-4dc4-6efc-37903edf77ec", "resources": []})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "region": region, "state": "SUCCEEDED", "version": 2.0, "action": "ECR", "type": {"owner": "AWS", "category": "Build", "version": "1", "provider": "CodeBuild"}, "stage": "Publish"}, "detail-type": "CodePipeline Action Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:53:36Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "region": region, "state": "STARTED", "version": 2.0, "action": "Scan", "type": {"owner": "AWS", "category": "Build", "version": "1", "provider": "CodeBuild"}, "stage": "CI"}, "detail-type": "CodePipeline Action Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:51:29Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "region": region, "state": "STARTED", "version": 2.0, "action": "ECR", "type": {"owner": "AWS", "category": "Build", "version": "1", "provider": "CodeBuild"}, "stage": "Publish"}, "detail-type": "CodePipeline Action Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:52:33Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "region": region, "state": "STARTED", "version": 2.0, "action": "Source", "type": {"owner": "AWS", "category": "Source", "version": "1", "provider": "CodeCommit"}, "stage": "SourceCode"}, "detail-type": "CodePipeline Action Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:50:18Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "version": 2.0, "state": "SUCCEEDED", "stage": "CI"}, "detail-type": "CodePipeline Stage Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:52:32Z", "id": pipeline_id, "resources": arn})
return chamada_api
def pipeline_faild(account, execution_id, pipeline, region, pipeline_id):
tmp = "arn:aws:codepipeline:{0}:{1}:{2}".format(region, account, pipeline)
arn = [tmp]
chamada_api = []
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "region":region, "state": "STARTED", "version": 2.0, "action": "Compilacao", "type": {"owner": "AWS", "category": "Build", "version": "1", "provider": "CodeBuild"}, "stage": "CI"}, "detail-type": "CodePipeline Action Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:50:25Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "version": 2.0, "state": "STARTED", "stage": "CI"}, "detail-type": "CodePipeline Stage Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:50:24Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "region": region, "state": "SUCCEEDED", "version": 2.0, "action": "TestUnit", "type": {"owner": "AWS", "category": "Build", "version": "1", "provider": "CodeBuild"}, "stage": "CI"}, "detail-type": "CodePipeline Action Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:51:27Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "version": 2.0, "state": "SUCCEEDED", "stage": "SourceCode"}, "detail-type": "CodePipeline Stage Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:50:24Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "region": region, "state": "SUCCEEDED", "version": 2.0, "action": "Compilacao", "type": {"owner": "AWS", "category": "Build", "version": "1", "provider": "CodeBuild"}, "stage": "CI"}, "detail-type": "CodePipeline Action Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:51:28Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "version": 2.0, "state": "STARTED", "stage": "Publish"}, "detail-type": "CodePipeline Stage Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:52:32Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "region": region, "state": "SUCCEEDED", "version": 2.0, "action": "Source", "type": {"owner": "AWS", "category": "Source", "version": "1", "provider": "CodeCommit"}, "stage": "SourceCode"}, "detail-type": "CodePipeline Action Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:50:23Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "version": 2.0, "state": "STARTED"}, "detail-type": "CodePipeline Pipeline Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:50:17Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "region": region, "state": "SUCCEEDED", "version": 2.0, "action": "Scan", "type": {"owner": "AWS", "category": "Build", "version": "1", "provider": "CodeBuild"}, "stage": "CI"}, "detail-type": "CodePipeline Action Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:52:32Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "version": 2.0, "state": "STARTED", "stage": "SourceCode"}, "detail-type": "CodePipeline Stage Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:50:17Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "region": region, "state": "STARTED", "version": 2.0, "action": "TestUnit", "type": {"owner": "AWS", "category": "Build", "version": "1", "provider": "CodeBuild"}, "stage": "CI"}, "detail-type": "CodePipeline Action Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:50:24Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "version": 2.0, "state": "FAILED", "stage": "Publish"}, "detail-type": "CodePipeline Stage Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:53:36Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "version": 2.0, "state": "FAILED"}, "detail-type": "CodePipeline Pipeline Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:53:36Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"eventVersion": "1.05", "eventID": "28f9c968-9ab7-49b1-ab53-f23d8ca9c986", "eventTime": "2019-07-30T20:50:17Z", "requestParameters": {"name": pipeline, "clientRequestToken": "27da5ffc-79fe-4387-b55c-0e0fbbe108d5"}, "eventType": "AwsApiCall", "responseElements": {"pipelineExecutionId": execution_id}, "awsRegion": region, "eventName": "StartPipelineExecution", "userIdentity": {"userName": "jose@localhost", "principalId": "123123123", "accessKeyId": "324234324", "invokedBy": "signin.amazonaws.com", "sessionContext": {"attributes": {"creationDate": "2019-07-30T20:42:54Z", "mfaAuthenticated": "false"}}, "type": "IAMUser", "arn": "arn:aws:iam::325847872862:user/clodonil.trigo@itau-unibanco.com.br", "accountId": account}, "eventSource": "codepipeline.amazonaws.com", "requestID": "b72b78a0-dacd-4def-a963-79fe6fdc3e8f", "userAgent": "signin.amazonaws.com", "sourceIPAddress": "200.196.153.14"}, "detail-type": "AWS API Call via CloudTrail", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:50:17Z", "id": "f591323b-3cfd-4dc4-6efc-37903edf77ec", "resources": []})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "region": region, "state": "FAILED", "version": 2.0, "action": "ECR", "type": {"owner": "AWS", "category": "Build", "version": "1", "provider": "CodeBuild"}, "stage": "Publish"}, "detail-type": "CodePipeline Action Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:53:36Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "region": region, "state": "STARTED", "version": 2.0, "action": "Scan", "type": {"owner": "AWS", "category": "Build", "version": "1", "provider": "CodeBuild"}, "stage": "CI"}, "detail-type": "CodePipeline Action Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:51:29Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "region": region, "state": "STARTED", "version": 2.0, "action": "ECR", "type": {"owner": "AWS", "category": "Build", "version": "1", "provider": "CodeBuild"}, "stage": "Publish"}, "detail-type": "CodePipeline Action Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:52:33Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "region": region, "state": "STARTED", "version": 2.0, "action": "Source", "type": {"owner": "AWS", "category": "Source", "version": "1", "provider": "CodeCommit"}, "stage": "SourceCode"}, "detail-type": "CodePipeline Action Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:50:18Z", "id": pipeline_id, "resources": arn})
chamada_api.append({"account": account, "region": region, "detail": {"execution-id": execution_id, "pipeline": pipeline, "version": 2.0, "state": "SUCCEEDED", "stage": "CI"}, "detail-type": "CodePipeline Stage Execution State Change", "source": "aws.codepipeline", "version": "0", "time": "2019-07-30T20:52:32Z", "id": pipeline_id, "resources": arn})
return chamada_api
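The returned list mixes pipeline-, stage-, and action-level events. A minimal sketch (assuming only the event shapes built above) of bucketing them by their EventBridge detail-type:
from collections import defaultdict
def group_by_detail_type(events):
    # Bucket the mock events by "detail-type", e.g. "CodePipeline Stage Execution State Change"
    grouped = defaultdict(list)
    for event in events:
        grouped[event["detail-type"]].append(event)
    return grouped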
| 334.925926
| 1,158
| 0.669855
| 2,140
| 18,086
| 5.604206
| 0.064486
| 0.069707
| 0.038522
| 0.072876
| 0.996665
| 0.996665
| 0.996665
| 0.996665
| 0.996665
| 0.996665
| 0
| 0.05795
| 0.095488
| 18,086
| 53
| 1,159
| 341.245283
| 0.675164
| 0
| 0
| 0.833333
| 0
| 0.041667
| 0.540252
| 0.032069
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0
| 0
| 0.083333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
e298f742dad48d1d6c540e8526d04df0393781ff
| 204
|
py
|
Python
|
transquest/algo/siamese_transformers/losses/__init__.py
|
joaomoura1996/TransQuest
|
d10228616d94839c209786eda639c59b8b4a2182
|
[
"Apache-2.0"
] | null | null | null |
transquest/algo/siamese_transformers/losses/__init__.py
|
joaomoura1996/TransQuest
|
d10228616d94839c209786eda639c59b8b4a2182
|
[
"Apache-2.0"
] | null | null | null |
transquest/algo/siamese_transformers/losses/__init__.py
|
joaomoura1996/TransQuest
|
d10228616d94839c209786eda639c59b8b4a2182
|
[
"Apache-2.0"
] | null | null | null |
from .batch_hard_triplet_loss import *
from .cosine_similarity_loss import *
from .mse_loss import *
from .multiple_negatives_ranking_loss import *
from .softmax_loss import *
from .triplet_loss import *
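These star imports re-export every loss at the package root. A hedged usage line (the class name is assumed from the module name and not verified against the repo):
# Hypothetical: assumes cosine_similarity_loss.py defines a CosineSimilarityLoss class
from transquest.algo.siamese_transformers.losses import CosineSimilarityLoss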
| 29.142857
| 46
| 0.823529
| 29
| 204
| 5.413793
| 0.448276
| 0.382166
| 0.44586
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 204
| 6
| 47
| 34
| 0.872222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
2c3fce143c8bbc4958c89bb2355b68d1e0387322
| 18,112
|
py
|
Python
|
src/costmanagement/azext_costmanagement/generated/custom.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 207
|
2017-11-29T06:59:41.000Z
|
2022-03-31T10:00:53.000Z
|
src/costmanagement/azext_costmanagement/generated/custom.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 4,061
|
2017-10-27T23:19:56.000Z
|
2022-03-31T23:18:30.000Z
|
src/costmanagement/azext_costmanagement/generated/custom.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 802
|
2017-10-11T17:36:26.000Z
|
2022-03-31T22:24:32.000Z
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
import json
# def costmanagement_view_list(cmd, client,
# scope=None):
# if scope is not None:
# return client.list_by_scope(scope=scope)
# return client.list()
# def costmanagement_view_show(cmd, client,
# view_name,
# scope=None):
# if scope is not None and view_name is not None:
# return client.get_by_scope(scope=scope,
# view_name=view_name)
# return client.get(view_name=view_name)
# def costmanagement_view_create(cmd, client,
# view_name,
# scope=None,
# e_tag=None,
# display_name=None,
# properties_scope=None,
# chart=None,
# accumulated=None,
# metric=None,
# kpis=None,
# pivots=None,
# query_timeframe=None,
# query_time_period=None,
# query_dataset=None):
# if isinstance(query_dataset, str):
# query_dataset = json.loads(query_dataset)
# if scope is not None and view_name is not None:
# return client.create_or_update_by_scope(scope=scope,
# view_name=view_name,
# e_tag=e_tag,
# display_name=display_name,
# view_properties_scope=properties_scope,
# chart=chart,
# accumulated=accumulated,
# metric=metric,
# kpis=kpis,
# pivots=pivots,
# timeframe=query_timeframe,
# time_period=query_time_period,
# dataset=query_dataset)
# return client.create_or_update(view_name=view_name,
# e_tag=e_tag,
# display_name=display_name,
# scope=scope,
# chart=chart,
# accumulated=accumulated,
# metric=metric,
# kpis=kpis,
# pivots=pivots,
# timeframe=query_timeframe,
# time_period=query_time_period,
# dataset=query_dataset)
# def costmanagement_view_delete(cmd, client,
# view_name,
# scope=None):
# if scope is not None and view_name is not None:
# return client.delete_by_scope(scope=scope,
# view_name=view_name)
# return client.delete(view_name=view_name)
# def costmanagement_alert_list(cmd, client,
# scope):
# return client.list(scope=scope)
# def costmanagement_alert_list_external(cmd, client,
# external_cloud_provider_type,
# external_cloud_provider_id):
# return client.list_external(external_cloud_provider_type=external_cloud_provider_type,
# external_cloud_provider_id=external_cloud_provider_id)
# def costmanagement_forecast_external_cloud_provider_usage(cmd, client,
# external_cloud_provider_type,
# external_cloud_provider_id,
# type_,
# timeframe,
# filter=None,
# time_period=None,
# include_actual_cost=None,
# include_fresh_partial_cost=None,
# dataset_configuration=None,
# dataset_aggregation=None,
# dataset_grouping=None,
# dataset_filter=None):
# if isinstance(dataset_aggregation, str):
# dataset_aggregation = json.loads(dataset_aggregation)
# if isinstance(dataset_filter, str):
# dataset_filter = json.loads(dataset_filter)
# return client.external_cloud_provider_usage(filter=filter,
# external_cloud_provider_type=external_cloud_provider_type,
# external_cloud_provider_id=external_cloud_provider_id,
# type=type_,
# timeframe=timeframe,
# time_period=time_period,
# include_actual_cost=include_actual_cost,
# include_fresh_partial_cost=include_fresh_partial_cost,
# configuration=dataset_configuration,
# aggregation=dataset_aggregation,
# grouping=dataset_grouping,
# query_filter=dataset_filter)
# def costmanagement_forecast_usage(cmd, client,
# scope,
# type_,
# timeframe,
# filter=None,
# time_period=None,
# include_actual_cost=None,
# include_fresh_partial_cost=None,
# dataset_configuration=None,
# dataset_aggregation=None,
# dataset_grouping=None,
# dataset_filter=None):
# if isinstance(dataset_aggregation, str):
# dataset_aggregation = json.loads(dataset_aggregation)
# if isinstance(dataset_filter, str):
# dataset_filter = json.loads(dataset_filter)
# return client.usage(filter=filter,
# scope=scope,
# type=type_,
# timeframe=timeframe,
# time_period=time_period,
# include_actual_cost=include_actual_cost,
# include_fresh_partial_cost=include_fresh_partial_cost,
# configuration=dataset_configuration,
# aggregation=dataset_aggregation,
# grouping=dataset_grouping,
# query_filter=dataset_filter)
# def costmanagement_dimension_list(cmd, client,
# scope,
# filter=None,
# expand=None,
# skiptoken=None,
# top=None):
# return client.list(scope=scope,
# filter=filter,
# expand=expand,
# skiptoken=skiptoken,
# top=top)
# def costmanagement_dimension_by_external_cloud_provider_type(cmd, client,
# external_cloud_provider_type,
# external_cloud_provider_id,
# filter=None,
# expand=None,
# skiptoken=None,
# top=None):
# return client.by_external_cloud_provider_type(external_cloud_provider_type=external_cloud_provider_type,
# external_cloud_provider_id=external_cloud_provider_id,
# filter=filter,
# expand=expand,
# skiptoken=skiptoken,
# top=top)
def costmanagement_query_usage(cmd, client,
scope,
type_,
timeframe,
time_period=None,
dataset_configuration=None,
dataset_aggregation=None,
dataset_grouping=None,
dataset_filter=None):
if isinstance(dataset_aggregation, str):
dataset_aggregation = json.loads(dataset_aggregation)
if isinstance(dataset_filter, str):
dataset_filter = json.loads(dataset_filter)
return client.usage(scope=scope,
type=type_,
timeframe=timeframe,
time_period=time_period,
configuration=dataset_configuration,
aggregation=dataset_aggregation,
grouping=dataset_grouping,
filter=dataset_filter)
def costmanagement_query_usage_by_external_cloud_provider_type(cmd, client,
external_cloud_provider_type,
external_cloud_provider_id,
type_,
timeframe,
time_period=None,
dataset_configuration=None,
dataset_aggregation=None,
dataset_grouping=None,
dataset_filter=None):
if isinstance(dataset_aggregation, str):
dataset_aggregation = json.loads(dataset_aggregation)
if isinstance(dataset_filter, str):
dataset_filter = json.loads(dataset_filter)
return client.usage_by_external_cloud_provider_type(external_cloud_provider_type=external_cloud_provider_type,
external_cloud_provider_id=external_cloud_provider_id,
type=type_,
timeframe=timeframe,
time_period=time_period,
configuration=dataset_configuration,
aggregation=dataset_aggregation,
grouping=dataset_grouping,
filter=dataset_filter)
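Both active commands accept dataset_aggregation and dataset_filter either as parsed dicts or as JSON strings. A minimal sketch of that coercion pattern in isolation (helper name hypothetical):
import json
def coerce_json(value):
    # Accept a dict as-is, or parse its JSON-string form, as the commands above do.
    if isinstance(value, str):
        return json.loads(value)
    return value
assert coerce_json('{"name": "PreTaxCost"}') == coerce_json({"name": "PreTaxCost"})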
# def costmanagement_export_list(cmd, client,
# scope):
# return client.list(scope=scope)
# def costmanagement_export_show(cmd, client,
# scope,
# export_name):
# if scope is not None and export_name is not None:
# return client.get(scope=scope,
# export_name=export_name)
# return client.get_execution_history(scope=scope,
# export_name=export_name)
# def costmanagement_export_create(cmd, client,
# scope,
# export_name,
# e_tag=None,
# definition_type=None,
# definition_timeframe=None,
# definition_time_period=None,
# definition_dataset_configuration=None,
# definition_dataset_aggregation=None,
# definition_dataset_grouping=None,
# definition_dataset_filter=None,
# delivery_info_destination=None,
# schedule_status=None,
# schedule_recurrence=None,
# schedule_recurrence_period=None):
# if isinstance(definition_dataset_aggregation, str):
# definition_dataset_aggregation = json.loads(definition_dataset_aggregation)
# if isinstance(definition_dataset_filter, str):
# definition_dataset_filter = json.loads(definition_dataset_filter)
# return client.create_or_update(scope=scope,
# export_name=export_name,
# e_tag=e_tag,
# type=definition_type,
# timeframe=definition_timeframe,
# time_period=definition_time_period,
# configuration=definition_dataset_configuration,
# aggregation=definition_dataset_aggregation,
# grouping=definition_dataset_grouping,
# filter=definition_dataset_filter,
# destination=delivery_info_destination,
# status=schedule_status,
# recurrence=schedule_recurrence,
# recurrence_period=schedule_recurrence_period)
# def costmanagement_export_update(cmd, client,
# scope,
# export_name,
# e_tag=None,
# definition_type=None,
# definition_timeframe=None,
# definition_time_period=None,
# definition_dataset_configuration=None,
# definition_dataset_aggregation=None,
# definition_dataset_grouping=None,
# definition_dataset_filter=None,
# delivery_info_destination=None,
# schedule_status=None,
# schedule_recurrence=None,
# schedule_recurrence_period=None):
# if isinstance(definition_dataset_aggregation, str):
# definition_dataset_aggregation = json.loads(definition_dataset_aggregation)
# if isinstance(definition_dataset_filter, str):
# definition_dataset_filter = json.loads(definition_dataset_filter)
# return client.create_or_update(scope=scope,
# export_name=export_name,
# e_tag=e_tag,
# type=definition_type,
# timeframe=definition_timeframe,
# time_period=definition_time_period,
# configuration=definition_dataset_configuration,
# aggregation=definition_dataset_aggregation,
# grouping=definition_dataset_grouping,
# filter=definition_dataset_filter,
# destination=delivery_info_destination,
# status=schedule_status,
# recurrence=schedule_recurrence,
# recurrence_period=schedule_recurrence_period)
# def costmanagement_export_delete(cmd, client,
# scope,
# export_name):
# return client.delete(scope=scope,
# export_name=export_name)
# def costmanagement_export_execute(cmd, client,
# scope,
# export_name):
# return client.execute(scope=scope,
# export_name=export_name)
| 55.051672
| 115
| 0.420384
| 1,197
| 18,112
| 5.998329
| 0.092732
| 0.054318
| 0.087744
| 0.05571
| 0.865599
| 0.850139
| 0.820334
| 0.802507
| 0.800139
| 0.785376
| 0
| 0
| 0.520925
| 18,112
| 328
| 116
| 55.219512
| 0.827475
| 0.794777
| 0
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044444
| false
| 0
| 0.022222
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
2cc1e779533966516e92145df0cfffbbc25cca5e
| 203
|
py
|
Python
|
FSMSIM/expr/expr.py
|
FSMSIM/FSMSIM
|
a3069c07aeca6e814519871f4c93a88da32c9e1d
|
[
"BSD-3-Clause"
] | null | null | null |
FSMSIM/expr/expr.py
|
FSMSIM/FSMSIM
|
a3069c07aeca6e814519871f4c93a88da32c9e1d
|
[
"BSD-3-Clause"
] | null | null | null |
FSMSIM/expr/expr.py
|
FSMSIM/FSMSIM
|
a3069c07aeca6e814519871f4c93a88da32c9e1d
|
[
"BSD-3-Clause"
] | null | null | null |
class Expr:
def evaluate(self) -> str:
raise NotImplementedError()
def __str__(self) -> str:
return self.evaluate()
def __repr__(self) -> str:
return self.evaluate()
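A hedged sketch of how a concrete node plugs into this base class (Literal is hypothetical, not part of FSMSIM):
class Literal(Expr):
    def __init__(self, value: str):
        self.value = value
    def evaluate(self) -> str:
        return self.value
print(Literal("a & b"))  # __str__ and __repr__ both delegate to evaluate(), printing: a & b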
| 22.555556
| 35
| 0.600985
| 22
| 203
| 5.181818
| 0.454545
| 0.184211
| 0.22807
| 0.298246
| 0.438596
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.280788
| 203
| 9
| 36
| 22.555556
| 0.780822
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0
| 0.285714
| 0.857143
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
e2d2d6275b0d55d2dab16ce10300f2ec64ebe29e
| 3,161
|
py
|
Python
|
transformer/Attention.py
|
sajith-rahim/attention
|
5bb8abd5022dbd67300e5ecc6a5dd5edb0844983
|
[
"MIT"
] | null | null | null |
transformer/Attention.py
|
sajith-rahim/attention
|
5bb8abd5022dbd67300e5ecc6a5dd5edb0844983
|
[
"MIT"
] | null | null | null |
transformer/Attention.py
|
sajith-rahim/attention
|
5bb8abd5022dbd67300e5ecc6a5dd5edb0844983
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
import torch
class Attention(nn.Module):
r"""
Scaled Dot Product Attention
"""
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_dropout_rate=0.1, projection_dropout_rate=0.1):
super(Attention, self).__init__()
        assert dim % num_heads == 0, "Dimension has to be divisible by num_heads in order to split!"
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
        # combining q, k, v as a single layer [d, d*3]
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_dropout = nn.Dropout(attn_dropout_rate)
self.projection = nn.Linear(dim, dim)
self.projection_drop = nn.Dropout(projection_dropout_rate)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x)
# 3 matrices - q,k,v
qkv = qkv.reshape(B, N, 3, self.num_heads, C // self.num_heads)
qkv = qkv.permute(2, 0, 3, 1, 4)
# q, k, v = qkv[0], qkv[1], qkv[2]
q, k, v = qkv.unbind(0) # make torch-script happy (cannot use tensor as tuple)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
        attn = self.attn_dropout(attn)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.projection(x)
x = self.projection_drop(x)
return x
class MaskedAttention(nn.Module):
r"""
Scaled Dot Product Attention with Mask
"""
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_dropout_rate=0.1, projection_dropout_rate=0.1):
super(MaskedAttention, self).__init__()
        assert dim % num_heads == 0, "Dimension has to be divisible by num_heads in order to split!"
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
        # combining q, k, v as a single layer [d, d*3]
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_dropout = nn.Dropout(attn_dropout_rate)
self.projection = nn.Linear(dim, dim)
self.projection_drop = nn.Dropout(projection_dropout_rate)
def forward(self, x, mask=None):
B, N, C = x.shape
qkv = self.qkv(x)
# 3 matrices - q,k,v
qkv = qkv.reshape(B, N, 3, self.num_heads, C // self.num_heads)
qkv = qkv.permute(2, 0, 3, 1, 4)
# q, k, v = qkv[0], qkv[1], qkv[2]
q, k, v = qkv.unbind(0) # make torch-script happy (cannot use tensor as tuple)
attn = (q @ k.transpose(-2, -1)) * self.scale
if mask is not None:
            mask_value = -torch.finfo(attn.dtype).max  # effectively -inf: negative of the dtype's largest finite value
assert mask.shape[-1] == attn.shape[-1], 'mask has incorrect dimensions'
mask = mask[:, None, :] * mask[:, :, None]
mask = mask.unsqueeze(1).repeat(1, self.num_heads, 1, 1)
not_mask = ~mask
attn.masked_fill_(not_mask, mask_value)
attn = attn.softmax(dim=-1)
        attn = self.attn_dropout(attn)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.projection(x)
x = self.projection_drop(x)
return x
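A minimal smoke test of the masked variant under the shapes the code expects (dim divisible by num_heads; mask a boolean [batch, tokens] tensor), assuming PyTorch is available:
import torch
attn = MaskedAttention(dim=64, num_heads=8)
x = torch.randn(2, 10, 64)                  # [batch, tokens, dim]
mask = torch.ones(2, 10, dtype=torch.bool)
mask[:, 5:] = False                         # treat the last five positions as padding
out = attn(x, mask=mask)
print(out.shape)                            # torch.Size([2, 10, 64])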
| 32.255102
| 109
| 0.582727
| 485
| 3,161
| 3.651546
| 0.193814
| 0.067758
| 0.013552
| 0.020328
| 0.8131
| 0.8131
| 0.8131
| 0.774704
| 0.774704
| 0.774704
| 0
| 0.025618
| 0.283771
| 3,161
| 98
| 110
| 32.255102
| 0.756625
| 0.126542
| 0
| 0.711864
| 0
| 0
| 0.051047
| 0
| 0
| 0
| 0
| 0
| 0.050847
| 1
| 0.067797
| false
| 0
| 0.033898
| 0
| 0.169492
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e2e27aa9b74f6d57ca6bfcf0d0eea858bf7e9a7c
| 58,684
|
py
|
Python
|
tests/test_index.py
|
sanders41/meilisearch-cli
|
b691be610e84640fe9877aed23131e995eb717b8
|
[
"MIT"
] | 2
|
2022-03-17T02:25:05.000Z
|
2022-03-30T07:32:21.000Z
|
tests/test_index.py
|
sanders41/meilisearch-cli
|
b691be610e84640fe9877aed23131e995eb717b8
|
[
"MIT"
] | 86
|
2021-10-17T19:23:01.000Z
|
2022-03-29T00:34:19.000Z
|
tests/test_index.py
|
sanders41/meilisearch-cli
|
b691be610e84640fe9877aed23131e995eb717b8
|
[
"MIT"
] | 2
|
2021-11-09T17:58:01.000Z
|
2021-12-22T00:46:35.000Z
|
from unittest.mock import patch
import pytest
from meilisearch.client import Client
from meilisearch.errors import MeiliSearchApiError
from meilisearch.index import Index
from requests.models import Response
from meilisearch_cli.main import app
from tests.utils import get_update_id_from_output
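get_update_id_from_output is imported from tests.utils, which is outside this file. A hedged guess at its behavior, purely for readability (the real helper may differ):
import re
def get_update_id_from_output(output: str) -> int:
    # Hypothetical: pull the numeric uid out of the CLI's printed response.
    match = re.search(r'"?uid"?[:\s]+(\d+)', output)
    assert match is not None, "no uid found in output"
    return int(match.group(1))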
@pytest.mark.parametrize("use_env", [True, False])
@pytest.mark.parametrize("raw", [True, False])
def test_create_index(
use_env, raw, test_runner, index_uid, base_url, master_key, client, monkeypatch
):
args = ["index", "create", index_uid]
if use_env:
monkeypatch.setenv("MEILI_HTTP_ADDR", base_url)
monkeypatch.setenv("MEILI_MASTER_KEY", master_key)
else:
args.append("--url")
args.append(base_url)
args.append("--master-key")
args.append(master_key)
if raw:
args.append("--raw")
runner_result = test_runner.invoke(app, args, catch_exceptions=False)
result = client.get_index(index_uid)
assert result.uid == index_uid
out = runner_result.stdout
assert "uid" in out
assert index_uid in out
if raw:
assert "{" in out
assert "}" in out
@pytest.mark.usefixtures("env_vars")
def test_create_index_with_primary_key(test_runner, index_uid):
primary_key = "alt_id"
args = ["index", "create", index_uid, "--primary-key", primary_key]
runner_result = test_runner.invoke(app, args, catch_exceptions=False)
out = runner_result.stdout
assert "uid" in out
assert index_uid in out
assert "primary_key" in out
assert primary_key in out
@pytest.mark.usefixtures("env_vars")
def test_create_index_exists_error(test_runner, client, index_uid):
response = client.create_index(index_uid)
client.wait_for_task(response["uid"])
runner_result = test_runner.invoke(app, ["index", "create", index_uid], catch_exceptions=False)
out = runner_result.stdout
assert "already exists" in out
@pytest.mark.usefixtures("env_vars")
@patch.object(Client, "create_index")
def test_create_index_error(mock_create, test_runner, index_uid):
mock_create.side_effect = MeiliSearchApiError("bad", Response())
with pytest.raises(MeiliSearchApiError):
test_runner.invoke(
app, ["index", "create", index_uid, "--primary-key", "alt_id"], catch_exceptions=False
)
def test_create_index_no_url_master_key(index_uid, test_runner):
runner_result = test_runner.invoke(app, ["index", "create", index_uid])
out = runner_result.stdout
assert "MEILI_HTTP_ADDR" in out
assert "MEILI_MASTER_KEY" in out
@pytest.mark.parametrize("use_env", [True, False])
def test_delete_index(use_env, base_url, master_key, test_runner, index_uid, monkeypatch, client):
args = ["index", "delete", index_uid]
if use_env:
monkeypatch.setenv("MEILI_HTTP_ADDR", base_url)
monkeypatch.setenv("MEILI_MASTER_KEY", master_key)
else:
args.append("--url")
args.append(base_url)
args.append("--master-key")
args.append(master_key)
response = client.create_index(index_uid)
client.wait_for_task(response["uid"])
assert len(client.get_indexes()) == 1
runner_result = test_runner.invoke(app, args)
assert client.get_indexes() == []
out = runner_result.stdout
assert "successfully deleted" in out
@pytest.mark.usefixtures("env_vars")
def test_delete_index_not_found_error(test_runner):
runner_result = test_runner.invoke(app, ["index", "delete", "bad"], catch_exceptions=False)
out = runner_result.stdout
assert "not found" in out
@pytest.mark.usefixtures("env_vars")
@patch.object(Index, "delete")
def test_delete_index_error(mock_delete, test_runner, index_uid):
mock_delete.side_effect = MeiliSearchApiError("bad", Response())
with pytest.raises(MeiliSearchApiError):
test_runner.invoke(app, ["index", "delete", index_uid], catch_exceptions=False)
def test_delete_index_no_url_master_key(index_uid, test_runner):
runner_result = test_runner.invoke(app, ["index", "delete", index_uid])
out = runner_result.stdout
assert "MEILI_HTTP_ADDR" in out
assert "MEILI_MASTER_KEY" in out
@pytest.mark.parametrize("use_env", [True, False])
@pytest.mark.parametrize("raw", [True, False])
def test_get_index(use_env, raw, base_url, master_key, test_runner, index_uid, monkeypatch, client):
args = ["index", "get", index_uid]
if use_env:
monkeypatch.setenv("MEILI_HTTP_ADDR", base_url)
monkeypatch.setenv("MEILI_MASTER_KEY", master_key)
else:
args.append("--url")
args.append(base_url)
args.append("--master-key")
args.append(master_key)
if raw:
args.append("--raw")
result = client.create_index(index_uid)
client.wait_for_task(result["uid"])
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
assert "uid" in out
assert index_uid in out
if raw:
assert "{" in out
assert "}" in out
@pytest.mark.usefixtures("env_vars")
def test_get_index_not_found_error(test_runner, index_uid):
runner_result = test_runner.invoke(app, ["index", "get", index_uid])
out = runner_result.stdout
assert "not found" in out
@pytest.mark.usefixtures("env_vars")
@patch.object(Client, "get_raw_index")
def test_get_index_error(mock_get, test_runner, index_uid):
mock_get.side_effect = MeiliSearchApiError("bad", Response())
with pytest.raises(MeiliSearchApiError):
test_runner.invoke(app, ["index", "get", index_uid], catch_exceptions=False)
def test_get_index_no_url_master_key(index_uid, test_runner):
runner_result = test_runner.invoke(app, ["index", "get", index_uid])
out = runner_result.stdout
assert "MEILI_HTTP_ADDR" in out
assert "MEILI_MASTER_KEY" in out
@pytest.mark.parametrize("use_env", [True, False])
@pytest.mark.parametrize("raw", [True, False])
def test_get_indexes(
use_env, raw, base_url, master_key, test_runner, index_uid, monkeypatch, client
):
args = ["index", "get-all"]
if use_env:
monkeypatch.setenv("MEILI_HTTP_ADDR", base_url)
monkeypatch.setenv("MEILI_MASTER_KEY", master_key)
else:
args.append("--url")
args.append(base_url)
args.append("--master-key")
args.append(master_key)
if raw:
args.append("--raw")
response = client.create_index(index_uid)
client.wait_for_task(response["uid"])
index2 = "test"
response = client.create_index(index2)
client.wait_for_task(response["uid"])
assert len(client.get_indexes()) == 2
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
assert "uid" in out
assert index_uid in out
assert index2 in out
if raw:
assert "{" in out
assert "}" in out
def test_get_indexes_no_url_master_key(test_runner):
runner_result = test_runner.invoke(app, ["index", "get-all"])
out = runner_result.stdout
assert "MEILI_HTTP_ADDR" in out
assert "MEILI_MASTER_KEY" in out
@pytest.mark.parametrize("use_env", [True, False])
def test_get_primary_key(
use_env, index_uid, base_url, master_key, test_runner, client, monkeypatch
):
args = ["index", "get-primary-key", index_uid]
if use_env:
monkeypatch.setenv("MEILI_HTTP_ADDR", base_url)
monkeypatch.setenv("MEILI_MASTER_KEY", master_key)
else:
args.append("--url")
args.append(base_url)
args.append("--master-key")
args.append(master_key)
primary_key = "id"
result = client.create_index(index_uid, {"primaryKey": primary_key})
client.wait_for_task(result["uid"])
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
assert primary_key in out
def test_get_primary_key_no_url_master_key(index_uid, test_runner):
runner_result = test_runner.invoke(app, ["index", "get-primary-key", index_uid])
out = runner_result.stdout
assert "MEILI_HTTP_ADDR" in out
assert "MEILI_MASTER_KEY" in out
@pytest.mark.usefixtures("env_vars")
def test_get_primary_key_not_found_error(test_runner, index_uid):
runner_result = test_runner.invoke(app, ["index", "get-primary-key", index_uid])
out = runner_result.stdout
assert "not found" in out
@pytest.mark.usefixtures("env_vars")
@patch.object(Index, "get_primary_key")
def test_get_primary_key_error(mock_get, test_runner, index_uid):
mock_get.side_effect = MeiliSearchApiError("bad", Response())
with pytest.raises(MeiliSearchApiError):
test_runner.invoke(app, ["index", "get-primary-key", index_uid], catch_exceptions=False)
@pytest.mark.parametrize("use_env", [True, False])
@pytest.mark.parametrize("raw", [True, False])
def test_get_settings(
use_env, raw, index_uid, base_url, master_key, test_runner, client, monkeypatch
):
args = ["index", "get-settings", index_uid]
if use_env:
monkeypatch.setenv("MEILI_HTTP_ADDR", base_url)
monkeypatch.setenv("MEILI_MASTER_KEY", master_key)
else:
args.append("--url")
args.append(base_url)
args.append("--master-key")
args.append(master_key)
if raw:
args.append("--raw")
result = client.create_index(index_uid)
client.wait_for_task(result["uid"])
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
assert "displayedAttributes" in out
assert "searchableAttributes" in out
assert "filterableAttributes" in out
assert "sortableAttributes" in out
assert "rankingRules" in out
assert "stopWords" in out
assert "synonyms" in out
assert "distinctAttribute" in out
if raw:
assert "{" in out
assert "}" in out
def test_get_settings_no_url_master_key(index_uid, test_runner):
runner_result = test_runner.invoke(app, ["index", "get-settings", index_uid])
out = runner_result.stdout
assert "MEILI_HTTP_ADDR" in out
assert "MEILI_MASTER_KEY" in out
@pytest.mark.usefixtures("env_vars")
def test_get_settings_index_not_found_error(test_runner, index_uid):
runner_result = test_runner.invoke(app, ["index", "get-settings", index_uid])
out = runner_result.stdout
assert "not found" in out
@pytest.mark.usefixtures("env_vars")
@patch.object(Index, "get_settings")
def test_get_settings_error(mock_get, test_runner, index_uid):
mock_get.side_effect = MeiliSearchApiError("bad", Response())
with pytest.raises(MeiliSearchApiError):
test_runner.invoke(app, ["index", "get-settings", index_uid], catch_exceptions=False)
@pytest.mark.parametrize("use_env", [True, False])
@pytest.mark.parametrize("raw", [True, False])
def test_get_stats(use_env, raw, index_uid, base_url, master_key, test_runner, client, monkeypatch):
args = ["index", "get-stats", index_uid]
if use_env:
monkeypatch.setenv("MEILI_HTTP_ADDR", base_url)
monkeypatch.setenv("MEILI_MASTER_KEY", master_key)
else:
args.append("--url")
args.append(base_url)
args.append("--master-key")
args.append(master_key)
if raw:
args.append("--raw")
result = client.create_index(index_uid)
client.wait_for_task(result["uid"])
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
assert "numberOfDocuments" in out
if raw:
assert "{" in out
assert "}" in out
def test_get_stats_no_url_master_key(index_uid, test_runner):
runner_result = test_runner.invoke(app, ["index", "get-stats", index_uid])
out = runner_result.stdout
assert "MEILI_HTTP_ADDR" in out
assert "MEILI_MASTER_KEY" in out
@pytest.mark.usefixtures("env_vars")
def test_get_stats_index_not_found_error(test_runner, index_uid):
runner_result = test_runner.invoke(app, ["index", "get-stats", index_uid])
out = runner_result.stdout
assert "not found" in out
@pytest.mark.usefixtures("env_vars")
@patch.object(Index, "get_stats")
def test_get_stats_error(mock_get, test_runner, index_uid):
mock_get.side_effect = MeiliSearchApiError("bad", Response())
with pytest.raises(MeiliSearchApiError):
test_runner.invoke(app, ["index", "get-stats", index_uid], catch_exceptions=False)
@pytest.mark.parametrize("use_env", [True, False])
@pytest.mark.parametrize("raw", [True, False])
def test_get_task(
use_env, raw, index_uid, base_url, master_key, test_runner, client, small_movies, monkeypatch
):
args = ["index", "get-task", index_uid]
if use_env:
monkeypatch.setenv("MEILI_HTTP_ADDR", base_url)
monkeypatch.setenv("MEILI_MASTER_KEY", master_key)
else:
args.append("--url")
args.append(base_url)
args.append("--master-key")
args.append(master_key)
if raw:
args.append("--raw")
response = client.create_index(index_uid)
client.wait_for_task(response["uid"])
index = client.get_index(index_uid)
update = index.add_documents(small_movies)
args.append(str(update["uid"]))
runner_result = test_runner.invoke(app, args, catch_exceptions=False)
out = runner_result.stdout
assert "status" in out
assert "uid" in out
assert "type" in out
assert "enqueuedAt" in out
if raw:
assert "{" in out
assert "}" in out
def test_get_task_no_url_master_key(index_uid, test_runner):
runner_result = test_runner.invoke(app, ["index", "get-task", index_uid, "0"])
out = runner_result.stdout
assert "MEILI_HTTP_ADDR" in out
assert "MEILI_MASTER_KEY" in out
@pytest.mark.usefixtures("env_vars")
def test_get_task_index_not_found_error(test_runner, index_uid):
runner_result = test_runner.invoke(app, ["index", "get-task", index_uid, "0"])
out = runner_result.stdout
assert "not found" in out
@pytest.mark.usefixtures("env_vars")
@patch.object(Index, "get_task")
def test_get_task_error(mock_get, test_runner, index_uid):
mock_get.side_effect = MeiliSearchApiError("bad", Response())
with pytest.raises(MeiliSearchApiError):
test_runner.invoke(app, ["index", "get-task", index_uid, "0"], catch_exceptions=False)
@pytest.mark.parametrize("use_env", [True, False])
def test_reset_displayed_attributes_no_wait(
use_env,
index_uid,
base_url,
master_key,
test_runner,
empty_index,
monkeypatch,
):
index = empty_index()
args = ["index", "reset-displayed-attributes", index_uid]
if use_env:
monkeypatch.setenv("MEILI_HTTP_ADDR", base_url)
monkeypatch.setenv("MEILI_MASTER_KEY", master_key)
else:
args.append("--url")
args.append(base_url)
args.append("--master-key")
args.append(master_key)
response = index.update_displayed_attributes(["title", "genre"])
index.wait_for_task(response["uid"])
assert index.get_displayed_attributes() == ["title", "genre"]
runner_result = test_runner.invoke(app, args, catch_exceptions=False)
out = runner_result.stdout
assert "uid" in out
@pytest.mark.parametrize("wait_flag", ["--wait", "-w"])
@pytest.mark.parametrize("raw", [True, False])
@pytest.mark.usefixtures("env_vars")
def test_reset_displayed_attributes_wait(
wait_flag,
raw,
index_uid,
test_runner,
empty_index,
):
index = empty_index()
args = ["index", "reset-displayed-attributes", index_uid, wait_flag]
if raw:
args.append("--raw")
response = index.update_displayed_attributes(["title", "genre"])
index.wait_for_task(response["uid"])
assert index.get_displayed_attributes() == ["title", "genre"]
runner_result = test_runner.invoke(app, args, catch_exceptions=False)
out = runner_result.stdout
if raw:
assert '"*"' in out
else:
assert "*" in out
def test_reset_displayed_attributes_no_url_master_key(index_uid, test_runner):
runner_result = test_runner.invoke(app, ["index", "reset-displayed-attributes", index_uid])
out = runner_result.stdout
assert "MEILI_HTTP_ADDR" in out
assert "MEILI_MASTER_KEY" in out
@pytest.mark.usefixtures("env_vars")
def test_reset_displayed_attributes_index_not_found_error(test_runner, index_uid):
runner_result = test_runner.invoke(
app, ["index", "reset-displayed-attributes", index_uid, "--wait"], catch_exceptions=False
)
out = runner_result.stdout
assert "not found" in out
@pytest.mark.usefixtures("env_vars")
@patch.object(Index, "wait_for_task")
def test_reset_displayed_attributes_failed_status(mock_get, test_runner, index_uid, empty_index):
empty_index()
mock_get.side_effect = [
{
"status": "failed",
"uid": 0,
"type": {"name": "ResetDisplayedAttributes", "number": 0},
"error": {
"code": "index_already_exists",
},
"enqueuedAt": "2021-02-14T14:07:09.364505700Z",
}
]
runner_result = test_runner.invoke(
app, ["index", "reset-displayed-attributes", index_uid, "-w"], catch_exceptions=False
)
out = runner_result.stdout
assert "failed" in out
@pytest.mark.usefixtures("env_vars")
@patch.object(Index, "reset_displayed_attributes")
def test_reset_displayed_attributes_error(mock_get, test_runner, index_uid):
mock_get.side_effect = MeiliSearchApiError("bad", Response())
with pytest.raises(MeiliSearchApiError):
test_runner.invoke(
app, ["index", "reset-displayed-attributes", index_uid], catch_exceptions=False
)
@pytest.mark.parametrize("use_env", [True, False])
def test_reset_distinct_attribute_no_wait(
use_env,
index_uid,
base_url,
master_key,
test_runner,
client,
monkeypatch,
):
args = ["index", "reset-distinct-attribute", index_uid]
if use_env:
monkeypatch.setenv("MEILI_HTTP_ADDR", base_url)
monkeypatch.setenv("MEILI_MASTER_KEY", master_key)
else:
args.append("--url")
args.append(base_url)
args.append("--master-key")
args.append(master_key)
index = client.index(index_uid)
update = index.update_distinct_attribute("title")
index.wait_for_task(update["uid"])
assert index.get_distinct_attribute() == "title"
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
assert "uid" in out
@pytest.mark.parametrize("wait_flag", ["--wait", "-w"])
@pytest.mark.parametrize("raw", [True, False])
@pytest.mark.usefixtures("env_vars")
def test_reset_distinct_attribute_wait(
raw,
wait_flag,
index_uid,
test_runner,
client,
):
args = ["index", "reset-distinct-attribute", index_uid, wait_flag]
if raw:
args.append("--raw")
index = client.index(index_uid)
update = index.update_distinct_attribute("title")
index.wait_for_task(update["uid"])
assert index.get_distinct_attribute() == "title"
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
if raw:
assert "null" in out
else:
assert "" in out
@pytest.mark.parametrize("remove_env", ["all", "MEILI_HTTP_ADDR", "MEILI_MASTER_KEY"])
@pytest.mark.usefixtures("env_vars")
def test_reset_distinct_attribute_no_url_master_key(
remove_env, index_uid, test_runner, monkeypatch
):
if remove_env == "all":
monkeypatch.delenv("MEILI_HTTP_ADDR", raising=False)
monkeypatch.delenv("MEILI_MASTER_KEY", raising=False)
else:
monkeypatch.delenv(remove_env, raising=False)
runner_result = test_runner.invoke(app, ["index", "reset-distinct-attribute", index_uid])
out = runner_result.stdout
if remove_env == "all":
assert "MEILI_HTTP_ADDR" in out
assert "MEILI_MASTER_KEY" in out
else:
assert remove_env in out
@pytest.mark.usefixtures("env_vars")
def test_reset_distinct_attribute_index_not_found_error(test_runner, index_uid):
runner_result = test_runner.invoke(
app, ["index", "reset-distinct-attribute", index_uid, "-w"], catch_exceptions=False
)
out = runner_result.stdout
assert "not found" in out
@pytest.mark.usefixtures("env_vars")
@patch.object(Index, "reset_distinct_attribute")
def test_reset_distinct_attribute_error(mock_get, test_runner, index_uid):
mock_get.side_effect = MeiliSearchApiError("bad", Response())
with pytest.raises(MeiliSearchApiError):
test_runner.invoke(
app, ["index", "reset-distinct-attribute", index_uid], catch_exceptions=False
)
@pytest.mark.parametrize("use_env", [True, False])
def test_reset_filterable_attributes_no_wait(
use_env,
index_uid,
base_url,
master_key,
test_runner,
client,
monkeypatch,
):
args = ["index", "reset-filterable-attributes", index_uid]
if use_env:
monkeypatch.setenv("MEILI_HTTP_ADDR", base_url)
monkeypatch.setenv("MEILI_MASTER_KEY", master_key)
else:
args.append("--url")
args.append(base_url)
args.append("--master-key")
args.append(master_key)
index = client.index(index_uid)
update = index.update_displayed_attributes(["title", "genre"])
index.wait_for_task(update["uid"])
assert index.get_displayed_attributes() == ["title", "genre"]
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
assert "uid" in out
@pytest.mark.parametrize("wait_flag", ["--wait", "-w"])
@pytest.mark.parametrize("raw", [True, False])
@pytest.mark.usefixtures("env_vars")
def test_reset_filterable_attributes_wait(
raw,
wait_flag,
index_uid,
test_runner,
client,
):
args = ["index", "reset-filterable-attributes", index_uid, wait_flag]
if raw:
args.append("--raw")
index = client.index(index_uid)
update = index.update_displayed_attributes(["title", "genre"])
index.wait_for_task(update["uid"])
assert index.get_displayed_attributes() == ["title", "genre"]
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
assert "[]" in out
def test_reset_filterable_attributes_no_url_master_key(index_uid, test_runner):
runner_result = test_runner.invoke(app, ["index", "reset-filterable-attributes", index_uid])
out = runner_result.stdout
assert "MEILI_HTTP_ADDR" in out
assert "MEILI_MASTER_KEY" in out
@pytest.mark.usefixtures("env_vars")
def test_reset_filterable_attributes_index_not_found_error(test_runner, index_uid):
runner_result = test_runner.invoke(
app, ["index", "reset-filterable-attributes", index_uid, "-w"]
)
out = runner_result.stdout
assert "not found" in out
@pytest.mark.usefixtures("env_vars")
@patch.object(Index, "reset_filterable_attributes")
def test_reset_filterable_attributes_error(mock_get, test_runner, index_uid):
mock_get.side_effect = MeiliSearchApiError("bad", Response())
with pytest.raises(MeiliSearchApiError):
test_runner.invoke(
app, ["index", "reset-filterable-attributes", index_uid], catch_exceptions=False
)
@pytest.mark.parametrize("use_env", [True, False])
def test_reset_ranking_rules_no_wait(
use_env,
index_uid,
base_url,
master_key,
test_runner,
client,
monkeypatch,
):
args = ["index", "reset-ranking-rules", index_uid]
if use_env:
monkeypatch.setenv("MEILI_HTTP_ADDR", base_url)
monkeypatch.setenv("MEILI_MASTER_KEY", master_key)
else:
args.append("--url")
args.append(base_url)
args.append("--master-key")
args.append(master_key)
index = client.index(index_uid)
update = index.update_displayed_attributes(["sort", "words"])
index.wait_for_task(update["uid"])
assert index.get_displayed_attributes() == ["sort", "words"]
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
"uid" in out
@pytest.mark.parametrize("wait_flag", ["--wait", "-w"])
@pytest.mark.parametrize("raw", [True, False])
@pytest.mark.usefixtures("env_vars")
def test_reset_ranking_rules_wait(
wait_flag,
raw,
index_uid,
test_runner,
client,
):
args = ["index", "reset-ranking-rules", index_uid, wait_flag]
if raw:
args.append("--raw")
index = client.index(index_uid)
update = index.update_displayed_attributes(["sort", "words"])
index.wait_for_task(update["uid"])
assert index.get_displayed_attributes() == ["sort", "words"]
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
for e in ["words", "typo", "proximity", "attribute", "sort", "exactness"]:
assert e in out
def test_reset_ranking_rules_no_url_master_key(index_uid, test_runner):
runner_result = test_runner.invoke(app, ["index", "reset-ranking-rules", index_uid])
out = runner_result.stdout
assert "MEILI_HTTP_ADDR" in out
assert "MEILI_MASTER_KEY" in out
@pytest.mark.usefixtures("env_vars")
def test_reset_ranking_rules_index_not_found_error(test_runner, index_uid):
runner_result = test_runner.invoke(
app, ["index", "reset-ranking-rules", index_uid, "-w"], catch_exceptions=False
)
out = runner_result.stdout
assert "not found" in out
@pytest.mark.usefixtures("env_vars")
@patch.object(Index, "reset_ranking_rules")
def test_reset_ranking_rules_error(mock_get, test_runner, index_uid):
mock_get.side_effect = MeiliSearchApiError("bad", Response())
with pytest.raises(MeiliSearchApiError):
test_runner.invoke(app, ["index", "reset-ranking-rules", index_uid], catch_exceptions=False)
@pytest.mark.parametrize("use_env", [True, False])
def test_reset_searchable_attributes_no_wait(
use_env,
index_uid,
base_url,
master_key,
test_runner,
client,
monkeypatch,
):
args = ["index", "reset-searchable-attributes", index_uid]
if use_env:
monkeypatch.setenv("MEILI_HTTP_ADDR", base_url)
monkeypatch.setenv("MEILI_MASTER_KEY", master_key)
else:
args.append("--url")
args.append(base_url)
args.append("--master-key")
args.append(master_key)
index = client.index(index_uid)
update = index.update_displayed_attributes(["title", "genre"])
index.wait_for_task(update["uid"])
assert index.get_displayed_attributes() == ["title", "genre"]
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
assert "uid" in out
@pytest.mark.parametrize("wait_flag", ["--wait", "-w"])
@pytest.mark.parametrize("raw", [True, False])
@pytest.mark.usefixtures("env_vars")
def test_reset_searchable_attributes_wait(
raw,
wait_flag,
index_uid,
test_runner,
client,
):
args = ["index", "reset-searchable-attributes", index_uid, wait_flag]
if raw:
args.append("--raw")
index = client.index(index_uid)
update = index.update_displayed_attributes(["title", "genre"])
index.wait_for_task(update["uid"])
assert index.get_displayed_attributes() == ["title", "genre"]
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
assert "*" in out
def test_reset_searchable_attributes_no_url_master_key(index_uid, test_runner):
runner_result = test_runner.invoke(app, ["index", "reset-searchable-attributes", index_uid])
out = runner_result.stdout
assert "MEILI_HTTP_ADDR" in out
assert "MEILI_MASTER_KEY" in out
@pytest.mark.usefixtures("env_vars")
def test_reset_searchable_attributes_index_not_found_error(test_runner, index_uid):
runner_result = test_runner.invoke(
app, ["index", "reset-searchable-attributes", index_uid, "-w"]
)
out = runner_result.stdout
assert "not found" in out
@pytest.mark.usefixtures("env_vars")
@patch.object(Index, "reset_searchable_attributes")
def test_reset_searchable_attributes_error(mock_get, test_runner, index_uid):
mock_get.side_effect = MeiliSearchApiError("bad", Response())
with pytest.raises(MeiliSearchApiError):
test_runner.invoke(
app, ["index", "reset-searchable-attributes", index_uid], catch_exceptions=False
)
@pytest.mark.parametrize("use_env", [True, False])
def test_reset_settings_no_wait(
use_env, index_uid, base_url, master_key, test_runner, client, monkeypatch
):
args = ["index", "reset-settings", index_uid]
if use_env:
monkeypatch.setenv("MEILI_HTTP_ADDR", base_url)
monkeypatch.setenv("MEILI_MASTER_KEY", master_key)
else:
args.append("--url")
args.append(base_url)
args.append("--master-key")
args.append(master_key)
index = client.index(index_uid)
updated_settings = {
"displayedAttributes": ["genre", "title"],
"searchableAttributes": ["genre", "title"],
"filterableAttributes": ["genre", "title"],
"sortableAttributes": ["genre", "title"],
"rankingRules": ["sort", "words"],
"stopWords": ["a", "the"],
"synonyms": {"logan": ["marvel", "wolverine"]},
"distinctAttribute": "title",
}
update = index.update_settings(updated_settings)
index.wait_for_task(update["uid"])
assert index.get_settings() == updated_settings
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
assert "uid" in out
@pytest.mark.parametrize("wait_flag", ["--wait", "-w"])
@pytest.mark.parametrize("raw", [True, False])
@pytest.mark.usefixtures("env_vars")
def test_reset_settings_wait(raw, wait_flag, index_uid, test_runner, client):
args = ["index", "reset-settings", index_uid, wait_flag]
if raw:
args.append("--raw")
index = client.index(index_uid)
updated_settings = {
"displayedAttributes": ["genre", "title"],
"searchableAttributes": ["genre", "title"],
"filterableAttributes": ["genre", "title"],
"sortableAttributes": ["genre", "title"],
"rankingRules": ["sort", "words"],
"stopWords": ["a", "the"],
"synonyms": {"logan": ["marvel", "wolverine"]},
"distinctAttribute": "title",
}
update = index.update_settings(updated_settings)
index.wait_for_task(update["uid"])
assert index.get_settings() == updated_settings
runner_result = test_runner.invoke(app, args, catch_exceptions=False)
out = runner_result.stdout
assert "displayedAttributes" in out
assert "*" in out
assert "searchableAttributes" in out
assert "filterableAttributes" in out
assert "[]" in out
assert "sortableAttributes" in out
assert "rankingRules" in out
assert "stopWords" in out
assert "synonyms" in out
assert "{}" in out
assert "distinctAttribute" in out
assert "words" in out
assert "typo" in out
assert "proximity" in out
assert "attribute" in out
assert "sort" in out
assert "exactness" in out
if raw:
assert "null" in out
assert "{" in out
assert "}" in out
else:
assert "None" in out
def test_reset_settings_no_url_master_key(index_uid, test_runner):
runner_result = test_runner.invoke(app, ["index", "reset-settings", index_uid])
out = runner_result.stdout
assert "MEILI_HTTP_ADDR" in out
assert "MEILI_MASTER_KEY" in out
@pytest.mark.usefixtures("env_vars")
def test_reset_settings_index_not_found_error(test_runner, index_uid):
runner_result = test_runner.invoke(app, ["index", "reset-settings", index_uid, "-w"])
out = runner_result.stdout
assert "not found" in out
@pytest.mark.usefixtures("env_vars")
@patch.object(Index, "reset_settings")
def test_reset_settings_error(mock_get, test_runner, index_uid):
mock_get.side_effect = MeiliSearchApiError("bad", Response())
with pytest.raises(MeiliSearchApiError):
test_runner.invoke(app, ["index", "reset-settings", index_uid], catch_exceptions=False)
@pytest.mark.parametrize("use_env", [True, False])
def test_reset_stop_words_no_wait(
use_env,
index_uid,
base_url,
master_key,
test_runner,
client,
monkeypatch,
):
args = ["index", "reset-stop-words", index_uid]
if use_env:
monkeypatch.setenv("MEILI_HTTP_ADDR", base_url)
monkeypatch.setenv("MEILI_MASTER_KEY", master_key)
else:
args.append("--url")
args.append(base_url)
args.append("--master-key")
args.append(master_key)
index = client.index(index_uid)
update = index.update_stop_words(["a", "the"])
index.wait_for_task(update["uid"])
assert index.get_stop_words() == ["a", "the"]
runner_result = test_runner.invoke(app, args, catch_exceptions=False)
out = runner_result.stdout
assert "uid" in out
@pytest.mark.parametrize("wait_flag", ["--wait", "-w"])
@pytest.mark.parametrize("raw", [True, False])
@pytest.mark.usefixtures("env_vars")
def test_reset_stop_words_wait(
wait_flag,
raw,
index_uid,
test_runner,
client,
):
args = ["index", "reset-stop-words", index_uid, wait_flag]
if raw:
args.append("--raw")
index = client.index(index_uid)
update = index.update_stop_words(["a", "the"])
index.wait_for_task(update["uid"])
assert index.get_stop_words() == ["a", "the"]
runner_result = test_runner.invoke(app, args, catch_exceptions=False)
out = runner_result.stdout
assert "[]" in out
def test_reset_stop_words_no_url_master_key(index_uid, test_runner):
runner_result = test_runner.invoke(app, ["index", "reset-stop-words", index_uid])
out = runner_result.stdout
assert "MEILI_HTTP_ADDR" in out
assert "MEILI_MASTER_KEY" in out
@pytest.mark.usefixtures("env_vars")
def test_reset_stop_words_index_not_found_error(test_runner, index_uid):
runner_result = test_runner.invoke(app, ["index", "reset-stop-words", index_uid, "-w"])
out = runner_result.stdout
assert "not found" in out
@pytest.mark.usefixtures("env_vars")
@patch.object(Index, "reset_stop_words")
def test_reset_stop_words_error(mock_get, test_runner, index_uid):
mock_get.side_effect = MeiliSearchApiError("bad", Response())
with pytest.raises(MeiliSearchApiError):
test_runner.invoke(app, ["index", "reset-stop-words", index_uid], catch_exceptions=False)
@pytest.mark.parametrize("use_env", [True, False])
def test_reset_synonyms_no_wait(
use_env,
index_uid,
base_url,
master_key,
test_runner,
client,
monkeypatch,
):
args = ["index", "reset-synonyms", index_uid]
if use_env:
monkeypatch.setenv("MEILI_HTTP_ADDR", base_url)
monkeypatch.setenv("MEILI_MASTER_KEY", master_key)
else:
args.append("--url")
args.append(base_url)
args.append("--master-key")
args.append(master_key)
index = client.index(index_uid)
update = index.update_synonyms({"logan": ["marval", "wolverine"]})
index.wait_for_task(update["uid"])
assert index.get_synonyms() == {"logan": ["marval", "wolverine"]}
runner_result = test_runner.invoke(app, args, catch_exceptions=False)
out = runner_result.stdout
assert "uid" in out
@pytest.mark.parametrize("wait_flag", ["--wait", "-w"])
@pytest.mark.parametrize("raw", [True, False])
@pytest.mark.usefixtures("env_vars")
def test_reset_synonyms_wait(
wait_flag,
raw,
index_uid,
test_runner,
client,
):
args = ["index", "reset-synonyms", index_uid, wait_flag]
if raw:
args.append("--raw")
index = client.index(index_uid)
    update = index.update_synonyms({"logan": ["marvel", "wolverine"]})
    index.wait_for_task(update["uid"])
    assert index.get_synonyms() == {"logan": ["marvel", "wolverine"]}
runner_result = test_runner.invoke(app, args, catch_exceptions=False)
out = runner_result.stdout
assert "{}" in out
def test_reset_synonyms_no_url_master_key(index_uid, test_runner):
runner_result = test_runner.invoke(app, ["index", "reset-synonyms", index_uid])
out = runner_result.stdout
assert "MEILI_HTTP_ADDR" in out
assert "MEILI_MASTER_KEY" in out
@pytest.mark.usefixtures("env_vars")
def test_reset_synonyms_index_not_found_error(test_runner, index_uid):
runner_result = test_runner.invoke(app, ["index", "reset-synonyms", index_uid, "-w"])
out = runner_result.stdout
assert "not found" in out
@pytest.mark.usefixtures("env_vars")
@patch.object(Index, "reset_synonyms")
def test_reset_synonyms_error(mock_get, test_runner, index_uid):
mock_get.side_effect = MeiliSearchApiError("bad", Response())
with pytest.raises(MeiliSearchApiError):
test_runner.invoke(app, ["index", "reset-synonyms", index_uid], catch_exceptions=False)
@pytest.mark.parametrize("use_env", [True, False])
def test_update_displayed_attributes_no_wait(
use_env,
index_uid,
base_url,
master_key,
test_runner,
client,
monkeypatch,
):
args = ["index", "update-displayed-attributes", index_uid, "genre", "title"]
if use_env:
monkeypatch.setenv("MEILI_HTTP_ADDR", base_url)
monkeypatch.setenv("MEILI_MASTER_KEY", master_key)
else:
args.append("--url")
args.append(base_url)
args.append("--master-key")
args.append(master_key)
response = client.create_index(index_uid)
client.wait_for_task(response["uid"])
index = client.get_index(index_uid)
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
client.index(index_uid).wait_for_task(get_update_id_from_output(out))
assert index.get_displayed_attributes() == ["genre", "title"]
assert "uid" in out
@pytest.mark.parametrize("wait_flag", ["--wait", "-w"])
@pytest.mark.parametrize("raw", [True, False])
@pytest.mark.usefixtures("env_vars")
def test_update_displayed_attributes_wait(
wait_flag,
raw,
index_uid,
test_runner,
client,
):
args = ["index", "update-displayed-attributes", index_uid, "genre", "title", wait_flag]
if raw:
args.append("--raw")
response = client.create_index(index_uid)
client.wait_for_task(response["uid"])
index = client.get_index(index_uid)
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
assert index.get_displayed_attributes() == ["genre", "title"]
for e in ["genre", "title"]:
assert e in out
def test_update_displayed_attributes_no_url_master_key(index_uid, test_runner):
runner_result = test_runner.invoke(
app, ["index", "update-displayed-attributes", index_uid, "title"]
)
out = runner_result.stdout
assert "MEILI_HTTP_ADDR" in out
assert "MEILI_MASTER_KEY" in out
@pytest.mark.parametrize(
"wait_flag, expected",
[
(None, "uid"),
("--wait", "title"),
("-w", "title"),
],
)
@pytest.mark.parametrize("use_env", [True, False])
@pytest.mark.parametrize("raw", [True, False])
def test_update_distinct_attribute(
use_env,
raw,
wait_flag,
expected,
index_uid,
base_url,
master_key,
test_runner,
client,
monkeypatch,
):
args = ["index", "update-distinct-attribute", index_uid, "title"]
if wait_flag:
args.append(wait_flag)
if use_env:
monkeypatch.setenv("MEILI_HTTP_ADDR", base_url)
monkeypatch.setenv("MEILI_MASTER_KEY", master_key)
else:
args.append("--url")
args.append(base_url)
args.append("--master-key")
args.append(master_key)
if raw:
args.append("--raw")
response = client.create_index(index_uid)
client.wait_for_task(response["uid"])
index = client.get_index(index_uid)
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
if not wait_flag:
client.index(index_uid).wait_for_task(get_update_id_from_output(out))
assert index.get_distinct_attribute() == "title"
assert expected in out
def test_update_distinct_attribute_no_url_master_key(index_uid, test_runner):
runner_result = test_runner.invoke(
app, ["index", "update-distinct-attribute", index_uid, "title"]
)
out = runner_result.stdout
assert "MEILI_HTTP_ADDR" in out
assert "MEILI_MASTER_KEY" in out
@pytest.mark.parametrize("use_env", [True, False])
@pytest.mark.parametrize("raw", [True, False])
def test_update_index(
use_env,
raw,
index_uid,
base_url,
master_key,
test_runner,
client,
monkeypatch,
):
primary_key = "title"
args = ["index", "update", index_uid, primary_key]
if use_env:
monkeypatch.setenv("MEILI_HTTP_ADDR", base_url)
monkeypatch.setenv("MEILI_MASTER_KEY", master_key)
else:
args.append("--url")
args.append(base_url)
args.append("--master-key")
args.append(master_key)
if raw:
args.append("--raw")
response = client.create_index(index_uid)
client.wait_for_task(response["uid"])
index = client.get_index(index_uid)
assert index.primary_key is None
runner_result = test_runner.invoke(app, args, catch_exceptions=False)
out = runner_result.stdout
assert index.get_primary_key() == primary_key
assert "primary_key" in out
assert primary_key in out
def test_update_index_no_url_master_key(index_uid, test_runner):
runner_result = test_runner.invoke(app, ["index", "update", index_uid, "test"])
out = runner_result.stdout
assert "MEILI_HTTP_ADDR" in out
assert "MEILI_MASTER_KEY" in out
@pytest.mark.usefixtures("env_vars")
def test_update_index_not_found_error(test_runner, index_uid):
runner_result = test_runner.invoke(app, ["index", "update", index_uid, "test"])
out = runner_result.stdout
assert "not found" in out
@pytest.mark.usefixtures("env_vars")
@patch.object(Index, "update")
def test_update_index_error(mock_get, test_runner, index_uid):
mock_get.side_effect = MeiliSearchApiError("bad", Response())
with pytest.raises(MeiliSearchApiError):
test_runner.invoke(app, ["index", "update", index_uid, "test"], catch_exceptions=False)
@pytest.mark.usefixtures("env_vars")
def test_update_index_primary_key_exists(
index_uid,
test_runner,
client,
small_movies,
):
primary_key = "title"
args = ["index", "update", index_uid, primary_key]
response = client.create_index(index_uid, {"primaryKey": "id"})
client.wait_for_task(response["uid"])
index = client.get_index(index_uid)
update = index.add_documents(small_movies)
index.wait_for_task(update["uid"])
assert index.primary_key == "id"
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
assert "error" in out
@pytest.mark.parametrize("use_env", [True, False])
def test_update_ranking_rules_no_wait(
use_env,
index_uid,
base_url,
master_key,
test_runner,
client,
monkeypatch,
):
args = ["index", "update-ranking-rules", index_uid, "sort", "words"]
if use_env:
monkeypatch.setenv("MEILI_HTTP_ADDR", base_url)
monkeypatch.setenv("MEILI_MASTER_KEY", master_key)
else:
args.append("--url")
args.append(base_url)
args.append("--master-key")
args.append(master_key)
response = client.create_index(index_uid)
client.wait_for_task(response["uid"])
index = client.get_index(index_uid)
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
update_id = get_update_id_from_output(out)
index.wait_for_task(update_id)
assert "uid" in out
@pytest.mark.parametrize("wait_flag", ["--wait", "-w"])
@pytest.mark.parametrize("raw", [True, False])
@pytest.mark.usefixtures("env_vars")
def test_update_ranking_rules(
raw,
wait_flag,
index_uid,
test_runner,
client,
):
args = ["index", "update-ranking-rules", index_uid, "sort", "words", wait_flag]
if raw:
args.append("--raw")
response = client.create_index(index_uid)
client.wait_for_task(response["uid"])
index = client.get_index(index_uid)
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
assert index.get_ranking_rules() == ["sort", "words"]
for e in ["sort", "words"]:
assert e in out
def test_update_ranking_rules_no_url_master_key(index_uid, test_runner):
runner_result = test_runner.invoke(app, ["index", "update-ranking-rules", index_uid, "sort"])
out = runner_result.stdout
assert "MEILI_HTTP_ADDR" in out
assert "MEILI_MASTER_KEY" in out
@pytest.mark.parametrize("use_env", [True, False])
def test_update_searchable_attributes_no_wait(
use_env,
index_uid,
base_url,
master_key,
test_runner,
client,
monkeypatch,
):
args = ["index", "update-searchable-attributes", index_uid, "genre", "title"]
if use_env:
monkeypatch.setenv("MEILI_HTTP_ADDR", base_url)
monkeypatch.setenv("MEILI_MASTER_KEY", master_key)
else:
args.append("--url")
args.append(base_url)
args.append("--master-key")
args.append(master_key)
response = client.create_index(index_uid)
client.wait_for_task(response["uid"])
index = client.get_index(index_uid)
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
update_id = get_update_id_from_output(out)
index.wait_for_task(update_id)
assert index.get_searchable_attributes() == ["genre", "title"]
assert "uid" in out
@pytest.mark.parametrize("wait_flag", ["--wait", "-w"])
@pytest.mark.parametrize("raw", [True, False])
@pytest.mark.usefixtures("env_vars")
def test_update_searchable_attributes_wait(
raw,
wait_flag,
index_uid,
test_runner,
client,
):
args = ["index", "update-searchable-attributes", index_uid, "genre", "title", wait_flag]
if raw:
args.append("--raw")
response = client.create_index(index_uid)
client.wait_for_task(response["uid"])
index = client.get_index(index_uid)
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
assert index.get_searchable_attributes() == ["genre", "title"]
for e in ["genre", "title"]:
assert e in out
def test_update_searchable_attributes_no_url_master_key(index_uid, test_runner):
runner_result = test_runner.invoke(
app, ["index", "update-searchable-attributes", index_uid, "title"]
)
out = runner_result.stdout
assert "MEILI_HTTP_ADDR" in out
assert "MEILI_MASTER_KEY" in out
@pytest.mark.parametrize("use_env", [True, False])
@pytest.mark.parametrize("raw", [True, False])
def test_update_settings_no_wait(
use_env,
raw,
index_uid,
base_url,
master_key,
test_runner,
client,
monkeypatch,
):
updated_settings = {
"displayedAttributes": ["genre", "title"],
"searchableAttributes": ["genre", "title"],
"filterableAttributes": ["genre", "title"],
"sortableAttributes": ["genre", "title"],
"rankingRules": ["sort", "words"],
"stopWords": ["a", "the"],
"synonyms": {"logan": ["marvel", "wolverine"]},
"distinctAttribute": "title",
}
args = [
"index",
"update-settings",
index_uid,
"--distinct-attribute",
updated_settings["distinctAttribute"],
"--synonyms",
'{"logan": ["marvel", "wolverine"]}',
]
for attribute in updated_settings["displayedAttributes"]:
args.append("--displayed-attributes")
args.append(attribute)
for attribute in updated_settings["filterableAttributes"]:
args.append("--filterable-attributes")
args.append(attribute)
for rule in updated_settings["rankingRules"]:
args.append("--ranking-rules")
args.append(rule)
for attribute in updated_settings["searchableAttributes"]:
args.append("--searchable-attributes")
args.append(attribute)
for attribute in updated_settings["sortableAttributes"]:
args.append("--sortable-attributes")
args.append(attribute)
for word in updated_settings["stopWords"]:
args.append("--stop-words")
args.append(word)
if use_env:
monkeypatch.setenv("MEILI_HTTP_ADDR", base_url)
monkeypatch.setenv("MEILI_MASTER_KEY", master_key)
else:
args.append("--url")
args.append(base_url)
args.append("--master-key")
args.append(master_key)
if raw:
args.append("--raw")
response = client.create_index(index_uid)
client.wait_for_task(response["uid"])
index = client.get_index(index_uid)
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
update_id = get_update_id_from_output(out)
index.wait_for_task(update_id)
assert index.get_settings() == updated_settings
assert "uid" in out
if raw:
assert "{" in out
assert "}" in out
@pytest.mark.parametrize("wait_flag", ["--wait", "-w"])
@pytest.mark.usefixtures("env_vars")
def test_update_settings(
wait_flag,
index_uid,
test_runner,
client,
):
updated_settings = {
"displayedAttributes": ["genre", "title"],
"searchableAttributes": ["genre", "title"],
"filterableAttributes": ["genre", "title"],
"sortableAttributes": ["genre", "title"],
"rankingRules": ["sort", "words"],
"stopWords": ["a", "the"],
"synonyms": {"logan": ["marvel", "wolverine"]},
"distinctAttribute": "title",
}
args = [
"index",
"update-settings",
index_uid,
"--distinct-attribute",
updated_settings["distinctAttribute"],
"--synonyms",
'{"logan": ["marvel", "wolverine"]}',
wait_flag,
]
for attribute in updated_settings["displayedAttributes"]:
args.append("--displayed-attributes")
args.append(attribute)
for attribute in updated_settings["filterableAttributes"]:
args.append("--filterable-attributes")
args.append(attribute)
for rule in updated_settings["rankingRules"]:
args.append("--ranking-rules")
args.append(rule)
for attribute in updated_settings["searchableAttributes"]:
args.append("--searchable-attributes")
args.append(attribute)
for attribute in updated_settings["sortableAttributes"]:
args.append("--sortable-attributes")
args.append(attribute)
for word in updated_settings["stopWords"]:
args.append("--stop-words")
args.append(word)
response = client.create_index(index_uid)
client.wait_for_task(response["uid"])
index = client.get_index(index_uid)
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
if not wait_flag:
update_id = get_update_id_from_output(out)
index.wait_for_task(update_id)
assert index.get_settings() == updated_settings
assert "displayedAttributes"
for value in updated_settings["displayedAttributes"]:
assert value in out
assert "searchableAttributes" in out
for value in updated_settings["searchableAttributes"]:
assert value in out
assert "filterableAttributes" in out
for value in updated_settings["filterableAttributes"]:
assert value in out
assert "sortableAttributes" in out
for value in updated_settings["sortableAttributes"]:
assert value in out
assert "rankingRules" in out
for value in updated_settings["rankingRules"]:
assert value in out
assert "stopWords" in out
for value in updated_settings["stopWords"]:
assert value in out
assert "synonyms" in out
for key in updated_settings["synonyms"]:
assert key in out
for value in updated_settings["synonyms"][key]: # type: ignore
assert value in out
assert "distinctAttribute" in out
assert updated_settings["distinctAttribute"] in out
def test_update_settings_no_url_master_key(index_uid, test_runner):
runner_result = test_runner.invoke(
app, ["index", "update-settings", index_uid, "--distinct-attribute", "title"]
)
out = runner_result.stdout
assert "MEILI_HTTP_ADDR" in out
assert "MEILI_MASTER_KEY" in out
@pytest.mark.usefixtures("env_vars")
def test_update_settings_json_error(
index_uid,
test_runner,
):
args = [
"index",
"update-settings",
index_uid,
"--synonyms",
"test",
]
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
assert "Unable to parse" in out
@pytest.mark.parametrize("use_env", [True, False])
def test_update_sortable_attributes_no_wait(
use_env,
index_uid,
base_url,
master_key,
test_runner,
client,
monkeypatch,
):
args = ["index", "update-sortable-attributes", index_uid, "genre", "title"]
if use_env:
monkeypatch.setenv("MEILI_HTTP_ADDR", base_url)
monkeypatch.setenv("MEILI_MASTER_KEY", master_key)
else:
args.append("--url")
args.append(base_url)
args.append("--master-key")
args.append(master_key)
response = client.create_index(index_uid)
client.wait_for_task(response["uid"])
index = client.get_index(index_uid)
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
update_id = get_update_id_from_output(out)
index.wait_for_task(update_id)
assert index.get_sortable_attributes() == ["genre", "title"]
assert "uid" in out
@pytest.mark.parametrize("wait_flag", ["--wait", "-w"])
@pytest.mark.parametrize("raw", [True, False])
@pytest.mark.usefixtures("env_vars")
def test_update_sortable_attributes_wait(
raw,
wait_flag,
index_uid,
test_runner,
client,
):
args = ["index", "update-sortable-attributes", index_uid, "genre", "title", wait_flag]
if raw:
args.append("--raw")
response = client.create_index(index_uid)
client.wait_for_task(response["uid"])
index = client.get_index(index_uid)
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
assert index.get_sortable_attributes() == ["genre", "title"]
for e in ["genre", "title"]:
assert e in out
def test_update_sortable_attributes_no_url_master_key(index_uid, test_runner):
runner_result = test_runner.invoke(
app, ["index", "update-sortable-attributes", index_uid, "title"]
)
out = runner_result.stdout
assert "MEILI_HTTP_ADDR" in out
assert "MEILI_MASTER_KEY" in out
@pytest.mark.parametrize("use_env", [True, False])
def test_update_stop_words_no_wait(
use_env,
index_uid,
base_url,
master_key,
test_runner,
client,
monkeypatch,
):
args = ["index", "update-stop-words", index_uid, "a", "the"]
if use_env:
monkeypatch.setenv("MEILI_HTTP_ADDR", base_url)
monkeypatch.setenv("MEILI_MASTER_KEY", master_key)
else:
args.append("--url")
args.append(base_url)
args.append("--master-key")
args.append(master_key)
response = client.create_index(index_uid)
client.wait_for_task(response["uid"])
index = client.get_index(index_uid)
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
update_id = get_update_id_from_output(out)
index.wait_for_task(update_id)
assert index.get_stop_words() == ["a", "the"]
assert "uid" in out
@pytest.mark.parametrize("wait_flag", ["--wait", "-w"])
@pytest.mark.parametrize("raw", [True, False])
@pytest.mark.usefixtures("env_vars")
def test_update_stop_words_wait(
raw,
wait_flag,
index_uid,
test_runner,
client,
):
args = ["index", "update-stop-words", index_uid, "a", "the", wait_flag]
if raw:
args.append("--raw")
response = client.create_index(index_uid)
client.wait_for_task(response["uid"])
index = client.get_index(index_uid)
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
assert index.get_stop_words() == ["a", "the"]
for e in ["a", "the"]:
assert e in out
def test_update_stop_words_no_url_master_key(index_uid, test_runner):
runner_result = test_runner.invoke(app, ["index", "update-stop-words", index_uid, "the"])
out = runner_result.stdout
assert "MEILI_HTTP_ADDR" in out
assert "MEILI_MASTER_KEY" in out
@pytest.mark.parametrize("use_env", [True, False])
def test_update_synonyms_no_wait(
use_env,
index_uid,
base_url,
master_key,
test_runner,
client,
monkeypatch,
):
args = ["index", "update-synonyms", index_uid, '{"logan": ["marvel", "wolverine"]}']
if use_env:
monkeypatch.setenv("MEILI_HTTP_ADDR", base_url)
monkeypatch.setenv("MEILI_MASTER_KEY", master_key)
else:
args.append("--url")
args.append(base_url)
args.append("--master-key")
args.append(master_key)
response = client.create_index(index_uid)
client.wait_for_task(response["uid"])
index = client.get_index(index_uid)
runner_result = test_runner.invoke(app, args, catch_exceptions=False)
out = runner_result.stdout
update_id = get_update_id_from_output(out)
index.wait_for_task(update_id)
assert index.get_synonyms() == {"logan": ["marvel", "wolverine"]}
assert "uid" in out
@pytest.mark.parametrize("wait_flag", ["--wait", "-w"])
@pytest.mark.parametrize("raw", [True, False])
@pytest.mark.usefixtures("env_vars")
def test_update_synonyms_wait(
raw,
wait_flag,
index_uid,
test_runner,
client,
):
args = ["index", "update-synonyms", index_uid, '{"logan": ["marvel", "wolverine"]}', wait_flag]
if raw:
args.append("--raw")
response = client.create_index(index_uid)
client.wait_for_task(response["uid"])
index = client.get_index(index_uid)
runner_result = test_runner.invoke(app, args, catch_exceptions=False)
out = runner_result.stdout
assert index.get_synonyms() == {"logan": ["marvel", "wolverine"]}
assert "logan" in out
def test_update_synonyms_no_url_master_key(index_uid, test_runner):
runner_result = test_runner.invoke(
app, ["index", "update-synonyms", index_uid, '{"logan": ["marvel", "wolverine"]}']
)
out = runner_result.stdout
assert "MEILI_HTTP_ADDR" in out
assert "MEILI_MASTER_KEY" in out
@pytest.mark.usefixtures("env_vars")
def test_update_synonyms_json_error(
index_uid,
test_runner,
):
args = [
"index",
"update-synonyms",
index_uid,
"test",
]
runner_result = test_runner.invoke(app, args)
out = runner_result.stdout
assert "Unable to parse" in out
(quality-signal columns for the preceding file omitted)

----- record: awp5/api/archiveselection.py -----
repo: ThomasWaldinger/py_awp5 @ 10077ab81eab506bea58a67242c2d550988ec18c
blob: 39236c92e70fa69b82ba7bccdca25eb370890a2d | Python (.py) | 40,034 bytes
license: Apache-2.0 | stars: 2 (2019-04-10 to 2020-08-18)
# -------------------------------------------------------------------------
# Copyright (c) Thomas Waldinger. All rights reserved.
# Licensed under the Apache License, Version 2.0. See
# License.txt in the project root for license
# information.
# -------------------------------------------------------------------------
"""
ArchiveSelection
The archive selection is used to prepare one or more files and/or directories
for the archive operation. You must create a new archive selection resource
for each archive session. You can use the resource methods to populate the
selection (i.e. add files) and then submit the entire selection for immediate
or scheduled execution. The archive selection is a temporary resource. It does
not survive system crashes or server shutdowns, nor does it need to be
explicitly destroyed by the caller. It goes out of scope when the "submit"
method is invoked, which effectively passes control to the Job manager. The
owner of the archive selection resource is thus the P5 system, so the caller
need not (nor should it) perform any other task with the same resource.
Usage:
To use the ArchiveSelection resource, use the create method to create a new
instance. After creation, use the addentry and/or adddirectory methods to
fill in the selection with files and/or directories to archive. Finally,
submit the selection for immediate or scheduled execution. After submission,
the resource goes out of scope and should not be used any more.
"""
from awp5.base.connection import P5Resource, exec_nsdchat
from awp5.base.helpers import resourcelist, onereturnvalue
from awp5.api.archiveentry import ArchiveEntry
from awp5.api.archiveplan import ArchivePlan
from awp5.api.job import Job
module_name = "ArchiveSelection"
@onereturnvalue
def create(client, plan, indexroot=None, as_object=False,
p5_connection=None):
"""
Syntax: ArchiveSelection create <client> <plan> [<indexroot>]
Description: Creates a new temporary archive selection resource. The
resource will be automatically deleted after the associated archive job has
been submitted.
    The <client> must be one of the registered client computers on the
current P5 server. You can get the list of client computers with the Client
names CLI command. All files added with the addentry method (below) must
reside on this client.
The <plan> must be one of the registered archive plans. You can get the
list of archive plans with the ArchivePlan names CLI command.
The optional <indexroot> argument, if given, will force all files in the
archive selection to be indexed under the <indexroot> path.
Return Values:
-On Success: the name of the new resource. Use this name to
address this resource in all other methods.
"""
method_name = "create"
result = exec_nsdchat([module_name, method_name, client, plan, indexroot],
p5_connection)
if not as_object:
return result
else:
return resourcelist(result, ArchiveSelection, p5_connection)
@onereturnvalue
def addfrom(archiveselection_name, inputfile, outputfile, p5_connection=None):
"""
Syntax: ArchiveSelection <name> addfrom <input file> <output file>
Description: Loads the Archive Selection entries from the external file
<input file>. The file must be formatted with one entry per line, each
entry in the format of:
<path>TAB<key1>TAB<value1>TAB<key2>TAB<value2>...
The <path> needs to be resolvable on the client for which the selection is
created and the <input file> needs to reside on that client.
The <path> may be followed by zero or more key/value pairs representing
metadata that will be assigned to the file. All keys must be known in the
index referenced by the archive selection. Unknown keys will be silently
skipped.
    The <output file> is created by this command; it contains all accepted
files with their ArchiveEntry handles used to reference the files later.
The file format is one file per line in the format of:
<path>TAB<handle>
Note that unlike ArchiveSelection addentry, this method will add folders as
empty nodes. This means:
    - folders are added without content; metadata in that case is assigned
      only to the folder
    - If files are added into a non-existing folder in the archive, the
      folder is created without attributes or metadata.
Return Values:
-On Success: the number of added key/value pairs
"""
method_name = "addfrom"
return exec_nsdchat([module_name, archiveselection_name, method_name,
inputfile, outputfile], p5_connection)
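# A hedged illustration of the addfrom exchange described above; the paths,
# keys, and values are hypothetical, and TAB stands for a literal tab
# character ("\t"):
#
#   input file:   /data/clip001.mov TAB author TAB alice TAB status TAB raw
#   output file:  /data/clip001.mov TAB <ArchiveEntry handle>
#
# Keys unknown to the index referenced by the archive selection would be
# silently skipped, as noted in the docstring.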
@onereturnvalue
def addentry(archiveselection_name, path, key_value_list=None, as_object=False,
p5_connection=None):
"""
Syntax: ArchiveSelection <name> addentry <path>
[<key> <value> [<key> <value>].. ]
Description: Adds a single new <path> to the archive selection <name>. It
expects the absolute path to the file or directory to be archived. The file
or directory must be located on the client <client> given at the resource
creation time (see the create method).
The path will be stripped of the leading directory part and the name will
be inserted into the index at the indexroot destination as defined in
create. If the passed <path> contains blanks, be sure to enclose it in
curly braces: {/some/path with blanks/file}. Furthermore, if the <path>
contains { and/or } chars themselves, you must escape them with a backslash
    '\\' character.
To each path, you can assign an arbitrary number of <key> and <value>
pairs. Those are saved in the archive index and can be used for searches
during restore (see RestoreSelection).
Each key allows a string value of unlimited length. If the value contains
blanks, it should be enclosed in curly braces. If the value itself contains
    curly braces, you must escape them with '\\' character.
    In case the ArchiveSelection is set to incremental level and the given
    entry is already part of the Archive, the entry is not added and an empty
    string is returned.
Return Values:
-On Success: the name of the new ArchiveEntry resource.
This name must be used with ArchiveEntry methods
to get the status and other meta-information for the
entry after the archive operation has been completed.
Please see the ArchiveEntry resource description
"""
method_name = "addentry"
result = exec_nsdchat([module_name, archiveselection_name, method_name,
path, key_value_list], p5_connection)
if not as_object:
return result
else:
return resourcelist(result, ArchiveEntry, p5_connection)
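# A minimal, hedged sketch of an addentry call following the quoting rules
# above; the selection name and path are hypothetical placeholders, and the
# key/value metadata is omitted because its exact wire format is defined by
# exec_nsdchat rather than documented here:
#
#   entry_name = addentry("10001", "{/mnt/media/file with blanks.mov}")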
@onereturnvalue
def addentryabs(archiveselection_name, path, key_value_list=None,
as_object=False, p5_connection=None):
"""
Syntax: ArchiveSelection <name> addentryabs <path>
[<key> <value> [<key> <value>].. ]
Description: Adds one new <path> to the archive selection <name>. It
expects the absolute path to the file or directory to be archived. The file
or directory must be located on the client <client> given at the resource
creation time (see the create method).
The entry path will be added 1:1 into the index. Any prefixes and
alternative index destinations are ignored. If the passed <path> contains
blanks, be sure to enclose it in curly braces:
{/some/path with blanks/file}. Furthermore, if the <path> contains
    { and/or } chars themselves, you must escape them with a backslash '\\'
character.
To each path, you can assign an arbitrary number of <key> and <value>
pairs. Those are saved in the archive index and can be used for searches
during restore (see RestoreSelection).
Each key allows a string value of unlimited length. If the value contains
blanks, it should be enclosed in curly braces. If the value itself contains
    curly braces, you must escape them with '\\' character.
Return Values:
-On Success: the name of the new ArchiveEntry resource.
This name must be used with ArchiveEntry methods
to get the status and other meta-information of the
entry after the archive operation has been completed.
Please see the ArchiveEntry resource description
"""
method_name = "addentryabs"
result = exec_nsdchat([module_name, archiveselection_name, method_name,
path, key_value_list], p5_connection)
if not as_object:
return result
else:
return resourcelist(result, ArchiveEntry, p5_connection)
@onereturnvalue
def adddirectory(archiveselection_name, path, key_value_list=None,
as_object=False, p5_connection=None):
"""
Syntax: ArchiveSelection <name> adddirectory <path>
[<key> <value> [<key> <value>].. ]
Description: Adds a new directory <path> to the archive selection <name>.
It expects the absolute path to the directory to be archived. The directory
must be located on the client <client> given at the resource creation time
(see the create method).
The path will be stripped of the leading directory part and the name will
be inserted into the index at the indexroot destination as defined in
create.
Note that this method will only add the directory node to the archive
selection and that only a directory node itself will be archived. If you
want to archive both the directory and its contents recursively, use the
ArchiveSelection addentry method.
See the addentry method description for explanation of other method
arguments.
Return Values:
-On Success: see the addentry description for return values
"""
method_name = "adddirectory"
result = exec_nsdchat([module_name, archiveselection_name, method_name,
path, key_value_list], p5_connection)
if not as_object:
return result
else:
return resourcelist(result, ArchiveEntry, p5_connection)
@onereturnvalue
def adddirectoryabs(archiveselection_name, path, key_value_list=None,
as_object=False, p5_connection=None):
"""
Syntax: ArchiveSelection <name> adddirectoryabs <path>
[<key> <value> [<key> <value>].. ]
Description: Adds a new directory <path> to the archive selection <name>.
It expects the absolute path to the directory to be archived. The directory
must be located on the client <client> given at the resource creation time
(see the create method).
The directory path will be added 1:1 into the index. Any prefixes and
alternative index destinations are ignored.
Note that this method will only add the directory node to the archive
selection and that only a directory node itself will be archived. If you
want to archive both the directory and its contents recursively, use the
ArchiveSelection addentry method.
See the addentry method description for explanation of other method
arguments.
Return Values:
-On Success: see the addentry method for return values
"""
method_name = "adddirectoryabs"
result = exec_nsdchat([module_name, archiveselection_name, method_name,
path, key_value_list], p5_connection)
if not as_object:
return result
else:
return resourcelist(result, ArchiveEntry, p5_connection)
@onereturnvalue
def addfile(archiveselection_name, path, key_value_list=None,
as_object=False, p5_connection=None):
"""
Syntax: ArchiveSelection <name> addfile <path>
[<key> <value> [<key> <value>].. ]
Description: Adds a new file <path> to the archive selection <name>. It
expects the absolute path to the file to be archived. The file must be
located on the client <client> given at the resource creation time (see the
create method).
The path will be stripped of the leading directory part and the name will
be inserted into the index at the indexroot destination as defined in
create.
See the addentry method description for explanation of other method
arguments.
Return Values:
-On Success: see the addentry method for return values
"""
method_name = "addfile"
result = exec_nsdchat([module_name, archiveselection_name, method_name,
path, key_value_list], p5_connection)
if not as_object:
return result
else:
return resourcelist(result, ArchiveEntry, p5_connection)
@onereturnvalue
def addfileabs(archiveselection_name, path, key_value_list=None,
as_object=False, p5_connection=None):
"""
Syntax: ArchiveSelection <name> addfileabs <path>
[<key> <value> [<key> <value>].. ]
Description: Adds a new file <path> to the archive selection <name>. It
expects the absolute path to the file to be archived. The file must be
located on the client <client> given at the resource creation time (see the
create method).
The directory path will be added 1:1 into the index. Any prefixes and
alternative index destinations are ignored.
See the addentry method description for explanation of other method
arguments.
Return Values:
-On Success: see the addentry method for return values
"""
method_name = "addfileabs"
result = exec_nsdchat([module_name, archiveselection_name, method_name,
path, key_value_list], p5_connection)
if not as_object:
return result
else:
return resourcelist(result, ArchiveEntry, p5_connection)
@onereturnvalue
def describe(archiveselection_name, title=None, p5_connection=None):
"""
Syntax: ArchiveSelection <name> describe [title]
Description: If a title is given, the title is set as the description in
the job monitor.
    The method returns the current description.
Return Values:
        -On Success: the description string as used in the job monitor
"""
method_name = "describe"
return exec_nsdchat([module_name, archiveselection_name, method_name,
title], p5_connection)
@onereturnvalue
def destroy(archiveselection_name, p5_connection=None):
"""
Syntax: ArchiveSelection <name> destroy
Description: Explicitly destroys the archive selection. The <name> should
not be used in any ArchiveSelection commands afterwards.
Return Values:
-On Success: the string "0" (destroyed)
the string "1" (not destroyed)
"""
method_name = "destroy"
return exec_nsdchat([module_name, archiveselection_name, method_name],
p5_connection)
@onereturnvalue
def entries(archiveselection_name, p5_connection=None):
"""
    Syntax: ArchiveSelection <name> entries
Description: Returns the number of entries in the selection object.
Return Values:
-On Success: the number of entries
"""
method_name = "size"
return exec_nsdchat([module_name, archiveselection_name, method_name],
p5_connection)
@onereturnvalue
def level(archiveselection_name, level_value=None, p5_connection=None):
"""
    Syntax: ArchiveSelection <name> level [<level>]
Description: Returns the level of the ArchiveSelection.
If the optional level value is given, that level is set.
The level must be either “full” or “increment”.
Return Values:
-On Success: the string “full” or “increment”
"""
method_name = "level"
return exec_nsdchat([module_name, archiveselection_name, method_name,
level_value], p5_connection)
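# A hedged example of the getter/setter duality described above; the
# selection name "10001" is a hypothetical placeholder:
#
#   level("10001")               # returns the current level: "full" or "increment"
#   level("10001", "increment")  # sets the level and returns "increment"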
@onereturnvalue
def size(archiveselection_name, p5_connection=None):
"""
Syntax: ArchiveSelection <name> size
Description: Returns the number of entries in the selection object.
This method is deprecated, please use ArchiveSelection entries instead.
Return Values:
-On Success: the number of entries
"""
method_name = "size"
return exec_nsdchat([module_name, archiveselection_name, method_name],
p5_connection)
@onereturnvalue
def submit(archiveselection_name, now=True, as_object=False,
p5_connection=None):
"""
Syntax: ArchiveSelection <name> submit [<now>]
Description: Submits the archive selection for execution. You can
optionally override plan execution times by giving the <now> as one of the
strings "1", "t", "true", "True", "y", "yes", or "Yes".
This command implicitly destroys the ArchiveSelection object for the user
and transfers the ownership of the internal underlying object to the job
scheduler. You should not attempt to use the <name> afterwards.
Return Values:
-On Success: the archive job ID. Use this job ID to query the
status of the job by using Job resource.
Please see the Job resource description for details.
"""
method_name = "submit"
now_option = ""
if now is True:
now_option = "1"
result = exec_nsdchat([module_name, archiveselection_name, method_name,
now_option], p5_connection)
if not as_object:
return result
else:
return resourcelist(result, Job, p5_connection)
@onereturnvalue
def onjobactivation(archiveselection_name, p5_connection=None, command=None):
"""
    Syntax: ArchiveSelection <name> onjobactivation <command>
Description: Registers the <command> to be executed just before the job is
started by the submit method. The command itself can be any valid OS
command plus variable number of arguments.
The very first argument of the command (the program itself) can be
prepended with the name of the P5 client where the command is to be
executed on. If omitted, the command will be executed on the client which
the ArchiveSelection object is created for.
Examples:
ArchiveSelection 10002 onjobactivation "mickey:/var/scripts/myscript arg"
will execute /var/scripts/myscript on the client "mickey" regardless of the
client the ArchiveSelection is created for. The program will be passed one
argument: arg.
ArchiveSelection 10002 onjobactivation "/var/scripts/myscript"
will execute /var/scripts/myscript on the client the ArchiveSelection is
created for.
ArchiveSelection 10002 onjobactivation "localhost:/var/scripts/myscript"
will execute /var/scripts/myscript on the P5 server.
Return Values:
-On Success: the command string
"""
method_name = "onjobactivation"
return exec_nsdchat([module_name, archiveselection_name, method_name,
command], p5_connection)
@onereturnvalue
def onjobcompletion(archiveselection_name, p5_connection=None, command=None):
"""
Syntax: ArchiveSelection <name> onjobcompletion <command>
Description: Registers the <command> to be executed immediately after the
job created by the submit method is completed. See onjobactivation for
further information.
Return Values:
-On Success: the command string
"""
method_name = "onjobcompletion"
return exec_nsdchat([module_name, archiveselection_name, method_name,
command], p5_connection)
@onereturnvalue
def onfiledeletion(archiveselection_name, p5_connection=None, command=None):
"""
Syntax: ArchiveSelection <name> onfiledeletion <command>
Description: Registers the <command> to be executed immediately after the
files are deleted through a job created by the submit method. See
onjobactivation for further information.
Return Values:
-On Success: the command string
"""
method_name = "onfiledeletion"
return exec_nsdchat([module_name, archiveselection_name, method_name,
command], p5_connection)
class ArchiveSelection(P5Resource):
def __init__(self, archiveselection_name, p5_connection=None):
super().__init__(archiveselection_name, p5_connection)
@onereturnvalue
def create(client, plan, indexroot=None, as_object=True, p5_connection=None):
"""
Syntax: ArchiveSelection create <client> <plan> [<indexroot>]
Description: Creates a new temporary archive selection resource. The
resource will be automatically deleted after the associated archive job
has been submitted.
        The <client> must be one of the registered client computers on the
current P5 server. You can get the list of client computers with the
Client names CLI command. All files added with the addentry method
(below) must reside on this client.
The <plan> must be one of the registered archive plans. You can get the
list of archive plans with the ArchivePlan names CLI command.
The optional <indexroot> argument, if given, will force all files in
the archive selection to be indexed under the <indexroot> path.
Return Values:
-On Success: the name of the new resource. Use this name to
address this resource in all other methods.
"""
method_name = "create"
result = exec_nsdchat([module_name, method_name, client, plan,
indexroot], p5_connection)
if not as_object:
return result
else:
return resourcelist(result, ArchiveSelection, p5_connection)
@onereturnvalue
def addfrom(self, inputfile, outputfile):
"""
Syntax: ArchiveSelection <name> addfrom <input file> <output file>
Description: Loads the Archive Selection entries from the external file
<input file>. The file must be formatted with one entry per line, each
entry in the format of:
<path>TAB<key1>TAB<value1>TAB<key2>TAB<value2>...
The <path> needs to be resolvable on the client for which the selection
is created and the <input file> needs to reside on that client.
The <path> may be followed by zero or more key/value pairs representing
metadata that will be assigned to the file. All keys must be known in
the index referenced by the archive selection. Unknown keys will be
silently skipped.
        The <output file> is created by this command; it contains all accepted
files with their ArchiveEntry handles used to reference the files
later. The file format is one file per line in the format of:
<path>TAB<handle>
Note that unlike ArchiveSelection addentry, this method will add
folders as empty nodes. This means:
        - folders are added without content; metadata in that case is
          assigned only to the folder
        - If files are added into a non-existing folder in the archive, the
          folder is created without attributes or metadata.
Return Values:
-On Success: the number of added key/value pairs
"""
method_name = "addfrom"
return self.p5_connection.nsdchat_call([module_name, self.name,
method_name, inputfile,
outputfile])
@onereturnvalue
def addentry(self, path, key_value_list=None, as_object=True):
"""
Syntax: ArchiveSelection <name> addentry <path>
[<key> <value> [<key> <value>].. ]
Description: Adds a single new <path> to the archive selection <name>.
It expects the absolute path to the file or directory to be archived.
The file or directory must be located on the client <client> given at
the resource creation time (see the create method).
The path will be stripped of the leading directory part and the name
will be inserted into the index at the indexroot destination as defined
in create.
If the passed <path> contains blanks, be sure to enclose it in curly
braces: {/some/path with blanks/file}. Furthermore, if the <path>
contains { and/or } chars themselves, you must escape them with a
        backslash '\\' character.
To each path, you can assign an arbitrary number of <key> and <value>
pairs. Those are saved in the archive index and can be used for
searches during restore (see RestoreSelection).
Each key allows a string value of unlimited length. If the value
contains blanks, it should be enclosed in curly braces. If the value
        itself contains curly braces, you must escape them with '\\' character.
In case the ArchiveSelection is set to incremental level and the given
entry is already part of the Archive, the entry is not added and an
empty string is returned.
Return Values:
-On Success: the name of the new ArchiveEntry resource.
This name must be used with ArchiveEntry methods
to get the status and other meta-information for the
entry after the archive operation has been completed.
Please see the ArchiveEntry resource description
"""
method_name = "addentry"
result = self.p5_connection.nsdchat_call([module_name, self.name,
method_name, path,
key_value_list])
if not as_object:
return result
else:
return resourcelist(result, ArchiveEntry, self.p5_connection)
@onereturnvalue
def addentryabs(self, path, key_value_list=None, as_object=True):
"""
Syntax: ArchiveSelection <name> addentryabs <path>
[<key> <value> [<key> <value>].. ]
Description: Adds one new <path> to the archive selection <name>. It
expects the absolute path to the file or directory to be archived. The
file or directory must be located on the client <client> given at the
resource creation time (see the create method).
The entry path will be added 1:1 into the index. Any prefixes and
alternative index destinations are ignored.
If the passed <path> contains blanks, be sure to enclose it in curly
braces: {/some/path with blanks/file}. Furthermore, if the <path>
contains { and/or } chars themselves, you must escape them with a
        backslash '\\' character.
To each path, you can assign an arbitrary number of <key> and <value>
pairs. Those are saved in the archive index and can be used for
searches during restore (see RestoreSelection).
Each key allows a string value of unlimited length. If the value
contains blanks, it should be enclosed in curly braces. If the value
        itself contains curly braces, you must escape them with '\\' character.
Return Values:
-On Success: the name of the new ArchiveEntry resource.
This name must be used with ArchiveEntry methods
to get the status and other meta-information of the
entry after the archive operation has been completed.
Please see the ArchiveEntry resource description
"""
method_name = "addentryabs"
result = self.p5_connection.nsdchat_call([module_name, self.name,
method_name, path,
key_value_list])
if not as_object:
return result
else:
return resourcelist(result, ArchiveEntry, self.p5_connection)
@onereturnvalue
def adddirectory(self, path, key_value_list=None, as_object=True):
"""
Syntax: ArchiveSelection <name> adddirectory <path>
[<key> <value> [<key> <value>].. ]
Description: Adds a new directory <path> to the archive selection
<name>. It expects the absolute path to the directory to be archived.
The directory must be located on the client <client> given at the
resource creation time (see the create method).
The path will be stripped of the leading directory part and the name
will be inserted into the index at the indexroot destination as defined
in create.
Note that this method will only add the directory node to the archive
selection and that only a directory node itself will be archived. If
you want to archive both the directory and its contents recursively,
use the ArchiveSelection addentry method.
See the addentry method description for explanation of other method
arguments.
Return Values:
-On Success: see the addentry description for return values
"""
method_name = "adddirectory"
result = self.p5_connection.nsdchat_call([module_name, self.name,
method_name, path,
key_value_list])
if not as_object:
return result
else:
return resourcelist(result, ArchiveEntry, self.p5_connection)
@onereturnvalue
def adddirectoryabs(self, path, key_value_list=None, as_object=True):
"""
Syntax: ArchiveSelection <name> adddirectoryabs <path>
[<key> <value> [<key> <value>].. ]
Description: Adds a new directory <path> to the archive selection
<name>. It expects the absolute path to the directory to be archived.
The directory must be located on the client <client> given at the
resource creation time (see the create method).
The directory path will be added 1:1 into the index. Any prefixes and
alternative index destinations are ignored.
Note that this method will only add the directory node to the archive
selection and that only a directory node itself will be archived. If
you want to archive both the directory and its contents recursively,
use the ArchiveSelection addentry method.
See the addentry method description for explanation of other method
arguments.
Return Values:
-On Success: see the addentry method for return values
"""
method_name = "adddirectoryabs"
result = self.p5_connection.nsdchat_call([module_name, self.name,
method_name, path,
key_value_list])
if not as_object:
return result
else:
return resourcelist(result, ArchiveEntry, self.p5_connection)
@onereturnvalue
def addfile(self, path, key_value_list=None, as_object=True):
"""
Syntax: ArchiveSelection <name> addfile <path>
[<key> <value> [<key> <value>].. ]
Description: Adds a new file <path> to the archive selection <name>. It
expects the absolute path to the file to be archived. The file must be
located on the client <client> given at the resource creation time (see
the create method).
The path will be stripped of the leading directory part and the name
will be inserted into the index at the indexroot destination as defined
in create.
See the addentry method description for explanation of other method
arguments.
Return Values:
-On Success: see the addentry method for return values
"""
method_name = "addfile"
result = self.p5_connection.nsdchat_call([module_name, self.name,
method_name, path,
key_value_list])
if not as_object:
return result
else:
return resourcelist(result, ArchiveEntry, self.p5_connection)
@onereturnvalue
def addfileabs(self, path, key_value_list=None, as_object=True):
"""
Syntax: ArchiveSelection <name> addfileabs <path>
[<key> <value> [<key> <value>].. ]
Description: Adds a new file <path> to the archive selection <name>. It
expects the absolute path to the file to be archived. The file must be
located on the client <client> given at the resource creation time
(see the create method).
The directory path will be added 1:1 into the index. Any prefixes and
alternative index destinations are ignored.
See the addentry method description for explanation of other method
arguments.
Return Values:
-On Success: see the addentry method for return values
"""
method_name = "addfileabs"
result = self.p5_connection.nsdchat_call([module_name, self.name,
method_name, path,
key_value_list])
if not as_object:
return result
else:
return resourcelist(result, ArchiveEntry, self.p5_connection)
@onereturnvalue
def describe(self, title=None):
"""
Syntax: ArchiveSelection <name> describe [title]
Description: If a title is given, the title is set as the description
        in the job monitor. The method returns the current description.
        Return Values:
            -On Success: the description string as used in the job monitor
"""
method_name = "describe"
return self.p5_connection.nsdchat_call([module_name, self.name,
method_name, title])
@onereturnvalue
def destroy(self):
"""
Syntax: ArchiveSelection <name> destroy
Description: Explicitly destroys the archive selection. The <name>
should not be used in any ArchiveSelection commands afterwards.
Return Values:
-On Success: the string "0" (destroyed)
the string "1" (not destroyed)
"""
method_name = "destroy"
return self.p5_connection.nsdchat_call([module_name, self.name,
method_name])
@onereturnvalue
def entries(self):
"""
Syntax: ArchiveSelection <name> entries
Description: Returns the number of entries in the selection object.
Return Values:
-On Success: the number of entries
"""
method_name = "entries"
return self.p5_connection.nsdchat_call([module_name, self.name,
method_name])
@onereturnvalue
def level(self, level_value=None):
"""
        Syntax: ArchiveSelection <name> level [<level>]
Description: Returns the level of the ArchiveSelection.
If the optional level value is given, that level is set.
The level must be either “full” or “increment”.
Return Values:
-On Success: the string “full” or “increment”
"""
method_name = "level"
return self.p5_connection.nsdchat_call([module_name, self.name,
method_name, level_value])
@onereturnvalue
def size(self):
"""
Syntax: ArchiveSelection <name> size
Description: Returns the number of entries in the selection object.
This method is deprecated, please use ArchiveSelection entries instead.
Return Values:
-On Success: the number of entries
"""
method_name = "size"
return self.p5_connection.nsdchat_call([module_name, self.name,
method_name])
@onereturnvalue
def submit(self, now=True, as_object=True):
"""
Syntax: ArchiveSelection <name> submit [<now>]
Description: Submits the archive selection for execution. You can
optionally override plan execution times by giving the <now> as one of
the strings "1", "t", "true", "True", "y", "yes", or "Yes".
This command implicitly destroys the ArchiveSelection object for the
user and transfers the ownership of the internal underlying object to
the job scheduler. You should not attempt to use the <name> afterwards.
Return Values:
-On Success: the archive job ID. Use this job ID to query the
status of the job by using Job resource.
Please see the Job resource description for details.
"""
method_name = "submit"
now_option = ""
if now is True:
now_option = "1"
result = self.p5_connection.nsdchat_call([module_name, self.name,
method_name, now_option])
if not as_object:
return result
else:
return resourcelist(result, Job, self.p5_connection)
@onereturnvalue
def onjobactivation(self, command=None):
"""
        Syntax: ArchiveSelection <name> onjobactivation <command>
Description: Registers the <command> to be executed just before the job
is started by the submit method. The command itself can be any valid OS
command plus variable number of arguments.
The very first argument of the command (the program itself) can be
prepended with the name of the P5 client where the command is to be
executed on.
If omitted, the command will be executed on the client which the
ArchiveSelection object is created for.
Examples:
ArchiveSelection 10002 onjobactivation
"mickey:/var/scripts/myscript arg"
will execute /var/scripts/myscript on the client "mickey" regardless of
the client the ArchiveSelection is created for. The program will be
passed one argument: arg.
ArchiveSelection 10002 onjobactivation "/var/scripts/myscript"
will execute /var/scripts/myscript on the client the ArchiveSelection
is created for.
ArchiveSelection 10002 onjobactivation
"localhost:/var/scripts/myscript"
will execute /var/scripts/myscript on the P5 server.
Return Values:
-On Success: the command string
"""
method_name = "onjobactivation"
return self.p5_connection.nsdchat_call([module_name, self.name,
method_name, command])
@onereturnvalue
def onjobcompletion(self, command=None):
"""
Syntax: ArchiveSelection <name> onjobcompletion <command>
Description: Registers the <command> to be executed immediately after
the job created by the submit method is completed. See onjobactivation
for further information.
Return Values:
-On Success: the command string
"""
method_name = "onjobcompletion"
return self.p5_connection.nsdchat_call([module_name, self.name,
method_name, command])
@onereturnvalue
def onfiledeletion(self, command=None):
"""
Syntax: ArchiveSelection <name> onfiledeletion <command>
Description: Registers the <command> to be executed immediately after
the files are deleted through a job created by the submit method. See
onjobactivation for further information.
Return Values:
-On Success: the command string
"""
method_name = "onjobcompletion"
return self.p5_connection.nsdchat_call([module_name, self.name,
method_name, command])
def __repr__(self):
return ": ".join([module_name, self.name])
(quality-signal columns for the preceding file omitted)

----- record: src/webapi/models.py -----
repo: kumagallium/labmine-api @ 074e3b9a8665ce9e176da46fdd9ad91dc0734682
blob: 1a51855d79e4bfe00d93ac71309327a37fc43997 | Python (.py) | 11,679 bytes
license: MIT
from django.db import models
from django.utils import timezone
from account.models import User
from django_mysql.models import JSONField, Model
class Post(models.Model):
author = models.ForeignKey(User, on_delete=models.CASCADE)
title = models.CharField(max_length=100)
content = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
    published_at = models.DateTimeField(blank=True, null=True)
def publish(self):
self.published_at = timezone.now()
self.save()
def __str__(self):
return self.title
class Project(models.Model):
project_name = models.CharField(max_length=255, unique=True)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.project_name
class Blueprint(models.Model):
flowdata = JSONField()
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Template(models.Model):
template_name = models.CharField(max_length=255, unique=True)
blueprint = models.ForeignKey(Blueprint, on_delete=models.PROTECT)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Experiment(models.Model):
title = models.CharField(max_length=255, unique=True)
blueprint = models.ForeignKey(Blueprint, on_delete=models.PROTECT)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.title
class Library(models.Model):
experiment = models.ForeignKey(Experiment, on_delete=models.CASCADE)
project = models.ForeignKey(Project, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Tag(models.Model):
tag_name = models.CharField(max_length=255, unique=True)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.tag_name
class Pin(models.Model):
experiment = models.ForeignKey(Experiment, on_delete=models.CASCADE)
tag = models.ForeignKey(Tag, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Type(models.Model):
type_name = models.CharField(max_length=255, unique=True)
concept = models.IntegerField(default=2)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.type_name
class Node(models.Model):
node_name = models.CharField(max_length=255)
typeid = models.ForeignKey(Type, on_delete=models.PROTECT)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
node_image = models.ImageField(upload_to='images/',default='images/node_default.png')
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.node_name
class Entity(models.Model):
    node = models.ForeignKey(Node, on_delete=models.PROTECT, null=True, blank=True)
boxid = models.CharField(max_length=255)
blueprint = models.ForeignKey(Blueprint, on_delete=models.PROTECT)
is_finished = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
    finished_at = models.DateTimeField(blank=True, null=True)
def finished(self):
self.finished_at = timezone.now()
self.save()
class Property(models.Model):
property_name = models.CharField(max_length=255, default="")
official = models.BooleanField(default=False)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.property_name
class Unit(models.Model):
symbol = models.CharField(max_length=255, default="")
base = models.BooleanField(default=False)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.symbol
class Quantity(models.Model):
unit = models.ForeignKey(Unit, on_delete=models.CASCADE)
property = models.ForeignKey(Property, on_delete=models.PROTECT)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Figure(models.Model):
node = models.ForeignKey(Node, on_delete=models.CASCADE)
figure_name = models.CharField(max_length=255)
    property_x = models.ForeignKey(Property, on_delete=models.PROTECT, related_name="property_x", blank=True, null=True)
    property_y = models.ForeignKey(Property, on_delete=models.PROTECT, related_name="property_y", blank=True, null=True)
    property_z = models.ForeignKey(Property, on_delete=models.PROTECT, related_name="property_z", blank=True, null=True)
    datatype = models.IntegerField(default=0)
    is_condition = models.BooleanField(default=False)  # scheduled for removal in a future release
    cluster = models.IntegerField(default=2)
    editor = models.ForeignKey(User, on_delete=models.PROTECT, blank=True, null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.figure_name
class Datum(models.Model):
entity = models.ForeignKey(Entity, on_delete=models.PROTECT)
    unit_x = models.ForeignKey(Unit, on_delete=models.PROTECT, related_name="unit_x", blank=True, null=True)
    unit_y = models.ForeignKey(Unit, on_delete=models.PROTECT, related_name="unit_y", blank=True, null=True)
    unit_z = models.ForeignKey(Unit, on_delete=models.PROTECT, related_name="unit_z", blank=True, null=True)
figure = models.ForeignKey(Figure, on_delete=models.PROTECT)
data = JSONField()
editor = models.ForeignKey(User, on_delete=models.PROTECT, related_name="editor")
is_deleted = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Metakey(models.Model):
key_name = models.CharField(max_length=255)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.key_name
class Product(models.Model):
product_name = models.CharField(max_length=255)
experiment = models.ForeignKey(Experiment, on_delete=models.PROTECT)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.product_name
class Definition(models.Model):
product = models.ForeignKey(Product, on_delete=models.PROTECT)
entity = models.ForeignKey(Entity, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Image(models.Model):
image_name = models.CharField(max_length=255)
image = models.ImageField(upload_to='images/')
cluster = models.IntegerField(default=2)
entity = models.ForeignKey(Entity, on_delete=models.PROTECT)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.image_name
class Video(models.Model):
video_name = models.CharField(max_length=255)
video_url = models.TextField()
cluster = models.IntegerField(default=2)
entity = models.ForeignKey(Entity, on_delete=models.PROTECT)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.video_name
class Item(models.Model):
item_name = models.CharField(max_length=255, unique=True)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.item_name
class Metadata(models.Model):
figure = models.ForeignKey(Node, on_delete=models.PROTECT)
item = models.ForeignKey(Item, on_delete=models.PROTECT)
values = JSONField()
editor = models.ForeignKey(User, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return str(self.values)  # the field is "values"; "self.value" raised AttributeError
class Detail(models.Model):
detail_name = models.CharField(max_length=255, unique=True)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
item = models.ForeignKey(Item, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.detail_name
class Description(models.Model):
values = JSONField()
is_condition = models.BooleanField(default=False)  # scheduled for future removal
cluster = models.IntegerField(default=2)
entity = models.ForeignKey(Entity, on_delete=models.PROTECT)
item = models.ForeignKey(Item, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Default(models.Model):
node = models.ForeignKey(Node, on_delete=models.PROTECT)
item = models.ForeignKey(Item, on_delete=models.CASCADE)
is_condition = models.BooleanField(default=False)  # scheduled for future removal
cluster = models.IntegerField(default=2)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Headline(models.Model):
headline_name = models.CharField(max_length=255, unique=True)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.headline_name
class Sentence(models.Model):
value = models.TextField()
headline = models.ForeignKey(Headline, on_delete=models.PROTECT)
entity = models.ForeignKey(Entity, on_delete=models.CASCADE)
cluster = models.IntegerField(default=2)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Explanation(models.Model):
value = models.TextField()
headline = models.ForeignKey(Headline, on_delete=models.PROTECT)
figure = models.ForeignKey(Figure, on_delete=models.PROTECT)
cluster = models.IntegerField(default=2)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
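# Usage sketch (illustrative only; assumes a configured Django project with these
# models migrated, and pre-existing node/entity/user objects -- all hypothetical):
#
#   figure = Figure.objects.create(node=node, figure_name="stress-strain", editor=user)
#   Datum.objects.create(
#       entity=entity,
#       figure=figure,
#       data={"x": [0.0, 0.1, 0.2], "y": [0.0, 21.5, 43.1]},  # raw series in the JSONField
#       editor=user,
#   )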
| 41.268551
| 124
| 0.749979
| 1,512
| 11,679
| 5.562169
| 0.07209
| 0.057075
| 0.149822
| 0.172414
| 0.841855
| 0.813912
| 0.76956
| 0.752794
| 0.709512
| 0.695957
| 0
| 0.006313
| 0.145475
| 11,679
| 282
| 125
| 41.414894
| 0.836373
| 0.002055
| 0
| 0.542735
| 0
| 0
| 0.00781
| 0.001974
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081197
| false
| 0
| 0.017094
| 0.07265
| 0.982906
| 0.017094
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 8
| 204250a0839a8e3c3f4b57793d91cd03d650cd6f
| 122
| py
| Python
| latte/monkey_patches/frappe/desk/form/save.py
| sunnyakaxd/latte
| de74065122a1f858bd75f8e1a36fca3b23981f4c
| ["MIT"] | null | null | null
| latte/monkey_patches/frappe/desk/form/save.py
| sunnyakaxd/latte
| de74065122a1f858bd75f8e1a36fca3b23981f4c
| ["MIT"] | null | null | null
| latte/monkey_patches/frappe/desk/form/save.py
| sunnyakaxd/latte
| de74065122a1f858bd75f8e1a36fca3b23981f4c
| ["MIT"] | null | null | null
# Monkey patch: rebind frappe's form-save handler to latte's override.
import frappe.desk.form.save
from latte.overrides.desk.form.save import savedocs
frappe.desk.form.save.savedocs = savedocs
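# The pattern generalizes to any module-level callable: import the module object
# itself, then rebind the attribute. A hypothetical sketch (names are illustrative):
#
#   import some_pkg.handlers
#   def patched_handler(*args, **kwargs):
#       ...  # altered behaviour, optionally delegating to the original
#   some_pkg.handlers.handle = patched_handler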
| 40.666667
| 51
| 0.836066
| 19
| 122
| 5.368421
| 0.473684
| 0.235294
| 0.352941
| 0.352941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065574
| 122
| 3
| 52
| 40.666667
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 8
| 6488830c95aaab8cf932b8f067e7cffe7f992586
| 187
| py
| Python
| tests/optical_indexes_tests/optical_indexes_test.py
| PyDEF2/PyDEF-2.0
| 71afd074c2a133e92fa55af214bda7d5250bc919
| ["MIT"] | 13
| 2018-11-01T10:52:14.000Z
| 2022-03-13T06:16:58.000Z
| tests/optical_indexes_tests/optical_indexes_test.py
| PyDEF2/PyDEF-2.0
| 71afd074c2a133e92fa55af214bda7d5250bc919
| ["MIT"] | null | null | null
| tests/optical_indexes_tests/optical_indexes_test.py
| PyDEF2/PyDEF-2.0
| 71afd074c2a133e92fa55af214bda7d5250bc919
| ["MIT"] | 13
| 2018-11-07T07:32:31.000Z
| 2021-03-04T04:26:16.000Z
# Smoke test: parse optical index data from two OUTCAR output files and plot the first.
import pydef_core.optical_indices as oi
a = oi.OpticalIndices('./tests/test_files/Optical_indexes/OUTCAR')
b = oi.OpticalIndices('./tests/test_files/Optical_indexes/OUTCAR-1')
a.plot()
| 26.714286
| 68
| 0.786096
| 28
| 187
| 5.035714
| 0.607143
| 0.22695
| 0.297872
| 0.35461
| 0.70922
| 0.70922
| 0.70922
| 0.70922
| 0
| 0
| 0
| 0.005747
| 0.069519
| 187
| 6
| 69
| 31.166667
| 0.804598
| 0
| 0
| 0
| 0
| 0
| 0.449198
| 0.449198
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 64acd0fd47a92670e2d9fb9f7ddb1b1bda8dff59
| 154
| py
| Python
| example/src/identity.py
| SeanMabli/aiinpy
| bd332fce454c489e236878c9da91bb86ec6dda14
| ["MIT"] | null | null | null
| example/src/identity.py
| SeanMabli/aiinpy
| bd332fce454c489e236878c9da91bb86ec6dda14
| ["MIT"] | null | null | null
| example/src/identity.py
| SeanMabli/aiinpy
| bd332fce454c489e236878c9da91bb86ec6dda14
| ["MIT"] | null | null | null
class identity:
def __repr__(self):
return 'identity()'
def forward(self, input):
# identity activation: the output equals the input
return input
def backward(self, input):
# the derivative of the identity function is 1 everywhere
return 1
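# Usage sketch (illustrative values):
#   act = identity()
#   act.forward([1.0, -2.0, 3.0])   # -> [1.0, -2.0, 3.0] (unchanged)
#   act.backward([1.0, -2.0, 3.0])  # -> 1 (unit gradient regardless of input)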
| 17.111111
| 28
| 0.642857
| 19
| 154
| 5
| 0.526316
| 0.231579
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008696
| 0.253247
| 154
| 9
| 29
| 17.111111
| 0.817391
| 0
| 0
| 0
| 0
| 0
| 0.064516
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0
| 0.428571
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 7
| b38bc031b616697f68a6e4d5ec120211c01b19a0
| 184
| py
| Python
| brainframe_qt/ui/dialogs/license_dialog/widgets/__init__.py
| aotuai/brainframe-qt
| 082cfd0694e569122ff7c63e56dd0ec4b62d5bac
| ["BSD-3-Clause"] | 17
| 2021-02-11T18:19:22.000Z
| 2022-02-08T06:12:50.000Z
| brainframe_qt/ui/dialogs/license_dialog/widgets/__init__.py
| aotuai/brainframe-qt
| 082cfd0694e569122ff7c63e56dd0ec4b62d5bac
| ["BSD-3-Clause"] | 80
| 2021-02-11T08:27:31.000Z
| 2021-10-13T21:33:22.000Z
| brainframe_qt/ui/dialogs/license_dialog/widgets/__init__.py
| aotuai/brainframe-qt
| 082cfd0694e569122ff7c63e56dd0ec4b62d5bac
| ["BSD-3-Clause"] | 5
| 2021-02-12T09:51:34.000Z
| 2022-02-08T09:25:15.000Z
from .product_sidebar.product_widget import ProductWidget
from .product_sidebar.product_sidebar_widget import ProductSidebar
from .brainframe_license.license_terms import LicenseTerms
| 46
| 66
| 0.902174
| 22
| 184
| 7.227273
| 0.5
| 0.264151
| 0.226415
| 0.314465
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065217
| 184
| 3
| 67
| 61.333333
| 0.924419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 8
| 373e9b5f67e8a511215051688a31a910ec68742b
| 181,899
| py
| Python
| tsfm/MolecularInformation.py
| tlawrence3/bplogofuntest
| 26b90eb9ec604f73e2f5df3548646906bf9f6a6d
| ["MIT"] | null | null | null
| tsfm/MolecularInformation.py
| tlawrence3/bplogofuntest
| 26b90eb9ec604f73e2f5df3548646906bf9f6a6d
| ["MIT"] | 7
| 2019-01-18T03:41:16.000Z
| 2019-06-29T01:56:32.000Z
| tsfm/MolecularInformation.py
| tlawrence3/tsfm
| 26b90eb9ec604f73e2f5df3548646906bf9f6a6d
| ["MIT"] | 2
| 2017-10-05T18:11:06.000Z
| 2019-01-11T15:13:28.000Z
# -*- coding: utf-8 -*-
"""This module contains classes for calculating functional molecular information statistics.
"""
from collections import Counter, defaultdict
from multiprocessing import Pool
from operator import itemgetter
from string import Template
from ast import literal_eval as make_tuple
import os
import bisect
import pkgutil
import itertools
import sys
import random
import time
import glob
import math as mt
import re
import numpy as np
import statsmodels.stats.multitest as smm
import pandas as pd
import tsfm.nsb_entropy as nb
import tsfm.exact as exact
import warnings
from operator import truediv
from scipy import stats
from scipy.stats import genpareto
from tsfm import ad_test
from scipy.stats import norm
class DistanceCalculator:
"""A `DistanceCalculator` object contains methods for calculating several pairwise distance metrics between function logos.
Currently, a `DistanceCalculator` object can calculate pairwise distances using the square root of the Jensen-Shannon
divergence and write the resulting distance matrix to a file.
Args:
distance (str): Indicates which distance metric to use for pairwise calculations.
Attributes:
distanceMetric (str): Indicates the distance metric to be used in
pairwise calculations.
featureSet (:obj:`set` of :obj:`str`): A :obj:`set` of the structural
features contained in the function logos being compared (e.g. 1A, 173AU).
functionSet (:obj:`set` of :obj:`str`): A :obj:`set` of the functional
classes contained in the function logos being compared.
Example::
x = tsfm.MolecularInformation.DistanceCalculator('jsd')
x.get_distance(function_logos)
"""
def __init__(self, distance):
"""The initialization of a `DistanceCalculator` object requires a :str: indicating the distance metric to be used.
"""
self.distanceMetric = distance
self.featureSet = set()
self.functionSet = set()
def get_distance(self, ResultsDict):
"""
Writes a pairwise distance matrix, computed with the distance metric selected at instantiation, to a file.
Args:
ResultsDict (:obj:`dict` of :obj:`str` mapping to :class:`FunctionLogoResults`):
The values of the :obj:`dict` are compared using the selected pairwise
distance metric.
Note:
Creates a :obj:`dict` of :obj:`str`: :class:`pandas.DataFrame` from
:obj:`ResultsDict`. The index of the dataframes are the union
of the structural features contained in :obj:`ResultsDict`,
and columns labels are the union of the functional classes contained in
:obj:`ResultsDict` including a column containing
the functional information of the feature measured in bits.
Rows contain the Gorodkin fractional heights of each functional
class of each feature along with the functional information of the
feature measured in bits. The fractional heights of
each row is normalized to account for filtering of data and rounding
errors. The :obj:`dict` of :obj:`str`: :obj:`pandas.DataFrame` is
passed to the distance method set when the :class:`DistanceCalculator`
was instantiated. Below is an example of the :class:`pandas.DataFrame`
created\:
+--------+-------+-------+-------+-------+-------+-------+--------+
| | A | C | D | E | F | E | bits |
+========+=======+=======+=======+=======+=======+=======+========+
| 1A | 0.500 | 0.250 | 0.125 | 0.000 | 0.000 | 0.125 | 2.453 |
+--------+-------+-------+-------+-------+-------+-------+--------+
| 1U | 0.000 | 0.250 | 0.125 | 0.500 | 0.125 | 0.000 | 2.453 |
+--------+-------+-------+-------+-------+-------+-------+--------+
"""
for result in ResultsDict:
for coord in ResultsDict[result].basepairs:
if (coord in ResultsDict[result].info):
for pairtype in ResultsDict[result].info[coord]:
self.featureSet.add("{}{}".format("".join(str(i) for i in coord), pairtype))
for function in ResultsDict[result].height[coord][pairtype]:
self.functionSet.add(function)
for coord in range(ResultsDict[result].pos):
if (coord in ResultsDict[result].info):
for base in ResultsDict[result].info[coord]:
self.featureSet.add("{}{}".format(coord, base))
for function in ResultsDict[result].height[coord][base]:
self.functionSet.add(function)
# add inverse info features
for coord in ResultsDict[result].basepairs:
if (coord in ResultsDict[result].inverseInfo):
for pairtype in ResultsDict[result].inverseInfo[coord]:
self.featureSet.add("i{}{}".format("".join(str(i) for i in coord), pairtype))
for coord in range(ResultsDict[result].pos):
if (coord in ResultsDict[result].inverseInfo):
for base in ResultsDict[result].inverseInfo[coord]:
self.featureSet.add("i{}{}".format(coord, base))
# remove features that contain gaps
self.featureSet = {feature for feature in self.featureSet if not "-" in feature}
# prepare pandas dataframes for each result object
functionDict = {}
pandasDict = {}
for function in self.functionSet:
functionDict[function] = np.zeros(len(self.featureSet), )
functionDict["bits"] = np.zeros(len(self.featureSet), )
for result in ResultsDict:
pandasDict[result] = pd.DataFrame(functionDict, index=self.featureSet)
for coord in ResultsDict[result].basepairs:
if (coord in ResultsDict[result].info):
for pairtype in [pair for pair in ResultsDict[result].info[coord] if not "-" in pair]:
row = "{}{}".format("".join(str(i) for i in coord), pairtype)
pandasDict[result].loc[row, "bits"] = ResultsDict[result].info[coord][pairtype]
for function in ResultsDict[result].height[coord][pairtype]:
pandasDict[result].loc[row, function] = ResultsDict[result].height[coord][pairtype][
function]
for coord in range(ResultsDict[result].pos):
if (coord in ResultsDict[result].info):
for base in [nuc for nuc in ResultsDict[result].info[coord] if not nuc == "-"]:
row = "{}{}".format(coord, base)
pandasDict[result].loc[row, "bits"] = ResultsDict[result].info[coord][base]
for function in ResultsDict[result].height[coord][base]:
pandasDict[result].loc[row, function] = ResultsDict[result].height[coord][base][function]
for coord in ResultsDict[result].basepairs:
if (coord in ResultsDict[result].inverseInfo):
for pairtype in [pair for pair in ResultsDict[result].inverseInfo[coord] if not "-" in pair]:
row = "i{}{}".format("".join(str(i) for i in coord), pairtype)
pandasDict[result].loc[row, "bits"] = ResultsDict[result].inverseInfo[coord][pairtype]
for function in ResultsDict[result].inverseHeight[coord][pairtype]:
pandasDict[result].loc[row, function] = ResultsDict[result].inverseHeight[coord][pairtype][
function]
for coord in range(ResultsDict[result].pos):
if (coord in ResultsDict[result].inverseInfo):
for base in [nuc for nuc in ResultsDict[result].inverseInfo[coord] if not nuc == "-"]:
row = "i{}{}".format(coord, base)
pandasDict[result].loc[row, "bits"] = ResultsDict[result].inverseInfo[coord][base]
for function in ResultsDict[result].inverseHeight[coord][base]:
pandasDict[result].loc[row, function] = ResultsDict[result].inverseHeight[coord][base][
function]
# normalize each row's heights to sum to one after possible removal of CIFs based on some criteria
for frame in pandasDict:
pandasDict[frame] = pandasDict[frame].round(3)
# assign the normalized values back; the original expression discarded its result
heights = pandasDict[frame].drop('bits', axis=1)
pandasDict[frame].loc[:, heights.columns] = heights.div(heights.sum(axis=1), axis=0).fillna(0)
if (self.distanceMetric == "jsd"):
self.rJSD(pandasDict)
elif (self.distanceMetric == "ID"):
self.informationDifference(pandasDict, ResultsDict)
def rJSD(self, pandasDict):
"""
Produces pairwise comparisons using rJSD metric
This method should not be called directly; use
:meth:`get_distance` instead. All pairwise comparisons of OTUs are produced
and :meth:`rJSD_distance` is called to do the calculations.
Args:
pandasDict (:obj:`dict` of `str` mapping to :class:`pandas.DataFrame`):
See :meth:`get_distance` for the format of the Data Frames.
"""
pairwise_combinations = itertools.permutations(pandasDict.keys(), 2)
jsdDistMatrix = pd.DataFrame(index=list(pandasDict.keys()), columns=list(pandasDict.keys()))
jsdDistMatrix = jsdDistMatrix.fillna(0)
for pair in pairwise_combinations:
distance = 0
for i, row in pandasDict[pair[0]].iterrows():
if (row['bits'] == 0 and pandasDict[pair[1]].loc[i, 'bits'] == 0):
continue
else:
distance += self.rJSD_distance(row.drop('bits').to_numpy(),  # .as_matrix() was removed from pandas
pandasDict[pair[1]].loc[i,].drop('bits').to_numpy(),
row['bits'], pandasDict[pair[1]].loc[i, 'bits'])
jsdDistMatrix.loc[pair[0], pair[1]] = distance
jsdDistMatrix = jsdDistMatrix.round(6)
jsdDistMatrix.to_csv("jsdDistance.matrix", sep="\t")
def entropy(self, dist):
return np.sum(-dist[dist != 0] * np.log2(dist[dist != 0]))
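# e.g. entropy(np.array([0.5, 0.5])) == 1.0 bit and entropy(np.array([1.0])) == 0.0;
# zero-probability entries are masked out so 0 * log2(0) never produces NaN.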
def rJSD_distance(self, dist1, dist2, Ix, Iy):
r"""
Weighted square root of the generalized Jensen-Shannon divergence defined by Lin 1991
.. math::
D(X,Y) \equiv \sum_{f \in F} (I_f^X + I_f^Y) \sqrt{H[\pi_f^X p_f^X + \pi_f^Y p_f^Y] - (\pi_f^X H[p_f^X] + \pi_f^Y H[p_f^Y])}
where :math:`\pi_f^X = \frac{I_f^X}{I_f^X + I_f^Y}` and :math:`\pi_f^Y = \frac{I_f^Y}{I_f^X + I_f^Y}`
"""
pi1 = Ix / (Ix + Iy)
pi2 = Iy / (Ix + Iy)
step = self.entropy(pi1 * dist1 + pi2 * dist2) - (pi1 * self.entropy(dist1) + pi2 * self.entropy(dist2))
return (Ix + Iy) * mt.sqrt(step if step >= 0 else 0)
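# Worked example (illustrative numbers): with dist1 = [0.5, 0.5], dist2 = [1.0, 0.0]
# and Ix = Iy = 1 bit, pi1 = pi2 = 0.5; the mixture [0.75, 0.25] has entropy
# ~0.811 bits and the weighted term is 0.5*H(dist1) + 0.5*H(dist2) = 0.5,
# so D = (1 + 1) * sqrt(0.311) ~= 1.116.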
class FunctionLogoResults:
"""
Stores results from information calculations and provides methods for text output and visualization.
Args:
name (:obj:`str`): Value is used as prefix for output files.
basepairs (:obj:`list` of :obj:`tuples` of (:obj:`int`, :obj:`int`)):
a list of basepair coordinates encoded as a :obj:`tuple` of two
:obj:`int`.
Note:
This data structure is created as an attribute of
:class:`FunctionLogo` during instantiation and can be accessed
with :attr:`FunctionLogo.basepairs` or created during
instantiation of this class when ``from_file = True``
pos (:obj:`int`): Stores length of the alignment.
Note:
See note for :attr:`basepairs`. Accessed using :attr:`FunctionLogo.pos`.
sequences (:obj:`list` of :class:`Seq`): a list of :class:`Seq` objects
used for text output and visualization.
Note:
See note for :attr:`basepairs`. Accessed using :attr:`FunctionLogo.seq`
pairs (:obj:`set` of :obj:`str`): unique basepair states found in the dataset.
Note:
See note for :attr:`basepairs`.
singles (:obj:`set` of :obj:`str`): unique states for single sites.
Note:
See note for :attr:`basepairs`.
info (:obj:`dict` of :obj:`int` or :obj:`tuple` mapping to :obj:`dict` of :obj:`str` mapping to :obj:`float`):
mapping of structural features to information content. Add this data structure using :meth:`add_information`.
Note:
This data structure is output of
:meth:`FunctionLogo.calculate_entropy_NSB()` or
:meth:`FunctionLogo.calculate_entropy_MM()`.
height (:obj:`dict` of :obj:`int` or :obj:`tuple` mapping to :obj:`dict` of :obj:`str` mapping to :obj:`dict` of :obj:`str` mapping to :obj:`float`):
mapping of structural features and functional class to class height. Add this data structure using :meth:`add_information`.
Note:
This data structure is output of
:meth:`FunctionLogo.calculate_entropy_NSB()` or
:meth:`FunctionLogo.calculate_entropy_MM()`.
inverseInfo (:obj:`dict` of :obj:`int` or :obj:`tuple` mapping to :obj:`dict` of :obj:`str` mapping to :obj:`float`):
mapping of structural features to information content for anti-determinants. Add this data structure using :meth:`add_information`.
Note:
This data structure is output of
:meth:`FunctionLogo.calculate_entropy_inverse_NSB()` or
:meth:`FunctionLogo.calculate_entropy_inverse_MM()`.
inverseHeight (:obj:`dict` of :obj:`int` or :obj:`tuple` mapping to :obj:`dict` of :obj:`str` mapping to :obj:`dict` of :obj:`str` mapping to :obj:`float`):
mapping of structural features and functional class to class height for anti-determinants. Add this data structure using :meth:`add_information`.
Note:
This data structure is output of
:meth:`FunctionLogo.calculate_entropy_inverse_NSB()` or
:meth:`FunctionLogo.calculate_entropy_inverse_MM()`.
p (:obj:`dict` of :obj:`str` mapping to :obj:`dict`): mapping of structural features and class height to p-values.
Note:
This data structure is created using :meth:`add_stats()`
inverse_p (:obj:`dict` of :obj:`str` mapping to :obj:`dict`): mapping of structural features and class height to p-values for anti-determinants
Note:
This data structure is created using :meth:`add_stats()`
from_file (:obj:`bool`): create :class:`FunctionLogoResults`
object from file written with
:meth:`FunctionLogoResults.text_output`
"""
def __init__(self, name, basepairs=None, pos=0, sequences=None, pairs=None, singles=None, info=None,
height=None, inverseInfo=None, inverseHeight=None, p=None,
inverse_p=None, from_file=False):
self.pos = pos
self.correction = ""
if (not info):
self.info = defaultdict(lambda: defaultdict(float))
self.height = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))
else:
self.info = info
self.height = height
if (not inverseInfo):
self.inverseInfo = defaultdict(lambda: defaultdict(float))
self.inverseHeight = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))
else:
self.inverseInfo = inverseInfo
self.inverseHeight = inverseHeight
if (not p):
self.p = {'P': defaultdict(lambda: defaultdict(float)),
'p': defaultdict(lambda: defaultdict(lambda: defaultdict(float))),
'P_corrected': defaultdict(lambda: defaultdict(float)),
'p_corrected': defaultdict(lambda: defaultdict(lambda: defaultdict(float)))}
else:
self.p = p
if (not inverse_p):
self.inverse_p = {'P': defaultdict(lambda: defaultdict(float)),
'p': defaultdict(lambda: defaultdict(lambda: defaultdict(float))),
'P_corrected': defaultdict(lambda: defaultdict(float)),
'p_corrected': defaultdict(lambda: defaultdict(lambda: defaultdict(float)))}
else:
self.inverse_p = inverse_p
if (not basepairs):
self.basepairs = []
else:
self.basepairs = basepairs
if (not sequences):
self.sequences = []
else:
self.sequences = sequences
if (not pairs):
self.pairs = set()
else:
self.pairs = pairs
if (not singles):
self.singles = set()
else:
self.singles = singles
if (from_file):
self.name = name.split("/")[-1]
self.from_file(name)
else:
self.name = name
def from_file(self, file_name):
"""
Read previously calculated results from file.
Populates :class:`FunctionLogoResults` from previously calculated
results written to a file using :meth:`text_output`.
Args:
file_name (:obj:`str`): File path of previously calculated results
"""
pvalue = False
file_handle = open(file_name, "r")
for line in file_handle:
if (line.startswith("#")):
if ("p-value" in line):
pvalue = True
else:
line = line.strip()
spline = line.split("\t")
if (spline[0] == "bp:"):
if (not make_tuple(spline[1]) in self.basepairs):
self.basepairs.append(make_tuple(spline[1]))
self.pairs.add(spline[2])
self.info[make_tuple(spline[1])][spline[2]] = float(spline[4])
if (pvalue):
self.p['P'][make_tuple(spline[1])][spline[2]] = float(spline[5])
self.p['P_corrected'][make_tuple(spline[1])][spline[2]] = float(spline[6])
for function in spline[7].split():
function_split = function.split(":")
self.height[make_tuple(spline[1])][spline[2]][function_split[0]] = float(function_split[1])
if (pvalue):
self.p['p'][make_tuple(spline[1])][spline[2]][function_split[0]] = float(function_split[2])
self.p['p_corrected'][make_tuple(spline[1])][spline[2]][function_split[0]] = float(
function_split[3])
elif (spline[0] == "ss:"):
if (self.pos < int(spline[1])):
self.pos = int(spline[1])
self.singles.add(spline[2])
self.info[int(spline[1])][spline[2]] = float(spline[4])
if (pvalue):
self.p['P'][int(spline[1])][spline[2]] = float(spline[5])
self.p['P_corrected'][int(spline[1])][spline[2]] = float(spline[6])
for function in spline[7].split():
function_split = function.split(":")
self.height[int(spline[1])][spline[2]][function_split[0]] = float(function_split[1])
if (pvalue):
self.p['p'][int(spline[1])][spline[2]][function_split[0]] = float(function_split[2])
self.p['p_corrected'][int(spline[1])][spline[2]][function_split[0]] = float(
function_split[3])
elif (spline[0] == "ibp:"):
if (not make_tuple(spline[1]) in self.basepairs):
self.basepairs.append(make_tuple(spline[1]))
self.pairs.add(spline[2])
self.inverseInfo[make_tuple(spline[1])][spline[2]] = float(spline[4])
if (pvalue):
self.inverse_p['P'][make_tuple(spline[1])][spline[2]] = float(spline[5])
self.inverse_p['P_corrected'][make_tuple(spline[1])][spline[2]] = float(spline[6])
for function in spline[7].split():
function_split = function.split(":")
self.inverseHeight[make_tuple(spline[1])][spline[2]][function_split[0]] = float(
function_split[1])
if (pvalue):
self.inverse_p['p'][make_tuple(spline[1])][spline[2]][function_split[0]] = float(
function_split[2])
self.inverse_p['p_corrected'][make_tuple(spline[1])][spline[2]][function_split[0]] = float(
function_split[3])
elif (spline[0] == "iss:"):
if (self.pos < int(spline[1])):
self.pos = int(spline[1])
self.singles.add(spline[2])
self.inverseInfo[int(spline[1])][spline[2]] = float(spline[4])
if (pvalue):
self.inverse_p['P'][int(spline[1])][spline[2]] = float(spline[5])
self.inverse_p['P_corrected'][int(spline[1])][spline[2]] = float(spline[6])
for function in spline[7].split():
function_split = function.split(":")
self.inverseHeight[int(spline[1])][spline[2]][function_split[0]] = float(function_split[1])
if (pvalue):
self.inverse_p['p'][int(spline[1])][spline[2]][function_split[0]] = float(function_split[2])
self.inverse_p['p_corrected'][int(spline[1])][spline[2]][function_split[0]] = float(
function_split[3])
self.pos += 1 # fix off by one
file_handle.close()
def add_information(self, info, height, inverse=False):
"""
Add data structures containing results from information calculations
This method is used to add results from
:meth:`FunctionLogo.calculate_entropy_NSB()`,
:meth:`FunctionLogo.calculate_entropy_MM()`,
:meth:`FunctionLogo.calculate_entropy_inverse_NSB()` or
:meth:`FunctionLogo.calculate_entropy_inverse_MM()`. If reading previous
results from a file this method is unnecessary because these data structures
are populated from values in the file.
Args:
info (:obj:`dict`): mapping of structural features to information
content. This data structure is output of
:meth:`FunctionLogo.calculate_entropy_NSB()` or
:meth:`FunctionLogo.calculate_entropy_MM()`.
height (:obj:`dict`): mapping of structural features and functional class to class height.
This data structure is output of
:meth:`FunctionLogo.calculate_entropy_NSB()` or
:meth:`FunctionLogo.calculate_entropy_MM()`.
inverse (:obj:`bool`): Defines if the data structures are for
anti-determinants.
"""
if (inverse):
self.inverseInfo = info
self.inverseHeight = height
else:
self.info = info
self.height = height
def add_stats(self, distribution, correction, test, nosingle, inverse=False):
"""
Perform statistical testing and multiple test correction
Calculates p-values and multiple testing corrected p-values for
structural features and functional class heights. Requires an
instance of :class:`FunctionLogoDist` and calls the
:meth:`FunctionLogoDist.stat_test`. Methods for multiple test
correction are provided by :class:`statsmodels.stats.multitest`.
Args:
distribution (:class:`FunctionLogoDist`): discrete probability
distributions of information content of structural
features and functional class height.
correction (:obj:`str`): Multiple test correction method.
test (:obj:`str`): Indicate statistical testing and multiple test correction of only stack height, only letter height, or both.
nosingle (:obj:`str`): Indicate statistical testing and multiple test correction of basepair features only.
inverse (:obj:`bool`): Produce statistical tests for
anti-determinants.
"""
self.correction = correction
if (inverse):
self.inverse_p = distribution.stat_test(self.inverseInfo, self.inverseHeight,
correction, test, nosingle)
else:
self.p = distribution.stat_test(self.info, self.height, correction, test,
nosingle)
def get(self, position, state):
ret_counter = Counter()
if (len(position) == 1):
for x in self.sequences:
if (x.seq[position[0]] == state[0]):
ret_counter[x.function] += 1
if (len(position) == 2):
for x in self.sequences:
if (x.seq[position[0]] == state[0] and x.seq[position[1]] == state[1]):
ret_counter[x.function] += 1
return ret_counter
def text_output(self, correction):
"""
Write results to a file named\: :attr:`name`\_CIFs.txt
"""
# build output heading
file_handle = open("{}_CIFs.txt".format(self.name.split("/")[-1]), "w")
heading_dict = {}
if (self.p):
heading_dict['P'] = "\tp-value \t{:<10}".format(correction)
heading_dict['p'] = "\tclass:height:p-value:{}".format(correction)
else:
heading_dict['P'] = ""
heading_dict['p'] = "\tclass:height"
print("#bp\tcoord\tstate\tN\tinfo{P}{p}".format(**heading_dict), file=file_handle)
for coord in sorted(self.basepairs, key=itemgetter(0)):
if (coord in self.info):
for pairtype in sorted(self.info[coord]):
output_string = "bp:\t{}".format(coord)
output_string += "\t{}\t{}\t{:05.3f}\t".format(pairtype, sum(self.get(coord, pairtype).values()),
self.info[coord][pairtype])
if (self.p):
if coord in self.p['P']:
output_string += "{:08.6f}".format(self.p['P'][coord][pairtype])
output_string += "\t{:08.6f}".format(self.p['P_corrected'][coord][pairtype])
else:
output_string += "NA"
output_string += "\tNA"
output_string += "\t"
for aainfo in sorted(self.height[coord][pairtype].items(), key=itemgetter(1), reverse=True):
output_string += "{}:{:05.3f}".format(aainfo[0], aainfo[1])
if (self.p):
if coord in self.p['p']:
output_string += ":{:08.6f}".format(self.p['p'][coord][pairtype][aainfo[0].upper()])
output_string += ":{:08.6f}".format(
self.p['p_corrected'][coord][pairtype][aainfo[0].upper()])
else:
output_string += ":NA"
output_string += ":NA"
output_string += " "
print(output_string, file=file_handle)
if (self.inverseInfo):
print("#ibp\tcoord\tstate\tN\tinfo{P}{p}".format(**heading_dict), file=file_handle)
for coord in sorted(self.basepairs, key=itemgetter(0)):
if (coord in self.inverseInfo):
for pairtype in sorted(self.inverseInfo[coord]):
output_string = "ibp:\t{}".format(coord)
output_string += "\t{}\t{}\t{:05.3f}\t".format(pairtype, sum(self.get(coord, pairtype).values()),
self.inverseInfo[coord][pairtype])
if (self.p):
if coord in self.inverse_p['P']:
output_string += "{:08.6f}".format(self.inverse_p['P'][coord][pairtype])
output_string += "\t{:08.6f}".format(self.inverse_p['P_corrected'][coord][pairtype])
else:
output_string += "NA"
output_string += "\tNA"
output_string += "\t"
for aainfo in sorted(self.inverseHeight[coord][pairtype].items(), key=itemgetter(1), reverse=True):
output_string += "{}:{:05.3f}".format(aainfo[0], aainfo[1])
if (self.p):
if coord in self.inverse_p['p']:
output_string += ":{:08.6f}".format(
self.inverse_p['p'][coord][pairtype][aainfo[0].upper()])
output_string += ":{:08.6f}".format(
self.inverse_p['p_corrected'][coord][pairtype][aainfo[0].upper()])
else:
output_string += ":NA"
output_string += ":NA"
output_string += " "
print(output_string, file=file_handle)
print("#ss\tcoord\tstate\tN\tinfo{P}{p}".format(**heading_dict), file=file_handle)
for coord in range(self.pos):
if (coord in self.info):
for base in sorted(self.info[coord]):
output_string = "ss:\t{}\t{}\t{}\t{:05.3f}".format(coord, base,
sum(self.get([coord], base).values()),
self.info[coord][base])
if (self.p):
if coord in self.p['P']:
output_string += "\t{:08.6f}".format(self.p['P'][coord][base])
output_string += "\t{:08.6f}".format(self.p['P_corrected'][coord][base])
else:
output_string += "\tNA"
output_string += "\tNA"
output_string += "\t"
for aainfo in sorted(self.height[coord][base].items(), key=itemgetter(1), reverse=True):
output_string += "{}:{:05.3f}".format(aainfo[0], aainfo[1])
if (self.p):
if coord in self.p['p']:
output_string += ":{:08.6f}".format(self.p['p'][coord][base][aainfo[0].upper()])
output_string += ":{:08.6f}".format(
self.p['p_corrected'][coord][base][aainfo[0].upper()])
else:
output_string += ":NA"
output_string += ":NA"
output_string += " "
print(output_string, file=file_handle)
if (self.inverseInfo):
print("#iss\tcoord\tstate\tN\tinfo{P}{p}".format(**heading_dict), file=file_handle)
for coord in range(self.pos):
if (coord in self.inverseInfo):
for base in sorted(self.inverseInfo[coord]):
output_string = "iss:\t{}\t{}\t{}\t{:05.3f}".format(coord, base,
sum(self.get([coord], base).values()),
self.inverseInfo[coord][base])
if (self.p):
if coord in self.inverse_p['P']:
output_string += "\t{:08.6f}".format(self.inverse_p['P'][coord][base])
output_string += "\t{:08.6f}".format(self.inverse_p['P_corrected'][coord][base])
else:
output_string += "\tNA"
output_string += "\tNA"
output_string += "\t"
for aainfo in sorted(self.inverseHeight[coord][base].items(), key=itemgetter(1), reverse=True):
output_string += "{}:{:05.3f}".format(aainfo[0], aainfo[1])
if (self.p):
if coord in self.inverse_p['p']:
output_string += ":{:08.6f}".format(self.inverse_p['p'][coord][base][aainfo[0].upper()])
output_string += ":{:08.6f}".format(
self.inverse_p['p_corrected'][coord][base][aainfo[0].upper()])
else:
output_string += ":NA"
output_string += ":NA"
output_string += " "
print(output_string, file=file_handle)
file_handle.close()
def logo_output(self, inverse=False, logo_prefix="", logo_postfix=""):
"""
Produce function logo postscript files
"""
coord_length = 0 # used to determine eps height
coord_length_addition = 0
logo_outputDict = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))
# logo output dict construction
for coord in sorted(self.basepairs, key=itemgetter(0)):
for pairtype in sorted(self.pairs):
if (pairtype in self.info[coord]):
for aainfo in sorted(self.height[coord][pairtype].items(), key=itemgetter(1), reverse=True):
logo_outputDict[pairtype][coord][aainfo[0]] = self.info[coord][pairtype] * aainfo[1]
else:
logo_outputDict[pairtype][coord] = {}
for coord in range(self.pos):
for base in sorted(self.singles):
if (base in self.info[coord]):
for aainfo in sorted(self.height[coord][base].items(), key=itemgetter(1), reverse=True):
logo_outputDict[base][coord][aainfo[0]] = self.info[coord][base] * aainfo[1]
else:
logo_outputDict[base][coord] = {}
# output logos
for base in logo_outputDict:
logodata = ""
for coord in sorted(logo_outputDict[base].keys()):
if (len(str(coord)) > coord_length):
coord_length = len(str(coord))
logodata += "numbering {{({}) makenumber}} if\ngsave\n".format(coord)
for aainfo in sorted(logo_outputDict[base][coord].items(), key=itemgetter(1)):
if (aainfo[1] < 0.0001 or mt.isnan(aainfo[1])):
continue
logodata += "{:07.5f} ({}) numchar\n".format(aainfo[1], aainfo[0].upper())
logodata += "grestore\nshift\n"
# output logodata to template
template_byte = pkgutil.get_data('tsfm', 'eps/Template.eps')
logo_template = template_byte.decode('utf-8')
if (logo_postfix):
filename = "{}_{}_{}_{}.eps".format(logo_prefix, base, logo_postfix, self.name.split("/")[-1])
else:
filename = "{}_{}_{}.eps".format(logo_prefix, base, self.name.split("/")[-1])
with open(filename, "w") as logo_output:
src = Template(logo_template)
if (len(base) == 2):
logodata_dict = {'logo_data': logodata, 'low': min(logo_outputDict[base].keys()),
'high': max(logo_outputDict[base].keys()),
'length': 21 * len(logo_outputDict[base].keys()),
'height': 735 - (5 * (coord_length + coord_length_addition))}
else:
logodata_dict = {'logo_data': logodata, 'low': min(logo_outputDict[base].keys()),
'high': max(logo_outputDict[base].keys()),
'length': 15.68 * len(logo_outputDict[base].keys()),
'height': 735 - (5 * (coord_length + coord_length_addition))}
logo_output.write(src.substitute(logodata_dict))
if (inverse):
inverse_logo_outputDict = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))
# inverse logo output dict construction
for coord in sorted(self.basepairs, key=itemgetter(0)):
for pairtype in sorted(self.pairs):
if (pairtype in self.inverseInfo[coord]):
for aainfo in sorted(self.inverseHeight[coord][pairtype].items(), key=itemgetter(1),
reverse=True):
inverse_logo_outputDict[pairtype][coord][aainfo[0]] = self.inverseInfo[coord][pairtype] * \
aainfo[1]
else:
inverse_logo_outputDict[pairtype][coord] = {}
for coord in range(self.pos):
for base in sorted(self.singles):
if (base in self.inverseInfo[coord]):
for aainfo in sorted(self.inverseHeight[coord][base].items(), key=itemgetter(1), reverse=True):
inverse_logo_outputDict[base][coord][aainfo[0]] = self.inverseInfo[coord][base] * aainfo[1]
else:
inverse_logo_outputDict[base][coord] = {}
for base in inverse_logo_outputDict:
logodata = ""
for coord in sorted(inverse_logo_outputDict[base].keys()):
if (len(str(coord)) > coord_length):
coord_length = len(str(coord))
logodata += "numbering {{({}) makenumber}} if\ngsave\n".format(coord)
for aainfo in sorted(inverse_logo_outputDict[base][coord].items(), key=itemgetter(1)):
if (aainfo[1] < 0.0001 or mt.isnan(aainfo[1])):
continue
logodata += "{:07.5f} ({}) numchar\n".format(aainfo[1], aainfo[0].upper())
logodata += "grestore\nshift\n"
# output logodata to template
template_byte = pkgutil.get_data('tsfm', 'eps/Template.eps')
logo_template = template_byte.decode('utf-8')
with open("inverse_{}_{}.eps".format(base, self.name.split("/")[-1]), "w") as logo_output:
src = Template(logo_template)
if (len(base) == 2):
logodata_dict = {'logo_data': logodata, 'low': min(inverse_logo_outputDict[base].keys()),
'high': max(inverse_logo_outputDict[base].keys()),
'length': 21 * len(inverse_logo_outputDict[base].keys()),
'height': 735 - (5 * (coord_length + coord_length_addition))}
else:
logodata_dict = {'logo_data': logodata, 'low': min(inverse_logo_outputDict[base].keys()),
'high': max(inverse_logo_outputDict[base].keys()),
'length': 15.68 * len(inverse_logo_outputDict[base].keys()),
'height': 735 - (5 * (coord_length + coord_length_addition))}
logo_output.write(src.substitute(logodata_dict))
class FunctionLogoDist:
"""
Discrete probability distributions of information values.
Probability distributions are created using a permutation label-shuffling
strategy. Permuted data is created using :meth:`FunctionLogo.permute`;
distributions are inferred from the permuted data, and
:class:`FunctionLogoDist` objects are created using
:meth:`FunctionLogo.permInfo`.
Attributes:
bpinfodist (:obj:`dict` of :obj:`float` mapping to :obj:`int`):
Discrete probability distribution of basepair feature information
bpheightdist (:obj:`dict` of :obj:`float` mapping to :obj:`int`):
Discrete probability distribution of functional class
information of basepair features
singleinfodist (:obj:`dict` of :obj:`float` mapping to :obj:`int`):
Discrete probability distribution of single base feature information
singleheightdist (:obj:`dict` of :obj:`float` mapping to :obj:`int`):
Discrete probability distribution of functional class
information of single base features
"""
def __init__(self):
self.bpinfodist = defaultdict(int)
self.bpheightdist = defaultdict(int)
self.singleinfodist = defaultdict(int)
self.singleheightdist = defaultdict(int)
def weighted_dist(self, bpdata, singledata):
for x in bpdata[0]:
self.bpinfodist[x] += 1
for x in bpdata[1]:
self.bpheightdist[x] += 1
for x in singledata[0]:
self.singleinfodist[x] += 1
for x in singledata[1]:
self.singleheightdist[x] += 1
self.bpinfo_sorted_keys = sorted(self.bpinfodist.keys())
self.bpheight_sorted_keys = sorted(self.bpheightdist.keys())
self.ssinfo_sorted_keys = sorted(self.singleinfodist.keys())
self.ssheight_sorted_keys = sorted(self.singleheightdist.keys())
def stat_test(self, info, height, correction, test, features):
"""
Performs statistical tests and multiple test correction.
Calculates a p-value using a right tail probability test on the
instance's discrete probability distributions. Methods for multiple test
correction are provided by :class:`statsmodels.stats.multitest`. This
method is usually invoked using :meth:`FunctionLogoResults.add_stats`.
Args:
info (:obj:`dict` of :obj:`int` or :obj:`tuple` mapping to :obj:`dict` of :obj:`str` mapping to :obj:`float`):
mapping of structural features to information content.
height (:obj:`dict` of :obj:`int` or :obj:`tuple` mapping to :obj:`dict` of :obj:`str` mapping to :obj:`dict` of :obj:`str` mapping to :obj:`float`):
mapping of structural features and functional class to class height.
correction (:obj:`str`): Method for multiple test correction. Any
method available in :class:`statsmodels.stats.multitest` is a
valid option
test (:obj:`str`): Indicate statistical testing and multiple test correction of only stack height, only letter height, or both.
features (:obj:`str`): Indicate statistical testing and multiple test correction of basepair features only, single sites only or both.
"""
P = defaultdict(lambda: defaultdict(float))
P_corrected = defaultdict(lambda: defaultdict(float))
p = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))
p_corrected = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))
bp_coords = []
ss_coords = []
test_bp_stack = []
test_ss_stack = []
test_bp_letter = []
test_ss_letter = []
for coord in info:
for pairtype in info[coord]:
if "," in str(coord) and (features == "pairs" or features == "both"):
bp_coords.append(coord)
if test == "stacks" or test == "both":
P[coord][pairtype] = self.rtp(self.bpinfodist, info[coord][pairtype], self.bpinfo_sorted_keys)
if test == "letters" or test == "both":
for aa in height[coord][pairtype]:
p[coord][pairtype][aa] = self.rtp(self.bpheightdist,
info[coord][pairtype] * height[coord][pairtype][aa],
self.bpheight_sorted_keys)
elif features == "singles" or features == "both":
ss_coords.append(coord)
if test == "stacks" or test == "both":
P[coord][pairtype] = self.rtp(self.singleinfodist, info[coord][pairtype],
self.ssinfo_sorted_keys)
if test == "letters" or test == "both":
for aa in height[coord][pairtype]:
p[coord][pairtype][aa] = self.rtp(self.singleheightdist,
info[coord][pairtype] * height[coord][pairtype][aa],
self.ssheight_sorted_keys)
if test == "stacks" or test == "both":
if features == "pairs" or features == "both":
bp_coords.sort()
for coord in bp_coords:
for pairtype in sorted(P[coord]):
test_bp_stack.append(P[coord][pairtype])
if features == "singles" or features == "both":
ss_coords.sort()
for coord in ss_coords:
for pairtype in sorted(P[coord]):
test_ss_stack.append(P[coord][pairtype])
if test == "letters" or test == "both":
if features == "pairs" or features == "both":
for coord in bp_coords:
for pairtype in sorted(p[coord]):
for aa in sorted(p[coord][pairtype]):
test_bp_letter.append(p[coord][pairtype][aa])
if features == "singles" or features == "both":
for coord in ss_coords:
for pairtype in sorted(p[coord]):
for aa in sorted(p[coord][pairtype]):
test_ss_letter.append(p[coord][pairtype][aa])
test_bpss_results = \
smm.multipletests(test_bp_stack + test_ss_stack + test_bp_letter + test_ss_letter, method=correction)[
1].tolist()
if test == "stacks" or test == "both":
if features == "pairs" or features == "both":
for coord in bp_coords:
for pairtype in sorted(P[coord]):
P_corrected[coord][pairtype] = test_bpss_results.pop(0)
if features == "singles" or features == "both":
for coord in ss_coords:
for pairtype in sorted(P[coord]):
P_corrected[coord][pairtype] = test_bpss_results.pop(0)
if test == "letters" or test == "both":
if features == "pairs" or features == "both":
for coord in bp_coords:
for pairtype in sorted(p[coord]):
for aa in sorted(p[coord][pairtype]):
p_corrected[coord][pairtype][aa] = test_bpss_results.pop(0)
if features == "singles" or features == "both":
for coord in ss_coords:
for pairtype in sorted(p[coord]):
for aa in sorted(p[coord][pairtype]):
p_corrected[coord][pairtype][aa] = test_bpss_results.pop(0)
return {'P': P, 'p': p, "P_corrected": P_corrected, "p_corrected": p_corrected}
def rtp(self, data, point, keys_sorted):
if (point > 0):
part = 0
total = sum(data.values())
i = bisect.bisect_left(keys_sorted, point)
if (point <= keys_sorted[-1]):
for y in keys_sorted[i:]:
part += data[y]
return (part + 1) / (total + 1)
else:
return 1 / (total + 1)
else:
return 1.0
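# Worked example (illustrative counts): with data = {0.1: 5, 0.2: 3, 0.3: 2}
# (total 10) and point = 0.2, the right tail holds 3 + 2 = 5 observations,
# so rtp returns (5 + 1) / (10 + 1) ~= 0.545; the +1 pseudocounts keep
# empirical p-values strictly positive.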
class Seq:
"""
Provides a data structure consisting of a molecular sequence labeled with a functional class.
Args:
function (:obj:`str`): Functional annotation of the sequence.
seq (:obj:`str`): Molecular sequence data.
"""
def __init__(self, function, seq):
self.function = function
self.seq = seq
def __len__(self):
return len(self.seq)
class FunctionLogo:
"""
Parses structural and sequence information and provides methods for function logo calculations.
This class provides data structures and methods for calculating
functional information of basepair and single base features. It also
provides methods for producing permuted data sets with functional class
labels shuffled.
Args:
struct_file (:obj:`str`): File name containing secondary structure
notation in cove, infernal, or text format.
kind (:obj:`str`): Secondary structure notation format.
"""
def __init__(self, struct_file, kind=None, exact_init=None, inverse_init=None):
if exact_init:
self.exact = exact_init
else:
self.exact = []
if inverse_init:
self.inverse_exact = inverse_init
else:
self.inverse_exact = []
if kind:
if kind == "s":
self.basepairs = []
else:
self.parse_struct(struct_file, kind)
else:
self.basepairs = struct_file
self.pos = 0
self.sequences = []
self.pairs = set()
self.singles = set()
self.functions = Counter()
def parse_sequences(self, file_prefix):
"""
Parse sequence alignment data in clustal format
Sequence alignment files are required to be in clustal format with
each functional class having its own file. Alignment files must
conform to the naming standard ``fileprefix_functionalclass.aln``.
Args:
file_prefix (:obj:`str`): Prefix used to identify a group of alignment files.
"""
for fn in glob.glob("{}_?.aln".format(file_prefix)):
match = re.search(r"_([A-Z])\.aln", fn)
aa_class = match.group(1)
with open(fn, "r") as ALN:
good = False
begin_seq = False
interleaved = False
seq = {}
for line in ALN:
match = re.search(r"^(\S+)\s+(\S+)", line)
if (re.search(r"^CLUSTAL", line)):
good = True
continue
elif (re.search(r"^[\s\*\.\:]+$", line) and not interleaved and begin_seq):
interleaved = True
elif (re.search(r"^[\s\*\.\:]+$", line) and interleaved and begin_seq):
continue
elif (match and not interleaved):
begin_seq = True
if (not good):
sys.exit("File {} appears not to be a clustal file".format(fn))
seq[match.group(1)] = match.group(2)
elif (match and interleaved):
seq[match.group(1)] += match.group(2)
for sequence in seq.values():
self.add_sequence(aa_class, sequence.upper().replace("T", "U"))
print("{} alignments parsed".format(len(self.functions.keys())), file=sys.stderr)
def parse_struct(self, struct_file, kind):
"""
Parse secondary structure file for basepair locations.
Args:
struct_file (:obj:`str`): File containing structural annotation
kind (:obj:`str`): Structural annotation format
"""
print("Parsing base-pair coordinates", file=sys.stderr)
basepairs = []
ss = ""
pairs = defaultdict(list)
tarm = 0
stack = []
if (kind == "infernal"):
for line in struct_file:
line = line.strip()
ss += line.split()[2]
struct_file.seek(0)
state = "start"
for count, i in enumerate(ss):
if (i == "("):
if (state == "start"):
state = "A"
elif (i == "<"):
stack.append(count)
if (state == "A"):
state == "D"
elif (state == "cD"):
state = "C"
elif (state == "cC"):
state = "T"
elif (i == ">"):
if (state == "D"):
state = "cD"
elif (state == "C"):
state = "cC"
elif (state == "T"):
state = "cT"
arm = state.replace("c", "")
pairs[arm].append([stack.pop(), count])
elif (i == ")"):
pairs['A'].append([stack.pop(), count])
for arm in pairs:
for pair in pairs[arm]:
basepairs.append((pair[0], pair[1]))
if (kind == "cove"):
for line in struct_file:
line = line.strip()
ss += line.split()[1]
struct_file.seek(0)
state = "start"
for count, i in enumerate(ss):
if (i == ">" and (state == "start" or state == "AD")):
if (state == "start"):
state = "AD"
stack.append(count)
elif (i == "<" and (state == "AD" or state == "D")):
if (state == "AD"):
state = "D"
pairs[state].append([stack.pop(), count])
elif (i == ">" and (state == "D" or state == "C")):
if (state == "D"):
state = "C"
stack.append(count)
elif (i == "<" and (state == "C" or state == "cC")):
if (state == "C"):
state = "cC"
pairs["C"].append([stack.pop(), count])
elif (i == ">" and (state == "cC" or state == "T")):
if (state == "cC"):
state = "T"
stack.append(count)
tarm += 1
elif (i == "<" and (state == "T" and tarm > 0)):
pairs[state].append([stack.pop(), count])
tarm -= 1
elif (i == "<" and (state == "T" or state == "A") and tarm == 0):
state = "A"
pairs[state].append([stack.pop(), count])
for arm in pairs:
for pair in pairs[arm]:
basepairs.append((pair[0], pair[1]))
if (kind == "text"):
for line in struct_file:
coords = "".join(line.split(":")[1])
coords = coords.split(",")
for coord1, coord2 in zip(coords[0::2], coords[1::2]):
basepairs.append((int(coord1), int(coord2)))
self.basepairs = basepairs
def approx_expect(self, H, k, N):
return H - ((k - 1) / ((mt.log(4)) * N))
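# approx_expect applies a first-order bias correction to the background entropy:
# since log(4) = 2*ln(2), it subtracts (k - 1) / (2 * N * ln 2) bits from H.
# e.g. approx_expect(H=2.0, k=4, N=20) = 2.0 - 3 / (log(4) * 20) ~= 1.892 bits
# (illustrative numbers).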
def exact_run(self, n, p, numclasses):
j = exact.calc_exact(n, p, numclasses)
print("{:2} {:07.5f}".format(n, j[1]), file=sys.stderr)
return j
def permuted(self, items, pieces=2):
random.seed(int.from_bytes(os.urandom(4), byteorder='little'))
sublists = [[] for i in range(pieces)]
for x in items:
sublists[random.randint(0, pieces - 1)].append(x)
permutedList = []
for i in range(pieces):
time.sleep(0.01)
random.seed()
random.shuffle(sublists[i])
permutedList.extend(sublists[i])
return permutedList
def permutations(self, numPerm, aa_classes):
indices = []
permStructList = []
for p in range(numPerm):
indices.append(self.permuted(aa_classes))
for index in indices:
permStruct = FunctionLogo(self.basepairs, exact_init=self.exact, inverse_init=self.inverse_exact)
for i, seqs in enumerate(self.sequences):
permStruct.add_sequence(index[i], seqs.seq)
permStructList.append(permStruct)
return permStructList
def permute(self, permute_num, proc):
"""
Creates permuted datasets by shuffling functional annotation labels of sequences.
Args:
permute_num (:obj:`int`): Number of permutations to perform
proc (:obj:`int`): Number of concurrent processes to run
"""
with Pool(processes=proc) as pool:
perm_jobs = []
for x in range(proc):
if (x == 0):
perm_jobs.append((permute_num // proc + permute_num % proc, self.get_functions()))
else:
perm_jobs.append((permute_num // proc, self.get_functions()))
perm_results = pool.starmap(self.permutations, perm_jobs)
self.permutationList = []
for x in perm_results:
self.permutationList += x
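# Usage sketch (hypothetical object): each worker builds permute_num // proc
# shuffled copies, with the first also taking the remainder, e.g.
#   logo.permute(permute_num=1000, proc=4)      # 250 permutations per process
#   dist = logo.permInfo(method="NSB", proc=4)  # distributions from the permuted data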
# new bootstrap method for generating bootstrap replicates over functional classes
def bootstrap_sample(self, num_boot, seq_dict):
random.seed(int.from_bytes(os.urandom(4), byteorder='little'))
bootStructList = []
for b in range(num_boot):
bootStruct = FunctionLogo(self.basepairs, exact_init=self.exact, inverse_init=self.inverse_exact)
for function in seq_dict:
bootsample = random.choices(seq_dict[function], k=len(seq_dict[function]))
for sample in bootsample:
bootStruct.add_sequence(sample.function, sample.seq)
bootStructList.append(bootStruct)
return bootStructList
def bootstrap(self, bootstrap_num, proc):
with Pool(processes=proc) as pool:
# build seq dict for bootstrapping
boot_sampling_dict = defaultdict(list)
for seq in self.sequences:
boot_sampling_dict[seq.function].append(seq)
boot_jobs = []
for x in range(proc):
if (x == 0):
boot_jobs.append((bootstrap_num // proc + bootstrap_num % proc, boot_sampling_dict))
else:
boot_jobs.append((bootstrap_num // proc, boot_sampling_dict))
boot_results = pool.starmap(self.bootstrap_sample, boot_jobs)
self.bootstrapList = []
for x in boot_results:
self.bootstrapList += x
def permInfo(self, method, proc, inverse=False):
"""
Calculate functional information statistics of permuted datasets.
Args:
method (:obj:`str`): Entropy estimation method. Either NSB or Miller-Maddow.
proc (:obj:`int`): Number of concurrent processes to run.
Return:
perm_dist (:class:`FunctionLogoDist`): Discrete distribution of
functional information estimated from permuted datasets.
"""
bp_info = []
bp_height = []
single_info = []
single_height = []
with Pool(processes=proc) as pool:
if (len(self.permutationList) < proc):
chunk = 1
else:
chunk = len(self.permutationList) // proc
if (not inverse):
if (method == "NSB"):
perm_info_results = pool.map(self.perm_info_calc_NSB, self.permutationList, chunk)
else:
perm_info_results = pool.map(self.perm_info_calc_MM, self.permutationList, chunk)
else:
if (method == "NSB"):
perm_info_results = pool.map(self.perm_info_calc_inverse_NSB, self.permutationList, chunk)
else:
perm_info_results = pool.map(self.perm_info_calc_inverse_MM, self.permutationList, chunk)
for perm in perm_info_results:
bp_info.extend(perm[0])
single_info.extend(perm[1])
bp_height.extend(perm[2])
single_height.extend(perm[3])
perm_dist = FunctionLogoDist()
perm_dist.weighted_dist((bp_info, bp_height), (single_info, single_height))
return perm_dist
def perm_info_calc_MM(self, x):
total_info_bp = []
height_info_bp = []
total_info_ss = []
height_info_ss = []
info, height_dict = x.calculate_entropy_MM()
for coord in sorted(self.basepairs, key=itemgetter(0)):
if (coord in info):
for pairtype in sorted(info[coord]):
total_info_bp.append(info[coord][pairtype])
for aainfo in sorted(height_dict[coord][pairtype].items(), key=itemgetter(1), reverse=True):
height_info_bp.append(aainfo[1] * info[coord][pairtype])
for coord in range(self.pos):
if (coord in info):
for base in sorted(info[coord]):
total_info_ss.append(info[coord][base])
for aainfo in sorted(height_dict[coord][base].items(), key=itemgetter(1), reverse=True):
height_info_ss.append(aainfo[1] * info[coord][base])
return (total_info_bp, total_info_ss, height_info_bp, height_info_ss)
def perm_info_calc_inverse_MM(self, x):
total_info_bp = []
height_info_bp = []
total_info_ss = []
height_info_ss = []
info, height_dict = x.calculate_entropy_inverse_MM()
for coord in sorted(self.basepairs, key=itemgetter(0)):
if (coord in info):
for pairtype in sorted(info[coord]):
total_info_bp.append(info[coord][pairtype])
for aainfo in sorted(height_dict[coord][pairtype].items(), key=itemgetter(1), reverse=True):
height_info_bp.append(aainfo[1] * info[coord][pairtype])
for coord in range(self.pos):
if (coord in info):
for base in sorted(info[coord]):
total_info_ss.append(info[coord][base])
for aainfo in sorted(height_dict[coord][base].items(), key=itemgetter(1), reverse=True):
height_info_ss.append(aainfo[1] * info[coord][base])
return (total_info_bp, total_info_ss, height_info_bp, height_info_ss)
def perm_info_calc_inverse_NSB(self, x):
total_info_bp = []
height_info_bp = []
total_info_ss = []
height_info_ss = []
info, height_dict = x.calculate_entropy_inverse_NSB()
for coord in sorted(self.basepairs, key=itemgetter(0)):
if (coord in info):
for pairtype in sorted(info[coord]):
total_info_bp.append(info[coord][pairtype])
for aainfo in sorted(height_dict[coord][pairtype].items(), key=itemgetter(1), reverse=True):
height_info_bp.append(aainfo[1] * info[coord][pairtype])
for coord in range(self.pos):
if (coord in info):
for base in sorted(info[coord]):
total_info_ss.append(info[coord][base])
for aainfo in sorted(height_dict[coord][base].items(), key=itemgetter(1), reverse=True):
height_info_ss.append(aainfo[1] * info[coord][base])
return (total_info_bp, total_info_ss, height_info_bp, height_info_ss)
def perm_info_calc_NSB(self, x):
total_info_bp = []
height_info_bp = []
total_info_ss = []
height_info_ss = []
info, height_dict = x.calculate_entropy_NSB()
for coord in sorted(self.basepairs, key=itemgetter(0)):
if (coord in info):
for pairtype in sorted(info[coord]):
total_info_bp.append(info[coord][pairtype])
for aainfo in sorted(height_dict[coord][pairtype].items(), key=itemgetter(1), reverse=True):
height_info_bp.append(aainfo[1] * info[coord][pairtype])
for coord in range(self.pos):
if (coord in info):
for base in sorted(info[coord]):
total_info_ss.append(info[coord][base])
for aainfo in sorted(height_dict[coord][base].items(), key=itemgetter(1), reverse=True):
height_info_ss.append(aainfo[1] * info[coord][base])
return (total_info_bp, total_info_ss, height_info_bp, height_info_ss)
def calculate_exact(self, n, proc, inverse=False):
"""
Exact method of small sample size correction.
Calculate the exact method of sample size correction for up to N samples.
The computationally intensive portion of the calculation is implemented as a C
extension. The method is fully described in Schneider et al. 1986.
This calculation is polynomial in sample size. It becomes prohibitively
expensive to calculate beyond a sample size of 16. The correction
factor of each sample size will be calculated in parallel up to
:obj:`proc` at a time.
Args:
n (:obj:`int`): Calculate correction up to this sample size.
proc (:obj:`int`): Number of concurrent processes to run.
inverse (:obj:`bool`): If True, calculate the sample size correction
for anti-determinates.
"""
exact_list = []
exact_results = []
if (inverse):
inverse_functions = Counter()
for aa_class in self.functions:
inverse_functions[aa_class] = sum(self.functions.values()) / self.functions[aa_class]
p = [x / sum(list(inverse_functions.values())) for x in inverse_functions.values()]
for i in range(1, n + 1):
exact_list.append((i, p, len(self.functions.values())))
with Pool(processes=proc) as pool:
exact_results = pool.starmap(self.exact_run, exact_list)
for x in exact_results:
self.inverse_exact.append(x[1])
else:
p = [x / sum(list(self.functions.values())) for x in self.functions.values()]
for i in range(1, n + 1):
exact_list.append((i, p, len(self.functions.values())))
with Pool(processes=proc) as pool:
exact_results = pool.starmap(self.exact_run, exact_list)
for x in exact_results:
self.exact.append(x[1])
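# Usage sketch (hedged): assuming a populated logo object `logo` exposing the
# `functions` Counter and the C-extension-backed `exact_run` used above,
#
#     logo.calculate_exact(n=12, proc=4)                # determinates
#     logo.calculate_exact(n=12, proc=4, inverse=True)  # anti-determinates
#     # logo.exact[k - 1] then holds the expected background entropy for a
#     # sample of size k, consumed by the MM/NSB estimators below.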
def calculate_entropy_MM(self):
"""
Calculate functional information using the Miller-Maddow estimator.
"""
info = defaultdict(lambda: defaultdict(float))
height_dict = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))
functions_array = np.array(list(self.functions.values()))
bg_entropy = -np.sum(
(functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()) * np.log2(
functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()))
for pairs in self.basepairs:
for state in self.pairs:
state_counts = self.get(pairs, state)
if (sum(state_counts.values()) == 0):
continue
nsb_array = np.array(list(state_counts.values()) + [0] * (len(self.functions) - len(state_counts)))
fg_entropy = -np.sum((nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()) * np.log2(
nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()))
if (sum(state_counts.values()) <= len(self.exact)):
expected_bg_entropy = self.exact[sum(state_counts.values()) - 1]
else:
expected_bg_entropy = self.approx_expect(bg_entropy, len(self.functions),
sum(state_counts.values()))
if (expected_bg_entropy - fg_entropy < 0):
info[pairs][state] = 0
else:
info[pairs][state] = expected_bg_entropy - fg_entropy
height_class = {}
for aa_class in state_counts:
height_class[aa_class] = (state_counts[aa_class] / sum(state_counts.values())) / (
self.functions[aa_class] / len(self))
for aa_class in height_class:
height_dict[pairs][state][aa_class] = height_class[aa_class] / sum(height_class.values())
for singles in range(self.pos):
for state in self.singles:
state_counts = self.get([singles], state)
if (sum(state_counts.values()) == 0):
continue
nsb_array = np.array(list(state_counts.values()) + [0] * (len(self.functions) - len(state_counts)))
fg_entropy = -np.sum((nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()) * np.log2(
nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()))
if (sum(state_counts.values()) <= len(self.exact)):
expected_bg_entropy = self.exact[sum(state_counts.values()) - 1]
else:
expected_bg_entropy = self.approx_expect(bg_entropy, len(self.functions),
sum(state_counts.values()))
if (expected_bg_entropy - fg_entropy < 0):
info[singles][state] = 0
else:
info[singles][state] = expected_bg_entropy - fg_entropy
height_class = {}
for aa_class in state_counts:
height_class[aa_class] = (state_counts[aa_class] / sum(state_counts.values())) / (
self.functions[aa_class] / len(self))
for aa_class in height_class:
height_dict[singles][state][aa_class] = height_class[aa_class] / sum(height_class.values())
return (info, height_dict)
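# Worked sketch of the plug-in entropy used above (hedged; toy counts, not
# from any alignment):
#
#     import numpy as np
#     counts = np.array([8, 1, 1])             # function-class counts at a site
#     p = counts[counts != 0] / counts[counts != 0].sum()
#     fg_entropy = -np.sum(p * np.log2(p))     # ~0.92 bits
#     # information = expected background entropy - fg_entropy, floored at 0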
def calculate_entropy_inverse_MM(self):
"""
Calculate functional information for anti-determinates using the Miller-Maddow estimator.
"""
info_inverse = defaultdict(lambda: defaultdict(float))
height_dict_inverse = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))
inverse_functions = Counter()
for aa_class in self.functions:
inverse_functions[aa_class] = sum(self.functions.values()) / self.functions[aa_class]
np_inverse_functions = np.array(list(inverse_functions.values()))
bg_entropy = -np.sum((np_inverse_functions[np_inverse_functions != 0] / np_inverse_functions[
np_inverse_functions != 0].sum()) * np.log2(
np_inverse_functions[np_inverse_functions != 0] / np_inverse_functions[np_inverse_functions != 0].sum()))
for pairs in self.basepairs:
for state in self.pairs:
state_counts = self.get(pairs, state)
if (sum(state_counts.values()) == 0):
continue
if (len(state_counts) != len(self.functions)):
for function in self.functions:
state_counts[function] += 1
inverse_state_counts = Counter()
for aa_class in state_counts:
inverse_state_counts[aa_class] = sum(state_counts.values()) / state_counts[aa_class]
nsb_array = np.array(list(inverse_state_counts.values()))
fg_entropy = -np.sum((nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()) * np.log2(
nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()))
if (sum(state_counts.values()) <= len(self.inverse_exact)):
expected_bg_entropy = self.inverse_exact[sum(state_counts.values()) - 1]
else:
expected_bg_entropy = self.approx_expect(bg_entropy, len(self.functions),
sum(state_counts.values()))
if (expected_bg_entropy - fg_entropy < 0):
info_inverse[pairs][state] = 0
else:
info_inverse[pairs][state] = expected_bg_entropy - fg_entropy
height_class = {}
for aa_class in inverse_state_counts:
height_class[aa_class] = (inverse_state_counts[aa_class] / sum(inverse_state_counts.values())) / (
inverse_functions[aa_class] / sum(inverse_functions.values()))
for aa_class in height_class:
height_dict_inverse[pairs][state][aa_class] = height_class[aa_class] / sum(height_class.values())
for singles in range(self.pos):
for state in self.singles:
state_counts = self.get([singles], state)
if (sum(state_counts.values()) == 0):
continue
if (len(state_counts) != len(self.functions)):
for function in self.functions:
state_counts[function] += 1
inverse_state_counts = Counter()
for aa_class in state_counts:
inverse_state_counts[aa_class] = sum(state_counts.values()) / state_counts[aa_class]
nsb_array = np.array(list(inverse_state_counts.values()))
fg_entropy = -np.sum((nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()) * np.log2(
nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()))
if (sum(state_counts.values()) <= len(self.inverse_exact)):
expected_bg_entropy = self.inverse_exact[sum(state_counts.values()) - 1]
else:
expected_bg_entropy = self.approx_expect(bg_entropy, len(self.functions),
sum(state_counts.values()))
if (expected_bg_entropy - fg_entropy < 0):
info_inverse[singles][state] = 0
else:
info_inverse[singles][state] = expected_bg_entropy - fg_entropy
height_class = {}
for aa_class in inverse_state_counts:
height_class[aa_class] = (inverse_state_counts[aa_class] / sum(inverse_state_counts.values())) / (
inverse_functions[aa_class] / sum(inverse_functions.values()))
for aa_class in height_class:
height_dict_inverse[singles][state][aa_class] = height_class[aa_class] / sum(height_class.values())
return (info_inverse, height_dict_inverse)
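# Inverse-weighting sketch (hedged): anti-determinate counts invert the
# observed class frequencies so that rare classes dominate, e.g.
#
#     from collections import Counter
#     functions = Counter({"A": 8, "L": 2})
#     inverse = {c: sum(functions.values()) / functions[c] for c in functions}
#     # inverse == {"A": 1.25, "L": 5.0}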
def calculate_entropy_inverse_NSB(self):
"""
Calculate functional information for anti-determinates using the NSB estimator.
"""
info_inverse = defaultdict(lambda: defaultdict(float))
height_dict_inverse = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))
inverse_functions = Counter()
for aa_class in self.functions:
inverse_functions[aa_class] = sum(self.functions.values()) / self.functions[aa_class]
np_inverse_functions = np.array(list(inverse_functions.values()))
bg_entropy = -np.sum((np_inverse_functions[np_inverse_functions != 0] / np_inverse_functions[
np_inverse_functions != 0].sum()) * np.log2(
np_inverse_functions[np_inverse_functions != 0] / np_inverse_functions[np_inverse_functions != 0].sum()))
for pairs in self.basepairs:
for state in self.pairs:
state_counts = self.get(pairs, state)
if (sum(state_counts.values()) == 0):
continue
if (len(state_counts) != len(self.functions)):
for function in self.functions:
state_counts[function] += 1
inverse_state_counts = Counter()
for aa_class in state_counts:
inverse_state_counts[aa_class] = sum(state_counts.values()) / state_counts[aa_class]
nsb_array = np.array(list(inverse_state_counts.values()))
if (sum(state_counts.values()) <= len(self.inverse_exact)):
expected_bg_entropy = self.inverse_exact[sum(state_counts.values()) - 1]
fg_entropy = -np.sum((nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()) * np.log2(
nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()))
else:
expected_bg_entropy = bg_entropy
fg_entropy = nb.S(nb.make_nxkx(nsb_array, nsb_array.size), nsb_array.sum(), nsb_array.size)
if (expected_bg_entropy - fg_entropy < 0):
info_inverse[pairs][state] = 0
else:
info_inverse[pairs][state] = expected_bg_entropy - fg_entropy
height_class = {}
for aa_class in inverse_state_counts:
height_class[aa_class] = (inverse_state_counts[aa_class] / sum(inverse_state_counts.values())) / (
inverse_functions[aa_class] / sum(inverse_functions.values()))
for aa_class in height_class:
height_dict_inverse[pairs][state][aa_class] = height_class[aa_class] / sum(height_class.values())
for singles in range(self.pos):
for state in self.singles:
state_counts = self.get([singles], state)
if (sum(state_counts.values()) == 0):
continue
if (len(state_counts) != len(self.functions)):
for function in self.functions:
state_counts[function] += 1
inverse_state_counts = Counter()
for aa_class in state_counts:
inverse_state_counts[aa_class] = sum(state_counts.values()) / state_counts[aa_class]
nsb_array = np.array(list(inverse_state_counts.values()))
if (sum(state_counts.values()) <= len(self.inverse_exact)):
expected_bg_entropy = self.inverse_exact[sum(state_counts.values()) - 1]
fg_entropy = -np.sum((nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()) * np.log2(
nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()))
else:
expected_bg_entropy = bg_entropy
fg_entropy = nb.S(nb.make_nxkx(nsb_array, nsb_array.size), nsb_array.sum(), nsb_array.size)
if (expected_bg_entropy - fg_entropy < 0):
info_inverse[singles][state] = 0
else:
info_inverse[singles][state] = expected_bg_entropy - fg_entropy
height_class = {}
for aa_class in inverse_state_counts:
height_class[aa_class] = (inverse_state_counts[aa_class] / sum(inverse_state_counts.values())) / (
inverse_functions[aa_class] / sum(inverse_functions.values()))
for aa_class in height_class:
height_dict_inverse[singles][state][aa_class] = height_class[aa_class] / sum(height_class.values())
return (info_inverse, height_dict_inverse)
def calculate_entropy_NSB(self):
"""
Calculate functional information using the NSB estimator.
"""
info = defaultdict(lambda: defaultdict(float))
height_dict = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))
functions_array = np.array(list(self.functions.values()))
bg_entropy = -np.sum(
(functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()) * np.log2(
functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()))
for pairs in self.basepairs:
for state in self.pairs:
state_counts = self.get(pairs, state)
if (sum(state_counts.values()) == 0):
continue
nsb_array = np.array(list(state_counts.values()) + [0] * (len(self.functions) - len(state_counts)))
if (sum(state_counts.values()) <= len(self.exact)):
expected_bg_entropy = self.exact[sum(state_counts.values()) - 1]
fg_entropy = -np.sum((nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()) * np.log2(
nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()))
else:
expected_bg_entropy = bg_entropy
fg_entropy = nb.S(nb.make_nxkx(nsb_array, nsb_array.size), nsb_array.sum(), nsb_array.size)
if (expected_bg_entropy - fg_entropy < 0):
info[pairs][state] = 0
else:
info[pairs][state] = expected_bg_entropy - fg_entropy
height_class = {}
for aa_class in state_counts:
height_class[aa_class] = (state_counts[aa_class] / sum(state_counts.values())) / (
self.functions[aa_class] / len(self))
for aa_class in height_class:
height_dict[pairs][state][aa_class] = height_class[aa_class] / sum(height_class.values())
for singles in range(self.pos):
for state in self.singles:
state_counts = self.get([singles], state)
if (sum(state_counts.values()) == 0):
continue
nsb_array = np.array(list(state_counts.values()) + [0] * (len(self.functions) - len(state_counts)))
if (sum(state_counts.values()) <= len(self.exact)):
expected_bg_entropy = self.exact[sum(state_counts.values()) - 1]
fg_entropy = -np.sum((nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()) * np.log2(
nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()))
else:
expected_bg_entropy = bg_entropy
fg_entropy = nb.S(nb.make_nxkx(nsb_array, nsb_array.size), nsb_array.sum(), nsb_array.size)
if (expected_bg_entropy - fg_entropy < 0):
info[singles][state] = 0
else:
info[singles][state] = expected_bg_entropy - fg_entropy
height_class = {}
for aa_class in state_counts:
height_class[aa_class] = (state_counts[aa_class] / sum(state_counts.values())) / (
self.functions[aa_class] / len(self))
for aa_class in height_class:
height_dict[singles][state][aa_class] = height_class[aa_class] / sum(height_class.values())
return (info, height_dict)
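# NSB sketch (hedged): for sample sizes beyond the precomputed exact table,
# the foreground entropy above is estimated with the Nemenman-Shafee-Bialek
# estimator from the `nb` module,
#
#     fg_entropy = nb.S(nb.make_nxkx(nsb_array, nsb_array.size),
#                       nsb_array.sum(), nsb_array.size)
#
# while the background term falls back to the uncorrected plug-in entropy.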
def is_overlap(self, position):
pass
def add_sequence(self, function, seq):
self.sequences.append(Seq(function, seq))
self.functions[function] += 1
self.pos = len(seq)
self.singles.update(seq)
for x in self.basepairs:
self.pairs.add(seq[x[0]] + seq[x[1]])
def get(self, position, state):
ret_counter = Counter()
if (len(position) == 1):
for x in self.sequences:
if (x.seq[position[0]] == state[0]):
ret_counter[x.function] += 1
if (len(position) == 2):
for x in self.sequences:
if (x.seq[position[0]] == state[0] and x.seq[position[1]] == state[1]):
ret_counter[x.function] += 1
return ret_counter
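# Usage sketch for get() (hedged; hypothetical sequences, assuming
# logo.basepairs == [(0, 2)]):
#
#     logo.add_sequence("A", "GCGU")
#     logo.add_sequence("A", "GCGA")
#     logo.add_sequence("L", "GAGU")
#     logo.get([0], "G")      # Counter({'A': 2, 'L': 1})
#     logo.get((0, 2), "GG")  # functions of sequences with G at both 0 and 2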
def get_functions(self):
function_list = []
for key, val in self.functions.items():
function_list.extend([key] * val)
return function_list
def __len__(self):
return len(self.sequences)
class FunctionLogoDifference:
"""
Calculates Kullback-Leibler divergence and information difference as two
visualization methods for contrasting sequence and function logos between
two taxa, and provides methods for text output to be used for bubble plot
visualization.
"""
def __init__(self, pos, functions, pairs, basepairs, singles):
self.pos = pos
self.pairs = pairs
self.singles = singles
self.functions = functions
self.basepairs = basepairs
# _______________________ ID logo Calculations ___________________________________________________
def calculate_logoID_infos(self, info_b, info_f, features):
"""
Calculate information for the ID logo (the information difference between foreground and background).
"""
id_info = defaultdict(lambda: defaultdict(float))
if features == "singles" or features == "both":
for k in range(self.pos):
logo_b = info_b[k]
logo_f = info_f[k]
for c in self.singles:
id_info[k][c] = logo_f[c] - logo_b[c]
if id_info[k][c] < 0:
id_info[k][c] = 0
if features == "pairs" or features == "both":
for k in self.basepairs:
logo_b = info_b[k]
logo_f = info_f[k]
for c in self.pairs:
id_info[k][c] = logo_f[c] - logo_b[c]
if id_info[k][c] < 0:
id_info[k][c] = 0
return id_info
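# ID sketch (hedged): per feature, the information difference is the
# foreground information minus the background information, floored at zero:
#
#     info_f, info_b = 1.3, 0.4            # hypothetical bits for one feature
#     id_bits = max(info_f - info_b, 0.0)  # 0.9 bits of functional "gain"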
def calculate_logoID_heights(self, info, ratios, features):
"""
Calculate the height of each symbol within a stack for the ID logo.
"""
id_height = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))
# compute each symbol's raw height; stacks with zero information get zero heights
if features == "singles" or features == "both":
for single in range(self.pos):
for state in self.singles:
for p in self.functions: # back_self.post[single][state]:
if info[single][state] == 0:
id_height[single][state][p] = 0
else:
id_height[single][state][p] = info[single][state] * ratios[single][state][p] / sum(
ratios[single][state].values())
if features == "pairs" or features == "both":
for pair in self.basepairs:
for state in self.pairs:
for p in self.functions:
if info[pair][state] == 0:
id_height[pair][state][p] = 0
else:
id_height[pair][state][p] = info[pair][state] * ratios[pair][state][p] / sum(
ratios[pair][state].values())
# adding zero to all the functions that do not exist within a stack
for pair in self.basepairs:
for state in self.pairs:
for t in self.functions:
if t not in id_height[pair][state]:
id_height[pair][state][t] = 0
if features == "singles" or features == "both":
for single in range(self.pos):
for state in self.singles:
for t in self.functions:
if t not in id_height[single][state]:
id_height[single][state][t] = 0
for single in range(self.pos):
for state in self.singles:
mysum = sum(id_height[single][state].values())
for p in id_height[single][state]:
if mysum != 0:
id_height[single][state][p] = id_height[single][state][p] / mysum
if features == "pairs" or features == "both":
for pair in self.basepairs:
for state in self.pairs:
mysum = sum(id_height[pair][state].values())
for p in id_height[pair][state]:
if mysum != 0:
id_height[pair][state][p] = id_height[pair][state][p] / mysum
return id_height
# __________________________________________________________________________
def calculate_prob_dist_pseudocounts(self, logo_dict1, logo_dict2, features):
"""
Calculate the posterior probability p(y|x) of each symbol for each feature using pseudocounts.
"""
kld_post_dist = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))
kld_prior_dist = defaultdict(float)
functions_array = np.array(list(logo_dict1.functions.values()))
for p in logo_dict1.functions:
kld_prior_dist[p] = logo_dict1.functions[p] / functions_array[functions_array != 0].sum()
# calculating the post of background/foreground
if features == "singles" or features == "both":
for single in range(self.pos):
for state in self.singles:
state_counts1 = logo_dict1.get([single], state)
state_counts2 = logo_dict2.get([single], state)
state_counts = logo_dict1.get([single], state)
if len(state_counts1.keys()) < 21 or len(
state_counts2.keys()) < 21:
for t in self.functions:
if t not in state_counts:
state_counts[t] = 1
else:
state_counts[t] += 1
for p in self.functions:
kld_post_dist[single][state][p] = state_counts[p] / (sum(state_counts.values()))
if features == "pairs" or features == "both":
for pair in self.basepairs:
for state in self.pairs:
state_counts1 = logo_dict1.get(pair, state)
state_counts2 = logo_dict2.get(pair, state)
state_counts = logo_dict1.get(pair, state)
if len(state_counts1.keys()) < 21 or len(
state_counts2.keys()) < 21:
for t in self.functions:
if t not in state_counts:
state_counts[t] = 1
else:
state_counts[t] += 1
for p in self.functions:
kld_post_dist[pair][state][p] = state_counts[p] / (sum(state_counts.values()))
return kld_post_dist, kld_prior_dist
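# Pseudocount sketch (hedged): when either taxon observes fewer than the 21
# possible classes at a feature, every class count is incremented by one
# before normalizing (the separate state_counts fetch above returns a fresh
# Counter, so the raw counts are never mutated), e.g.
#
#     from collections import Counter
#     counts = Counter({"A": 3, "L": 1})
#     for t in ("A", "L", "M"):   # hypothetical function alphabet
#         counts[t] += 1          # Counter yields 1 for unseen classes
#     post = {t: counts[t] / sum(counts.values()) for t in counts}
#     # post == {"A": 4/7, "L": 2/7, "M": 1/7}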
def calculate_prob_dist_nopseudocounts(self, logo_dict, features):
"""
Calculate the posterior probability p(y|x) of each symbol within a stack for the KLD logo without pseudocounts.
"""
kld_post_dist = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))
if features == "singles" or features == "both":
for single in range(self.pos):
for state in self.singles:
state_counts = logo_dict.get([single], state)
for p in self.functions:
if sum(state_counts.values()) == 0:
kld_post_dist[single][state][p] = 0
else:
kld_post_dist[single][state][p] = state_counts[p] / (sum(state_counts.values()))
if features == "pairs" or features == "both":
for pair in self.basepairs:
for state in self.pairs:
state_counts = logo_dict.get(pair, state)
for p in self.functions:
if sum(state_counts.values()) == 0:
kld_post_dist[pair][state][p] = 0
else:
kld_post_dist[pair][state][p] = state_counts[p] / (sum(state_counts.values()))
return kld_post_dist
def calculate_ratios(self, back_prior, fore_prior, back_post, nopseudo_post_fore, features):
"""
Calculates the ratios of symbols within each stack of the ID and KLD logos.
"""
ratios = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))
if features == "singles" or features == "both":
for single in range(self.pos):
for state in self.singles:
for p in self.functions:
ratios[single][state][p] = (nopseudo_post_fore[single][state][p] / fore_prior[p]) / (
back_post[single][state][p] / back_prior[p])
if features == "pairs" or features == "both":
for pair in self.basepairs:
for state in self.pairs:
for p in self.functions:
ratios[pair][state][p] = (nopseudo_post_fore[pair][state][p] / fore_prior[p]) / (
back_post[pair][state][p] / back_prior[p])
return ratios
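# Ratio sketch (hedged): each letter's share of a stack follows its relative
# enrichment of posterior over prior, foreground against background:
#
#     ratio(y|x) = (P_fore(y|x) / P_fore(y)) / (P_back(y|x) / P_back(y))
#
# The ratios are normalized within each stack by calculate_kld and
# calculate_logoID_heights.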
def calculate_kld(self, logo_dict, key_back, key_fore, back_prior, fore_prior, back_post, fore_post, ratios, features):
"""
Calculate the height of each symbol within a stack for the KLD logo.
The height of the individual letters in a stack is proportional to this ratio.
"""
kld_prior = 0
# kld_dic: Dictionary for keeping the height of each stack in kld logo
kld_dic = defaultdict(lambda: defaultdict(float))
for t in self.functions:
kld_prior += fore_prior[t] * np.log2(fore_prior[t] / back_prior[t])
if features == "singles" or features == "both":
for single in range(self.pos):
for state in self.singles:
state_counts_back = logo_dict[key_back].get([single], state)
state_counts_fore = logo_dict[key_fore].get([single], state)
if sum(state_counts_back.values()) == 0: # < 6:
kld_dic[single][state] = 0
continue
if sum(state_counts_fore.values()) == 0:
kld_dic[single][state] = 0
continue
for p in self.functions:
kld_dic[single][state] += fore_post[single][state][p] * np.log2(
fore_post[single][state][p] / back_post[single][state][p])
if features == "pairs" or features == "both":
for pair in self.basepairs:
for state in self.pairs:
state_counts_back = logo_dict[key_back].get(pair, state)
state_counts_fore = logo_dict[key_fore].get(pair, state)
if sum(state_counts_back.values()) == 0: # < 6:
kld_dic[pair][state] = 0
continue
if sum(state_counts_fore.values()) == 0:
kld_dic[pair][state] = 0
continue
for p in self.functions:
kld_dic[pair][state] += fore_post[pair][state][p] * np.log2(
fore_post[pair][state][p] / back_post[pair][state][p])
# kld_heights: a dictionary for the height of each symbol within a stack for kld-logo.
kld_heights = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))
if features == "singles" or features == "both":
for single in range(self.pos):
for state in self.singles:
for p in self.functions: # back_post[single][state]:
if kld_dic[single][state] == 0:
kld_heights[single][state][p] = 0
else:
kld_heights[single][state][p] = kld_dic[single][state] * ratios[single][state][p] / sum(
ratios[single][state].values())
if features == "pairs" or features == "both":
for pair in self.basepairs:
for state in self.pairs:
for p in self.functions:
if kld_dic[pair][state] == 0:
kld_heights[pair][state][p] = 0
else:
kld_heights[pair][state][p] = kld_dic[pair][state] * ratios[pair][state][p] / sum(
ratios[pair][state].values())
if features == "singles" or features == "both":
for single in range(self.pos):
for state in self.singles:
mysum = sum(kld_heights[single][state].values())
for p in kld_heights[single][state]:
if mysum != 0:
kld_heights[single][state][p] = kld_heights[single][state][p] / mysum
if features == "pairs" or features == "both":
for pair in self.basepairs:
for state in self.pairs:
mysum = sum(kld_heights[pair][state].values())
for p in kld_heights[pair][state]:
if mysum != 0:
kld_heights[pair][state][p] = kld_heights[pair][state][p] / mysum
return kld_dic, kld_heights
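# KLD sketch (hedged): for each feature x the stack height is the
# Kullback-Leibler divergence of the foreground posterior from the background
# posterior,
#
#     KLD(x) = sum_y P_fore(y|x) * log2(P_fore(y|x) / P_back(y|x))
#
# and each function y receives the share ratios[x][y] / sum(ratios[x].values())
# of that height, renormalized so letter heights within a stack sum to one.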
def func_ID_KLD_2table(self, fore_logo_info,
fore_logo_height,
fore_idlogo_info,
back_idlogo_info,
fore_idlogo_height,
back_idlogo_height,
kld_info,
kld_height, back, fore):
"""
Writes a text table for creating bubble plots. The table needs to be mapped to Sprinzl coordinates.
"""
tableDict = {}
nameSet = ["aa", "coord", "state", "fbits", "fht", "gainbits", "gainfht", "lossbits", "lossfht", "convbits",
"convfht", "x", "y", "sprinzl"]
functionlist = list(self.functions)
for name in nameSet:
tableDict[name] = np.zeros(len(functionlist) * self.pos * 4)
singles = [s for s in self.singles if "-" not in s]
tableDict['coord'] = [single for single in range(self.pos) for state in singles for t in self.functions]
tableDict['aa'] = [t for single in range(self.pos) for state in singles for t in self.functions]
tableDict['state'] = [state for single in range(self.pos) for state in singles for t in self.functions]
tableDict['fht'] = [fore_logo_height[single][state][t] for single in range(self.pos) for state in singles
for t in self.functions]
tableDict['gainfht'] = [fore_idlogo_height[single][state][t] for single in range(self.pos) for state in
singles for t in self.functions]
tableDict['convfht'] = [kld_height[single][state][t] for single in range(self.pos) for state in singles for
t in self.functions]
tableDict['lossfht'] = [back_idlogo_height[single][state][t] for single in range(self.pos) for state in
singles for t in self.functions]
tableDict['fbits'] = [fore_logo_info[single][state] for single in range(self.pos) for state in singles
for t in self.functions]
tableDict['gainbits'] = [fore_idlogo_info[single][state] for single in range(self.pos) for state in singles
for t in self.functions]
tableDict['lossbits'] = [back_idlogo_info[single][state] for single in range(self.pos) for state in singles
for t in self.functions]
tableDict['convbits'] = [kld_info[single][state] for single in range(self.pos) for state in singles for t
in self.functions]
pandasTable = pd.DataFrame(tableDict)
roundcols = ["fbits", "fht", "gainbits", "gainfht", "lossbits", "lossfht", "convbits",
"convfht"]
pandasTable[roundcols] = pandasTable[roundcols].round(4)
pandasTable['coord'] = pandasTable['coord'] + 1
filename = "F_" + fore + "_B_" + back + "_Table.txt"
pandasTable.to_csv(filename, index=False, sep='\t')
# _______________________ KLD/ID logo significance calculations ___________________________________________________
def calculate_kld_significance(self, logo_dict, kld_infos, permute_num, proc, pmethod, exceedances, targetperms,
peaks, alpha, features):
pvalue = {}
CI_lower = {}
CI_upper = {}
permnum = {}
ptype = {}
gpd_shape = {}
gpd_scale = {}
gpd_exceedances_size = {}
gpd_ADtest = {}
b_freq_table = {}
f_freq_table = {}
start_single = 0
start_pair = 0
end_single = 0
kld = {}
for key in kld_infos.keys():
kld[key] = defaultdict(defaultdict)
pvalue[key] = defaultdict(lambda: defaultdict(float))
CI_lower[key] = defaultdict(lambda: defaultdict(float))
CI_upper[key] = defaultdict(lambda: defaultdict(float))
permnum[key] = defaultdict(lambda: defaultdict(float))
ptype[key] = defaultdict(lambda: defaultdict(float))
b_freq_table[key] = defaultdict(lambda: defaultdict(float))
f_freq_table[key] = defaultdict(lambda: defaultdict(float))
gpd_shape[key] = defaultdict(lambda: defaultdict(float))
gpd_scale[key] = defaultdict(lambda: defaultdict(float))
gpd_exceedances_size[key] = defaultdict(lambda: defaultdict(float))
gpd_ADtest[key] = defaultdict(lambda: defaultdict(float))
for single in range(self.pos):
for state in kld_infos[key][single]:
kld[key][single][state] = kld_infos[key][single][state]
for pair in self.basepairs:
for basepair in kld_infos[key][pair]:
kld[key][pair][basepair] = kld_infos[key][pair][basepair]
with Pool(processes=proc) as pool:
perm_jobs = []
for x in range(proc):
if x == 0:
end_pair = len(self.basepairs) // proc + len(self.basepairs) % proc
end_single = self.pos // proc + self.pos % proc
perm_jobs.append((list(range(start_single, end_single)), permute_num, logo_dict, kld, start_pair,
end_pair, pmethod, exceedances, targetperms, peaks, alpha, features))
else:
start_pair = end_pair
end_pair = start_pair + len(self.basepairs) // proc
start_single = end_single
end_single = end_single + self.pos // proc
perm_jobs.append((list(range(start_single, end_single)), permute_num, logo_dict, kld, start_pair,
end_pair, pmethod, exceedances, targetperms, peaks, alpha, features))
significant_calc_outputs = pool.starmap(self.perm_kld_calc_pvalue, perm_jobs, 1)
for x in significant_calc_outputs:
for key in logo_dict.keys():
for single in x["pvalue"][key]:
for state in x["pvalue"][key][single]:
pvalue[key][single][state] = x["pvalue"][key][single][state]
CI_lower[key][single][state] = x["CI_lower"][key][single][state]
CI_upper[key][single][state] = x["CI_upper"][key][single][state]
permnum[key][single][state] = x["permnum"][key][single][state]
ptype[key][single][state] = x["ptype"][key][single][state]
b_freq_table[key][single][state] = x["bt"][key][single][state]
f_freq_table[key][single][state] = x["ft"][key][single][state]
gpd_shape[key][single][state] = x["shape"][key][single][state]
gpd_scale[key][single][state] = x["scale"][key][single][state]
gpd_exceedances_size[key][single][state] = x["excnum"][key][single][state]
gpd_ADtest[key][single][state] = x["ADtest"][key][single][state]
return pvalue, CI_lower, CI_upper, permnum, ptype, b_freq_table, f_freq_table, gpd_shape, gpd_scale, gpd_exceedances_size, gpd_ADtest
def perm_kld_calc_pvalue(self, positions, permute_num, logo_dic, kld_infos, start_pair, end_pair, pmethod,
exceedances, targetperms, peaks, alpha, features):
significant_calc_outputs = {}
pvalue = {}
CI_lower = {}
CI_upper = {}
permnum = {}
ptype = {}
gpd_shape = {}
gpd_scale = {}
gpd_exceedances_size = {}
gpd_ADtest = {}
b_freq_table = {}
f_freq_table = {}
pairwise_combinations = itertools.permutations(logo_dic.keys(), 2)
for pair in pairwise_combinations:
pvalue[pair[0]] = defaultdict(defaultdict)
CI_lower[pair[0]] = defaultdict(defaultdict)
CI_upper[pair[0]] = defaultdict(defaultdict)
permnum[pair[0]] = defaultdict(defaultdict)
ptype[pair[0]] = defaultdict(defaultdict)
b_freq_table[pair[0]] = defaultdict(defaultdict)
f_freq_table[pair[0]] = defaultdict(defaultdict)
gpd_shape[pair[0]] = defaultdict(defaultdict)
gpd_scale[pair[0]] = defaultdict(defaultdict)
gpd_exceedances_size[pair[0]] = defaultdict(defaultdict)
gpd_ADtest[pair[0]] = defaultdict(defaultdict)
if features == "singles" or features == "both":
for single in positions:
for state in self.singles:
state_counts_back = logo_dic[pair[0]].get([single], state)
state_counts_fore = logo_dic[pair[1]].get([single], state)
if sum(state_counts_back.values()) == 0 or sum(state_counts_fore.values()) == 0:
continue
(
pvalue[pair[0]][single][state],
CI_lower[pair[0]][single][state],
CI_upper[pair[0]][single][state],
permnum[pair[0]][single][state],
ptype[pair[0]][single][state],
b_freq_table[pair[0]][single][state],
f_freq_table[pair[0]][single][state],
gpd_shape[pair[0]][single][state],
gpd_scale[pair[0]][single][state],
gpd_exceedances_size[pair[0]][single][state],
gpd_ADtest[pair[0]][single][state],
) = self.calc_KLD_pvalue(permute_num, state_counts_back, state_counts_fore,
sum(state_counts_back.values()), kld_infos[pair[0]][single][state],
pmethod, exceedances, targetperms, peaks, alpha)
if features == "pairs" or features == "both":
for basepair in self.basepairs[start_pair:end_pair]:
for state in kld_infos[pair[0]][basepair]:
state_counts_back = logo_dic[pair[0]].get(basepair, state)
state_counts_fore = logo_dic[pair[1]].get(basepair, state)
if sum(state_counts_back.values()) == 0 or sum(state_counts_fore.values()) == 0:
continue
(
pvalue[pair[0]][basepair][state],
CI_lower[pair[0]][basepair][state],
CI_upper[pair[0]][basepair][state],
permnum[pair[0]][basepair][state],
ptype[pair[0]][basepair][state],
b_freq_table[pair[0]][basepair][state],
f_freq_table[pair[0]][basepair][state],
gpd_shape[pair[0]][basepair][state],
gpd_scale[pair[0]][basepair][state],
gpd_exceedances_size[pair[0]][basepair][state],
gpd_ADtest[pair[0]][basepair][state],
) = self.calc_KLD_pvalue(permute_num, state_counts_back, state_counts_fore,
sum(state_counts_back.values()), kld_infos[pair[0]][basepair][state],
pmethod, exceedances, targetperms, peaks, alpha)
significant_calc_outputs["pvalue"] = pvalue
significant_calc_outputs["CI_lower"] = CI_lower
significant_calc_outputs["CI_upper"] = CI_upper
significant_calc_outputs["permnum"] = permnum
significant_calc_outputs["ptype"] = ptype
significant_calc_outputs["bt"] = b_freq_table
significant_calc_outputs["ft"] = f_freq_table
significant_calc_outputs["shape"] = gpd_shape
significant_calc_outputs["scale"] = gpd_scale
significant_calc_outputs["excnum"] = gpd_exceedances_size
significant_calc_outputs["ADtest"] = gpd_ADtest
return significant_calc_outputs
def calc_KLD_pvalue(self, maxPerm, class_counts_b, class_counts_f, back_size, orig_kld, pmethod, exceedances,
targetperms, peaks, alpha):
if pmethod == "ECDF_pseudo":
perm_kld_values = self.calc_permvalues_kld(maxPerm, class_counts_b, class_counts_f, back_size)
return self.calc_pecdf_with_pseudo(perm_kld_values, orig_kld, class_counts_b, class_counts_f)
if pmethod == "ECDF":
return self.calc_pecdf_kld(maxPerm, class_counts_b, class_counts_f, back_size, orig_kld, exceedances, alpha)
if pmethod == "GPD":
return self.calc_pgpd_ecdf_kld(maxPerm, class_counts_b, class_counts_f, back_size, orig_kld, exceedances,
targetperms, peaks, alpha)
def calc_permvalues_kld(self, maxPerm, class_counts_b, class_counts_f, back_size):
aaclasslist = []
for aaclass in class_counts_b.keys():
aaclasslist.extend(aaclass * class_counts_b[aaclass])
for aaclass in class_counts_f.keys():
aaclasslist.extend(aaclass * class_counts_f[aaclass])
indices = []
permKLDs = []
for p in range(maxPerm):
indices.append(self.shuffled(aaclasslist))
for index in indices:
permKLD = 0
p_state_counts_back = Counter()
p_state_counts_fore = Counter()
for i, aaclass in enumerate(index):
if i < back_size:
p_state_counts_back[aaclass] += 1
else:
p_state_counts_fore[aaclass] += 1
if len(p_state_counts_back.keys()) < 21 or len(
p_state_counts_fore.keys()) < 21:
for t in self.functions:
if t not in p_state_counts_back:
p_state_counts_back[t] = 1
else:
p_state_counts_back[t] += 1
if t not in p_state_counts_fore:
p_state_counts_fore[t] = 1
else:
p_state_counts_fore[t] += 1
for p in self.functions:
kld_post_dist_back = p_state_counts_back[p] / (sum(p_state_counts_back.values()))
kld_post_dist_fore = p_state_counts_fore[p] / (sum(p_state_counts_fore.values()))
permKLD += kld_post_dist_fore * np.log2(
kld_post_dist_fore / kld_post_dist_back)
permKLDs.append(permKLD)
return permKLDs
def calc_pecdf_with_pseudo(self, perm_infos, point, class_counts_b, class_counts_f):
count = sum(i >= point for i in perm_infos)
P = (count + 1) / (len(perm_infos) + 1)
b_aaclasstable = ""
f_aaclasstable = ""
for letter, count in sorted(class_counts_b.items()):
b_aaclasstable += letter + str(count)
for letter, count in sorted(class_counts_f.items()):
f_aaclasstable += letter + str(count)
return P, None, None, len(
perm_infos), "p_ecdf_with_pseudo", b_aaclasstable, f_aaclasstable, None, None, None, None
def calc_pecdf_kld(self, maxPerm, class_counts_b, class_counts_f, back_size, orig_kld, exceedances, alpha):
b_aaclasstable = ""
f_aaclasstable = ""
aaclasslist = []
for letter, count in sorted(class_counts_b.items()):
b_aaclasstable += letter + str(count)
aaclasslist.extend(letter * count)
for letter, count in sorted(class_counts_f.items()):
f_aaclasstable += letter + str(count)
aaclasslist.extend(letter * count)
permKLDs = []
permcount = 0
exceedances_count = 0
while permcount < maxPerm:
permcount = permcount + 1
shuffled_aa = self.shuffled(aaclasslist)
p_state_counts_back = Counter()
p_state_counts_fore = Counter()
for (i, aaclass) in enumerate(shuffled_aa):
if i < back_size:
p_state_counts_back[aaclass] += 1
else:
p_state_counts_fore[aaclass] += 1
if len(p_state_counts_back.keys()) < 21 \
or len(p_state_counts_fore.keys()) < 21:
for t in self.functions:
if t not in p_state_counts_back:
p_state_counts_back[t] = 1
else:
p_state_counts_back[t] += 1
if t not in p_state_counts_fore:
p_state_counts_fore[t] = 1
else:
p_state_counts_fore[t] += 1
permKLD = 0
for p in self.functions:
kld_post_dist_back = p_state_counts_back[p] \
/ sum(p_state_counts_back.values())
kld_post_dist_fore = p_state_counts_fore[p] \
/ sum(p_state_counts_fore.values())
permKLD += kld_post_dist_fore * np.log2(kld_post_dist_fore
/ kld_post_dist_back)
permKLDs.append(permKLD)
if permKLD >= orig_kld:
exceedances_count = exceedances_count + 1
if exceedances_count >= exceedances:
P = exceedances_count / len(permKLDs)
P_CI = [norm.ppf(1 - alpha / 2, loc=0, scale=1) * np.sqrt(P * (1 - P) / permcount),
norm.ppf(1 - alpha / 2, loc=0, scale=1) * np.sqrt(P * (1 - P) / permcount)]
return P, P_CI[0], P_CI[
1], permcount, "p_ecdf", b_aaclasstable, f_aaclasstable, None, None, None, None
P = (exceedances_count + 1) / (len(permKLDs) + 1)
return P, None, None, permcount, "p_ecdf_with_pseudo", b_aaclasstable, f_aaclasstable, None, None, None, None
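# ECDF p-value sketch (hedged toy numbers, mirroring the logic above):
#
#     perm_stats = [0.2, 0.5, 0.9, 1.4]   # hypothetical permutation KLDs
#     orig = 1.0
#     exceed = sum(s >= orig for s in perm_stats)      # 1 exceedance
#     p_pseudo = (exceed + 1) / (len(perm_stats) + 1)  # add-one ECDF, 0.4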
def calc_pgpd_ecdf_kld(self, maxPerm, class_counts_b, class_counts_f, back_size, orig_kld, exceedances, targetperms,
peaks, alpha):
b_aaclasstable = ""
f_aaclasstable = ""
aaclasslist = []
for letter, count in sorted(class_counts_b.items()):
b_aaclasstable += letter + str(count)
aaclasslist.extend(letter * count)
for letter, count in sorted(class_counts_f.items()):
f_aaclasstable += letter + str(count)
aaclasslist.extend(letter * count)
permKLDs = []
permcount = 0
exceedances_count = 0
while permcount < maxPerm:
permcount = permcount + 1
shuffled_aa = self.shuffled(aaclasslist)
p_state_counts_back = Counter()
p_state_counts_fore = Counter()
for (i, aaclass) in enumerate(shuffled_aa):
if i < back_size:
p_state_counts_back[aaclass] += 1
else:
p_state_counts_fore[aaclass] += 1
if len(p_state_counts_back.keys()) < 21 \
or len(p_state_counts_fore.keys()) < 21:
for t in self.functions:
if t not in p_state_counts_back:
p_state_counts_back[t] = 1
else:
p_state_counts_back[t] += 1
if t not in p_state_counts_fore:
p_state_counts_fore[t] = 1
else:
p_state_counts_fore[t] += 1
permKLD = 0
for p in self.functions:
kld_post_dist_back = p_state_counts_back[p] \
/ sum(p_state_counts_back.values())
kld_post_dist_fore = p_state_counts_fore[p] \
/ sum(p_state_counts_fore.values())
permKLD += kld_post_dist_fore * np.log2(kld_post_dist_fore
/ kld_post_dist_back)
permKLDs.append(permKLD)
if permKLD >= orig_kld:
exceedances_count = exceedances_count + 1
if exceedances_count == exceedances:
P = exceedances_count / permcount
P_CI = [norm.ppf(1 - alpha / 2, loc=0, scale=1) * np.sqrt(P * (1 - P) / permcount),
norm.ppf(1 - alpha / 2, loc=0, scale=1) * np.sqrt(P * (1 - P) / permcount)]
return P, P_CI[0], P_CI[
1], permcount, "p_ecdf", b_aaclasstable, f_aaclasstable, None, None, None, None
else:
if permcount >= targetperms:
E = min(peaks, permcount // 3)
permKLDs_5p = list(map(lambda x: x ** 5, permKLDs))
threshold = (sorted(np.partition(permKLDs_5p, -(E + 1))[-(E + 1):])[0] +
sorted(np.partition(permKLDs_5p, -(E + 1))[-(E + 1):])[1]) / 2
permKLDs_5p_t = list(map(lambda x: x - threshold, permKLDs_5p))
warnings.filterwarnings("ignore")
fit_gpd = self.check_fit_gpd(np.partition(permKLDs_5p_t, -E)[-E:])
while fit_gpd is not True:
E = E - 10
if E < 10:
break
threshold = (sorted(np.partition(permKLDs_5p, -(E + 1))[-(E + 1):])[0] +
sorted(np.partition(permKLDs_5p, -(E + 1))[-(E + 1):])[1]) / 2
permKLDs_5p_t = list(map(lambda x: x - threshold, permKLDs_5p))
fit_gpd = self.check_fit_gpd(np.partition(permKLDs_5p_t, -E)[-E:])
if fit_gpd is True:
shape, loc, scale = genpareto.fit(np.partition(permKLDs_5p_t, -E)[-E:], floc=0)
gpd_pvalue = (1 - genpareto.cdf((orig_kld ** 5) - threshold, shape, loc, scale)) * E / permcount
if gpd_pvalue == 0:
targetperms = min(targetperms * 2, maxPerm)
if permcount == maxPerm:
P = (exceedances_count + 1) / (len(permKLDs) + 1)
return P, None, None, permcount, "p_ecdf_with_pseudo (p_gpd=0)", b_aaclasstable, f_aaclasstable, shape, scale, E, ad_test(
np.partition(permKLDs_5p_t, -E)[-E:],
genpareto(c=shape, scale=scale, loc=loc)).pvalue
continue
P_CI = self.calculate_gpd_CI(alpha, np.partition(permKLDs_5p_t, -E)[-E:], permcount, shape,
scale, (orig_kld ** 5) - threshold)
return gpd_pvalue, P_CI[0], P_CI[
1], permcount, "p_gpd", b_aaclasstable, f_aaclasstable, shape, scale, E, ad_test(
np.partition(permKLDs_5p_t, -E)[-E:], genpareto(c=shape, scale=scale, loc=loc)).pvalue
else:
targetperms = min(targetperms * 2, maxPerm)
P = (exceedances_count + 1) / (len(permKLDs) + 1)
return P, None, None, permcount, "p_ecdf_with_pseudo", b_aaclasstable, f_aaclasstable, None, None, None, None
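# GPD tail sketch (hedged): when too few permutation statistics exceed the
# observed KLD, the top E fifth-power-transformed permutation values (minus a
# threshold u) are fit with a generalized Pareto distribution, giving
#
#     p_gpd = (1 - genpareto.cdf(orig_kld**5 - u, shape, 0, scale)) * E / N
#
# for N permutations; the Anderson-Darling test (ad_test) vets the fit,
# shrinking E by 10 until the fit is accepted or E drops below 10.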
def calculate_gpd_CI(self, alpha, Zi, permcount, shape, scale, orig_stat):
MIF = self.calculate_FIM(Zi, shape, scale)
INV_MIF = np.linalg.pinv(MIF)
u, d, v = np.linalg.svd(INV_MIF, full_matrices=True)
d1 = d[0]
Xi01 = shape - norm.ppf(1 - np.sqrt(alpha) / 2, loc=0, scale=1) * np.sqrt(d1)
Xi02 = shape + norm.ppf(1 - np.sqrt(alpha) / 2, loc=0, scale=1) * np.sqrt(d1)
d2 = d[1]
sigma01 = scale - norm.ppf(1 - np.sqrt(alpha) / 2, loc=0, scale=1) * np.sqrt(d2)
sigma02 = scale + norm.ppf(1 - np.sqrt(alpha) / 2, loc=0, scale=1) * np.sqrt(d2)
xi1_sigma1 = np.matmul(v, [Xi01 - shape, sigma01 - scale]) + [shape, scale]
xi1 = xi1_sigma1[0]
sigma1 = xi1_sigma1[1]
xi2_sigma2 = np.matmul(v, [Xi02 - shape, sigma02 - scale]) + [shape, scale]
xi2 = xi2_sigma2[0]
sigma2 = xi2_sigma2[1]
Pr_CI = [
min((1 - genpareto.cdf(orig_stat, xi1, 0, sigma1)),
(1 - genpareto.cdf(orig_stat, xi2, 0, sigma1)),
(1 - genpareto.cdf(orig_stat, xi2, 0, sigma2)),
(1 - genpareto.cdf(orig_stat, xi1, 0, sigma2)))
,
max((1 - genpareto.cdf(orig_stat, xi1, 0, sigma1)),
(1 - genpareto.cdf(orig_stat, xi2, 0, sigma1)),
(1 - genpareto.cdf(orig_stat, xi2, 0, sigma2)),
(1 - genpareto.cdf(orig_stat, xi1, 0, sigma2)))
]
Pnr_CI = [
norm.ppf(1 - alpha / 2, loc=0, scale=1) * np.sqrt(
(len(Zi) / permcount) * (1 - len(Zi) / permcount) / permcount),
norm.ppf(1 - alpha / 2, loc=0, scale=1) * np.sqrt(
(len(Zi) / permcount) * (1 - len(Zi) / permcount) / permcount)
]
CI = [Pr_CI[0] * Pnr_CI[0], Pr_CI[1] * Pnr_CI[1]]
return CI
def calculate_FIM(self, Zi, shape, scale):
MIF11 = (2 / (shape ** 3)) * sum(np.log(1 + shape * (Zi[i] / scale)) for i in range(len(Zi))) - (
2 / (shape ** 2)) * sum(Zi[i] / (scale + shape * Zi[i]) for i in range(len(Zi))) - (
1 + (1 / shape)) * sum((Zi[i] / (scale + shape * Zi[i])) ** 2 for i in range(len(Zi)))
MIF22 = (len(Zi) / (shape * (scale ** 2))) - (1 + (1 / shape)) * sum(
(1 / (scale + shape * Zi[i])) ** 2 for i in range(len(Zi)))
MIF21_MIF12 = (len(Zi) / ((shape ** 2) * scale)) - (1 / (shape ** 2)) * sum(
1 / (scale + shape * Zi[i]) for i in range(len(Zi))) - (1 + (1 / shape)) * sum(
Zi[i] / ((scale + shape * Zi[i]) ** 2) for i in range(len(Zi)))
MIF = np.array([[-MIF11, -MIF21_MIF12], [-MIF21_MIF12, -MIF22]])
return MIF
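# FIM sketch (hedged): calculate_FIM returns the observed Fisher information
# of the GPD fit, i.e. the negated second derivatives of the log-likelihood
#
#     l(xi, sigma) = -n*log(sigma) - (1 + 1/xi) * sum_i log(1 + xi*z_i/sigma)
#
# with respect to shape xi and scale sigma; its pseudo-inverse supplies the
# variances used by calculate_gpd_CI.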
def check_fit_gpd(self, sample):
shape, loc, scale = genpareto.fit(sample, floc=0)
fit = False
if ad_test(sample, genpareto(c=shape, scale=scale, loc=loc)).pvalue > 0.05:
fit = True
return fit
def shuffled(self, items, pieces=2):
random.seed(int.from_bytes(os.urandom(4), byteorder='little'))
sublists = [[] for i in range(pieces)]
for x in items:
sublists[random.randint(0, pieces - 1)].append(x)
permutedList = []
for i in range(pieces):
time.sleep(0.01)
random.seed()
random.shuffle(sublists[i])
permutedList.extend(sublists[i])
return permutedList
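# Usage sketch for shuffled() (hedged): returns a random permutation of the
# pooled class labels; the first back_size labels are then re-binned as the
# permuted background, e.g.
#
#     labels = ["A"] * 3 + ["L"] * 2
#     perm = self.shuffled(labels)   # e.g. ['L', 'A', 'A', 'L', 'A']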
def calculate_id_significance(self, logo_dict, id_infos, permute_num, proc, max, entropy, pmethod, exceedances,
targetperms, peaks, alpha, features):
pvalue = {}
CI_lower = {}
CI_upper = {}
permnum = {}
ptype = {}
gpd_shape = {}
gpd_scale = {}
gpd_exceedances_size = {}
gpd_ADtest = {}
b_freq_table = {}
f_freq_table = {}
start_single = 0
start_pair = 0
end_single = 0
id = {}
for key in id_infos.keys():
id[key] = defaultdict(defaultdict)
pvalue[key] = defaultdict(lambda: defaultdict(float))
CI_lower[key] = defaultdict(lambda: defaultdict(float))
CI_upper[key] = defaultdict(lambda: defaultdict(float))
permnum[key] = defaultdict(lambda: defaultdict(float))
ptype[key] = defaultdict(lambda: defaultdict(float))
b_freq_table[key] = defaultdict(lambda: defaultdict(float))
f_freq_table[key] = defaultdict(lambda: defaultdict(float))
gpd_shape[key] = defaultdict(lambda: defaultdict(float))
gpd_scale[key] = defaultdict(lambda: defaultdict(float))
gpd_exceedances_size[key] = defaultdict(lambda: defaultdict(float))
gpd_ADtest[key] = defaultdict(lambda: defaultdict(float))
for single in range(self.pos):
for state in id_infos[key][single]:
id[key][single][state] = id_infos[key][single][state]
for pair in self.basepairs:
for basepair in id_infos[key][pair]:
id[key][pair][basepair] = id_infos[key][pair][basepair]
with Pool(processes=proc) as pool:
perm_jobs = []
for x in range(proc):
if x == 0:
end_pair = len(self.basepairs) // proc + len(self.basepairs) % proc
end_single = self.pos // proc + self.pos % proc
perm_jobs.append(
(list(range(start_single, end_single)), permute_num, logo_dict, id, start_pair, end_pair, max,
entropy, pmethod, exceedances, targetperms, peaks, alpha, features))
else:
start_pair = end_pair
end_pair = start_pair + len(self.basepairs) // proc
start_single = end_single
end_single = end_single + self.pos // proc
perm_jobs.append(
(list(range(start_single, end_single)), permute_num, logo_dict, id, start_pair, end_pair, max,
entropy, pmethod, exceedances, targetperms, peaks, alpha, features))
significant_calc_outputs = pool.starmap(self.cal_perm_id_pvalue, perm_jobs, 1)
for x in significant_calc_outputs:
for key in logo_dict.keys():
for single in x["pvalue"][key]:
for state in x["pvalue"][key][single]:
pvalue[key][single][state] = x["pvalue"][key][single][state]
CI_lower[key][single][state] = x["CI_lower"][key][single][state]
CI_upper[key][single][state] = x["CI_upper"][key][single][state]
permnum[key][single][state] = x["permnum"][key][single][state]
ptype[key][single][state] = x["ptype"][key][single][state]
b_freq_table[key][single][state] = x["bt"][key][single][state]
f_freq_table[key][single][state] = x["ft"][key][single][state]
gpd_shape[key][single][state] = x["shape"][key][single][state]
gpd_scale[key][single][state] = x["scale"][key][single][state]
gpd_exceedances_size[key][single][state] = x["excnum"][key][single][state]
gpd_ADtest[key][single][state] = x["ADtest"][key][single][state]
return pvalue, CI_lower, CI_upper, permnum, ptype, b_freq_table, f_freq_table, gpd_shape, gpd_scale, gpd_exceedances_size, gpd_ADtest
def cal_perm_id_pvalue(self, positions, permute_num, logo_dic, id_infos, start_pair, end_pair, max, entropy,
pmethod, exceedances, targetperms, peaks, alpha, features):
significant_calc_outputs = {}
pvalue = {}
CI_lower = {}
CI_upper = {}
permnum = {}
ptype = {}
gpd_shape = {}
gpd_scale = {}
gpd_exceedances_size = {}
gpd_ADtest = {}
b_freq_table = {}
f_freq_table = {}
pairwise_combinations = itertools.permutations(logo_dic.keys(), 2)
for pair in pairwise_combinations:
pvalue[pair[0]] = defaultdict(defaultdict)
CI_lower[pair[0]] = defaultdict(defaultdict)
CI_upper[pair[0]] = defaultdict(defaultdict)
permnum[pair[0]] = defaultdict(defaultdict)
ptype[pair[0]] = defaultdict(defaultdict)
b_freq_table[pair[0]] = defaultdict(defaultdict)
f_freq_table[pair[0]] = defaultdict(defaultdict)
gpd_shape[pair[0]] = defaultdict(defaultdict)
gpd_scale[pair[0]] = defaultdict(defaultdict)
gpd_exceedances_size[pair[0]] = defaultdict(defaultdict)
gpd_ADtest[pair[0]] = defaultdict(defaultdict)
if features == "singles" or features == "both":
for single in positions:
for state in self.singles:
state_counts_back = logo_dic[pair[0]].get([single], state)
state_counts_fore = logo_dic[pair[1]].get([single], state)
if (sum(state_counts_back.values()) == 0) or (sum(state_counts_fore.values()) == 0):
continue
if entropy == "NSB":
(
pvalue[pair[0]][single][state],
CI_lower[pair[0]][single][state],
CI_upper[pair[0]][single][state],
permnum[pair[0]][single][state],
ptype[pair[0]][single][state],
b_freq_table[pair[0]][single][state],
f_freq_table[pair[0]][single][state],
gpd_shape[pair[0]][single][state],
gpd_scale[pair[0]][single][state],
gpd_exceedances_size[pair[0]][single][state],
gpd_ADtest[pair[0]][single][state],
) = self.calc_ID_pvalue_NSB(permute_num, state_counts_back,
state_counts_fore,
sum(state_counts_back.values()),
logo_dic[pair[0]].functions,
logo_dic[pair[1]].functions, max,
id_infos[pair[0]][single][state], pmethod, exceedances, targetperms,
peaks, alpha)
if entropy == "MM":
(
pvalue[pair[0]][single][state],
CI_lower[pair[0]][single][state],
CI_upper[pair[0]][single][state],
permnum[pair[0]][single][state],
ptype[pair[0]][single][state],
b_freq_table[pair[0]][single][state],
f_freq_table[pair[0]][single][state],
gpd_shape[pair[0]][single][state],
gpd_scale[pair[0]][single][state],
gpd_exceedances_size[pair[0]][single][state],
gpd_ADtest[pair[0]][single][state],
) = self.calc_ID_pvalue_MM(permute_num, state_counts_back,
state_counts_fore,
sum(state_counts_back.values()),
logo_dic[pair[0]].functions,
logo_dic[pair[1]].functions, max,
id_infos[pair[0]][single][state], pmethod, exceedances, targetperms,
peaks, alpha)
if features == "pairs" or features == "both":
for basepair in self.basepairs[start_pair:end_pair]:
for state in id_infos[pair[0]][basepair]:
state_counts_back = logo_dic[pair[0]].get(basepair, state)
state_counts_fore = logo_dic[pair[1]].get(basepair, state)
if (sum(state_counts_back.values()) == 0) or (sum(state_counts_fore.values()) == 0):
continue
if entropy == "NSB":
(
pvalue[pair[0]][basepair][state],
CI_lower[pair[0]][basepair][state],
CI_upper[pair[0]][basepair][state],
permnum[pair[0]][basepair][state],
ptype[pair[0]][basepair][state],
b_freq_table[pair[0]][basepair][state],
f_freq_table[pair[0]][basepair][state],
gpd_shape[pair[0]][basepair][state],
gpd_scale[pair[0]][basepair][state],
gpd_exceedances_size[pair[0]][basepair][state],
gpd_ADtest[pair[0]][basepair][state],
) = self.calc_ID_pvalue_NSB(permute_num, state_counts_back,
state_counts_fore,
sum(state_counts_back.values()),
logo_dic[pair[0]].functions,
logo_dic[pair[1]].functions, max,
id_infos[pair[0]][basepair][state], pmethod, exceedances,
targetperms, peaks, alpha)
if entropy == "MM":
(
pvalue[pair[0]][basepair][state],
CI_lower[pair[0]][basepair][state],
CI_upper[pair[0]][basepair][state],
permnum[pair[0]][basepair][state],
ptype[pair[0]][basepair][state],
b_freq_table[pair[0]][basepair][state],
f_freq_table[pair[0]][basepair][state],
gpd_shape[pair[0]][basepair][state],
gpd_scale[pair[0]][basepair][state],
gpd_exceedances_size[pair[0]][basepair][state],
gpd_ADtest[pair[0]][basepair][state],
) = self.calc_ID_pvalue_MM(permute_num, state_counts_back,
state_counts_fore,
sum(state_counts_back.values()),
logo_dic[pair[0]].functions,
logo_dic[pair[1]].functions, max,
id_infos[pair[0]][basepair][state], pmethod, exceedances,
targetperms, peaks, alpha)
significant_calc_outputs["pvalue"] = pvalue
significant_calc_outputs["CI_lower"] = CI_lower
significant_calc_outputs["CI_upper"] = CI_upper
significant_calc_outputs["permnum"] = permnum
significant_calc_outputs["ptype"] = ptype
significant_calc_outputs["bt"] = b_freq_table
significant_calc_outputs["ft"] = f_freq_table
significant_calc_outputs["shape"] = gpd_shape
significant_calc_outputs["scale"] = gpd_scale
significant_calc_outputs["excnum"] = gpd_exceedances_size
significant_calc_outputs["ADtest"] = gpd_ADtest
return significant_calc_outputs
def calc_ID_pvalue_NSB(self, maxPerm, class_counts_b, class_counts_f, back_size, b_functions, f_functions,
max, orig_id, pmethod, exceedances, targetperms, peaks, alpha):
if orig_id == 0:
return 1, None, None, None, None, None, None, None, None, None, None
if pmethod == "ECDF_pseudo":
perm_id_nsb_values = self.calc_permvalues_id_nsb(maxPerm, class_counts_b, class_counts_f, back_size,
b_functions, f_functions, max)
return self.calc_pecdf_with_pseudo(perm_id_nsb_values, orig_id, class_counts_b, class_counts_f)
if pmethod == "ECDF":
return self.calc_pecdf_id_nsb(maxPerm, class_counts_b, class_counts_f, back_size, b_functions, f_functions,
max, orig_id, exceedances, alpha)
if pmethod == "GPD":
return self.calc_pgpd_ecdf_id_nsb(maxPerm, class_counts_b, class_counts_f, back_size, b_functions,
f_functions,
max, orig_id, exceedances, targetperms, peaks, alpha)
def calc_permvalues_id_nsb(self, numPerm, class_counts_b, class_counts_f, back_size, b_functions, f_functions,
max):
class_list = []
for aaclass in class_counts_b.keys():
class_list.extend(aaclass * class_counts_b[aaclass])
for aaclass in class_counts_f.keys():
class_list.extend(aaclass * class_counts_f[aaclass])
indices = []
permIDs = []
for p in range(numPerm):
indices.append(self.shuffled(class_list))
for index in indices:
p_state_counts_back = Counter()
p_state_counts_fore = Counter()
for i, aaclass in enumerate(index):
if i < back_size:
p_state_counts_back[aaclass] += 1
else:
p_state_counts_fore[aaclass] += 1
exact = self.calculate_perm_exact(max, f_functions - Counter(class_counts_f) + Counter(p_state_counts_fore))
# calculate info for the Foreground _______________________________________________________________________
functions_array = np.array(
list((f_functions - Counter(class_counts_f) + Counter(p_state_counts_fore)).values()))
bg_entropy = -np.sum(
(functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()) * np.log2(
functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()))
nsb_array = np.array(
list(p_state_counts_fore.values()) + [0] * (len(f_functions) - len(p_state_counts_fore)))
if sum(p_state_counts_fore.values()) <= len(exact):
expected_bg_entropy = exact[sum(p_state_counts_fore.values()) - 1]
fg_entropy = -np.sum((nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()) * np.log2(
nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()))
else:
expected_bg_entropy = bg_entropy
fg_entropy = nb.S(nb.make_nxkx(nsb_array, nsb_array.size), nsb_array.sum(), nsb_array.size)
if (expected_bg_entropy - fg_entropy) < 0:
info_fore = 0
else:
info_fore = expected_bg_entropy - fg_entropy
# calculate info for the Background _______________________________________________________________________
exact = self.calculate_perm_exact(max, b_functions - Counter(class_counts_b) + Counter(p_state_counts_back))
functions_array = np.array(
list((b_functions - Counter(class_counts_b) + Counter(p_state_counts_back)).values()))
bg_entropy = -np.sum(
(functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()) * np.log2(
functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()))
nsb_array = np.array(
list(p_state_counts_back.values()) + [0] * (len(b_functions) - len(p_state_counts_back)))
if sum(p_state_counts_back.values()) <= len(exact):
expected_bg_entropy = exact[sum(p_state_counts_back.values()) - 1]
fg_entropy = -np.sum((nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()) * np.log2(
nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()))
else:
expected_bg_entropy = bg_entropy
fg_entropy = nb.S(nb.make_nxkx(nsb_array, nsb_array.size), nsb_array.sum(), nsb_array.size)
if (expected_bg_entropy - fg_entropy) < 0:
info_back = 0
else:
info_back = expected_bg_entropy - fg_entropy
id_info = info_fore - info_back
if id_info < 0:
id_info = 0
permIDs.append(id_info)
return permIDs
def calc_pecdf_id_nsb(self, maxPerm, class_counts_b, class_counts_f, back_size, b_functions, f_functions,
max, orig_id, exceedances, alpha):
b_aaclasstable = ""
f_aaclasstable = ""
aaclasslist = []
for letter, count in sorted(class_counts_b.items()):
b_aaclasstable += letter + str(count)
aaclasslist.extend(letter * count)
for letter, count in sorted(class_counts_f.items()):
f_aaclasstable += letter + str(count)
aaclasslist.extend(letter * count)
permIDs = []
permcount = 0
exceedances_count = 0
while permcount < maxPerm:
permcount = permcount + 1
shuffled_aa = self.shuffled(aaclasslist)
p_state_counts_back = Counter()
p_state_counts_fore = Counter()
for (i, aaclass) in enumerate(shuffled_aa):
if i < back_size:
p_state_counts_back[aaclass] += 1
else:
p_state_counts_fore[aaclass] += 1
exact = self.calculate_perm_exact(max, f_functions - Counter(class_counts_f) + Counter(p_state_counts_fore))
# calculate info for the Foreground _______________________________________________________________________
functions_array = np.array(
list((f_functions - Counter(class_counts_f) + Counter(p_state_counts_fore)).values()))
bg_entropy = -np.sum(
(functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()) * np.log2(
functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()))
nsb_array = np.array(
list(p_state_counts_fore.values()) + [0] * (len(f_functions) - len(p_state_counts_fore)))
if sum(p_state_counts_fore.values()) <= len(exact):
expected_bg_entropy = exact[sum(p_state_counts_fore.values()) - 1]
fg_entropy = -np.sum((nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()) * np.log2(
nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()))
else:
expected_bg_entropy = bg_entropy
fg_entropy = nb.S(nb.make_nxkx(nsb_array, nsb_array.size), nsb_array.sum(), nsb_array.size)
if (expected_bg_entropy - fg_entropy) < 0:
info_fore = 0
else:
info_fore = expected_bg_entropy - fg_entropy
# calculate info for the Background _______________________________________________________________________
exact = self.calculate_perm_exact(max, b_functions - Counter(class_counts_b) + Counter(p_state_counts_back))
functions_array = np.array(
list((b_functions - Counter(class_counts_b) + Counter(p_state_counts_back)).values()))
bg_entropy = -np.sum(
(functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()) * np.log2(
functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()))
nsb_array = np.array(
list(p_state_counts_back.values()) + [0] * (len(b_functions) - len(p_state_counts_back)))
if sum(p_state_counts_back.values()) <= len(exact):
expected_bg_entropy = exact[sum(p_state_counts_back.values()) - 1]
fg_entropy = -np.sum((nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()) * np.log2(
nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()))
else:
expected_bg_entropy = bg_entropy
fg_entropy = nb.S(nb.make_nxkx(nsb_array, nsb_array.size), nsb_array.sum(), nsb_array.size)
if (expected_bg_entropy - fg_entropy) < 0:
info_back = 0
else:
info_back = expected_bg_entropy - fg_entropy
id_info = info_fore - info_back
if id_info < 0:
id_info = 0
permIDs.append(id_info)
if id_info >= orig_id:
exceedances_count = exceedances_count + 1
if exceedances_count >= exceedances:
P = exceedances_count / len(permIDs)
z = norm.ppf(1 - alpha / 2)
se = np.sqrt(P * (1 - P) / permcount)
P_CI = [P - z * se, P + z * se]  # normal-approximation (Wald) CI around the ECDF p-value
return P, P_CI[0], P_CI[1], permcount, "p_ecdf", b_aaclasstable, f_aaclasstable, None, None, None, None
P = (exceedances_count + 1) / (len(permIDs) + 1)
return P, None, None, permcount, "p_ecdf_with_pseudo", b_aaclasstable, f_aaclasstable, None, None, None, None
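# The two return paths above are standard permutation p-values: once at least
# `exceedances` permutations reach the observed statistic, P = b / n with a
# normal-approximation CI; otherwise the pseudocount estimator P = (b + 1) / (n + 1)
# is used, which can never report an exact zero. Illustrative numbers (ours): 10
# exceedances in 1000 permutations give P = 0.01, while 0 exceedances in 1000 give
# P = 1 / 1001, roughly 0.000999, rather than 0.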
def calc_pgpd_ecdf_id_nsb(self, maxPerm, class_counts_b, class_counts_f, back_size, b_functions, f_functions,
max, orig_id, exceedances, targetperms, peaks, alpha):
b_aaclasstable = ""
f_aaclasstable = ""
aaclasslist = []
for letter, count in sorted(class_counts_b.items()):
b_aaclasstable += letter + str(count)
aaclasslist.extend(letter * count)
for letter, count in sorted(class_counts_f.items()):
f_aaclasstable += letter + str(count)
aaclasslist.extend(letter * count)
permIDs = []
permcount = 0
exceedances_count = 0
while permcount < maxPerm:
permcount = permcount + 1
shuffled_aa = self.shuffled(aaclasslist)
p_state_counts_back = Counter()
p_state_counts_fore = Counter()
for (i, aaclass) in enumerate(shuffled_aa):
if i < back_size:
p_state_counts_back[aaclass] += 1
else:
p_state_counts_fore[aaclass] += 1
exact = self.calculate_perm_exact(max, f_functions - Counter(class_counts_f) + Counter(p_state_counts_fore))
# calculate info for the Foreground _______________________________________________________________________
functions_array = np.array(
list((f_functions - Counter(class_counts_f) + Counter(p_state_counts_fore)).values()))
bg_entropy = -np.sum(
(functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()) * np.log2(
functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()))
nsb_array = np.array(
list(p_state_counts_fore.values()) + [0] * (len(f_functions) - len(p_state_counts_fore)))
if sum(p_state_counts_fore.values()) <= len(exact):
expected_bg_entropy = exact[sum(p_state_counts_fore.values()) - 1]
fg_entropy = -np.sum((nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()) * np.log2(
nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()))
else:
expected_bg_entropy = bg_entropy
fg_entropy = nb.S(nb.make_nxkx(nsb_array, nsb_array.size), nsb_array.sum(), nsb_array.size)
if (expected_bg_entropy - fg_entropy) < 0:
info_fore = 0
else:
info_fore = expected_bg_entropy - fg_entropy
# calculate info for the Background _______________________________________________________________________
exact = self.calculate_perm_exact(max, b_functions - Counter(class_counts_b) + Counter(p_state_counts_back))
functions_array = np.array(
list((b_functions - Counter(class_counts_b) + Counter(p_state_counts_back)).values()))
bg_entropy = -np.sum(
(functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()) * np.log2(
functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()))
nsb_array = np.array(
list(p_state_counts_back.values()) + [0] * (len(b_functions) - len(p_state_counts_back)))
if sum(p_state_counts_back.values()) <= len(exact):
expected_bg_entropy = exact[sum(p_state_counts_back.values()) - 1]
fg_entropy = -np.sum((nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()) * np.log2(
nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()))
else:
expected_bg_entropy = bg_entropy
fg_entropy = nb.S(nb.make_nxkx(nsb_array, nsb_array.size), nsb_array.sum(), nsb_array.size)
if (expected_bg_entropy - fg_entropy) < 0:
info_back = 0
else:
info_back = expected_bg_entropy - fg_entropy
id_info = info_fore - info_back
if id_info < 0:
id_info = 0
permIDs.append(id_info)
if id_info >= orig_id:
exceedances_count = exceedances_count + 1
if exceedances_count == exceedances:
P = exceedances_count / len(permIDs)
z = norm.ppf(1 - alpha / 2)
se = np.sqrt(P * (1 - P) / permcount)
P_CI = [P - z * se, P + z * se]  # normal-approximation (Wald) CI around the ECDF p-value
return P, P_CI[0], P_CI[1], permcount, "p_ecdf", b_aaclasstable, f_aaclasstable, None, None, None, None
else:
if permcount >= targetperms:
E = min(peaks, permcount // 3)
permIDs_5p = [x ** 5 for x in permIDs]  # 5th-power transform stretches the upper tail
top = sorted(np.partition(permIDs_5p, -(E + 1))[-(E + 1):])
threshold = (top[0] + top[1]) / 2  # midpoint between the (E+1)-th and E-th largest values
permIDs_5p_t = [x - threshold for x in permIDs_5p]
warnings.filterwarnings("ignore")
fit_gpd = self.check_fit_gpd(np.partition(permIDs_5p_t, -E)[-E:])
while not fit_gpd:  # shrink the exceedance set until the GPD fit passes
E -= 10
if E < 10:
break
top = sorted(np.partition(permIDs_5p, -(E + 1))[-(E + 1):])
threshold = (top[0] + top[1]) / 2
permIDs_5p_t = [x - threshold for x in permIDs_5p]
fit_gpd = self.check_fit_gpd(np.partition(permIDs_5p_t, -E)[-E:])
if fit_gpd:
shape, loc, scale = genpareto.fit(np.partition(permIDs_5p_t, -E)[-E:], floc=0)
gpd_pvalue = (1 - genpareto.cdf((orig_id ** 5) - threshold, shape, loc, scale)) * E / permcount
if gpd_pvalue == 0:
targetperms = min(targetperms * 2, maxPerm)
if permcount == maxPerm:
P = (exceedances_count + 1) / (len(permIDs) + 1)
return P, None, None, permcount, "p_ecdf_with_pseudo (p_gpd=0)", b_aaclasstable, f_aaclasstable, shape, scale, E, ad_test(
np.partition(permIDs_5p_t, -E)[-E:],
genpareto(c=shape, scale=scale, loc=loc)).pvalue
continue
P_CI = self.calculate_gpd_CI(alpha, np.partition(permIDs_5p_t, -E)[-E:], permcount, shape,
scale, (orig_id ** 5) - threshold)
return (gpd_pvalue, P_CI[0], P_CI[1], permcount, "p_gpd", b_aaclasstable, f_aaclasstable, shape, scale, E,
ad_test(np.partition(permIDs_5p_t, -E)[-E:], genpareto(c=shape, scale=scale, loc=loc)).pvalue)
else:
targetperms = min(targetperms * 2, maxPerm)
P = (exceedances_count + 1) / (len(permIDs) + 1)
return P, None, None, permcount, "p_ecdf_with_pseudo", b_aaclasstable, f_aaclasstable, None, None, None, None
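# The branch above follows the usual generalized-Pareto tail approximation for rare
# permutation p-values: fit a GPD to the E largest permutation statistics (after the
# 5th-power transform and threshold shift) and estimate
# p_gpd = (E / n) * (1 - F_gpd(x - threshold)). A minimal standalone sketch with
# scipy (variable names and values are illustrative, not from this class):
#
# from scipy.stats import genpareto
# tail = np.partition(perm_stats, -E)[-E:] - threshold   # E exceedances over the threshold
# shape, loc, scale = genpareto.fit(tail, floc=0)
# p_gpd = (1 - genpareto.cdf(observed - threshold, shape, loc, scale)) * E / n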
def calc_ID_pvalue_MM(self, maxPerm, class_counts_b, class_counts_f, back_size, b_functions, f_functions,
max, orig_id, pmethod, exceedances, targetperms, peaks, alpha):
if orig_id == 0:
return 1, None, None, None, None, None, None, None, None, None, None
if pmethod == "ECDF_pseudo":
perm_id_mm_values = self.calc_permvalues_id_mm(maxPerm, class_counts_b, class_counts_f, back_size,
b_functions, f_functions, max)
return self.calc_pecdf_with_pseudo(perm_id_mm_values, orig_id, class_counts_b, class_counts_f)
if pmethod == "ECDF":
return self.calc_pecdf_id_mm(maxPerm, class_counts_b, class_counts_f, back_size, b_functions, f_functions,
max, orig_id, exceedances, alpha)
if pmethod == "GPD":
return self.calc_pgpd_ecdf_id_mm(maxPerm, class_counts_b, class_counts_f, back_size, b_functions,
f_functions,
max, orig_id, exceedances, targetperms, peaks, alpha)
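# Summary of the dispatcher above: pmethod selects the p-value strategy for the
# Miller-Madow ("MM") statistic:
#   "ECDF_pseudo" -> full permutation run, then P = (b + 1) / (n + 1)
#   "ECDF"        -> early stop after `exceedances` hits, P = b / n with a Wald CI
#   "GPD"         -> generalized Pareto tail approximation when exceedances are scarce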
def calc_permvalues_id_mm(self, numPerm, class_counts_b, class_counts_f, back_size, b_functions, f_functions,
max):
class_list = []
for aaclass in class_counts_b.keys():
class_list.extend(aaclass * class_counts_b[aaclass])
for aaclass in class_counts_f.keys():
class_list.extend(aaclass * class_counts_f[aaclass])
indices = []
permIDs = []
for p in range(numPerm):
indices.append(self.shuffled(class_list))
for index in indices:
p_state_counts_back = Counter()
p_state_counts_fore = Counter()
for i, aaclass in enumerate(index):
if i < back_size:
p_state_counts_back[aaclass] += 1
else:
p_state_counts_fore[aaclass] += 1
exact = self.calculate_perm_exact(max, f_functions - Counter(class_counts_f) + Counter(p_state_counts_fore))
# calculate the info for the fore ________________________________________________________________________
functions_array = np.array(
list((f_functions - Counter(class_counts_f) + Counter(p_state_counts_fore)).values()))
bg_entropy = -np.sum(
(functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()) * np.log2(
functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()))
nsb_array = np.array(
list(p_state_counts_fore.values()) + [0] * (len(f_functions) - len(p_state_counts_fore)))
fg_entropy = -np.sum((nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()) * np.log2(
nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()))
if sum(p_state_counts_fore.values()) <= len(exact):
expected_bg_entropy = exact[sum(p_state_counts_fore.values()) - 1]
else:
expected_bg_entropy = self.approx_expect(bg_entropy, len(f_functions),
sum(p_state_counts_fore.values()))
if (expected_bg_entropy - fg_entropy) < 0:
info_fore = 0
else:
info_fore = expected_bg_entropy - fg_entropy
# calculate the info for the back ________________________________________________________________________
exact = self.calculate_perm_exact(max, b_functions - Counter(class_counts_b) + Counter(p_state_counts_back))
functions_array = np.array(
list((b_functions - Counter(class_counts_b) + Counter(p_state_counts_back)).values()))
bg_entropy = -np.sum(
(functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()) * np.log2(
functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()))
nsb_array = np.array(
list(p_state_counts_back.values()) + [0] * (len(b_functions) - len(p_state_counts_back)))
fg_entropy = -np.sum((nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()) * np.log2(
nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()))
if sum(p_state_counts_back.values()) <= len(exact):
expected_bg_entropy = exact[sum(p_state_counts_back.values()) - 1]
else:
expected_bg_entropy = self.approx_expect(bg_entropy, len(b_functions),
sum(p_state_counts_back.values()))
if (expected_bg_entropy - fg_entropy) < 0:
info_back = 0
else:
info_back = expected_bg_entropy - fg_entropy
id_info = info_fore - info_back
if id_info < 0:
id_info = 0
permIDs.append(id_info)
return permIDs
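# Note the difference from the NSB variants above: the "_mm" family always uses the
# plug-in entropy for the sampled counts and, when the sample outgrows the exact
# table, corrects the expected background entropy with approx_expect (a
# Miller-Madow-style term) instead of switching to the NSB estimator.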
def calc_pecdf_id_mm(self, maxPerm, class_counts_b, class_counts_f, back_size, b_functions, f_functions,
max, orig_id, exceedances, alpha):
b_aaclasstable = ""
f_aaclasstable = ""
aaclasslist = []
for letter, count in sorted(class_counts_b.items()):
b_aaclasstable += letter + str(count)
aaclasslist.extend(letter * count)
for letter, count in sorted(class_counts_f.items()):
f_aaclasstable += letter + str(count)
aaclasslist.extend(letter * count)
permIDs = []
permcount = 0
exceedances_count = 0
while permcount < maxPerm:
permcount = permcount + 1
shuffled_aa = self.shuffled(aaclasslist)
p_state_counts_back = Counter()
p_state_counts_fore = Counter()
for (i, aaclass) in enumerate(shuffled_aa):
if i < back_size:
p_state_counts_back[aaclass] += 1
else:
p_state_counts_fore[aaclass] += 1
exact = self.calculate_perm_exact(max, f_functions - Counter(class_counts_f) + Counter(p_state_counts_fore))
# calculate the info for the fore ________________________________________________________________________
functions_array = np.array(
list((f_functions - Counter(class_counts_f) + Counter(p_state_counts_fore)).values()))
bg_entropy = -np.sum(
(functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()) * np.log2(
functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()))
nsb_array = np.array(
list(p_state_counts_fore.values()) + [0] * (len(f_functions) - len(p_state_counts_fore)))
fg_entropy = -np.sum((nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()) * np.log2(
nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()))
if sum(p_state_counts_fore.values()) <= len(exact):
expected_bg_entropy = exact[sum(p_state_counts_fore.values()) - 1]
else:
expected_bg_entropy = self.approx_expect(bg_entropy, len(f_functions),
sum(p_state_counts_fore.values()))
if (expected_bg_entropy - fg_entropy) < 0:
info_fore = 0
else:
info_fore = expected_bg_entropy - fg_entropy
# calculate the info for the back ________________________________________________________________________
exact = self.calculate_perm_exact(max, b_functions - Counter(class_counts_b) + Counter(p_state_counts_back))
functions_array = np.array(
list((b_functions - Counter(class_counts_b) + Counter(p_state_counts_back)).values()))
bg_entropy = -np.sum(
(functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()) * np.log2(
functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()))
nsb_array = np.array(
list(p_state_counts_back.values()) + [0] * (len(b_functions) - len(p_state_counts_back)))
fg_entropy = -np.sum((nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()) * np.log2(
nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()))
if sum(p_state_counts_back.values()) <= len(exact):
expected_bg_entropy = exact[sum(p_state_counts_back.values()) - 1]
else:
expected_bg_entropy = self.approx_expect(bg_entropy, len(b_functions),
sum(p_state_counts_back.values()))
if (expected_bg_entropy - fg_entropy) < 0:
info_back = 0
else:
info_back = expected_bg_entropy - fg_entropy
id_info = info_fore - info_back
if id_info < 0:
id_info = 0
permIDs.append(id_info)
if id_info >= orig_id:
exceedances_count = exceedances_count + 1
if exceedances_count >= exceedances:
P = exceedances_count / len(permIDs)
z = norm.ppf(1 - alpha / 2)
se = np.sqrt(P * (1 - P) / permcount)
P_CI = [P - z * se, P + z * se]  # normal-approximation (Wald) CI around the ECDF p-value
return P, P_CI[0], P_CI[1], permcount, "p_ecdf", b_aaclasstable, f_aaclasstable, None, None, None, None
P = (exceedances_count + 1) / (len(permIDs) + 1)
return P, None, None, permcount, "p_ecdf_with_pseudo", b_aaclasstable, f_aaclasstable, None, None, None, None
def calc_pgpd_ecdf_id_mm(self, maxPerm, class_counts_b, class_counts_f, back_size, b_functions, f_functions,
max, orig_id, exceedances, targetperms, peaks, alpha):
b_aaclasstable = ""
f_aaclasstable = ""
aaclasslist = []
for letter, count in sorted(class_counts_b.items()):
b_aaclasstable += letter + str(count)
aaclasslist.extend(letter * count)
for letter, count in sorted(class_counts_f.items()):
f_aaclasstable += letter + str(count)
aaclasslist.extend(letter * count)
permIDs = []
permcount = 0
exceedances_count = 0
while permcount < maxPerm:
permcount = permcount + 1
shuffled_aa = self.shuffled(aaclasslist)
p_state_counts_back = Counter()
p_state_counts_fore = Counter()
for (i, aaclass) in enumerate(shuffled_aa):
if i < back_size:
p_state_counts_back[aaclass] += 1
else:
p_state_counts_fore[aaclass] += 1
exact = self.calculate_perm_exact(max, f_functions - Counter(class_counts_f) + Counter(p_state_counts_fore))
# calculate the info for the fore ________________________________________________________________________
functions_array = np.array(
list((f_functions - Counter(class_counts_f) + Counter(p_state_counts_fore)).values()))
bg_entropy = -np.sum(
(functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()) * np.log2(
functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()))
nsb_array = np.array(
list(p_state_counts_fore.values()) + [0] * (len(f_functions) - len(p_state_counts_fore)))
fg_entropy = -np.sum((nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()) * np.log2(
nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()))
if sum(p_state_counts_fore.values()) <= len(exact):
expected_bg_entropy = exact[sum(p_state_counts_fore.values()) - 1]
else:
expected_bg_entropy = self.approx_expect(bg_entropy, len(f_functions),
sum(p_state_counts_fore.values()))
if (expected_bg_entropy - fg_entropy) < 0:
info_fore = 0
else:
info_fore = expected_bg_entropy - fg_entropy
# calculate the info for the back ________________________________________________________________________
exact = self.calculate_perm_exact(max, b_functions - Counter(class_counts_b) + Counter(p_state_counts_back))
functions_array = np.array(
list((b_functions - Counter(class_counts_b) + Counter(p_state_counts_back)).values()))
bg_entropy = -np.sum(
(functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()) * np.log2(
functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()))
nsb_array = np.array(
list(p_state_counts_back.values()) + [0] * (len(b_functions) - len(p_state_counts_back)))
fg_entropy = -np.sum((nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()) * np.log2(
nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()))
if sum(p_state_counts_back.values()) <= len(exact):
expected_bg_entropy = exact[sum(p_state_counts_back.values()) - 1]
else:
expected_bg_entropy = self.approx_expect(bg_entropy, len(b_functions),
sum(p_state_counts_back.values()))
if (expected_bg_entropy - fg_entropy) < 0:
info_back = 0
else:
info_back = expected_bg_entropy - fg_entropy
id_info = info_fore - info_back
if id_info < 0:
id_info = 0
permIDs.append(id_info)
if id_info >= orig_id:
exceedances_count = exceedances_count + 1
if exceedances_count == exceedances:
P = exceedances_count / len(permIDs)
z = norm.ppf(1 - alpha / 2)
se = np.sqrt(P * (1 - P) / permcount)
P_CI = [P - z * se, P + z * se]  # normal-approximation (Wald) CI around the ECDF p-value
return P, P_CI[0], P_CI[1], permcount, "p_ecdf", b_aaclasstable, f_aaclasstable, None, None, None, None
else:
if permcount >= targetperms:
E = min(peaks, permcount // 3)
permIDs_5p = [x ** 5 for x in permIDs]  # 5th-power transform stretches the upper tail
top = sorted(np.partition(permIDs_5p, -(E + 1))[-(E + 1):])
threshold = (top[0] + top[1]) / 2  # midpoint between the (E+1)-th and E-th largest values
permIDs_5p_t = [x - threshold for x in permIDs_5p]
warnings.filterwarnings("ignore")
fit_gpd = self.check_fit_gpd(np.partition(permIDs_5p_t, -E)[-E:])
while not fit_gpd:  # shrink the exceedance set until the GPD fit passes
E -= 10
if E < 10:
break
top = sorted(np.partition(permIDs_5p, -(E + 1))[-(E + 1):])
threshold = (top[0] + top[1]) / 2
permIDs_5p_t = [x - threshold for x in permIDs_5p]
fit_gpd = self.check_fit_gpd(np.partition(permIDs_5p_t, -E)[-E:])
if fit_gpd:
shape, loc, scale = genpareto.fit(np.partition(permIDs_5p_t, -E)[-E:], floc=0)
gpd_pvalue = (1 - genpareto.cdf((orig_id ** 5) - threshold, shape, loc, scale)) * E / permcount
if gpd_pvalue == 0:
targetperms = min(targetperms * 2, maxPerm)
if permcount == maxPerm:
P = (exceedances_count + 1) / (len(permIDs) + 1)
return P, None, None, permcount, "p_ecdf_with_pseudo (p_gpd=0)", b_aaclasstable, f_aaclasstable, shape, scale, E, ad_test(
np.partition(permIDs_5p_t, -E)[-E:],
genpareto(c=shape, scale=scale, loc=loc)).pvalue
continue
P_CI = self.calculate_gpd_CI(alpha, np.partition(permIDs_5p_t, -E)[-E:], permcount, shape,
scale, (orig_id ** 5) - threshold)
return (gpd_pvalue, P_CI[0], P_CI[1], permcount, "p_gpd", b_aaclasstable, f_aaclasstable, shape, scale, E,
ad_test(np.partition(permIDs_5p_t, -E)[-E:], genpareto(c=shape, scale=scale, loc=loc)).pvalue)
else:
targetperms = min(targetperms * 2, maxPerm)
P = (exceedances_count + 1) / (len(permIDs) + 1)
return P, None, None, permcount, "p_ecdf_with_pseudo", b_aaclasstable, f_aaclasstable, None, None, None, None
def calculate_perm_exact(self, n, functions):
exact_list = []
total = sum(functions.values())
p = [x / total for x in functions.values()]
for i in range(1, n + 1):
j = exact.calc_exact(i, p, len(functions))
exact_list.append(j[1])
return exact_list
def approx_expect(self, H, k, N):
return H - ((k - 1) / ((mt.log(4)) * N))
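# approx_expect applies a Miller-Madow-style bias correction: with k categories and N
# samples, the plug-in entropy underestimates the true entropy by roughly
# (k - 1) / (2 * N * ln 2) bits, and mt.log(4) equals 2 * ln 2. Illustrative check
# (values are ours): approx_expect(H=2.0, k=5, N=100) = 2.0 - 4 / (log(4) * 100),
# about 1.9711 bits.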
def addstats(self, pvalues, correction, features):
P_corrected = {}
for key in pvalues.keys():
P_corrected[key] = defaultdict(lambda: defaultdict(float))
if features == "singles" or features == "both":
ss_coords = []
for coord in pvalues[key]:
if ("," not in str(coord)):
ss_coords.append(coord)
test_ss = []
ss_coords.sort()
for coord in ss_coords:
for state in sorted(pvalues[key][coord]):
test_ss.append(pvalues[key][coord][state])
test_ss_results = smm.multipletests(test_ss, method=correction)[1].tolist()
for coord in ss_coords:
for state in sorted(pvalues[key][coord]):
P_corrected[key][coord][state] = test_ss_results.pop(0)
if features == "pairs" or features == "both":
bp_coords = []
for coord in pvalues[key]:
if ("," in str(coord)):
bp_coords.append(coord)
test_bp = []
bp_coords.sort()
for coord in bp_coords:
for state in sorted(pvalues[key][coord]):
test_bp.append(pvalues[key][coord][state])
test_bp_results = smm.multipletests(test_bp, method=correction)[1].tolist()
for coord in bp_coords:
for state in sorted(pvalues[key][coord]):
P_corrected[key][coord][state] = test_bp_results.pop(0)
return P_corrected
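# addstats corrects singles (coordinates without a comma) and pairs (comma-joined
# coordinates) as two separate test families via statsmodels. A standalone sketch of
# the underlying call (the method string is illustrative; the class passes its own
# `correction` argument):
#
# import statsmodels.stats.multitest as smm
# raw = [0.001, 0.02, 0.04, 0.30]
# adjusted = smm.multipletests(raw, method="fdr_bh")[1]  # element [1] holds adjusted p-values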
def write_pvalues(self, P, CI_lower, CI_upper, corrected_P, height, logo_dic, prefix, permnum, ptype, bt, ft, shape,
scale, excnum, ADtest):
tableDict = {}
nameSet = ["Coord", "State", "Statistic", "Sample-Sz-Back", "Sample-Sz-Fore", "P-value", "CI.Lower", "CI.Upper",
"Adjusted-P", "Permutations", "P-Val-Method", "GPD-shape", "GPD-scale", "Peaks", "ADtest-P-val",
"Freqs-Back", "Freqs-Fore"]
for name in nameSet:
tableDict[name] = np.zeros(self.pos * len(self.singles) + len(self.basepairs) * len(self.pairs), )
pairwise_combinations = itertools.permutations(P.keys(), 2)
for key in pairwise_combinations:
tableDict['Coord'] = [pos for pos in range(self.pos) for state in self.singles] + \
[basepair for basepair in self.basepairs for state in P[key[0]][basepair]]
tableDict['State'] = [state for pos in range(self.pos) for state in self.singles] + \
[state for basepair in self.basepairs for state in P[key[0]][basepair]]
tableDict['Statistic'] = [height[key[0]][pos][state] for pos in range(self.pos) for state in self.singles] + \
[height[key[0]][basepair][state] for basepair in self.basepairs for state in
P[key[0]][basepair]]
tableDict['Sample-Sz-Back'] = [sum((logo_dic[key[0]].get([pos], state)).values()) for pos in range(self.pos)
for state in self.singles] + \
[sum((logo_dic[key[0]].get(basepair, state)).values()) for basepair in
self.basepairs for state in P[key[0]][basepair]]
tableDict['Sample-Sz-Fore'] = [sum((logo_dic[key[1]].get([pos], state)).values()) for pos in range(self.pos)
for state in self.singles] + \
[sum((logo_dic[key[1]].get(basepair, state)).values()) for basepair in
self.basepairs for state in P[key[1]][basepair]]
tableDict['P-value'] = [P[key[0]][pos][state] for pos in range(self.pos) for state in self.singles] + \
[P[key[0]][basepair][state] for basepair in self.basepairs for state in
P[key[0]][basepair]]
tableDict['CI.Lower'] = [CI_lower[key[0]][pos][state] for pos in range(self.pos) for state in
self.singles] + \
[CI_lower[key[0]][basepair][state] for basepair in self.basepairs for state in
CI_lower[key[0]][basepair]]
tableDict['CI.Upper'] = [CI_upper[key[0]][pos][state] for pos in range(self.pos) for state in
self.singles] + \
[CI_upper[key[0]][basepair][state] for basepair in self.basepairs for state in
CI_upper[key[0]][basepair]]
tableDict['Adjusted-P'] = [corrected_P[key[0]][pos][state] for pos in range(self.pos) for state in
self.singles] + \
[corrected_P[key[0]][basepair][state] for basepair in self.basepairs for state
in
P[key[0]][basepair]]
tableDict['Permutations'] = [permnum[key[0]][pos][state] for pos in range(self.pos) for state in
self.singles] + \
[permnum[key[0]][basepair][state] for basepair in self.basepairs for state in
permnum[key[0]][basepair]]
tableDict['P-Val-Method'] = [ptype[key[0]][pos][state] for pos in range(self.pos) for state in
self.singles] + \
[ptype[key[0]][basepair][state] for basepair in self.basepairs for state in
ptype[key[0]][basepair]]
tableDict['GPD-shape'] = [shape[key[0]][pos][state] for pos in range(self.pos) for state in
self.singles] + \
[shape[key[0]][basepair][state] for basepair in self.basepairs for state in
shape[key[0]][basepair]]
tableDict['GPD-scale'] = [scale[key[0]][pos][state] for pos in range(self.pos) for state in
self.singles] + \
[scale[key[0]][basepair][state] for basepair in self.basepairs for state in
scale[key[0]][basepair]]
tableDict['Peaks'] = [excnum[key[0]][pos][state] for pos in range(self.pos) for state in
self.singles] + \
[excnum[key[0]][basepair][state] for basepair in self.basepairs for state in
excnum[key[0]][basepair]]
tableDict['ADtest-P-val'] = [ADtest[key[0]][pos][state] for pos in range(self.pos) for state in
self.singles] + \
[ADtest[key[0]][basepair][state] for basepair in self.basepairs for state in
ADtest[key[0]][basepair]]
tableDict['Freqs-Back'] = [bt[key[0]][pos][state] for pos in range(self.pos) for state in
self.singles] + \
[bt[key[0]][basepair][state] for basepair in self.basepairs for state in
bt[key[0]][basepair]]
tableDict['Freqs-Fore'] = [ft[key[0]][pos][state] for pos in range(self.pos) for state in
self.singles] + \
[ft[key[0]][basepair][state] for basepair in self.basepairs for state in
ft[key[0]][basepair]]
pandasTable = pd.DataFrame(tableDict)
filename = prefix + '_' + key[1] + '_' + key[0] + "_stats.txt"
pandasTable.to_csv(filename, index=None, sep='\t')
[record] planners/__init__.py | AlgTUDelft/AlwaysSafe @ 76beccb698a07c13f3c765c52b62683ad75ba7eb | hexsha 37911391c2944b77cf80a9e8030db9f1b5328482 | MIT | Python | 172 bytes | stars: 10 (2021-04-19 to 2022-01-13) | forks: 1 (2021-12-07)
from .lp import LinearProgrammingPlanner
from .lp_optimistic import OptimisticLinearProgrammingPlanner
from .abs_lp_optimistic import AbsOptimisticLinearProgrammingPlanner
[record] pqviz/plots.py | mitre/PQViz @ 229e662c408e0532df44585d134b8e79eb6c4cf8 | hexsha 8074d2dafffaa21788b92d3dd9576b71a21e8b58 | Apache-2.0 | Python | 13,857 bytes | forks: 1 (2022-01-18)
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import glob
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
from pathlib import Path
import matplotlib.ticker as mticker
import os
import warnings
from textwrap import wrap
def plot_pop(df, selected_demo, sam_type, demographic_type, population_group):
"""
Creates a horizontal bar plot that plots the counts of the perscribed sample type by BMI category
Parameters:
df: Data frame created using create_population_df() function
selected_demo: selected subsected demographic from dropdown. Options will change depending on demographic type.
sam_type: type of population, selected from dropdown, expected Values ['Population', 'Sample']
demographic_type: Demographic that they are comparing, select from dropdown earlier in notebook, expected values ['sex', 'race', 'age']
population_group: Type of population, expected inputs ['Pediatric', 'Adult']
Returns:
A horizontal bar plot that plots the counts of the perscribed sample type by BMI category."""
if population_group == "Pediatric":
plt.figure(figsize=(10, 8))
selected_demo_mask = df[demographic_type] == selected_demo
prev_type_mask = df["Population type"] == sam_type
subsected_df = df[selected_demo_mask & prev_type_mask]
subsected_df["Population"] = subsected_df["Population"].fillna(0)
ax = sns.barplot(
data=subsected_df, y="Weight Category", x="Population", ci=None
)
max_x = max(subsected_df["Population"])
plt.xlim(left=0, right=max_x + max_x / 10) # set the xlim to left, right
for p in ax.patches:
width = p.get_width() # get bar length
if width == 0:
ax.text(
width + max_x / 2.3,
p.get_y()
+ p.get_height() / 2, # vertical center of the bar: y + height / 2
"Suppressed Data", # annotation text to add
ha="left", # horizontal alignment
va="center", # vertical alignment
size=16,
) # font size
else:
ax.text(
width + 1, # set the text at 1 unit right of the bar
p.get_y()
+ p.get_height() / 2, # vertical center of the bar: y + height / 2
"{:,.0f}".format(width), # value to display, no decimals
ha="left", # horizontal alignment
va="center",
) # vertical alignment
# after plotting the data, format the labels
label_format = "{:,.0f}"
ax.xaxis.set_major_locator(mticker.MaxNLocator(3))
ticks_loc = ax.get_xticks().tolist()
ax.xaxis.set_major_locator(mticker.FixedLocator(ticks_loc))
ax.set_xticklabels([label_format.format(x) for x in ticks_loc])
# plt.savefig(path + '{}'.format(Type) + "_{}".format(Race)+".png")
state = list(subsected_df["state"])[0]
plt.title(
"{}".format(sam_type)
+ " Size by BMI Category for \n{}".format(selected_demo)
+ " {} Data".format(population_group)
+ " in {}".format(state),
fontsize=20,
pad=20,
)
plt.xlabel("{}".format(sam_type), fontsize=16)
# subsected_df['Weight Category'] = ['\n'.join(wrap(x, 12)) for x in subsected_df['Weight Category']]
peds_labels = [
"(1) Underweight \n(<5th percentile)",
"(2) Healthy Weight \n(5th to <85th percentile)",
"(3) Overweight \n(85th to <95th percentile)",
"(4) Obesity \n(>95th percentile)",
"(4b) Severe Obesity \n(>120% of the 95th percentile)",
]
ax.yaxis.set_ticklabels(peds_labels)
plt.ylabel("BMI Category", fontsize=16)
plt.show()
elif population_group == "Adult":
plt.figure(figsize=(10, 8))
selected_demo_mask = df[demographic_type] == selected_demo
sample_type_mask = df["Population type"] == sam_type
summary_mask = df["Weight Category"] != "(4) Obesity (Classes 1, 2, and 3) (BMI 30+)"
subsected_df = df[selected_demo_mask & sample_type_mask & summary_mask]
subsected_df["Population"] = subsected_df["Population"].fillna(0)
ax = sns.barplot(
data=subsected_df, y="Weight Category", x="Population", ci=None
)
max_x = max(subsected_df["Population"])
plt.xlim(left=0, right=max_x + max_x / 10) # set the xlim to left, right
for p in ax.patches:
width = p.get_width() # get bar length
if width == 0:
ax.text(
width + max_x / 2.3,
p.get_y()
+ p.get_height() / 2, # vertical center of the bar: y + height / 2
"Suppressed Data", # annotation text to add
ha="left", # horizontal alignment
va="center", # vertical alignment
size=16,
) # font size
else:
ax.text(
width + 1, # set the text at 1 unit right of the bar
p.get_y()
+ p.get_height() / 2, # vertical center of the bar: y + height / 2
"{:,.0f}".format(width), # value to display, no decimals
ha="left", # horizontal alignment
va="center",
) # vertical alignment
# after plotting the data, format the labels
label_format = "{:,.0f}"
ax.xaxis.set_major_locator(mticker.MaxNLocator(3))
ticks_loc = ax.get_xticks().tolist()
ax.xaxis.set_major_locator(mticker.FixedLocator(ticks_loc))
ax.set_xticklabels([label_format.format(x) for x in ticks_loc])
# plt.savefig(path + '{}'.format(Type) + "_{}".format(Race)+".png")
state = list(subsected_df["state"])[0]
plt.title(
"{}".format(sam_type)
+ " Size by BMI Category for \n{}".format(selected_demo)
+ " {}".format(population_group)
+ " Data"
+ " in {}".format(state),
fontsize=20,
pad=20,
)
plt.xlabel("{}".format(sam_type), fontsize=16)
# subsected_df['Weight Category'] = ['\n'.join(wrap(x, 12)) for x in subsected_df['Weight Category']]
adult_labels = [
"(1) Underweight \n(BMI<18.5)",
"(2) Healthy Weight \n(18.5<=BMI<25)",
"(3) Overweight \n(25<=BMI<30)",
"(4a) Obesity (Class 1) \n(30<=BMI<35)",
"(4b) Obesity (Class 2) \n(35<=BMI<40)",
"(4c) Obesity (Class 3) - Severe Obesity \n(BMI 40+)",
]
ax.yaxis.set_ticklabels(adult_labels)
plt.ylabel("BMI Category", fontsize=16)
plt.show()
def plot_prevalence(df, selected_demo, prevalence_type, demographic_type, population_group):
"""
Creates a horizontal bar plot that plots the prevelance of the perscribed sample type by BMI category
Parameters:
df: Data frame created using create_population_df() function
selected_demo: selected subsected demographic from dropdown. Options will change depending on demographic type.
prevalence_type: type of prevalence, selected from dropdown, expected Values ['Crude', 'Age-Adjusted', 'Weighted']
demographic_type: Demographic that they are comparing, select from dropdown earlier in notebook, expected values ['sex', 'race', 'age']
population_group: Type of population, expected inputs ['Pediatric', 'Adult']
Returns:
A horizontal bar plot that plots the perevalence of the perscribed sample type by BMI category with the
standard error calculated with CODI-PQ represented by error bars. """
if population_group == "Pediatric":
plt.figure(figsize=(10, 8))
selected_demo_mask = (df[demographic_type] == selected_demo)
prev_type_mask = df["Prevalence type"] == prevalence_type
subsected_df = df[selected_demo_mask & prev_type_mask]
subsected_df = subsected_df.fillna(0)
subsected_df["Prevalence"] = pd.to_numeric(subsected_df["Prevalence"])
subsected_df["Standard Error"] = pd.to_numeric(subsected_df["Standard Error"])
ax = sns.barplot(data=subsected_df, y="Weight Category", x="Prevalence", ci=None)
max_x = max(subsected_df["Prevalence"])
plt.xlim(left=0, right=max_x + max_x / 10) # set the xlim to left, right
for p in ax.patches:
width = p.get_width() # get bar length
if width == 0:
ax.text(
width + max_x / 2.3,
p.get_y() + p.get_height() / 2, # vertical center of the bar: y + height / 2
"Suppressed Data", # annotation text to add
ha="left", # horizontal alignment
va="center", # vertical alignment
size=16,
) # font size
ax.errorbar(
y=subsected_df["Weight Category"],
x=subsected_df["Prevalence"],
xerr=subsected_df["Standard Error"],
linewidth=1.5,
color="black",
alpha=0.4,
capsize=8,
ls="none",
capthick = 2
)
label_format = "{:,.0f}"
ax.xaxis.set_major_locator(mticker.MaxNLocator(3))
ticks_loc = ax.get_xticks().tolist()
ax.xaxis.set_major_locator(mticker.FixedLocator(ticks_loc))
ax.set_xticklabels([label_format.format(x) for x in ticks_loc])
state = subsected_df["state"].unique()[0]
plt.title(
"BMI Category {}".format(prevalence_type)
+ "\n Prevalence for {}".format(selected_demo)
+ " {}".format(population_group)
+ " Data"
+ " in {}".format(state),
fontsize=14,
pad=20,
)
plt.xlabel("Prevalence", fontsize=16)
# subsected_df['BMI Category'] = ['\n'.join(wrap(x, 12)) for x in subsected_df['BMI Category']]
plt.ylabel("BMI Category", fontsize=16)
peds_labels = [
"(1) Underweight \n(<5th percentile)",
"(2) Healthy Weight \n(5th to <85th percentile)",
"(3) Overweight \n(85th to <95th percentile)",
"(4) Obesity \n(>95th percentile)",
"(4b) Severe Obesity \n(>120% of the 95th percentile)",
]
ax.yaxis.set_ticklabels(peds_labels)
plt.show()
elif population_group == 'Adult':
plt.figure(figsize=(10, 8))
selected_demo_mask = df[demographic_type] == selected_demo
prev_type_mask = df["Prevalence type"] == prevalence_type
summary_mask = df["Weight Category"] != "(4) Obesity (Classes 1, 2, and 3) (BMI 30+)"
subsected_df = df[selected_demo_mask & prev_type_mask & summary_mask]
subsected_df = subsected_df.fillna(0)
subsected_df["Prevalence"] = pd.to_numeric(subsected_df["Prevalence"])
subsected_df["Standard Error"] = pd.to_numeric(subsected_df["Standard Error"])
ax = sns.barplot(data=subsected_df, y="Weight Category", x="Prevalence", ci=None)
max_x = max(subsected_df["Prevalence"])
plt.xlim(left=0, right=max_x + max_x / 10) # set the xlim to left, right
for p in ax.patches:
width = p.get_width() # get bar length
if width == 0:
ax.text(
width + max_x / 2.3,
p.get_y() + p.get_height() / 2, # vertical center of the bar: y + height / 2
"Suppressed Data", # annotation text to add
ha="left", # horizontal alignment
va="center", # vertical alignment
size=16,
) # font size
ax.errorbar(
y=subsected_df["Weight Category"],
x=subsected_df["Prevalence"],
xerr=subsected_df["Standard Error"],
linewidth=1.5,
color="black",
alpha=0.4,
capsize=8,
ls="none",
capthick = 2
)
label_format = "{:,.0f}"
ax.xaxis.set_major_locator(mticker.MaxNLocator(3))
ticks_loc = ax.get_xticks().tolist()
ax.xaxis.set_major_locator(mticker.FixedLocator(ticks_loc))
ax.set_xticklabels([label_format.format(x) for x in ticks_loc])
state = subsected_df["state"].unique()[0]
plt.title(
"BMI Category {}".format(prevalence_type)
+ "\n Prevalence for {}".format(selected_demo)
+ " {}".format(population_group)
+ " Data"
+ " in {}".format(state),
fontsize=14,
pad=20,
)
plt.xlabel("Prevalence", fontsize=16)
# subsected_df['BMI Category'] = ['\n'.join(wrap(x, 12)) for x in subsected_df['BMI Category']]
plt.ylabel("BMI Category", fontsize=16)
adult_labels = [
"(1) Underweight \n(BMI<18.5)",
"(2) Healthy Weight \n(18.5<=BMI<25)",
"(3) Overweight \n(25<=BMI<30)",
"(4a) Obesity (Class 1) \n(30<=BMI<35)",
"(4b) Obesity (Class 2) \n(35<=BMI<40)",
"(4c) Obesity (Class 3) - Severe Obesity \n(BMI 40+)",
]
ax.yaxis.set_ticklabels(adult_labels)
# plt.savefig(path + '{}'.format(prevalence_type) + "_{}".format(selected_demo)+".png")
plt.show()
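# A hedged usage sketch (argument values are illustrative; the real notebook builds
# df with create_population_df(), which is not shown in this file):
#
# df = create_population_df(...)  # hypothetical constructor from the notebook
# plot_pop(df, selected_demo="Female", sam_type="Sample",
#          demographic_type="sex", population_group="Adult")
# plot_prevalence(df, selected_demo="Female", prevalence_type="Crude",
#                 demographic_type="sex", population_group="Adult")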
[record] ve/fwrisc_rv32imc/tests/pyfv-hpi/fwrisc_tests/__init__.py | Kamran-10xe/fwrisc @ 5c742c60ba620944ba19741c02782fb6b45d514e | hexsha 80e84865f447fa3dac9bf6562c6553b4d52842dd | Apache-2.0 | Python | 140 bytes | stars: 10 (2019-10-02 to 2021-06-13) | issues: 1 (2021-12-04 to 2022-02-18) | forks: 8 (2021-02-08 to 2022-03-01)
print("Hello from fwrisc_tests")
from fwrisc_tests.instr import instr_main
from fwrisc_tests.riscv_compliance import riscv_compliance_main
[record] tests/test_search/test_search_shapes_without_search_doc.py | newmediaresearch/vidispine-adapter @ 95d5b1956c3767ff9ff0626048f52628d5322cab | hexsha 0386e491e453d00e3a43e58aa5f976e4f825110d | MIT | Python | 1,065 bytes | issues: 1 (2021-03-16)
def test_search_shape(vidispine, cassette, item):
result = vidispine.search.shape()
assert 'id' in result['shape'][0]
assert result['shape'][0]['item'][0]['id'] == item
assert cassette.all_played
def test_search_shape_with_params(vidispine, cassette, item):
result = vidispine.search.shape(params={'content': 'metadata'})
assert 'id' in result['shape'][0]
assert result['shape'][0]['item'][0]['id'] == item
assert cassette.all_played
def test_search_shape_with_matrix_params(vidispine, cassette, item):
result = vidispine.search.shape(matrix_params={'number': 10, 'first': 1})
assert 'id' in result['shape'][0]
assert result['shape'][0]['item'][0]['id'] == item
assert cassette.all_played
def test_search_shape_with_params_and_matrix_params(vidispine, cassette, item):
result = vidispine.search.shape(
params={'content': 'metadata'}, matrix_params={'number': 10}
)
assert 'id' in result['shape'][0]
assert result['shape'][0]['item'][0]['id'] == item
assert cassette.all_played
[record] app/utils.py | techouse/nordend @ 9129c5dc75f338ba0b4fc6c6a8b6bfdc334264d4 | hexsha 03ddcfe63e8c1da792f2667611fd793ce818101c | MIT | Python | 174 bytes | issues: 1 (2020-03-03)
from urllib.parse import urljoin, quote_plus
def multi_urljoin(*parts):
return urljoin(parts[0], "/".join(quote_plus(part.strip("/"), safe="/") for part in parts[1:]))
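# A minimal usage sketch (the URL is illustrative): each part is stripped of leading
# and trailing slashes and percent-quoted (spaces become "+") before joining onto the
# base.
if __name__ == "__main__":
    print(multi_urljoin("https://example.com/api/", "users", "John Doe"))
    # -> https://example.com/api/users/John+Doe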
[record] tests/setup.py | dkudeki/BookwormDB @ dcb3dab8eab07b9cff283a85a37a1a57428d1126 | hexsha 03f9fd7426ccfa9b432865e4104b5f5f60e93d50 | MIT | Python | 3,002 bytes | stars: 73 (2015-01-14 to 2022-01-21) | issues: 96 (2015-01-19 to 2021-09-10) | forks: 13 (2015-01-19 to 2021-08-16)
from __future__ import print_function
import bookwormDB
import bookwormDB.CreateDatabase
from bookwormDB.general_API import SQLAPIcall as SQLAPIcall
import logging
import os
from subprocess import call as call
import sys
import json
from shutil import rmtree
def setup_bookworm():
"""
Creates a test bookworm. Removes any existing databases called "federalist_bookworm"
"""
logging.info("\n\nTESTING BOOKWORM CREATION\n\n")
import MySQLdb
from warnings import filterwarnings
filterwarnings('ignore', category = MySQLdb.Warning)
import bookwormDB.configuration
os.chdir(sys.path[0] + "/test_bookworm_files")
rmtree(".bookworm", ignore_errors = True)
bookwormDB.configuration.create(ask_about_defaults=False, database="federalist_bookworm")
db = bookwormDB.CreateDatabase.DB(dbname="mysql")
try:
db.query("DROP DATABASE IF EXISTS federalist_bookworm")
except MySQLdb.OperationalError as e:
if e.args[0] == 1008:
pass
else:
print(e)
raise
except Exception as e:
"""
This is some weird MariaDB exception. It sucks that I'm compensating for it here.
"""
if e[0]=="Cannot load from mysql.proc. The table is probably corrupted":
pass
else:
print(e)
logging.warning("Some mysterious error in attempting to drop previous iterations: just try running it again?")
call(["bookworm --log-level warning build all"],shell=True,cwd=sys.path[0] + "/test_bookworm_files")
def setup_bookworm_unicode():
"""
Creates a test bookworm. Removes any existing databases called "unicode_test_bookworm"
"""
logging.info("\n\nTESTING BOOKWORM CREATION\n\n")
import MySQLdb
from warnings import filterwarnings
filterwarnings('ignore', category = MySQLdb.Warning)
import bookwormDB.configuration
os.chdir(sys.path[0] + "/test_bookworm_files_unicode")
rmtree(".bookworm", ignore_errors = True)
bookwormDB.configuration.create(ask_about_defaults=False,database="unicode_test_bookworm")
db = bookwormDB.CreateDatabase.DB(dbname="mysql")
try:
db.query("DROP DATABASE IF EXISTS unicode_test_bookworm")
except MySQLdb.OperationalError as e:
if e.args[0] == 1008:
pass
else:
print(e)
raise
except Exception as e:
"""
This is some weird MariaDB exception. It sucks that I'm compensating for it here.
"""
if e[0]=="Cannot load from mysql.proc. The table is probably corrupted":
pass
else:
logging.warning("Some mysterious error in attempting to drop previous iterations: just try running it again?")
call(["bookworm --log-level warning build all"],
shell=True,
cwd=sys.path[0] + "/test_bookworm_files_unicode")
if __name__=="__main__":
setup_bookworm()
setup_bookworm_unicode()
[record] async_stripe/api_resources/issuing/__init__.py | bhch/async-stripe @ 75d934a8bb242f664e7be30812c12335cf885287 | hexsha 206a74c2f87f04c18f5e20a54506851dcf2d501b | MIT, BSD-3-Clause | Python | 167 bytes | stars: 8 (2021-05-29 to 2022-02-19) | issues: 5 (2021-05-31 to 2022-01-25) | forks: 1 (2021-05-29)
from async_stripe.api_resources.issuing import authorization
from async_stripe.api_resources.issuing import card
from async_stripe.api_resources.issuing import dispute
[record] tests/crystal/SimulationCell/test_SimulationCell0.py | eragasa/mexm-base @ c8d84057c483e1bd06bb8b2e835274f6a4cd61b9 | hexsha 456de17d218efe2b7f0bc35c4ad4fb21bf4032d8 | MIT | Python | 3,793 bytes | stars: 1 (2021-01-03)
import pytest
import numpy as np
from mexm.crystal import Atom
from mexm.crystal import Lattice
from simulationcell import SimulationCell
def test__init__noargs():
cell = SimulationCell()
assert isinstance(cell.lattice, Lattice)
# testing properties
assert isinstance(cell.H, np.ndarray)
np.testing.assert_array_equal(cell.H,
cell.lattice.H)
assert isinstance(cell.a0, float)
assert cell.a0 == cell.lattice.a0
assert cell.atomic_basis == []
assert cell.vacancies == []
assert cell.interstitials == []
def test__H__set_w_list_of_lists():
cell = SimulationCell()
cell.H = [[1,2,3],
[4,5,6],
[7,8,9]]
np.testing.assert_array_equal(cell.h1,
np.array([1,4,7]))
np.testing.assert_array_equal(cell.h2,
np.array([2,5,8]))
np.testing.assert_array_equal(cell.h3,
np.array([3,6,9]))
def test__H__set_w_numpyarray():
cell = SimulationCell()
cell.H = np.array([[1,2,3],
[4,5,6],
[7,8,9]])
np.testing.assert_array_equal(cell.h1,
np.array([1,4,7]))
np.testing.assert_array_equal(cell.h2,
np.array([2,5,8]))
np.testing.assert_array_equal(cell.h3,
np.array([3,6,9]))
def test__h1__set_w_list():
cell = SimulationCell()
cell.H = [[1,2,3],[4,5,6],[7,8,9]]
cell.h1 = [1,2,3]
np.testing.assert_array_equal(cell.h1,
np.array([1,2,3]))
np.testing.assert_array_equal(cell.h2,
np.array([2,5,8]))
np.testing.assert_array_equal(cell.h3,
np.array([3,6,9]))
def test__h1__set_w_numpyarray():
cell = SimulationCell()
cell.H = [[1,2,3],[4,5,6],[7,8,9]]
cell.h1 = np.array([1,2,3])
np.testing.assert_array_equal(cell.h1,
np.array([1,2,3]))
np.testing.assert_array_equal(cell.h2,
np.array([2,5,8]))
np.testing.assert_array_equal(cell.h3,
np.array([3,6,9]))
def test__h2__set_w_list():
cell = SimulationCell()
cell.H = [[1,2,3],[4,5,6],[7,8,9]]
cell.h2 = [4,5,6]
np.testing.assert_array_equal(cell.h1,
np.array([1,4,7]))
np.testing.assert_array_equal(cell.h2,
np.array([4,5,6]))
np.testing.assert_array_equal(cell.h3,
np.array([3,6,9]))
def test__h2__set_w_numpyarray():
cell = SimulationCell()
cell.H = [[1,2,3],[4,5,6],[7,8,9]]
cell.h2 = np.array([4,5,6])
np.testing.assert_array_equal(cell.h1,
np.array([1,4,7]))
np.testing.assert_array_equal(cell.h2,
np.array([4,5,6]))
np.testing.assert_array_equal(cell.h3,
np.array([3,6,9]))
def test__h3__set_w_list():
cell = SimulationCell()
cell.H = [[1,2,3],[4,5,6],[7,8,9]]
cell.h3 = [7,8,9]
np.testing.assert_array_equal(cell.h1, np.array([1,4,7]))
np.testing.assert_array_equal(cell.h2, np.array([2,5,8]))
np.testing.assert_array_equal(cell.h3, np.array([7,8,9]))
def test__h3__set_w_numpyarray():
cell = SimulationCell()
cell.H = [[1,2,3],[4,5,6],[7,8,9]]
cell.h3 = np.array([7,8,9])
np.testing.assert_array_equal(cell.h1,np.array([1,4,7]))
np.testing.assert_array_equal(cell.h2,np.array([2,5,8]))
np.testing.assert_array_equal(cell.h3,np.array([7,8,9]))
if __name__ == "__main__":
cell = SimulationCell()
[record] server/ca/models.py | knaou/mysign @ 83a2748f2e3a69bc8741bc6a4ee2bb508a8aadba | hexsha 4584a48b6628705110851e08974e633e5e46d84a | MIT | Python | 1,140 bytes | issues: 5 (2020-07-16 to 2021-10-05)
from django.db import models
class CertificateAuthority(models.Model):
name = models.CharField(max_length=255, null=False)
description = models.TextField(null=False, blank=True, default='')
next_serial = models.IntegerField(null=False)
key_pem = models.TextField(null=False)
csr_pem = models.TextField(null=False)
cert_pem = models.TextField(null=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Certificate(models.Model):
certificate_authority = models.ForeignKey(CertificateAuthority, related_name='certificates', on_delete=models.CASCADE)
name = models.CharField(max_length=255, null=False)
description = models.TextField(null=False, blank=True, default='')
serial = models.IntegerField(null=False)
key_pem = models.TextField(null=False)
csr_pem = models.TextField(null=False)
cert_pem = models.TextField(null=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
unique_together = ('certificate_authority', 'serial',)
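# A hedged usage sketch (field values are illustrative; running it requires a
# configured Django project, so it is shown as comments only):
#
# ca = CertificateAuthority.objects.create(
#     name="root-ca", next_serial=1, key_pem="...", csr_pem="...", cert_pem="...")
# cert = Certificate.objects.create(
#     certificate_authority=ca, name="server", serial=ca.next_serial,
#     key_pem="...", csr_pem="...", cert_pem="...")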
[record] tests/project/manage.py | Zadigo/zacoby @ 468168e416603e00ef1541990843fd7e841f0ff2 | hexsha 45e1e7c510760c75b1bf6ce5886ce13a5e951a43 | MIT | Python | 137 bytes | stars: 1 (2021-02-25)
#!/usr/bin/env python
import sys
import os
def execute_command_inline():
os.environ.setdefault('ZACOBY_PROJECT_SETTINGS', 'project.settings')
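# A plausible entry point (our assumption, mirroring Django-style manage.py scripts;
# it is not present in the original file):
if __name__ == '__main__':
    execute_command_inline()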
[record] src/chase_the_pair.py | AlbertSuarez/hackeps-chasethepair @ 15b2e3fad1601baa7ada4f2e50f704646ffb9f39 | hexsha afe5a583bb668635916096efefc0048e4f487138 | MIT | Python | 135 bytes | forks: 1 (2019-10-29)
def solve(set_a, set_b, to_chase):
return min(set_a, key=lambda x: abs(x - to_chase)), min(set_b, key=lambda x: abs(x - to_chase))
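# A minimal usage sketch (numbers are illustrative): pick, from each set, the element
# closest to the chased value.
if __name__ == '__main__':
    print(solve({1, 5, 9}, {2, 8}, 6))  # -> (5, 8)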
[record] arduino_iot_rest/api/devices_v2_pass_api.py | arduino/iot-client-py @ 0f17200aa0939b960ba1ddff146cca46643ee268 | hexsha b34b28a64b36f43f024e790478b48693eec20219 | Apache-2.0 | Python | 23,707 bytes | stars: 13 (2020-01-19 to 2022-02-27) | issues: 10 (2019-11-26 to 2021-03-25) | forks: 10 (2020-01-19 to 2021-12-09)
# coding: utf-8
"""
Arduino IoT Cloud API
Provides a set of endpoints to manage Arduino IoT Cloud **Devices**, **Things**, **Properties** and **Timeseries**. This API can be called just with any HTTP Client, or using one of these clients: * [Javascript NPM package](https://www.npmjs.com/package/@arduino/arduino-iot-client) * [Python PYPI Package](https://pypi.org/project/arduino-iot-client/) * [Golang Module](https://github.com/arduino/iot-client-go) # noqa: E501
The version of the OpenAPI document: 2.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from arduino_iot_rest.api_client import ApiClient
from arduino_iot_rest.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class DevicesV2PassApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def devices_v2_pass_check(self, id, check_devices_v2_pass_payload, **kwargs): # noqa: E501
"""check devices_v2_pass # noqa: E501
Check if the password matches. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.devices_v2_pass_check(id, check_devices_v2_pass_payload, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the device (required)
:param CheckDevicesV2PassPayload check_devices_v2_pass_payload: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.devices_v2_pass_check_with_http_info(id, check_devices_v2_pass_payload, **kwargs) # noqa: E501
def devices_v2_pass_check_with_http_info(self, id, check_devices_v2_pass_payload, **kwargs): # noqa: E501
"""check devices_v2_pass # noqa: E501
Check if the password matches. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.devices_v2_pass_check_with_http_info(id, check_devices_v2_pass_payload, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the device (required)
:param CheckDevicesV2PassPayload check_devices_v2_pass_payload: (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'id',
'check_devices_v2_pass_payload'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method devices_v2_pass_check" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `devices_v2_pass_check`") # noqa: E501
# verify the required parameter 'check_devices_v2_pass_payload' is set
if self.api_client.client_side_validation and ('check_devices_v2_pass_payload' not in local_var_params or # noqa: E501
local_var_params['check_devices_v2_pass_payload'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `check_devices_v2_pass_payload` when calling `devices_v2_pass_check`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'check_devices_v2_pass_payload' in local_var_params:
body_params = local_var_params['check_devices_v2_pass_payload']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/v2/devices/{id}/pass', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def devices_v2_pass_delete(self, id, **kwargs): # noqa: E501
"""delete devices_v2_pass # noqa: E501
Removes the password for the device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.devices_v2_pass_delete(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the device (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.devices_v2_pass_delete_with_http_info(id, **kwargs) # noqa: E501
def devices_v2_pass_delete_with_http_info(self, id, **kwargs): # noqa: E501
"""delete devices_v2_pass # noqa: E501
Removes the password for the device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.devices_v2_pass_delete_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the device (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method devices_v2_pass_delete" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `devices_v2_pass_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/v2/devices/{id}/pass', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def devices_v2_pass_get(self, id, **kwargs): # noqa: E501
"""get devices_v2_pass # noqa: E501
Returns whether the password for this device is set or not. It doesn't return the password. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.devices_v2_pass_get(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the device (required)
:param bool suggested_password: If true, return a suggested password
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ArduinoDevicev2Pass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.devices_v2_pass_get_with_http_info(id, **kwargs) # noqa: E501
def devices_v2_pass_get_with_http_info(self, id, **kwargs): # noqa: E501
"""get devices_v2_pass # noqa: E501
Returns whether the password for this device is set or not. It doesn't return the password. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.devices_v2_pass_get_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the device (required)
:param bool suggested_password: If true, return a suggested password
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ArduinoDevicev2Pass, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'id',
'suggested_password'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method devices_v2_pass_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `devices_v2_pass_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
if 'suggested_password' in local_var_params and local_var_params['suggested_password'] is not None: # noqa: E501
query_params.append(('suggested_password', local_var_params['suggested_password'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/v2/devices/{id}/pass', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArduinoDevicev2Pass', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def devices_v2_pass_set(self, id, devicev2_pass, **kwargs): # noqa: E501
"""set devices_v2_pass # noqa: E501
Sets the password for the device. It can never be read back. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.devices_v2_pass_set(id, devicev2_pass, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the device (required)
:param Devicev2Pass devicev2_pass: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ArduinoDevicev2Pass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.devices_v2_pass_set_with_http_info(id, devicev2_pass, **kwargs) # noqa: E501
def devices_v2_pass_set_with_http_info(self, id, devicev2_pass, **kwargs): # noqa: E501
"""set devices_v2_pass # noqa: E501
Sets the password for the device. It can never be read back. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.devices_v2_pass_set_with_http_info(id, devicev2_pass, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the device (required)
:param Devicev2Pass devicev2_pass: (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ArduinoDevicev2Pass, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'id',
'devicev2_pass'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method devices_v2_pass_set" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `devices_v2_pass_set`") # noqa: E501
# verify the required parameter 'devicev2_pass' is set
if self.api_client.client_side_validation and ('devicev2_pass' not in local_var_params or # noqa: E501
local_var_params['devicev2_pass'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `devicev2_pass` when calling `devices_v2_pass_set`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'devicev2_pass' in local_var_params:
body_params = local_var_params['devicev2_pass']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/v2/devices/{id}/pass', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArduinoDevicev2Pass', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
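A minimal call sketch against this generated client (OAuth2 token configuration is assumed to be set on the ApiClient and is omitted here; the device id is a placeholder):

    from arduino_iot_rest.api_client import ApiClient
    from arduino_iot_rest.api.devices_v2_pass_api import DevicesV2PassApi

    api = DevicesV2PassApi(ApiClient())
    info = api.devices_v2_pass_get('my-device-id')  # returns an ArduinoDevicev2Pass per the docstring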
| 45.15619
| 434
| 0.597503
| 2,719
| 23,707
| 4.947407
| 0.079441
| 0.043414
| 0.061404
| 0.026762
| 0.924844
| 0.910497
| 0.906259
| 0.899792
| 0.882471
| 0.875483
| 0
| 0.020299
| 0.328806
| 23,707
| 524
| 435
| 45.242366
| 0.825101
| 0.451175
| 0
| 0.705645
| 0
| 0
| 0.181834
| 0.066137
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03629
| false
| 0.169355
| 0.020161
| 0
| 0.092742
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
b374c27c50014e5ec53b549578c6e4cf909f7002
| 168
|
py
|
Python
|
acdc_nn/__init__.py
|
compbiomed-unito/acdc-nn
|
0800a5904c36302f19e48e2d2f7ddae9686f3366
|
[
"MIT"
] | 2
|
2021-07-13T21:41:39.000Z
|
2022-01-27T23:51:10.000Z
|
acdc_nn/__init__.py
|
compbiomed-unito/acdc-nn
|
0800a5904c36302f19e48e2d2f7ddae9686f3366
|
[
"MIT"
] | 1
|
2021-09-15T15:53:39.000Z
|
2021-09-15T15:53:39.000Z
|
acdc_nn/__init__.py
|
compbiomed-unito/acdc-nn
|
0800a5904c36302f19e48e2d2f7ddae9686f3366
|
[
"MIT"
] | 4
|
2021-07-13T21:41:40.000Z
|
2022-01-27T16:41:49.000Z
|
#from acdc_nn.cmd import main
#from acdc_nn import nn
from acdc_nn import nn
from acdc_nn import util
from acdc_nn.acdc_nn import ACDC3D, ACDCSeq, load_prot, run_tests
| 28
| 65
| 0.821429
| 33
| 168
| 3.939394
| 0.393939
| 0.276923
| 0.384615
| 0.369231
| 0.4
| 0.4
| 0.4
| 0.4
| 0.4
| 0
| 0
| 0.006897
| 0.136905
| 168
| 5
| 66
| 33.6
| 0.889655
| 0.297619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
2fcdca86944ffd2d43f5d30098891e0eb527b9b6
| 114
|
py
|
Python
|
8_kyu/Geometry_Basics_Distance_between_points_in_2D.py
|
UlrichBerntien/Codewars-Katas
|
bbd025e67aa352d313564d3862db19fffa39f552
|
[
"MIT"
] | null | null | null |
8_kyu/Geometry_Basics_Distance_between_points_in_2D.py
|
UlrichBerntien/Codewars-Katas
|
bbd025e67aa352d313564d3862db19fffa39f552
|
[
"MIT"
] | null | null | null |
8_kyu/Geometry_Basics_Distance_between_points_in_2D.py
|
UlrichBerntien/Codewars-Katas
|
bbd025e67aa352d313564d3862db19fffa39f552
|
[
"MIT"
] | null | null | null |
import math
def distance_between_points(a, b):
    return math.sqrt(math.pow(a.x - b.x, 2) + math.pow(a.y - b.y, 2))
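A quick sanity check (any object exposing x and y attributes works; a namedtuple stands in here for illustration):

    from collections import namedtuple

    Point = namedtuple('Point', 'x y')
    assert distance_between_points(Point(0, 0), Point(3, 4)) == 5.0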
| 22.8
| 65
| 0.666667
| 25
| 114
| 2.96
| 0.56
| 0.189189
| 0.216216
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020408
| 0.140351
| 114
| 4
| 66
| 28.5
| 0.734694
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
ff973b1e264a5504622891222a1f7609e0232cb1
| 11,307
|
py
|
Python
|
ansible-devel/test/units/utils/test_plugin_docs.py
|
satishcarya/ansible
|
ed091e174c26316f621ac16344a95c99f56bdc43
|
[
"MIT"
] | null | null | null |
ansible-devel/test/units/utils/test_plugin_docs.py
|
satishcarya/ansible
|
ed091e174c26316f621ac16344a95c99f56bdc43
|
[
"MIT"
] | null | null | null |
ansible-devel/test/units/utils/test_plugin_docs.py
|
satishcarya/ansible
|
ed091e174c26316f621ac16344a95c99f56bdc43
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# (c) 2020 Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import copy
import pytest
from ansible.utils.plugin_docs import (
add_collection_to_versions_and_dates,
)
ADD_TESTS = [
(
# Module options
True,
False,
{
'author': 'x',
'version_added': '1.0.0',
'deprecated': {
'removed_in': '2.0.0',
},
'options': {
'test': {
'description': '',
'type': 'str',
'version_added': '1.1.0',
'deprecated': {
# should not be touched since this isn't a plugin
'removed_in': '2.0.0',
},
'env': [
# should not be touched since this isn't a plugin
{
'version_added': '1.3.0',
'deprecated': {
'version': '2.0.0',
},
},
],
'ini': [
# should not be touched since this isn't a plugin
{
'version_added': '1.3.0',
'deprecated': {
'version': '2.0.0',
},
},
],
'vars': [
# should not be touched since this isn't a plugin
{
'version_added': '1.3.0',
'deprecated': {
'removed_at_date': '2020-01-01',
},
},
],
},
'subtest': {
'description': '',
'type': 'dict',
'deprecated': {
# should not be touched since this isn't a plugin
'version': '2.0.0',
},
'suboptions': {
'suboption': {
'description': '',
'type': 'int',
'version_added': '1.2.0',
}
},
}
},
},
{
'author': 'x',
'version_added': '1.0.0',
'version_added_collection': 'foo.bar',
'deprecated': {
'removed_in': '2.0.0',
'removed_from_collection': 'foo.bar',
},
'options': {
'test': {
'description': '',
'type': 'str',
'version_added': '1.1.0',
'version_added_collection': 'foo.bar',
'deprecated': {
# should not be touched since this isn't a plugin
'removed_in': '2.0.0',
},
'env': [
# should not be touched since this isn't a plugin
{
'version_added': '1.3.0',
'deprecated': {
'version': '2.0.0',
},
},
],
'ini': [
# should not be touched since this isn't a plugin
{
'version_added': '1.3.0',
'deprecated': {
'version': '2.0.0',
},
},
],
'vars': [
# should not be touched since this isn't a plugin
{
'version_added': '1.3.0',
'deprecated': {
'removed_at_date': '2020-01-01',
},
},
],
},
'subtest': {
'description': '',
'type': 'dict',
'deprecated': {
# should not be touched since this isn't a plugin
'version': '2.0.0',
},
'suboptions': {
'suboption': {
'description': '',
'type': 'int',
'version_added': '1.2.0',
'version_added_collection': 'foo.bar',
}
},
}
},
},
),
(
# Module options
True,
False,
{
'author': 'x',
'deprecated': {
'removed_at_date': '2020-01-01',
},
},
{
'author': 'x',
'deprecated': {
'removed_at_date': '2020-01-01',
'removed_from_collection': 'foo.bar',
},
},
),
(
# Plugin options
False,
False,
{
'author': 'x',
'version_added': '1.0.0',
'deprecated': {
'removed_in': '2.0.0',
},
'options': {
'test': {
'description': '',
'type': 'str',
'version_added': '1.1.0',
'deprecated': {
# should not be touched since this is the wrong name
'removed_in': '2.0.0',
},
'env': [
{
'version_added': '1.3.0',
'deprecated': {
'version': '2.0.0',
},
},
],
'ini': [
{
'version_added': '1.3.0',
'deprecated': {
'version': '2.0.0',
},
},
],
'vars': [
{
'version_added': '1.3.0',
'deprecated': {
'removed_at_date': '2020-01-01',
},
},
],
},
'subtest': {
'description': '',
'type': 'dict',
'deprecated': {
'version': '2.0.0',
},
'suboptions': {
'suboption': {
'description': '',
'type': 'int',
'version_added': '1.2.0',
}
},
}
},
},
{
'author': 'x',
'version_added': '1.0.0',
'version_added_collection': 'foo.bar',
'deprecated': {
'removed_in': '2.0.0',
'removed_from_collection': 'foo.bar',
},
'options': {
'test': {
'description': '',
'type': 'str',
'version_added': '1.1.0',
'version_added_collection': 'foo.bar',
'deprecated': {
# should not be touched since this is the wrong name
'removed_in': '2.0.0',
},
'env': [
{
'version_added': '1.3.0',
'version_added_collection': 'foo.bar',
'deprecated': {
'version': '2.0.0',
'collection_name': 'foo.bar',
},
},
],
'ini': [
{
'version_added': '1.3.0',
'version_added_collection': 'foo.bar',
'deprecated': {
'version': '2.0.0',
'collection_name': 'foo.bar',
},
},
],
'vars': [
{
'version_added': '1.3.0',
'version_added_collection': 'foo.bar',
'deprecated': {
'removed_at_date': '2020-01-01',
'collection_name': 'foo.bar',
},
},
],
},
'subtest': {
'description': '',
'type': 'dict',
'deprecated': {
'version': '2.0.0',
'collection_name': 'foo.bar',
},
'suboptions': {
'suboption': {
'description': '',
'type': 'int',
'version_added': '1.2.0',
'version_added_collection': 'foo.bar',
}
},
}
},
},
),
(
# Return values
True,  # this value is ignored
True,
{
'rv1': {
'version_added': '1.0.0',
'type': 'dict',
'contains': {
'srv1': {
'version_added': '1.1.0',
},
'srv2': {
},
}
},
},
{
'rv1': {
'version_added': '1.0.0',
'version_added_collection': 'foo.bar',
'type': 'dict',
'contains': {
'srv1': {
'version_added': '1.1.0',
'version_added_collection': 'foo.bar',
},
'srv2': {
},
}
},
},
),
]
@pytest.mark.parametrize('is_module,return_docs,fragment,expected_fragment', ADD_TESTS)
def test_add(is_module, return_docs, fragment, expected_fragment):
fragment_copy = copy.deepcopy(fragment)
add_collection_to_versions_and_dates(fragment_copy, 'foo.bar', is_module, return_docs)
assert fragment_copy == expected_fragment
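A minimal sketch of the transformation these fixtures pin down, inferred only from the expected fragments above (the bare dict below is an assumption; the real fixtures are richer):

    import copy
    from ansible.utils.plugin_docs import add_collection_to_versions_and_dates

    doc = {'author': 'x', 'version_added': '1.0.0'}
    out = copy.deepcopy(doc)
    add_collection_to_versions_and_dates(out, 'foo.bar', True, False)
    # per the fixtures, out should now also carry:
    #   'version_added_collection': 'foo.bar'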
| 33.853293
| 92
| 0.283187
| 711
| 11,307
| 4.340366
| 0.144866
| 0.151653
| 0.117952
| 0.069994
| 0.850292
| 0.841542
| 0.799741
| 0.767984
| 0.765716
| 0.701879
| 0
| 0.045817
| 0.600425
| 11,307
| 333
| 93
| 33.954955
| 0.637229
| 0.072521
| 0
| 0.633987
| 0
| 0
| 0.215555
| 0.036404
| 0
| 0
| 0
| 0
| 0.003268
| 1
| 0.003268
| false
| 0
| 0.013072
| 0
| 0.01634
| 0.003268
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ff9766ad1f0f0c936b0e4d25ba731dbea75eaecd
| 164
|
py
|
Python
|
tests/test_old_imports.py
|
Tapot/evidently
|
ab9b91425d622566b663565508dd1c43e741f515
|
[
"Apache-2.0"
] | null | null | null |
tests/test_old_imports.py
|
Tapot/evidently
|
ab9b91425d622566b663565508dd1c43e741f515
|
[
"Apache-2.0"
] | null | null | null |
tests/test_old_imports.py
|
Tapot/evidently
|
ab9b91425d622566b663565508dd1c43e741f515
|
[
"Apache-2.0"
] | null | null | null |
from evidently.widgets import * # noqa
from evidently.tabs import * # noqa
from evidently.profile_sections import * # noqa
def test_old_style_imports():
    pass
| 20.5
| 47
| 0.762195
| 22
| 164
| 5.5
| 0.636364
| 0.322314
| 0.231405
| 0.380165
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164634
| 164
| 7
| 48
| 23.428571
| 0.883212
| 0.085366
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0.2
| 0.8
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 8
|
ffbb0ea6656098781582e079b3b18de6c0a75cd0
| 124
|
py
|
Python
|
build/lib/pyEp/__init__.py
|
mlab-upenn/pyEp
|
14435158bba4c11df43dfac6b662e81d7d0029b9
|
[
"MIT"
] | 11
|
2018-06-20T16:09:50.000Z
|
2021-06-28T18:48:01.000Z
|
build/lib/pyEp/__init__.py
|
mlab-upenn/pyEp
|
14435158bba4c11df43dfac6b662e81d7d0029b9
|
[
"MIT"
] | 4
|
2018-05-09T18:14:52.000Z
|
2018-08-21T13:59:52.000Z
|
pyEp/__init__.py
|
mlab-upenn/pyEp
|
14435158bba4c11df43dfac6b662e81d7d0029b9
|
[
"MIT"
] | 2
|
2020-02-16T07:52:45.000Z
|
2021-09-19T05:19:41.000Z
|
from .pyEp import ep_process
from .pyEp import set_eplus_dir
from .pyEp import socket_builder
from .pyEp.pyEpError import *
| 24.8
| 32
| 0.822581
| 20
| 124
| 4.9
| 0.55
| 0.326531
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 124
| 4
| 33
| 31
| 0.907407
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4415890ad1ccb925c42d80066b7de3566fdd972b
| 86
|
py
|
Python
|
Release/Tests/AnalysisTest/Python.VS.TestData/Grammar/DelStmt.py
|
rsumner33/PTVS
|
f5d67cff8c7bb32992dd4f77c0dfddaca6071250
|
[
"Apache-2.0"
] | null | null | null |
Release/Tests/AnalysisTest/Python.VS.TestData/Grammar/DelStmt.py
|
rsumner33/PTVS
|
f5d67cff8c7bb32992dd4f77c0dfddaca6071250
|
[
"Apache-2.0"
] | null | null | null |
Release/Tests/AnalysisTest/Python.VS.TestData/Grammar/DelStmt.py
|
rsumner33/PTVS
|
f5d67cff8c7bb32992dd4f77c0dfddaca6071250
|
[
"Apache-2.0"
] | 1
|
2020-12-09T10:16:23.000Z
|
2020-12-09T10:16:23.000Z
|
del foo
del foo, bar
del foo.bar
del foo[bar]
del (foo, bar)
del [foo, bar]
del (foo)
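Taken together, the statements above exercise every del target form the grammar allows: a bare name, a tuple of names, an attribute, a subscript, and parenthesized or bracketed target lists. A tiny runnable illustration of two of these forms:

    d = {'k': 1}
    del d['k']   # subscript target
    x, y = 1, 2
    del x, y     # tuple of name targets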
| 12.285714
| 14
| 0.662791
| 20
| 86
| 2.9
| 0.2
| 0.724138
| 0.775862
| 1.034483
| 0.87931
| 0.87931
| 0.87931
| 0.87931
| 0.87931
| 0.87931
| 0
| 0
| 0.186047
| 86
| 7
| 15
| 12.285714
| 0.814286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
92b1cfe3dfddbb420f891cba76e5b630965f350d
| 131
|
py
|
Python
|
galera_node_health/examples.py
|
breakgard/galera-node-health
|
a26913c389740918d528829925b796dddd30f0f1
|
[
"MIT"
] | 1
|
2019-10-22T12:29:53.000Z
|
2019-10-22T12:29:53.000Z
|
galera_node_health/examples.py
|
breakgard/galera-node-health
|
a26913c389740918d528829925b796dddd30f0f1
|
[
"MIT"
] | null | null | null |
galera_node_health/examples.py
|
breakgard/galera-node-health
|
a26913c389740918d528829925b796dddd30f0f1
|
[
"MIT"
] | null | null | null |
import pkgutil
def print_config():
    print(pkgutil.get_data('galera_node_health', 'example_files/config.cfg').decode('ascii'))
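Calling the helper streams the packaged example config to stdout (this assumes example_files/config.cfg ships inside the installed galera_node_health package, as pkgutil.get_data requires):

    print_config()  # prints the bundled config.cfg verbatim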
| 21.833333
| 93
| 0.755725
| 18
| 131
| 5.222222
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091603
| 131
| 5
| 94
| 26.2
| 0.789916
| 0
| 0
| 0
| 0
| 0
| 0.358779
| 0.183206
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 7
|
2bf9c417510015e800891c5b2d93ea0d6bd47f66
| 41,849
|
py
|
Python
|
web/transiq/restapi/tests/tests_user_initial_data.py
|
manibhushan05/transiq
|
763fafb271ce07d13ac8ce575f2fee653cf39343
|
[
"Apache-2.0"
] | null | null | null |
web/transiq/restapi/tests/tests_user_initial_data.py
|
manibhushan05/transiq
|
763fafb271ce07d13ac8ce575f2fee653cf39343
|
[
"Apache-2.0"
] | 14
|
2020-06-05T23:06:45.000Z
|
2022-03-12T00:00:18.000Z
|
web/transiq/restapi/tests/tests_user_initial_data.py
|
manibhushan05/transiq
|
763fafb271ce07d13ac8ce575f2fee653cf39343
|
[
"Apache-2.0"
] | null | null | null |
import json
from model_mommy import mommy
from rest_framework.test import APITestCase
from django.urls import reverse
from rest_framework import status
from django.contrib.auth.models import User, Group
from broker.models import Broker
from employee.models import Employee
from restapi.models import UserCategory, EmployeeRoles, EmployeeRolesMapping, TaskDashboardFunctionalities, \
EmployeeRolesFunctionalityMapping
from sme.models import Sme
from supplier.models import Supplier
class UserInitialDataTests(APITestCase):
def setUp(self):
self.login_url = reverse('login')
self.logout_url = reverse('logout')
self.user = User.objects.create_user(username='john_doe',
email='harsh@gmail.com',
password='text1234')
self.login_data = self.client.post(self.login_url, {'username': 'john_doe', 'password': 'text1234'}).content
self.login_data = json.loads(self.login_data.decode('utf8'))
self.token = "Token {}".format(self.login_data["token"])
self.customer_user = User.objects.create_user(username='david',
email='david12@gmail.com',
password='pqrs1234'
)
self.sme = Sme.objects.create(name=self.customer_user)
sme_group = Group.objects.create(name='sme')
self.customer_user.groups.add(sme_group)
user_category = mommy.make(UserCategory, category='Customer')
self.customer_category_id = user_category.id
self.login_data = self.client.post(self.login_url, {'username': 'david', 'password': 'pqrs1234'}).content
self.login_data = json.loads(self.login_data.decode('utf8'))
self.customer_token = "Token {}".format(self.login_data["token"])
self.supplier_user = User.objects.create_user(username='james',
email='harshadasawant89@gmail.com',
password='pwd12345'
)
self.supplier = Supplier.objects.create(user=self.supplier_user)
user_supplier_category = mommy.make(UserCategory, category='Supplier')
self.supplier_category_id = user_supplier_category.id
self.login_data = self.client.post(self.login_url, {'username': 'james', 'password': 'pwd12345'}).content
self.login_data = json.loads(self.login_data.decode('utf8'))
self.supplier_token = "Token {}".format(self.login_data["token"])
self.broker_user = User.objects.create_user(username='sam',
email='harshadasawant89@gmail.com',
password='abc12345'
)
self.broker = Broker.objects.create(name=self.broker_user)
user_broker_category = mommy.make(UserCategory, category='Broker')
self.broker_category_id = user_broker_category.id
self.login_data = self.client.post(self.login_url, {'username': 'sam', 'password': 'abc12345'}).content
self.login_data = json.loads(self.login_data.decode('utf8'))
self.broker_token = "Token {}".format(self.login_data["token"])
"""
Test ID:TS01AH00088
Created By:Hari
Created On:06/12/2018
Scenario:get user initial data/
Status:success
Message:wrong method
Status code:405
"""
def test_user_initial_data_405_wrong_method(self):
# Negative test for getting user initial data with wrong method
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post("/api/get-user-initial-data/?category_id={}".format(self.customer_category_id))
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
"""
Test ID:TS01AH00089
Created By:Hari
Created On:06/12/2018
Scenario:get user initial data/
Status:failure
Message:no header
Status code:401
"""
def test_user_initial_data_401_no_header(self):
# Negative test for getting user initial data with no HTTP Header Authorization token
response = self.client.get("/api/get-user-initial-data/?category_id={}".format(self.customer_category_id))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data['detail'], "Authentication credentials were not provided.")
"""
Test ID:TS01AH00090
Created By:Hari
Created On:06/12/2018
Scenario:get user initial data/
Status:failure
Message:blank token
Status code:401
"""
def test_user_initial_data_401_blank_token(self):
# Negative test case for getting user initial data with blank HTTP Header Authorization token
self.token = ""
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.get("/api/get-user-initial-data/?category_id={}".format(self.customer_category_id))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data['detail'], "Authentication credentials were not provided.")
"""
Test ID:TS01AH00091
Created By:Hari
Created On:06/12/2018
Scenario:get user initial data/
Status:failure
Message:wrong token
Status code:401
"""
def test_user_initial_data_401_wrong_token(self):
# Negative test case for getting user initial data with wrong HTTP Header Authorization token
token = "Token 806fa0efd3ce26fe080f65da4ad5a137e1d056ff"
self.client.credentials(HTTP_AUTHORIZATION=token)
response = self.client.post("/api/get-user-initial-data/?category_id={}".format(self.customer_category_id))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data['detail'], "Invalid token.")
"""
Test ID:TS01AH00091
Created By:Hari
Created On:06/12/2018
Scenario:get user initial data/
Status:failure
Message:expired token
Status code:401
"""
def test_user_initial_data_401_expired_token(self):
# Negative test case for getting user initial data with expired HTTP Header Authorization token
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.delete(self.logout_url)
response = self.client.get("/api/get-user-initial-data/?category_id={}".format(self.customer_category_id))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data['detail'], "Invalid token.")
"""
Test ID:TS01AH00094
Created By:Hari
Created On:07/12/2018
Scenario:get user initial data/
Status:failure
Message:blank category id
Status code:400
"""
def test_user_initial_data_400_blank_category_id(self):
# Negative test case for getting user initial data with HTTP Header Authorization token but blank category_id
self.client.credentials(HTTP_AUTHORIZATION=self.token)
self.customer_category_id = ""
response = self.client.get("/api/get-user-initial-data/?category_id={}".format(self.customer_category_id))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data['msg'], "category_id field can not be blank")
self.assertEqual(response.data['status'], "failure")
"""
Test ID:TS01AH00093
Created By:Hari
Created On:07/12/2018
Scenario:get user initial data/
Status:failure
Message:user category does not exist
Status code:401
"""
def test_user_initial_data_400_wrong_category_id(self):
# Negative test case for getting user initial data with HTTP Header Authorization token but wrong category id
self.client.credentials(HTTP_AUTHORIZATION=self.token)
self.customer_category_id = 100
response = self.client.get("/api/get-user-initial-data/?category_id={}".format(self.customer_category_id))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data['status'], "failure")
self.assertEqual(response.data['msg'], "User Category Does Not Exist")
"""
Test ID:TS01AH00096
Created By:Hari
Created On:07/12/2018
Scenario:get user initial data/
Status:failure
Message:user category cannot be blank
Status code:400
"""
def test_user_initial_data_400_non_customer_token(self):
# Negative test case for getting user initial data with HTTP Header Authorization token of non-customer
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.get("/api/get-user-initial-data/?category_id={}".format(self.customer_category_id))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data['status'], "failure")
self.assertEqual(response.data['msg'], "User Customer does not exist")
"""
Test ID:TS01AH00095
Created By:Hari
Created On:07/12/2018
Scenario:get user initial data/
Status:failure
Message:User Category should be a number
Status code:400
"""
def test_user_initial_data_400_non_supplier_token(self):
# Negative test case for getting user initial data with HTTP Header Authorization token of non-supplier
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.get("/api/get-user-initial-data/?category_id={}".format(self.supplier_category_id))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data['status'], "failure")
self.assertEqual(response.data['msg'], "User Supplier does not exist")
"""
Test ID:TS01AH00097
Created By:Hari
Created On:07/12/2018
Scenario:get user initial data/
Status:failure
Message:User Category should be a valid one
Status code:400
"""
def test_user_initial_data_400_non_broker_token(self):
# Negative test case for getting user initial data with HTTP Header Authorization token of non-broker
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.get("/api/get-user-initial-data/?category_id={}".format(self.broker_category_id))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data['status'], "failure")
self.assertEqual(response.data['msg'], "User Broker does not exist")
"""
Test ID:TS01AH00098
Created By:Hari
Created On:07/12/2018
Scenario:get user initial data/
Status:success
Message:User Category valid
Status code:200
"""
def test_user_initial_data_200_customer_token(self):
# Positive test case for getting customer token and category_id
self.client.credentials(HTTP_AUTHORIZATION=self.customer_token)
response = self.client.get("/api/get-user-initial-data/?category_id={}".format(self.customer_category_id))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['status'], "success")
"""
Test ID:TS01AH000100
Created By:Hari
Created On:07/12/2018
Scenario:get user initial data/
Status:failure
Message:User Category valid
Status code:200
"""
def test_user_initial_data_200_supplier_token(self):
# Positive test case for getting supplier token and category_id
self.client.credentials(HTTP_AUTHORIZATION=self.supplier_token)
response = self.client.get("/api/get-user-initial-data/?category_id={}".format(self.supplier_category_id))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['status'], "success")
"""
Test ID:TS01AH00099
Created By:Hari
Created On:07/12/2018
Scenario:get user initial data/
Status:failure
Message:User Category valid
Status code:200
"""
def test_user_initial_data_200_broker_token(self):
# Positive test case for getting broker token and category_id
self.client.credentials(HTTP_AUTHORIZATION=self.broker_token)
response = self.client.get("/api/get-user-initial-data/?category_id={}".format(self.broker_category_id))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['status'], "success")
# class for user-initial-td details
class UserInitialTDFunctionalitiesDataTests(APITestCase):
def setUp(self):
self.login_url = reverse('login')
self.logout_url = reverse('logout')
self.user = User.objects.create_user(username='john_doe',
email='harsh@gmail.com',
password='text1234')
self.login_data = self.client.post(self.login_url, {'username': 'john_doe', 'password': 'text1234'}).content
self.login_data = json.loads(self.login_data.decode('utf8'))
self.token = "Token {}".format(self.login_data["token"])
# Employee
self.employee_user = User.objects.create_user(username='james',
email='harshadasawant89@gmail.com',
password='pwd12345'
)
self.employee = Employee.objects.create(username=self.employee_user)
user_employee_category = mommy.make(UserCategory, category='Employee')
self.employee_category_name = user_employee_category.category
self.login_data = self.client.post(self.login_url, {'username': 'james', 'password': 'pwd12345'}).content
self.login_data = json.loads(self.login_data.decode('utf8'))
self.employee_token = "Token {}".format(self.login_data["token"])
"""
Test ID:TS01AH00102
Created By:Hari
Created On:07/12/2018
Scenario:get user initial td data/
Status:failure
Message:invalid method header
Status code:405
"""
def test_user_tb_initial_data_405_wrong_method(self):
# Negative test for getting user initial data with wrong method
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post("/api/get-user-initial-td-functionalities-data/?category={}".format(self.employee_category_name))
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
"""
Test ID:TS01AH00103
Created By:Hari
Created On:07/12/2018
Scenario:get user initial td data/
Status:failure
Message:no auth credentials provided
Status code:401
"""
def test_user_tb_initial_data_401_no_header(self):
# Negative test for getting user initial data with no HTTP Header Authorization token
response = self.client.get("/api/get-user-initial-td-functionalities-data/?category={}".format(self.employee_category_name))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data['detail'], "Authentication credentials were not provided.")
"""
Test ID:TS01AH00104
Created By:Hari
Created On:07/12/2018
Scenario:get user initial td data/
Status:failure
Message:blank token
Status code:401
"""
def test_user_tb_initial_data_401_blank_token(self):
# Negative test case for getting user initial data with blank HTTP Header Authorization token
self.token = ""
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.get("/api/get-user-initial-td-functionalities-data/?category={}".format(self.employee_category_name))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data['detail'], "Authentication credentials were not provided.")
"""
Test ID:TS01AH00105
Created By:Hari
Created On:06/12/2018
Scenario:get user initial td data/
Status:failure
Message:wrong token
Status code:401
"""
def test_user_tb_initial_data_401_wrong_token(self):
# Negative test case for getting user initial data with wrong HTTP Header Authorization token
token = "Token 806fa0efd3ce2khn080f65da4ad5hg3je1d056ff"
self.client.credentials(HTTP_AUTHORIZATION=token)
response = self.client.post("/api/get-user-initial-td-functionalities-data/?category={}".format(self.employee_category_name))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data['detail'], "Invalid token.")
"""
Test ID:TS01AH00105
Created By:Hari
Created On:06/12/2018
Scenario:get user initial td data/
Status:failure
Message:expired token
Status code:401
"""
def test_user_tb_initial_data_401_expired_token(self):
# Negative test case for getting user initial data with expired HTTP Header Authorization token
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.delete(self.logout_url)
response = self.client.get("/api/get-user-initial-td-functionalities-data/?category={}".format(self.employee_category_name))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data['detail'], "Invalid token.")
"""
Test ID:TS01AH00106
Created By:Hari
Created On:06/12/2018
Scenario:get user initial td data/
Status:failure
Message:expired token
Status code:401
"""
def test_user_tb_initial_data_401_non_employee_category(self):
# Negative test case for getting user initial data with HTTP Header Authorization token of non-broker
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.get("/api/get-user-initial-td-functionalities-data/?category={}".format(self.employee_category_name))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data['status'], "failure")
self.assertEqual(response.data['msg'], "User category is not found")
"""
Test ID:TS01AH00107
Created By:Hari
Created On:07/12/2018
Scenario:get user initial td data/
Status:failure
Message:blank category
Status code:400
"""
def test_user_tb_initial_data_400_blank_category_name(self):
# Negative test case for getting user initial td data with HTTP Header Authorization token but blank category
self.client.credentials(HTTP_AUTHORIZATION=self.token)
self.employee_category_name = ""
response = self.client.get("/api/get-user-initial-td-functionalities-data/?category={}".format(self.employee_category_name))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data['msg'], "category field can not be blank")
self.assertEqual(response.data['status'], "failure")
"""
Test ID:TS01AH00108
Created By:Hari
Created On:07/12/2018
Scenario:get user td initial data/
Status:failure
Message:User Category valid
Status code:200
"""
def test_user_tb_initial_data_200_employee_category_success(self):
# Positive test case for getting employee category
self.client.credentials(HTTP_AUTHORIZATION=self.token)
# self.employee_category_name="employee"
response = self.client.get("/api/get-user-initial-td-functionalities-data/?category={}".format(self.employee_category_name))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['msg'], "Employee TD Functionalities retrieved")
self.assertEqual(response.data['status'], "success")
class UserInitialDataTests(APITestCase):
def setUp(self):
self.login_url = reverse('login')
self.logout_url = reverse('logout')
self.user = User.objects.create_user(username='john_doe',
email='harsh@gmail.com',
password='text1234')
self.login_data = self.client.post(self.login_url, {'username': 'john_doe', 'password': 'text1234'}).content
self.login_data = json.loads(self.login_data.decode('utf8'))
self.token = "Token {}".format(self.login_data["token"])
self.customer_user = User.objects.create_user(username='david',
email='david12@gmail.com',
password='pqrs1234'
)
self.sme = Sme.objects.create(name=self.customer_user)
sme_group = Group.objects.create(name='sme')
self.customer_user.groups.add(sme_group)
user_category = mommy.make(UserCategory, category='Customer')
self.customer_category_id = user_category.id
self.login_data = self.client.post(self.login_url, {'username': 'david', 'password': 'pqrs1234'}).content
self.login_data = json.loads(self.login_data.decode('utf8'))
self.customer_token = "Token {}".format(self.login_data["token"])
self.supplier_user = User.objects.create_user(username='james',
email='harshadasawant89@gmail.com',
password='pwd12345'
)
self.supplier = Supplier.objects.create(user=self.supplier_user)
user_supplier_category = mommy.make(UserCategory, category='Supplier')
self.supplier_category_id = user_supplier_category.id
self.login_data = self.client.post(self.login_url, {'username': 'james', 'password': 'pwd12345'}).content
self.login_data = json.loads(self.login_data.decode('utf8'))
self.supplier_token = "Token {}".format(self.login_data["token"])
self.broker_user = User.objects.create_user(username='sam',
email='harshadasawant89@gmail.com',
password='abc12345'
)
self.broker = Broker.objects.create(name=self.broker_user)
user_broker_category = mommy.make(UserCategory, category='Broker')
self.broker_category_id = user_broker_category.id
self.login_data = self.client.post(self.login_url, {'username': 'sam', 'password': 'abc12345'}).content
self.login_data = json.loads(self.login_data.decode('utf8'))
self.broker_token = "Token {}".format(self.login_data["token"])
"""
Test ID:TS01AH00088
Created By:Hari
Created On:06/12/2018
Scenario:get user initial data/
Status:success
Message:wrong method
Status code:405
"""
def test_user_initial_data_405_wrong_method(self):
# Negative test for getting user initial data with wrong method
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post("/api/get-user-initial-data/?category_id={}".format(self.customer_category_id))
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
"""
Test ID:TS01AH00089
Created By:Hari
Created On:06/12/2018
Scenario:get user initial data/
Status:failure
Message:no header
Status code:401
"""
def test_user_initial_data_401_no_header(self):
# Negative test for getting user initial data with no HTTP Header Authorization token
response = self.client.get("/api/get-user-initial-data/?category_id={}".format(self.customer_category_id))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data['detail'], "Authentication credentials were not provided.")
"""
Test ID:TS01AH00090
Created By:Hari
Created On:06/12/2018
Scenario:get user initial data/
Status:failure
Message:blank token
Status code:401
"""
def test_user_initial_data_401_blank_token(self):
# Negative test case for getting user initial data with blank HTTP Header Authorization token
self.token = ""
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.get("/api/get-user-initial-data/?category_id={}".format(self.customer_category_id))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data['detail'], "Authentication credentials were not provided.")
"""
Test ID:TS01AH00091
Created By:Hari
Created On:06/12/2018
Scenario:get user initial data/
Status:failure
Message:wrong token
Status code:401
"""
def test_user_initial_data_401_wrong_token(self):
# Negative test case for getting user initial data with wrong HTTP Header Authorization token
token = "Token 806fa0efd3ce26fe080f65da4ad5a137e1d056ff"
self.client.credentials(HTTP_AUTHORIZATION=token)
response = self.client.post("/api/get-user-initial-data/?category_id={}".format(self.customer_category_id))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data['detail'], "Invalid token.")
"""
Test ID:TS01AH00091
Created By:Hari
Created On:06/12/2018
Scenario:get user initial data/
Status:failure
Message:expired token
Status code:401
"""
def test_user_initial_data_401_expired_token(self):
# Negative test case for getting user initial data with expired HTTP Header Authorization token
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.delete(self.logout_url)
response = self.client.get("/api/get-user-initial-data/?category_id={}".format(self.customer_category_id))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data['detail'], "Invalid token.")
"""
Test ID:TS01AH00094
Created By:Hari
Created On:07/12/2018
Scenario:get user initial data/
Status:failure
Message:blank category id
Status code:400
"""
def test_user_initial_data_400_blank_category_id(self):
# Negative test case for getting user initial data with HTTP Header Authorization token but blank category_id
self.client.credentials(HTTP_AUTHORIZATION=self.token)
self.customer_category_id = ""
response = self.client.get("/api/get-user-initial-data/?category_id={}".format(self.customer_category_id))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data['msg'], "category_id field can not be blank")
self.assertEqual(response.data['status'], "failure")
"""
Test ID:TS01AH00093
Created By:Hari
Created On:07/12/2018
Scenario:get user initial data/
Status:failure
Message:user category does not exist
Status code:401
"""
def test_user_initial_data_400_wrong_category_id(self):
# Negative test case for getting user initial data with HTTP Header Authorization token but wrong category id
self.client.credentials(HTTP_AUTHORIZATION=self.token)
self.customer_category_id = 100
response = self.client.get("/api/get-user-initial-data/?category_id={}".format(self.customer_category_id))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data['status'], "failure")
self.assertEqual(response.data['msg'], "User Category Does Not Exist")
"""
Test ID:TS01AH00096
Created By:Hari
Created On:07/12/2018
Scenario:get user initial data/
Status:failure
Message:user category cannot be blank
Status code:400
"""
def test_user_initial_data_400_non_customer_token(self):
# Negative test case for getting user initial data with HTTP Header Authorization token of non-customer
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.get("/api/get-user-initial-data/?category_id={}".format(self.customer_category_id))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data['status'], "failure")
self.assertEqual(response.data['msg'], "User Customer does not exist")
"""
Test ID:TS01AH00095
Created By:Hari
Created On:07/12/2018
Scenario:get user initial data/
Status:failure
Message:User Category should be a number
Status code:400
"""
def test_user_initial_data_400_non_supplier_token(self):
# Negative test case for getting user initial data with HTTP Header Authorization token of non-supplier
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.get("/api/get-user-initial-data/?category_id={}".format(self.supplier_category_id))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data['status'], "failure")
self.assertEqual(response.data['msg'], "User Supplier does not exist")
"""
Test ID:TS01AH00097
Created By:Hari
Created On:07/12/2018
Scenario:get user initial data/
Status:failure
Message:User Category should be a valid one
Status code:400
"""
def test_user_initial_data_400_non_broker_token(self):
# Negative test case for getting user initial data with HTTP Header Authorization token of non-broker
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.get("/api/get-user-initial-data/?category_id={}".format(self.broker_category_id))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data['status'], "failure")
"""
Test ID:TS01AH00098
Created By:Hari
Created On:07/12/2018
Scenario:get user initial data/
Status:success
Message:User Category valid
Status code:200
"""
def test_user_initial_data_200_customer_token(self):
# Positive test case for getting customer token and category_id
self.client.credentials(HTTP_AUTHORIZATION=self.customer_token)
response = self.client.get("/api/get-user-initial-data/?category_id={}".format(self.customer_category_id))
self.assertEqual(response.status_code, status.HTTP_200_OK)
"""
Test ID:TS01AH00100
Created By:Hari
Created On:07/12/2018
Scenario:get user initial data/
Status:success
Message:User Category valid
Status code:200
"""
def test_user_initial_data_200_supplier_token(self):
# Positive test case for getting supplier token and category_id
self.client.credentials(HTTP_AUTHORIZATION=self.supplier_token)
response = self.client.get("/api/get-user-initial-data/?category_id={}".format(self.supplier_category_id))
self.assertEqual(response.status_code, status.HTTP_200_OK)
"""
Test ID:TS01AH00099
Created By:Hari
Created On:07/12/2018
Scenario:get user initial data/
Status:success
Message:User Category valid
Status code:200
"""
def test_user_initial_data_200_broker_token(self):
# Positive test case for getting broker token and category_id
self.client.credentials(HTTP_AUTHORIZATION=self.broker_token)
response = self.client.get("/api/get-user-initial-data/?category_id={}".format(self.broker_category_id))
self.assertEqual(response.status_code, status.HTTP_200_OK)
# class for user-initial-td details
class UserInitialTDFunctionalitiesDataTests(APITestCase):
def setUp(self):
self.login_url = reverse('login')
self.logout_url = reverse('logout')
self.user = User.objects.create_user(username='john_doe',
email='harsh@gmail.com',
password='text1234')
self.login_data = self.client.post(self.login_url, {'username': 'john_doe', 'password': 'text1234'}).content
self.login_data = json.loads(self.login_data.decode('utf8'))
self.token = "Token {}".format(self.login_data["token"])
# Employee
self.employee_user = User.objects.create_user(username='james',
email='harshadasawant89@gmail.com',
password='pwd12345'
)
self.employee = Employee.objects.create(username=self.employee_user)
employee_obj = self.employee
user_employee_category = mommy.make(UserCategory, category='employee')
self.employee_category_name = user_employee_category.category
user_employee_roletype = mommy.make(EmployeeRoles, role='ops_executive')
self.employee_role = user_employee_roletype.role
user_employee_rolesmapping = mommy.make(EmployeeRolesMapping, employee_status='active', employee_role=user_employee_roletype, employee=employee_obj)
self.employee_status = user_employee_rolesmapping.employee_status
user_employee_tdfunc = mommy.make(TaskDashboardFunctionalities, functionality='new_inquiry')
self.employee_functionality = user_employee_tdfunc.functionality
user_employee_erfm = mommy.make(EmployeeRolesFunctionalityMapping, caption='employee_ready', td_functionality=user_employee_tdfunc, employee_role=user_employee_roletype)
self.employee_caption = user_employee_erfm.caption
self.login_data = self.client.post(self.login_url, {'username': 'james', 'password': 'pwd12345'}).content
self.login_data = json.loads(self.login_data.decode('utf8'))
self.employee_token = "Token {}".format(self.login_data["token"])
"""
Test ID:TS01AH00102
Created By:Hari
Created On:07/12/2018
Scenario:get user initial td data/
Status:failure
Message:invalid method header
Status code:405
"""
def test_user_tb_initial_data_405_wrong_method(self):
# Negative test for getting user initial td data with wrong method
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post("/api/get-user-initial-td-functionalities-data/?category={}".format(self.employee_category_name))
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
"""
Test ID:TS01AH00103
Created By:Hari
Created On:07/12/2018
Scenario:get user initial td data/
Status:failure
Message:no auth credentials provided
Status code:401
"""
def test_user_tb_initial_data_401_no_header(self):
# Negative test for getting user initial td data with no HTTP Header Authorization token
response = self.client.get("/api/get-user-initial-td-functionalities-data/?category={}".format(self.employee_category_name))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data['detail'], "Authentication credentials were not provided.")
"""
Test ID:TS01AH00104
Created By:Hari
Created On:07/12/2018
Scenario:get user initial td data/
Status:failure
Message:blank token
Status code:401
"""
def test_user_tb_initial_data_401_blank_token(self):
# Negative test case for getting user initial td data with blank HTTP Header Authorization token
self.token = ""
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.get("/api/get-user-initial-td-functionalities-data/?category={}".format(self.employee_category_name))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data['detail'], "Authentication credentials were not provided.")
"""
Test ID:TS01AH00105
Created By:Hari
Created On:06/12/2018
Scenario:get user initial td data/
Status:failure
Message:wrong token
Status code:401
"""
def test_user_tb_initial_data_401_wrong_token(self):
# Negative test case for getting user initial td data with wrong HTTP Header Authorization token
token = "Token 806fa0efd3ce2khn080f65da4ad5hg3je1d056ff"
self.client.credentials(HTTP_AUTHORIZATION=token)
response = self.client.get("/api/get-user-initial-td-functionalities-data/?category={}".format(self.employee_category_name))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data['detail'], "Invalid token.")
"""
Test ID:TS01AH00105
Created By:Hari
Created On:06/12/2018
Scenario:get user initial td data/
Status:failure
Message:expired token
Status code:401
"""
def test_user_tb_initial_data_401_expired_token(self):
# Negative test case for getting user initial td data with expired HTTP Header Authorization token
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.delete(self.logout_url)
response = self.client.get("/api/get-user-initial-td-functionalities-data/?category={}".format(self.employee_category_name))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data['detail'], "Invalid token.")
"""
Test ID:TS01AH00106
Created By:Hari
Created On:06/12/2018
Scenario:get user initial td data/
Status:failure
Message:employee roles not found
Status code:400
"""
def test_user_tb_initial_data_400_non_employee_category(self):
# Negative test case for getting user initial td data with a token whose user has no employee role
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.get("/api/get-user-initial-td-functionalities-data/?category={}".format(self.employee_category_name))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data['status'], "failure")
self.assertEqual(response.data['msg'], "Employee Roles not found")
"""
Test ID:TS01AH00107
Created By:Hari
Created On:07/12/2018
Scenario:get user initial td data/
Status:failure
Message:blank category
Status code:400
"""
def test_user_tb_initial_data_400_blank_category_name(self):
# Negative test case for getting user initial td data with HTTP Header Authorization token but blank category
self.client.credentials(HTTP_AUTHORIZATION=self.token)
self.employee_category_name = ""
response = self.client.get("/api/get-user-initial-td-functionalities-data/?category={}".format(self.employee_category_name))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data['msg'], "category field can not be blank")
self.assertEqual(response.data['status'], "failure")
"""
Test ID:TS01AH00108
Created By:Hari
Created On:07/12/2018
Scenario:get user initial td data/
Status:success
Message:Employee TD Functionalities retrieved
Status code:200
"""
def test_user_tb_initial_data_200_employee_category_success(self):
# Positive test case for getting employee category
self.client.credentials(HTTP_AUTHORIZATION=self.employee_token)
response = self.client.get("/api/get-user-initial-td-functionalities-data/?category={}".format(self.employee_category_name))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['msg'], "Employee TD Functionalities retrieved")
self.assertEqual(response.data['status'], "success")
| a609b36e7e2cb1b8111292b039855b60df1e44f2 | 7,452 | py | Python | scripts/geodyn1d/library.py | tth030/SM_ESR_isostasy | fbd2ac586e8e31dd18a0988181514bc2fff7f08a | ["MIT"] | null | null | null |
#
# lithospheres
#
# =============================================================================
lith200 = {
"numlayers": 7,
"nature_layers": ['matUC','matMC','matLC','matLM1','matLM2','matLM3','matSLM'],
"thicknesses": [15e3,10e3,10e3,45e3,45e3,75e3,400e3],
"thermalBc": ['thermBcUC','thermBcMC','thermBcLC','thermBcLM1','thermBcLM2','thermBcLM3','thermBcSLM'],
"matUC": {
"temp_top": 0.0e0,
"H": 1.78e-6
},
"matMC": {
"H": 1.78e-6
},
"matLC": {
"H": 0.82e-6
},
"matLM1": {
"rho": 3300
},
"matLM2": {
"rho": 3300
},
"matSLM": {
"rho": 3300
},
"thermBcSLM": {
"temp_bottom": 1793.15e0,
"temp_potential": 1553.15e0,
"q_bottom": 12.4460937e-3
}
}
# =============================================================================
lith250 = {
"numlayers": 7,
"nature_layers": ['matUC','matMC','matLC','matLM1','matLM2','matLM3','matSLM'],
"thicknesses": [15e3,10e3,10e3,55e3,160e3,0.0e0,350e3],
"thermalBc": ['thermBcUC','thermBcMC','thermBcLC','thermBcLM1','thermBcLM2','thermBcLM3','thermBcSLM'],
"matUC": {
"temp_top": 0.0e0,
"H": 1.78e-6
},
"matMC": {
"H": 1.78e-6
},
"matLC": {
"H": 0.82e-6
},
"matLM1": {
"rho": 3300
},
"matLM2": {
"rho": 3300
},
"matSLM": {
"rho": 3300
},
"thermBcSLM": {
"temp_bottom": 1793.15e0,
"temp_potential": 1553.15e0,
"q_bottom": 12.4460937e-3
}
}
# =============================================================================
lith240 = {
"numlayers": 7,
"nature_layers": ['matUC','matMC','matLC','matLM1','matLM2','matLM3','matSLM'],
"thicknesses": [15e3,10e3,10e3,55e3,150e3,0.0e0,360e3],
"thermalBc": ['thermBcUC','thermBcMC','thermBcLC','thermBcLM1','thermBcLM2','thermBcLM3','thermBcSLM'],
"matUC": {
"temp_top": 0.0e0,
"H": 1.78e-6
},
"matMC": {
"H": 1.78e-6
},
"matLC": {
"H": 0.82e-6
},
"matLM1": {
"rho": 3300
},
"matLM2": {
"rho": 3300
},
"matSLM": {
"rho": 3300
},
"thermBcSLM": {
"temp_bottom": 1793.15e0,
"temp_potential": 1553.15e0,
"q_bottom": 12.4460937e-3
}
}
# =============================================================================
lith280 = {
"numlayers": 7,
"nature_layers": ['matUC','matMC','matLC','matLM1','matLM2','matLM3','matSLM'],
"thicknesses": [15e3,10e3,10e3,80e3,10e3,155e3,320e3],
"thermalBc": ['thermBcUC','thermBcMC','thermBcLC','thermBcLM1','thermBcLM2','thermBcLM3','thermBcSLM'],
"matUC": {
"temp_top": 0.0e0,
"H": 1.78e-6
},
"matMC": {
"H": 1.78e-6
},
"matLC": {
"H": 0.82e-6
},
"matLM1": {
"rho": 3300
},
"matLM2": {
"rho": 3300
},
"matSLM": {
"rho": 3300
},
"thermBcSLM": {
"temp_bottom": 1793.15e0,
"temp_potential": 1553.15e0,
"q_bottom": 12.4460937e-3
}
}
# =============================================================================
lith160 = {
"numlayers": 7,
"nature_layers": ['matUC','matMC','matLC','matLM1','matLM2','matLM3','matSLM'],
"thicknesses": [15e3,10e3,10e3,55e3,70e3,0.0e0,440e3],
"thermalBc": ['thermBcUC','thermBcMC','thermBcLC','thermBcLM1','thermBcLM2','thermBcLM3','thermBcSLM'],
"matUC": {
"temp_top": 0.0e0,
"H": 1.78e-6
},
"matMC": {
"H": 1.78e-6
},
"matLC": {
"H": 0.82e-6
},
"matLM1": {
"rho": 3300
},
"matLM2": {
"rho": 3300
},
"matSLM": {
"rho": 3300
},
"thermBcSLM": {
"temp_bottom": 1793.15e0,
"temp_potential": 1553.15e0,
"q_bottom": 12.4460937e-3
}
}
# =============================================================================
lith180 = {
"numlayers": 7,
"nature_layers": ['matUC','matMC','matLC','matLM1','matLM2','matLM3','matSLM'],
"thicknesses": [15e3,10e3,10e3,45e3,45e3,55e3,420e3],
"thermalBc": ['thermBcUC','thermBcMC','thermBcLC','thermBcLM1','thermBcLM2','thermBcLM3','thermBcSLM'],
"matUC": {
"temp_top": 0.0e0,
"H": 1.78e-6
},
"matMC": {
"H": 1.78e-6
},
"matLC": {
"H": 0.82e-6
},
"matLM1": {
"rho": 3300
},
"matLM2": {
"rho": 3300
},
"matSLM": {
"rho": 3300
},
"thermBcSLM": {
"temp_bottom": 1793.15e0,
"temp_potential": 1553.15e0,
"q_bottom": 12.4460937e-3
}
}
# =============================================================================
lith120 = {
"numlayers": 6,
"nature_layers": ['matUC','matMC','matLC','matLM1','matLM2','matSLM'],
"thicknesses": [15e3,10e3,10e3,85.0e3,0.0e0,480e3],
"thermalBc": ['thermBcUC','thermBcMC','thermBcLC','thermBcLM1','thermBcLM2','thermBcSLM'],
"matUC": {
"temp_top": 0.0e0,
"H": 1.2e-6
},
"matMC": {
"H": 1.2e-6
},
"matLC": {
"H": 0.473e-6
},
"matLM1": {
"rho": 3300
},
"matLM2": {
"rho": 3300
},
"matSLM": {
"rho": 3300
},
"thermBcSLM": {
"temp_bottom": 1793.15e0,
"temp_potential": 1553.15e0,
"q_bottom": 20.59375e-3
}
}
# =============================================================================
lith125 = {
"numlayers": 7,
"nature_layers": ['matUC','matMC','matLC','matLM1','matLM2','matLM3','matSLM'],
"thicknesses": [15e3,10e3,10e3,90e3,0.0e0,0.0e0,475e3],
"thermalBc": ['thermBcUC','thermBcMC','thermBcLC','thermBcLM1','thermBcLM2','thermBcLM3','thermBcSLM'],
"matUC": {
"temp_top": 0.0e0,
"H": 1.299e-6
},
"matMC": {
"H": 1.299e-6
},
"matLC": {
"H": 0.498e-6
},
"matLM1": {
"rho": 3300
},
"matLM2": {
"rho": 3300
},
"matSLM": {
"rho": 3300
},
"thermBcSLM": {
"temp_bottom": 1793.15e0,
"temp_potential": 1553.15e0,
"q_bottom": 19.5e-3
}
}
# =============================================================================
ridge_NoOc_NoDiffLayer = {
"numlayers": 1,
"nature_layers": ['matSLM'],
"thicknesses": [600e3],
"thermalBc": ['thermBcSLM'],
"matSLM": {
"rho": 3300
},
"thermBcSLM": {
"temp_top": 0.0e0,
"temp_bottom": 1793.15e0,
"temp_potential": 1553.15e0,
"q_bottom": 19.5e-3
}
}
# =============================================================================
ridge_Oc6_5_NoDiffLayer = {
"numlayers": 5,
"nature_layers": ['matUC','matMC','matLC','matSLMd','matSLM'],
"thicknesses": [6.5e3, 0.0e0, 0.0e0, 118.5e3, 475.0e3],
"thermalBc": ['thermBcUC','thermBcMC','thermBcLC','thermBcSLMd', 'thermBcSLM'],
"matUC": {
"temp_top": 0.0e0,
"H" : 0.0e0,
"rho": 2900.e0
},
"matMC": {
"H": 0.0e0,
"rho": 2900.e0
},
"matLC": {
"H": 0.0e0,
"rho": 2900.e0
},
"matSLMd": {
"rho": 3300,
"H": 0.0e0
},
"matSLM": {
"rho": 3300
},
"thermBcSLMd": {
"temp_bottom": 1603.15e0,
"temp_potential": 1553.15e0,
"q_bottom": 437.2421875e-3
},
"thermBcSLM": {
"temp_bottom": 1793.15e0,
"temp_potential": 1553.15e0,
"q_bottom": 437.2421875e-3
}
}
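# A small consistency check (a sketch, not part of the original library):
# every lithosphere definition above keeps four parallel fields, so verifying
# their lengths against "numlayers" catches editing mistakes early.
def check_lithosphere(name, lith):
    n = lith["numlayers"]
    for key in ("nature_layers", "thicknesses", "thermalBc"):
        assert len(lith[key]) == n, \
            "{}: {} has {} entries, expected {}".format(name, key, len(lith[key]), n)

for _name, _lith in [("lith200", lith200), ("lith250", lith250), ("lith240", lith240),
                     ("lith280", lith280), ("lith160", lith160), ("lith180", lith180),
                     ("lith120", lith120), ("lith125", lith125),
                     ("ridge_NoOc_NoDiffLayer", ridge_NoOc_NoDiffLayer),
                     ("ridge_Oc6_5_NoDiffLayer", ridge_Oc6_5_NoDiffLayer)]:
    check_lithosphere(_name, _lith)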
| a64372894f95c010eb427479e9893be3e5d6df52 | 3,335 | py | Python | augur/metrics/contributor/test_contributor_routes.py | computationalmystic/sengfs19-group3 | 6d636ce8ab1a50ce80f529d0130ae1a0b69c04d2 | ["MIT"] | 1 | 2019-11-08T21:26:42.000Z | 2019-11-08T21:26:42.000Z |
import requests
import pytest
@pytest.fixture(scope="session")
def metrics():
pass
def test_contributors_by_group(metrics):
response = requests.get('http://localhost:5000/api/unstable/repo-groups/20/contributors')
data = response.json()
assert response.status_code == 200
assert len(data) >= 1
assert data[0]["total"] > 0
def test_contributors_by_repo(metrics):
response = requests.get('http://localhost:5000/api/unstable/repo-groups/20/repos/21000/contributors')
data = response.json()
assert response.status_code == 200
assert len(data) >= 1
assert data[0]["total"] > 0
def test_contributors_new_by_group(metrics):
response = requests.get('http://localhost:5000/api/unstable/repo-groups/24/contributors-new')
data = response.json()
assert response.status_code == 200
assert len(data) >= 1
assert data[0]["count"] > 0
def test_contributors_new_by_repo(metrics):
response = requests.get('http://localhost:5000/api/unstable/repo-groups/20/repos/21070/contributors-new')
data = response.json()
assert response.status_code == 200
assert len(data) >= 1
assert data[0]["count"] > 0
def test_top_committers_by_repo(metrics):
response = requests.get('http://0.0.0.0:5000/api/unstable/repo-groups/22/repos/21334/top-committers')
data = response.json()
assert response.status_code == 200
assert len(data) >= 1
assert data[0]['commits'] > 0
def test_top_committers_by_group(metrics):
response = requests.get('http://0.0.0.0:5000/api/unstable/repo-groups/22/top-committers')
data = response.json()
assert response.status_code == 200
assert len(data) >= 1
assert data[0]['commits'] > 0
def test_committer_by_repo(metrics):
response = requests.get('http://localhost:5000/api/unstable/repo-groups/21/repos/21222/committers')
data = response.json()
assert response.status_code == 200
assert len(data) >= 1
def test_committer_by_group(metrics):
response = requests.get('http://localhost:5000/api/unstable/repo-groups/21/committers?period=year')
data = response.json()
assert response.status_code == 200
assert len(data) >= 1
<<<<<<< HEAD
def test_contributors_by_company_group(metrics):
response = requests.get('http://localhost:5000/api/unstable/repo-groups/20/contributors-by-company')
data = response.json()
assert response.status_code == 200
assert len(data) >= 1
=======
def test_messages_by_contributor_by_group(metrics):
response = requests.get('http://localhost:5000/api/unstable/repo-groups/21/messages-by-contributor')
>>>>>>> aaf74f3279aa40047864ec896267fd48b4852347
data = response.json()
assert response.status_code == 200
assert len(data) >= 1
<<<<<<< HEAD
def test_contributors_by_company_repo(metrics):
response = requests.get('http://localhost:5000/api/unstable/repo-groups/20/repos/25432/contributors-by-company')
data = response.json()
assert response.status_code == 200
assert len(data) >= 1
=======
def test_messages_by_contributor_by_repo(metrics):
response = requests.get('http://localhost:5000/api/unstable/repo-groups/21/repos/21222/messages-by-contributor')
data = response.json()
assert response.status_code == 200
assert len(data) >= 1
>>>>>>> aaf74f3279aa40047864ec896267fd48b4852347
| a670947e4cf44c409c8ad5623b422d853b43fc91 | 6,419 | py | Python | tests/test_crop_production.py | hkotaro1215/invest | 1ba08bd746977bfa8a4600ad8c821fc43598c421 | ["BSD-3-Clause"] | null | null | null |
"""Module for Regression Testing the InVEST Crop Production models."""
import unittest
import tempfile
import shutil
import os
import numpy
import pygeoprocessing.testing
from pygeoprocessing.testing import scm
MODEL_DATA_PATH = os.path.join(
os.path.dirname(__file__), '..', 'data', 'invest-data',
'CropProduction', 'model_data')
SAMPLE_DATA_PATH = os.path.join(
os.path.dirname(__file__), '..', 'data', 'invest-data',
'CropProduction', 'sample_user_data')
TEST_DATA_PATH = os.path.join(
os.path.dirname(__file__), '..', 'data', 'invest-test-data',
'crop_production_model')
class CropProductionTests(unittest.TestCase):
"""Tests for the Crop Production model."""
def setUp(self):
"""Overriding setUp function to create temp workspace directory."""
# this lets us delete the workspace after it's done no matter the
# test result
self.workspace_dir = tempfile.mkdtemp()
def tearDown(self):
"""Overriding tearDown function to remove temporary directory."""
shutil.rmtree(self.workspace_dir)
@scm.skip_if_data_missing(SAMPLE_DATA_PATH)
@scm.skip_if_data_missing(MODEL_DATA_PATH)
def test_crop_production_percentile(self):
"""Crop Production: test crop production percentile regression."""
from natcap.invest import crop_production_percentile
args = {
'workspace_dir': self.workspace_dir,
'results_suffix': '',
'landcover_raster_path': os.path.join(
SAMPLE_DATA_PATH, 'landcover.tif'),
'landcover_to_crop_table_path': os.path.join(
SAMPLE_DATA_PATH, 'landcover_to_crop_table.csv'),
'aggregate_polygon_path': os.path.join(
SAMPLE_DATA_PATH, 'aggregate_shape.shp'),
'aggregate_polygon_id': 'id',
'model_data_path': MODEL_DATA_PATH
}
crop_production_percentile.execute(args)
result_table_path = os.path.join(
args['workspace_dir'], 'aggregate_results.csv')
expected_result_table_path = os.path.join(
TEST_DATA_PATH, 'expected_aggregate_results.csv')
pygeoprocessing.testing.assert_csv_equal(
expected_result_table_path, result_table_path)
@scm.skip_if_data_missing(SAMPLE_DATA_PATH)
@scm.skip_if_data_missing(MODEL_DATA_PATH)
def test_crop_production_percentile_bad_crop(self):
"""Crop Production: test crop production with a bad crop name."""
from natcap.invest import crop_production_percentile
args = {
'workspace_dir': self.workspace_dir,
'results_suffix': '',
'landcover_raster_path': os.path.join(
SAMPLE_DATA_PATH, 'landcover.tif'),
'landcover_to_crop_table_path': os.path.join(
self.workspace_dir, 'landcover_to_badcrop_table.csv'),
'aggregate_polygon_path': os.path.join(
SAMPLE_DATA_PATH, 'aggregate_shape.shp'),
'aggregate_polygon_id': 'id',
'model_data_path': MODEL_DATA_PATH
}
with open(args['landcover_to_crop_table_path'],
'w') as landcover_crop_table:
landcover_crop_table.write(
'crop_name,lucode\nfakecrop,20\n')
with self.assertRaises(ValueError):
crop_production_percentile.execute(args)
@scm.skip_if_data_missing(SAMPLE_DATA_PATH)
@scm.skip_if_data_missing(MODEL_DATA_PATH)
def test_crop_production_regression_bad_crop(self):
"""Crop Production: test crop regression with a bad crop name."""
from natcap.invest import crop_production_regression
args = {
'workspace_dir': self.workspace_dir,
'results_suffix': '',
'landcover_raster_path': os.path.join(
SAMPLE_DATA_PATH, 'landcover.tif'),
'landcover_to_crop_table_path': os.path.join(
SAMPLE_DATA_PATH, 'landcover_to_badcrop_table.csv'),
'aggregate_polygon_path': os.path.join(
SAMPLE_DATA_PATH, 'aggregate_shape.shp'),
'aggregate_polygon_id': 'id',
'model_data_path': MODEL_DATA_PATH,
'fertilization_rate_table_path': os.path.join(
SAMPLE_DATA_PATH, 'crop_fertilization_rates.csv'),
'nitrogen_fertilization_rate': 29.6,
'phosphorous_fertilization_rate': 8.4,
'potassium_fertilization_rate': 14.2,
}
with open(args['landcover_to_crop_table_path'],
'w') as landcover_crop_table:
landcover_crop_table.write(
'crop_name,lucode\nfakecrop,20\n')
with self.assertRaises(ValueError):
crop_production_regression.execute(args)
@scm.skip_if_data_missing(SAMPLE_DATA_PATH)
@scm.skip_if_data_missing(MODEL_DATA_PATH)
def test_crop_production_regression(self):
"""Crop Production: test crop production regression model."""
from natcap.invest import crop_production_regression
args = {
'workspace_dir': self.workspace_dir,
'results_suffix': '',
'landcover_raster_path': os.path.join(
SAMPLE_DATA_PATH, 'landcover.tif'),
'landcover_to_crop_table_path': os.path.join(
SAMPLE_DATA_PATH, 'landcover_to_crop_table.csv'),
'aggregate_polygon_path': os.path.join(
SAMPLE_DATA_PATH, 'aggregate_shape.shp'),
'aggregate_polygon_id': 'id',
'model_data_path': MODEL_DATA_PATH,
'fertilization_rate_table_path': os.path.join(
SAMPLE_DATA_PATH, 'crop_fertilization_rates.csv'),
'nitrogen_fertilization_rate': 29.6,
'phosphorous_fertilization_rate': 8.4,
'potassium_fertilization_rate': 14.2,
}
crop_production_regression.execute(args)
result_table_path = os.path.join(
args['workspace_dir'], 'aggregate_results.csv')
expected_result_table_path = os.path.join(
TEST_DATA_PATH, 'expected_regression_aggregate_results.csv')
pygeoprocessing.testing.assert_csv_equal(
expected_result_table_path, result_table_path)
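# Sketch of a shared args builder (hypothetical helper, not in the original
# test module): the four tests above repeat the same base dictionary, so
# factoring it out would shrink each test to its distinctive pieces.
def base_percentile_args(workspace_dir):
    return {
        'workspace_dir': workspace_dir,
        'results_suffix': '',
        'landcover_raster_path': os.path.join(SAMPLE_DATA_PATH, 'landcover.tif'),
        'landcover_to_crop_table_path': os.path.join(
            SAMPLE_DATA_PATH, 'landcover_to_crop_table.csv'),
        'aggregate_polygon_path': os.path.join(SAMPLE_DATA_PATH, 'aggregate_shape.shp'),
        'aggregate_polygon_id': 'id',
        'model_data_path': MODEL_DATA_PATH,
    }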
| a6bd80b2215d0a98aa561a8e953075ada1458e09 | 54,327 | py | Python | stubs/s3.py | claytonbrown/troposphere | bf0f1e48b14f578de0221d50f711467ad716ca87 | ["BSD-2-Clause"] | null | null | null |
from . import AWSObject, AWSProperty
from .validators import *
from .constants import *
# -------------------------------------------
class S3ReplicationConfiguration(AWSProperty):
"""# ReplicationConfiguration - CloudFormationResourceSpecification version: 1.4.0
{
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-replicationconfiguration.html",
"Properties": {
"Role": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-replicationconfiguration.html#cfn-s3-bucket-replicationconfiguration-role",
"PrimitiveType": "String",
"Required": true,
"UpdateType": "Mutable"
},
"Rules": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-replicationconfiguration.html#cfn-s3-bucket-replicationconfiguration-rules",
"DuplicatesAllowed": false,
"ItemType": "ReplicationRule",
"Required": true,
"Type": "List",
"UpdateType": "Mutable"
}
}
}
"""
props = {
'Role': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-replicationconfiguration.html#cfn-s3-bucket-replicationconfiguration-role'),
'Rules': ([ReplicationRule], True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-replicationconfiguration.html#cfn-s3-bucket-replicationconfiguration-rules')
}
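# Each props entry above follows the same four-field shape:
# (expected type, required flag, update type, documentation URL). A hedged
# sketch of a consumer (hypothetical helper, not part of troposphere):
def validate_props(resource_name, props, values):
    for key, (expected, required, _update_type, doc_url) in props.items():
        if key not in values:
            if required:
                raise ValueError("{}: missing required property {!r} (see {})".format(
                    resource_name, key, doc_url))
            continue
        value = values[key]
        if isinstance(expected, list):  # list-of-type spec, e.g. [ReplicationRule]
            ok = isinstance(value, list) and all(isinstance(v, expected[0]) for v in value)
        elif isinstance(expected, type):
            ok = isinstance(value, expected)
        else:  # validator callables such as positive_integer are applied elsewhere
            ok = True
        if not ok:
            raise TypeError("{}.{} expected {}".format(resource_name, key, expected))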
# -------------------------------------------
class S3NotificationFilter(AWSProperty):
"""# NotificationFilter - CloudFormationResourceSpecification version: 1.4.0
{
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfiguration-config-filter.html",
"Properties": {
"S3Key": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfiguration-config-filter.html#cfn-s3-bucket-notificationconfiguraiton-config-filter-s3key",
"Required": true,
"Type": "S3KeyFilter",
"UpdateType": "Mutable"
}
}
}
"""
props = {
'S3Key': (S3KeyFilter, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfiguration-config-filter.html#cfn-s3-bucket-notificationconfiguraiton-config-filter-s3key')
}
# -------------------------------------------
class S3Rule(AWSProperty):
"""# Rule - CloudFormationResourceSpecification version: 1.4.0
{
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule.html",
"Properties": {
"ExpirationDate": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule.html#cfn-s3-bucket-lifecycleconfig-rule-expirationdate",
"PrimitiveType": "Timestamp",
"Required": false,
"UpdateType": "Mutable"
},
"ExpirationInDays": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule.html#cfn-s3-bucket-lifecycleconfig-rule-expirationindays",
"PrimitiveType": "Integer",
"Required": false,
"UpdateType": "Mutable"
},
"Id": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule.html#cfn-s3-bucket-lifecycleconfig-rule-id",
"PrimitiveType": "String",
"Required": false,
"UpdateType": "Mutable"
},
"NoncurrentVersionExpirationInDays": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule.html#cfn-s3-bucket-lifecycleconfig-rule-noncurrentversionexpirationindays",
"PrimitiveType": "Integer",
"Required": false,
"UpdateType": "Mutable"
},
"NoncurrentVersionTransition": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule.html#cfn-s3-bucket-lifecycleconfig-rule-noncurrentversiontransition",
"Required": false,
"Type": "NoncurrentVersionTransition",
"UpdateType": "Mutable"
},
"NoncurrentVersionTransitions": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule.html#cfn-s3-bucket-lifecycleconfig-rule-noncurrentversiontransitions",
"Required": false,
"Type": "NoncurrentVersionTransition",
"UpdateType": "Mutable"
},
"Prefix": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule.html#cfn-s3-bucket-lifecycleconfig-rule-prefix",
"PrimitiveType": "String",
"Required": false,
"UpdateType": "Mutable"
},
"Status": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule.html#cfn-s3-bucket-lifecycleconfig-rule-status",
"PrimitiveType": "String",
"Required": true,
"UpdateType": "Mutable"
},
"Transition": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule.html#cfn-s3-bucket-lifecycleconfig-rule-transition",
"Required": false,
"Type": "Transition",
"UpdateType": "Mutable"
},
"Transitions": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule.html#cfn-s3-bucket-lifecycleconfig-rule-transitions",
"Required": false,
"Type": "Transition",
"UpdateType": "Mutable"
}
}
}
"""
props = {
'ExpirationDate': (basestring, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule.html#cfn-s3-bucket-lifecycleconfig-rule-expirationdate'),
'ExpirationInDays': (positive_integer, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule.html#cfn-s3-bucket-lifecycleconfig-rule-expirationindays'),
'Id': (basestring, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule.html#cfn-s3-bucket-lifecycleconfig-rule-id'),
'NoncurrentVersionExpirationInDays': (positive_integer, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule.html#cfn-s3-bucket-lifecycleconfig-rule-noncurrentversionexpirationindays'),
'NoncurrentVersionTransition': (NoncurrentVersionTransition, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule.html#cfn-s3-bucket-lifecycleconfig-rule-noncurrentversiontransition'),
'NoncurrentVersionTransitions': (NoncurrentVersionTransition, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule.html#cfn-s3-bucket-lifecycleconfig-rule-noncurrentversiontransitions'),
'Prefix': (basestring, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule.html#cfn-s3-bucket-lifecycleconfig-rule-prefix'),
'Status': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule.html#cfn-s3-bucket-lifecycleconfig-rule-status'),
'Transition': (Transition, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule.html#cfn-s3-bucket-lifecycleconfig-rule-transition'),
'Transitions': (Transition, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule.html#cfn-s3-bucket-lifecycleconfig-rule-transitions')
}
# -------------------------------------------
class S3RoutingRuleCondition(AWSProperty):
"""# RoutingRuleCondition - CloudFormationResourceSpecification version: 1.4.0
{
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration-routingrules-routingrulecondition.html",
"Properties": {
"HttpErrorCodeReturnedEquals": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration-routingrules-routingrulecondition.html#cfn-s3-websiteconfiguration-routingrules-routingrulecondition-httperrorcodereturnedequals",
"PrimitiveType": "String",
"Required": false,
"UpdateType": "Mutable"
},
"KeyPrefixEquals": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration-routingrules-routingrulecondition.html#cfn-s3-websiteconfiguration-routingrules-routingrulecondition-keyprefixequals",
"PrimitiveType": "String",
"Required": false,
"UpdateType": "Mutable"
}
}
}
"""
props = {
'HttpErrorCodeReturnedEquals': (basestring, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration-routingrules-routingrulecondition.html#cfn-s3-websiteconfiguration-routingrules-routingrulecondition-httperrorcodereturnedequals'),
'KeyPrefixEquals': (basestring, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration-routingrules-routingrulecondition.html#cfn-s3-websiteconfiguration-routingrules-routingrulecondition-keyprefixequals')
}
# -------------------------------------------
class S3QueueConfiguration(AWSProperty):
"""# QueueConfiguration - CloudFormationResourceSpecification version: 1.4.0
{
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig-queueconfig.html",
"Properties": {
"Event": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig-queueconfig.html#cfn-s3-bucket-notificationconfig-queueconfig-event",
"PrimitiveType": "String",
"Required": true,
"UpdateType": "Mutable"
},
"Filter": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig-queueconfig.html#cfn-s3-bucket-notificationconfig-queueconfig-filter",
"Required": false,
"Type": "NotificationFilter",
"UpdateType": "Mutable"
},
"Queue": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig-queueconfig.html#cfn-s3-bucket-notificationconfig-queueconfig-queue",
"PrimitiveType": "String",
"Required": true,
"UpdateType": "Mutable"
}
}
}
"""
props = {
'Event': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig-queueconfig.html#cfn-s3-bucket-notificationconfig-queueconfig-event'),
'Filter': (NotificationFilter, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig-queueconfig.html#cfn-s3-bucket-notificationconfig-queueconfig-filter'),
'Queue': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig-queueconfig.html#cfn-s3-bucket-notificationconfig-queueconfig-queue')
}
# -------------------------------------------
class S3LifecycleConfiguration(AWSProperty):
"""# LifecycleConfiguration - CloudFormationResourceSpecification version: 1.4.0
{
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig.html",
"Properties": {
"Rules": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig.html#cfn-s3-bucket-lifecycleconfig-rules",
"DuplicatesAllowed": false,
"ItemType": "Rule",
"Required": true,
"Type": "List",
"UpdateType": "Mutable"
}
}
}
"""
props = {
'Rules': ([Rule], True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig.html#cfn-s3-bucket-lifecycleconfig-rules')
}
# -------------------------------------------
class S3TopicConfiguration(AWSProperty):
"""# TopicConfiguration - CloudFormationResourceSpecification version: 1.4.0
{
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig-topicconfig.html",
"Properties": {
"Event": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig-topicconfig.html#cfn-s3-bucket-notificationconfig-topicconfig-event",
"PrimitiveType": "String",
"Required": true,
"UpdateType": "Mutable"
},
"Filter": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig-topicconfig.html#cfn-s3-bucket-notificationconfig-topicconfig-filter",
"Required": false,
"Type": "NotificationFilter",
"UpdateType": "Mutable"
},
"Topic": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig-topicconfig.html#cfn-s3-bucket-notificationconfig-topicconfig-topic",
"PrimitiveType": "String",
"Required": true,
"UpdateType": "Mutable"
}
}
}
"""
props = {
'Event': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig-topicconfig.html#cfn-s3-bucket-notificationconfig-topicconfig-event'),
'Filter': (NotificationFilter, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig-topicconfig.html#cfn-s3-bucket-notificationconfig-topicconfig-filter'),
'Topic': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig-topicconfig.html#cfn-s3-bucket-notificationconfig-topicconfig-topic')
}
# -------------------------------------------
class S3LambdaConfiguration(AWSProperty):
"""# LambdaConfiguration - CloudFormationResourceSpecification version: 1.4.0
{
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig-lambdaconfig.html",
"Properties": {
"Event": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig-lambdaconfig.html#cfn-s3-bucket-notificationconfig-lambdaconfig-event",
"PrimitiveType": "String",
"Required": true,
"UpdateType": "Mutable"
},
"Filter": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig-lambdaconfig.html#cfn-s3-bucket-notificationconfig-lambdaconfig-filter",
"Required": false,
"Type": "NotificationFilter",
"UpdateType": "Mutable"
},
"Function": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig-lambdaconfig.html#cfn-s3-bucket-notificationconfig-lambdaconfig-function",
"PrimitiveType": "String",
"Required": true,
"UpdateType": "Mutable"
}
}
}
"""
props = {
'Event': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig-lambdaconfig.html#cfn-s3-bucket-notificationconfig-lambdaconfig-event'),
'Filter': (NotificationFilter, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig-lambdaconfig.html#cfn-s3-bucket-notificationconfig-lambdaconfig-filter'),
'Function': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig-lambdaconfig.html#cfn-s3-bucket-notificationconfig-lambdaconfig-function')
}
# -------------------------------------------
class S3ReplicationRule(AWSProperty):
"""# ReplicationRule - CloudFormationResourceSpecification version: 1.4.0
{
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-replicationconfiguration-rules.html",
"Properties": {
"Destination": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-replicationconfiguration-rules.html#cfn-s3-bucket-replicationconfiguration-rules-destination",
"Required": true,
"Type": "ReplicationDestination",
"UpdateType": "Mutable"
},
"Id": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-replicationconfiguration-rules.html#cfn-s3-bucket-replicationconfiguration-rules-id",
"PrimitiveType": "String",
"Required": false,
"UpdateType": "Mutable"
},
"Prefix": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-replicationconfiguration-rules.html#cfn-s3-bucket-replicationconfiguration-rules-prefix",
"PrimitiveType": "String",
"Required": true,
"UpdateType": "Mutable"
},
"Status": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-replicationconfiguration-rules.html#cfn-s3-bucket-replicationconfiguration-rules-status",
"PrimitiveType": "String",
"Required": true,
"UpdateType": "Mutable"
}
}
}
"""
props = {
'Destination': (ReplicationDestination, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-replicationconfiguration-rules.html#cfn-s3-bucket-replicationconfiguration-rules-destination'),
'Id': (basestring, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-replicationconfiguration-rules.html#cfn-s3-bucket-replicationconfiguration-rules-id'),
'Prefix': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-replicationconfiguration-rules.html#cfn-s3-bucket-replicationconfiguration-rules-prefix'),
'Status': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-replicationconfiguration-rules.html#cfn-s3-bucket-replicationconfiguration-rules-status')
}
# -------------------------------------------
class S3CorsRule(AWSProperty):
"""# CorsRule - CloudFormationResourceSpecification version: 1.4.0
{
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-cors-corsrule.html",
"Properties": {
"AllowedHeaders": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-cors-corsrule.html#cfn-s3-bucket-cors-corsrule-allowedheaders",
"DuplicatesAllowed": false,
"PrimitiveItemType": "String",
"Required": false,
"Type": "List",
"UpdateType": "Mutable"
},
"AllowedMethods": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-cors-corsrule.html#cfn-s3-bucket-cors-corsrule-allowedmethods",
"DuplicatesAllowed": false,
"PrimitiveItemType": "String",
"Required": true,
"Type": "List",
"UpdateType": "Mutable"
},
"AllowedOrigins": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-cors-corsrule.html#cfn-s3-bucket-cors-corsrule-allowedorigins",
"DuplicatesAllowed": false,
"PrimitiveItemType": "String",
"Required": true,
"Type": "List",
"UpdateType": "Mutable"
},
"ExposedHeaders": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-cors-corsrule.html#cfn-s3-bucket-cors-corsrule-exposedheaders",
"DuplicatesAllowed": false,
"PrimitiveItemType": "String",
"Required": false,
"Type": "List",
"UpdateType": "Mutable"
},
"Id": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-cors-corsrule.html#cfn-s3-bucket-cors-corsrule-id",
"PrimitiveType": "String",
"Required": false,
"UpdateType": "Mutable"
},
"MaxAge": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-cors-corsrule.html#cfn-s3-bucket-cors-corsrule-maxage",
"PrimitiveType": "Integer",
"Required": false,
"UpdateType": "Mutable"
}
}
}
"""
props = {
'AllowedHeaders': ([basestring], False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-cors-corsrule.html#cfn-s3-bucket-cors-corsrule-allowedheaders'),
'AllowedMethods': ([basestring], True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-cors-corsrule.html#cfn-s3-bucket-cors-corsrule-allowedmethods'),
'AllowedOrigins': ([basestring], True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-cors-corsrule.html#cfn-s3-bucket-cors-corsrule-allowedorigins'),
'ExposedHeaders': ([basestring], False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-cors-corsrule.html#cfn-s3-bucket-cors-corsrule-exposedheaders'),
'Id': (basestring, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-cors-corsrule.html#cfn-s3-bucket-cors-corsrule-id'),
'MaxAge': (positive_integer, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-cors-corsrule.html#cfn-s3-bucket-cors-corsrule-maxage')
}
# -------------------------------------------
class S3Transition(AWSProperty):
"""# Transition - CloudFormationResourceSpecification version: 1.4.0
{
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule-transition.html",
"Properties": {
"StorageClass": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule-transition.html#cfn-s3-bucket-lifecycleconfig-rule-transition-storageclass",
"PrimitiveType": "String",
"Required": true,
"UpdateType": "Mutable"
},
"TransitionDate": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule-transition.html#cfn-s3-bucket-lifecycleconfig-rule-transition-transitiondate",
"PrimitiveType": "Timestamp",
"Required": false,
"UpdateType": "Mutable"
},
"TransitionInDays": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule-transition.html#cfn-s3-bucket-lifecycleconfig-rule-transition-transitionindays",
"PrimitiveType": "Integer",
"Required": false,
"UpdateType": "Mutable"
}
}
}
"""
props = {
'StorageClass': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule-transition.html#cfn-s3-bucket-lifecycleconfig-rule-transition-storageclass'),
'TransitionDate': (basestring, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule-transition.html#cfn-s3-bucket-lifecycleconfig-rule-transition-transitiondate'),
'TransitionInDays': (positive_integer, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule-transition.html#cfn-s3-bucket-lifecycleconfig-rule-transition-transitionindays')
}
# -------------------------------------------
class S3CorsConfiguration(AWSProperty):
"""# CorsConfiguration - CloudFormationResourceSpecification version: 1.4.0
{
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-cors.html",
"Properties": {
"CorsRules": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-cors.html#cfn-s3-bucket-cors-corsrule",
"DuplicatesAllowed": false,
"ItemType": "CorsRule",
"Required": true,
"Type": "List",
"UpdateType": "Mutable"
}
}
}
"""
props = {
'CorsRules': ([CorsRule], True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-cors.html#cfn-s3-bucket-cors-corsrule')
}
# -------------------------------------------
class S3ReplicationDestination(AWSProperty):
"""# ReplicationDestination - CloudFormationResourceSpecification version: 1.4.0
{
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-replicationconfiguration-rules-destination.html",
"Properties": {
"Bucket": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-replicationconfiguration-rules-destination.html#cfn-s3-bucket-replicationconfiguration-rules-destination-bucket",
"PrimitiveType": "String",
"Required": true,
"UpdateType": "Mutable"
},
"StorageClass": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-replicationconfiguration-rules-destination.html#cfn-s3-bucket-replicationconfiguration-rules-destination-storageclass",
"PrimitiveType": "String",
"Required": false,
"UpdateType": "Mutable"
}
}
}
"""
props = {
'Bucket': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-replicationconfiguration-rules-destination.html#cfn-s3-bucket-replicationconfiguration-rules-destination-bucket'),
'StorageClass': (basestring, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-replicationconfiguration-rules-destination.html#cfn-s3-bucket-replicationconfiguration-rules-destination-storageclass')
}
# -------------------------------------------
class S3LoggingConfiguration(AWSProperty):
"""# LoggingConfiguration - CloudFormationResourceSpecification version: 1.4.0
{
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-loggingconfig.html",
"Properties": {
"DestinationBucketName": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-loggingconfig.html#cfn-s3-bucket-loggingconfig-destinationbucketname",
"PrimitiveType": "String",
"Required": false,
"UpdateType": "Mutable"
},
"LogFilePrefix": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-loggingconfig.html#cfn-s3-bucket-loggingconfig-logfileprefix",
"PrimitiveType": "String",
"Required": false,
"UpdateType": "Mutable"
}
}
}
"""
props = {
'DestinationBucketName': (basestring, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-loggingconfig.html#cfn-s3-bucket-loggingconfig-destinationbucketname'),
'LogFilePrefix': (basestring, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-loggingconfig.html#cfn-s3-bucket-loggingconfig-logfileprefix')
}
# -------------------------------------------
class S3RoutingRule(AWSProperty):
"""# RoutingRule - CloudFormationResourceSpecification version: 1.4.0
{
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration-routingrules.html",
"Properties": {
"RedirectRule": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration-routingrules.html#cfn-s3-websiteconfiguration-routingrules-redirectrule",
"Required": true,
"Type": "RedirectRule",
"UpdateType": "Mutable"
},
"RoutingRuleCondition": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration-routingrules.html#cfn-s3-websiteconfiguration-routingrules-routingrulecondition",
"Required": false,
"Type": "RoutingRuleCondition",
"UpdateType": "Mutable"
}
}
}
"""
props = {
'RedirectRule': (RedirectRule, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration-routingrules.html#cfn-s3-websiteconfiguration-routingrules-redirectrule'),
'RoutingRuleCondition': (RoutingRuleCondition, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration-routingrules.html#cfn-s3-websiteconfiguration-routingrules-routingrulecondition')
}
# -------------------------------------------
class S3NoncurrentVersionTransition(AWSProperty):
"""# NoncurrentVersionTransition - CloudFormationResourceSpecification version: 1.4.0
{
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule-noncurrentversiontransition.html",
"Properties": {
"StorageClass": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule-noncurrentversiontransition.html#cfn-s3-bucket-lifecycleconfig-rule-noncurrentversiontransition-storageclass",
"PrimitiveType": "String",
"Required": true,
"UpdateType": "Mutable"
},
"TransitionInDays": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule-noncurrentversiontransition.html#cfn-s3-bucket-lifecycleconfig-rule-noncurrentversiontransition-transitionindays",
"PrimitiveType": "Integer",
"Required": true,
"UpdateType": "Mutable"
}
}
}
"""
props = {
'StorageClass': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule-noncurrentversiontransition.html#cfn-s3-bucket-lifecycleconfig-rule-noncurrentversiontransition-storageclass'),
'TransitionInDays': (positive_integer, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-lifecycleconfig-rule-noncurrentversiontransition.html#cfn-s3-bucket-lifecycleconfig-rule-noncurrentversiontransition-transitionindays')
}
# -------------------------------------------
class S3VersioningConfiguration(AWSProperty):
"""# VersioningConfiguration - CloudFormationResourceSpecification version: 1.4.0
{
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-versioningconfig.html",
"Properties": {
"Status": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-versioningconfig.html#cfn-s3-bucket-versioningconfig-status",
"PrimitiveType": "String",
"Required": true,
"UpdateType": "Mutable"
}
}
}
"""
props = {
'Status': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-versioningconfig.html#cfn-s3-bucket-versioningconfig-status')
}
# -------------------------------------------
class S3FilterRule(AWSProperty):
"""# FilterRule - CloudFormationResourceSpecification version: 1.4.0
{
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfiguration-config-filter-s3key-rules.html",
"Properties": {
"Name": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfiguration-config-filter-s3key-rules.html#cfn-s3-bucket-notificationconfiguraiton-config-filter-s3key-rules-name",
"PrimitiveType": "String",
"Required": true,
"UpdateType": "Mutable"
},
"Value": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfiguration-config-filter-s3key-rules.html#cfn-s3-bucket-notificationconfiguraiton-config-filter-s3key-rules-value",
"PrimitiveType": "String",
"Required": true,
"UpdateType": "Mutable"
}
}
}
"""
props = {
'Name': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfiguration-config-filter-s3key-rules.html#cfn-s3-bucket-notificationconfiguraiton-config-filter-s3key-rules-name'),
'Value': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfiguration-config-filter-s3key-rules.html#cfn-s3-bucket-notificationconfiguraiton-config-filter-s3key-rules-value')
}
# -------------------------------------------
class S3NotificationConfiguration(AWSProperty):
"""# NotificationConfiguration - CloudFormationResourceSpecification version: 1.4.0
{
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig.html",
"Properties": {
"LambdaConfigurations": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig.html#cfn-s3-bucket-notificationconfig-lambdaconfig",
"DuplicatesAllowed": false,
"ItemType": "LambdaConfiguration",
"Required": false,
"Type": "List",
"UpdateType": "Mutable"
},
"QueueConfigurations": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig.html#cfn-s3-bucket-notificationconfig-queueconfig",
"DuplicatesAllowed": false,
"ItemType": "QueueConfiguration",
"Required": false,
"Type": "List",
"UpdateType": "Mutable"
},
"TopicConfigurations": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig.html#cfn-s3-bucket-notificationconfig-topicconfig",
"DuplicatesAllowed": false,
"ItemType": "TopicConfiguration",
"Required": false,
"Type": "List",
"UpdateType": "Mutable"
}
}
}
"""
props = {
'LambdaConfigurations': ([LambdaConfiguration], False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig.html#cfn-s3-bucket-notificationconfig-lambdaconfig'),
'QueueConfigurations': ([QueueConfiguration], False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig.html#cfn-s3-bucket-notificationconfig-queueconfig'),
'TopicConfigurations': ([TopicConfiguration], False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig.html#cfn-s3-bucket-notificationconfig-topicconfig')
}
# -------------------------------------------
class S3RedirectRule(AWSProperty):
"""# RedirectRule - CloudFormationResourceSpecification version: 1.4.0
{
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration-routingrules-redirectrule.html",
"Properties": {
"HostName": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration-routingrules-redirectrule.html#cfn-s3-websiteconfiguration-redirectrule-hostname",
"PrimitiveType": "String",
"Required": false,
"UpdateType": "Mutable"
},
"HttpRedirectCode": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration-routingrules-redirectrule.html#cfn-s3-websiteconfiguration-redirectrule-httpredirectcode",
"PrimitiveType": "String",
"Required": false,
"UpdateType": "Mutable"
},
"Protocol": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration-routingrules-redirectrule.html#cfn-s3-websiteconfiguration-redirectrule-protocol",
"PrimitiveType": "String",
"Required": false,
"UpdateType": "Mutable"
},
"ReplaceKeyPrefixWith": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration-routingrules-redirectrule.html#cfn-s3-websiteconfiguration-redirectrule-replacekeyprefixwith",
"PrimitiveType": "String",
"Required": false,
"UpdateType": "Mutable"
},
"ReplaceKeyWith": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration-routingrules-redirectrule.html#cfn-s3-websiteconfiguration-redirectrule-replacekeywith",
"PrimitiveType": "String",
"Required": false,
"UpdateType": "Mutable"
}
}
}
"""
props = {
'HostName': (basestring, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration-routingrules-redirectrule.html#cfn-s3-websiteconfiguration-redirectrule-hostname'),
'HttpRedirectCode': (basestring, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration-routingrules-redirectrule.html#cfn-s3-websiteconfiguration-redirectrule-httpredirectcode'),
'Protocol': (basestring, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration-routingrules-redirectrule.html#cfn-s3-websiteconfiguration-redirectrule-protocol'),
'ReplaceKeyPrefixWith': (basestring, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration-routingrules-redirectrule.html#cfn-s3-websiteconfiguration-redirectrule-replacekeyprefixwith'),
'ReplaceKeyWith': (basestring, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration-routingrules-redirectrule.html#cfn-s3-websiteconfiguration-redirectrule-replacekeywith')
}
# -------------------------------------------
class S3RedirectAllRequestsTo(AWSProperty):
"""# RedirectAllRequestsTo - CloudFormationResourceSpecification version: 1.4.0
{
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration-redirectallrequeststo.html",
"Properties": {
"HostName": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration-redirectallrequeststo.html#cfn-s3-websiteconfiguration-redirectallrequeststo-hostname",
"PrimitiveType": "String",
"Required": true,
"UpdateType": "Mutable"
},
"Protocol": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration-redirectallrequeststo.html#cfn-s3-websiteconfiguration-redirectallrequeststo-protocol",
"PrimitiveType": "String",
"Required": false,
"UpdateType": "Mutable"
}
}
}
"""
props = {
'HostName': (basestring, True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration-redirectallrequeststo.html#cfn-s3-websiteconfiguration-redirectallrequeststo-hostname'),
'Protocol': (basestring, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration-redirectallrequeststo.html#cfn-s3-websiteconfiguration-redirectallrequeststo-protocol')
}
# -------------------------------------------
class S3S3KeyFilter(AWSProperty):
"""# S3KeyFilter - CloudFormationResourceSpecification version: 1.4.0
{
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfiguration-config-filter-s3key.html",
"Properties": {
"Rules": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfiguration-config-filter-s3key.html#cfn-s3-bucket-notificationconfiguraiton-config-filter-s3key-rules",
"DuplicatesAllowed": false,
"ItemType": "FilterRule",
"Required": true,
"Type": "List",
"UpdateType": "Mutable"
}
}
}
"""
props = {
'Rules': ([FilterRule], True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfiguration-config-filter-s3key.html#cfn-s3-bucket-notificationconfiguraiton-config-filter-s3key-rules')
}
# -------------------------------------------
class S3WebsiteConfiguration(AWSProperty):
"""# WebsiteConfiguration - CloudFormationResourceSpecification version: 1.4.0
{
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration.html",
"Properties": {
"ErrorDocument": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration.html#cfn-s3-websiteconfiguration-errordocument",
"PrimitiveType": "String",
"Required": false,
"UpdateType": "Mutable"
},
"IndexDocument": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration.html#cfn-s3-websiteconfiguration-indexdocument",
"PrimitiveType": "String",
"Required": false,
"UpdateType": "Mutable"
},
"RedirectAllRequestsTo": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration.html#cfn-s3-websiteconfiguration-redirectallrequeststo",
"Required": false,
"Type": "RedirectAllRequestsTo",
"UpdateType": "Mutable"
},
"RoutingRules": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration.html#cfn-s3-websiteconfiguration-routingrules",
"DuplicatesAllowed": false,
"ItemType": "RoutingRule",
"Required": false,
"Type": "List",
"UpdateType": "Mutable"
}
}
}
"""
props = {
'ErrorDocument': (basestring, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration.html#cfn-s3-websiteconfiguration-errordocument'),
'IndexDocument': (basestring, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration.html#cfn-s3-websiteconfiguration-indexdocument'),
'RedirectAllRequestsTo': (RedirectAllRequestsTo, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration.html#cfn-s3-websiteconfiguration-redirectallrequeststo'),
'RoutingRules': ([RoutingRule], False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration.html#cfn-s3-websiteconfiguration-routingrules')
}
# -------------------------------------------
class S3Bucket(AWSObject):
"""# AWS::S3::Bucket - CloudFormationResourceSpecification version: 1.4.0
{
"Attributes": {
"DomainName": {
"PrimitiveType": "String"
},
"DualStackDomainName": {
"PrimitiveType": "String"
},
"WebsiteURL": {
"PrimitiveType": "String"
}
},
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket.html",
"Properties": {
"AccessControl": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket.html#cfn-s3-bucket-accesscontrol",
"PrimitiveType": "String",
"Required": false,
"UpdateType": "Mutable"
},
"BucketName": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket.html#cfn-s3-bucket-name",
"PrimitiveType": "String",
"Required": false,
"UpdateType": "Immutable"
},
"CorsConfiguration": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket.html#cfn-s3-bucket-crossoriginconfig",
"Required": false,
"Type": "CorsConfiguration",
"UpdateType": "Mutable"
},
"LifecycleConfiguration": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket.html#cfn-s3-bucket-lifecycleconfig",
"Required": false,
"Type": "LifecycleConfiguration",
"UpdateType": "Mutable"
},
"LoggingConfiguration": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket.html#cfn-s3-bucket-loggingconfig",
"Required": false,
"Type": "LoggingConfiguration",
"UpdateType": "Mutable"
},
"NotificationConfiguration": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket.html#cfn-s3-bucket-notification",
"Required": false,
"Type": "NotificationConfiguration",
"UpdateType": "Mutable"
},
"ReplicationConfiguration": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket.html#cfn-s3-bucket-replicationconfiguration",
"Required": false,
"Type": "ReplicationConfiguration",
"UpdateType": "Mutable"
},
"Tags": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket.html#cfn-s3-bucket-tags",
"DuplicatesAllowed": true,
"ItemType": "Tag",
"Required": false,
"Type": "List",
"UpdateType": "Mutable"
},
"VersioningConfiguration": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket.html#cfn-s3-bucket-versioning",
"Required": false,
"Type": "VersioningConfiguration",
"UpdateType": "Mutable"
},
"WebsiteConfiguration": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket.html#cfn-s3-bucket-websiteconfiguration",
"Required": false,
"Type": "WebsiteConfiguration",
"UpdateType": "Mutable"
}
}
}
"""
resource_type = "AWS::S3::Bucket"
props = {
'AccessControl': (basestring, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket.html#cfn-s3-bucket-accesscontrol'),
'BucketName': (basestring, False, 'Immutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket.html#cfn-s3-bucket-name'),
'CorsConfiguration': (CorsConfiguration, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket.html#cfn-s3-bucket-crossoriginconfig'),
'LifecycleConfiguration': (LifecycleConfiguration, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket.html#cfn-s3-bucket-lifecycleconfig'),
'LoggingConfiguration': (LoggingConfiguration, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket.html#cfn-s3-bucket-loggingconfig'),
'NotificationConfiguration': (NotificationConfiguration, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket.html#cfn-s3-bucket-notification'),
'ReplicationConfiguration': (ReplicationConfiguration, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket.html#cfn-s3-bucket-replicationconfiguration'),
'Tags': ([Tag], False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket.html#cfn-s3-bucket-tags'),
'VersioningConfiguration': (VersioningConfiguration, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket.html#cfn-s3-bucket-versioning'),
'WebsiteConfiguration': (WebsiteConfiguration, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket.html#cfn-s3-bucket-websiteconfiguration')
}
# -------------------------------------------
class S3BucketPolicy(AWSObject):
"""# AWS::S3::BucketPolicy - CloudFormationResourceSpecification version: 1.4.0
{
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-policy.html",
"Properties": {
"Bucket": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-policy.html#cfn-s3-bucketpolicy-bucket",
"PrimitiveType": "String",
"Required": true,
"UpdateType": "Immutable"
},
"PolicyDocument": {
"Documentation": "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-policy.html#cfn-s3-bucketpolicy-policydocument",
"PrimitiveType": "Json",
"Required": true,
"UpdateType": "Mutable"
}
}
}
"""
resource_type = "AWS::S3::BucketPolicy"
props = {
'Bucket': (basestring, True, 'Immutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-policy.html#cfn-s3-bucketpolicy-bucket'),
'PolicyDocument': ((basestring, dict), True, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-policy.html#cfn-s3-bucketpolicy-policydocument')
}
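# -------------------------------------------
# Minimal usage sketch. Assumption (not shown in this generated module):
# AWSObject follows the troposphere convention of a resource title plus
# keyword properties validated against each class's `props` dict; only
# primitive-typed properties are used here.
log_bucket = S3Bucket('LogBucket',
                      BucketName='my-log-bucket',
                      AccessControl='LogDeliveryWrite')
log_bucket_policy = S3BucketPolicy('LogBucketPolicy',
                                   Bucket='my-log-bucket',
                                   PolicyDocument={'Version': '2012-10-17',
                                                   'Statement': []})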
avg_line_length: 59.244275 | max_line_length: 303 | alphanum_fraction: 0.678871 | (remaining per-file quality-signal columns)
a6bdae24de834b6efe61c0b9ee65a34c5ce35ff7 | 22,780 | py | Python | sdk/python/pulumi_azure/synapse/sql_pool_vulnerability_assessment_baseline.py | henriktao/pulumi-azure | f1cbcf100b42b916da36d8fe28be3a159abaf022 | ["ECL-2.0", "Apache-2.0"] | stars: 109 (2018-06-18T00:19:44.000Z to 2022-02-20T05:32:57.000Z) | issues: 663 (2018-06-18T21:08:46.000Z to 2022-03-31T20:10:11.000Z) | forks: 41 (2018-07-19T22:37:38.000Z to 2022-03-14T10:56:26.000Z)
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['SqlPoolVulnerabilityAssessmentBaselineArgs', 'SqlPoolVulnerabilityAssessmentBaseline']
@pulumi.input_type
class SqlPoolVulnerabilityAssessmentBaselineArgs:
def __init__(__self__, *,
rule_name: pulumi.Input[str],
sql_pool_vulnerability_assessment_id: pulumi.Input[str],
baselines: Optional[pulumi.Input[Sequence[pulumi.Input['SqlPoolVulnerabilityAssessmentBaselineBaselineArgs']]]] = None,
name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a SqlPoolVulnerabilityAssessmentBaseline resource.
:param pulumi.Input[str] rule_name: The ID of the vulnerability assessment rule.
:param pulumi.Input[str] sql_pool_vulnerability_assessment_id: The ID of the Synapse SQL Pool Vulnerability Assessment. Changing this forces a new Synapse SQL Pool Vulnerability Assessment Rule Baseline to be created.
:param pulumi.Input[Sequence[pulumi.Input['SqlPoolVulnerabilityAssessmentBaselineBaselineArgs']]] baselines: One or more `baseline` blocks as defined below.
:param pulumi.Input[str] name: The name which should be used for this Synapse SQL Pool Vulnerability Assessment Rule Baseline.
"""
pulumi.set(__self__, "rule_name", rule_name)
pulumi.set(__self__, "sql_pool_vulnerability_assessment_id", sql_pool_vulnerability_assessment_id)
if baselines is not None:
pulumi.set(__self__, "baselines", baselines)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="ruleName")
def rule_name(self) -> pulumi.Input[str]:
"""
The ID of the vulnerability assessment rule.
"""
return pulumi.get(self, "rule_name")
@rule_name.setter
def rule_name(self, value: pulumi.Input[str]):
pulumi.set(self, "rule_name", value)
@property
@pulumi.getter(name="sqlPoolVulnerabilityAssessmentId")
def sql_pool_vulnerability_assessment_id(self) -> pulumi.Input[str]:
"""
The ID of the Synapse SQL Pool Vulnerability Assessment. Changing this forces a new Synapse SQL Pool Vulnerability Assessment Rule Baseline to be created.
"""
return pulumi.get(self, "sql_pool_vulnerability_assessment_id")
@sql_pool_vulnerability_assessment_id.setter
def sql_pool_vulnerability_assessment_id(self, value: pulumi.Input[str]):
pulumi.set(self, "sql_pool_vulnerability_assessment_id", value)
@property
@pulumi.getter
def baselines(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SqlPoolVulnerabilityAssessmentBaselineBaselineArgs']]]]:
"""
One or more `baseline` blocks as defined below.
"""
return pulumi.get(self, "baselines")
@baselines.setter
def baselines(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SqlPoolVulnerabilityAssessmentBaselineBaselineArgs']]]]):
pulumi.set(self, "baselines", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name which should be used for this Synapse SQL Pool Vulnerability Assessment Rule Baseline.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class _SqlPoolVulnerabilityAssessmentBaselineState:
def __init__(__self__, *,
baselines: Optional[pulumi.Input[Sequence[pulumi.Input['SqlPoolVulnerabilityAssessmentBaselineBaselineArgs']]]] = None,
name: Optional[pulumi.Input[str]] = None,
rule_name: Optional[pulumi.Input[str]] = None,
sql_pool_vulnerability_assessment_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering SqlPoolVulnerabilityAssessmentBaseline resources.
:param pulumi.Input[Sequence[pulumi.Input['SqlPoolVulnerabilityAssessmentBaselineBaselineArgs']]] baselines: One or more `baseline` blocks as defined below.
:param pulumi.Input[str] name: The name which should be used for this Synapse SQL Pool Vulnerability Assessment Rule Baseline.
:param pulumi.Input[str] rule_name: The ID of the vulnerability assessment rule.
:param pulumi.Input[str] sql_pool_vulnerability_assessment_id: The ID of the Synapse SQL Pool Vulnerability Assessment. Changing this forces a new Synapse SQL Pool Vulnerability Assessment Rule Baseline to be created.
"""
if baselines is not None:
pulumi.set(__self__, "baselines", baselines)
if name is not None:
pulumi.set(__self__, "name", name)
if rule_name is not None:
pulumi.set(__self__, "rule_name", rule_name)
if sql_pool_vulnerability_assessment_id is not None:
pulumi.set(__self__, "sql_pool_vulnerability_assessment_id", sql_pool_vulnerability_assessment_id)
@property
@pulumi.getter
def baselines(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SqlPoolVulnerabilityAssessmentBaselineBaselineArgs']]]]:
"""
One or more `baseline` blocks as defined below.
"""
return pulumi.get(self, "baselines")
@baselines.setter
def baselines(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SqlPoolVulnerabilityAssessmentBaselineBaselineArgs']]]]):
pulumi.set(self, "baselines", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name which should be used for this Synapse SQL Pool Vulnerability Assessment Rule Baseline.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="ruleName")
def rule_name(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the vulnerability assessment rule.
"""
return pulumi.get(self, "rule_name")
@rule_name.setter
def rule_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "rule_name", value)
@property
@pulumi.getter(name="sqlPoolVulnerabilityAssessmentId")
def sql_pool_vulnerability_assessment_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Synapse SQL Pool Vulnerability Assessment. Changing this forces a new Synapse SQL Pool Vulnerability Assessment Rule Baseline to be created.
"""
return pulumi.get(self, "sql_pool_vulnerability_assessment_id")
@sql_pool_vulnerability_assessment_id.setter
def sql_pool_vulnerability_assessment_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sql_pool_vulnerability_assessment_id", value)
class SqlPoolVulnerabilityAssessmentBaseline(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
baselines: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SqlPoolVulnerabilityAssessmentBaselineBaselineArgs']]]]] = None,
name: Optional[pulumi.Input[str]] = None,
rule_name: Optional[pulumi.Input[str]] = None,
sql_pool_vulnerability_assessment_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages a Synapse SQL Pool Vulnerability Assessment Rule Baseline.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_account = azure.storage.Account("exampleAccount",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
account_kind="BlobStorage",
account_tier="Standard",
account_replication_type="LRS")
example_data_lake_gen2_filesystem = azure.storage.DataLakeGen2Filesystem("exampleDataLakeGen2Filesystem", storage_account_id=example_account.id)
example_workspace = azure.synapse.Workspace("exampleWorkspace",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
storage_data_lake_gen2_filesystem_id=example_data_lake_gen2_filesystem.id,
sql_administrator_login="sqladminuser",
sql_administrator_login_password="H@Sh1CoR3!")
example_sql_pool = azure.synapse.SqlPool("exampleSqlPool",
synapse_workspace_id=example_workspace.id,
sku_name="DW100c",
create_mode="Default")
example_storage_account_account = azure.storage.Account("exampleStorageAccountAccount",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
account_kind="BlobStorage",
account_tier="Standard",
account_replication_type="LRS")
example_container = azure.storage.Container("exampleContainer",
storage_account_name=example_account.name,
container_access_type="private")
example_sql_pool_security_alert_policy = azure.synapse.SqlPoolSecurityAlertPolicy("exampleSqlPoolSecurityAlertPolicy",
sql_pool_id=example_sql_pool.id,
policy_state="Enabled",
storage_endpoint=example_account.primary_blob_endpoint,
storage_account_access_key=example_account.primary_access_key)
example_sql_pool_vulnerability_assessment = azure.synapse.SqlPoolVulnerabilityAssessment("exampleSqlPoolVulnerabilityAssessment",
sql_pool_security_alert_policy_id=example_sql_pool_security_alert_policy.id,
storage_container_path=pulumi.Output.all(example_account.primary_blob_endpoint, example_container.name).apply(lambda args: f"{args[0]}{args[1]}/"),
storage_account_access_key=example_account.primary_access_key)
example_sql_pool_vulnerability_assessment_baseline = azure.synapse.SqlPoolVulnerabilityAssessmentBaseline("exampleSqlPoolVulnerabilityAssessmentBaseline",
rule_name="VA1017",
sql_pool_vulnerability_assessment_id=example_sql_pool_vulnerability_assessment.id,
baselines=[
azure.synapse.SqlPoolVulnerabilityAssessmentBaselineBaselineArgs(
results=[
"userA",
"SELECT",
],
),
azure.synapse.SqlPoolVulnerabilityAssessmentBaselineBaselineArgs(
results=[
"userB",
"SELECT",
],
),
])
```
## Import
Synapse SQL Pool Vulnerability Assessment Rule Baselines can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:synapse/sqlPoolVulnerabilityAssessmentBaseline:SqlPoolVulnerabilityAssessmentBaseline example /subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Synapse/workspaces/workspace1/sqlPools/sqlPool1/vulnerabilityAssessments/default/rules/rule1/baselines/baseline1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SqlPoolVulnerabilityAssessmentBaselineBaselineArgs']]]] baselines: One or more `baseline` blocks as defined below.
:param pulumi.Input[str] name: The name which should be used for this Synapse SQL Pool Vulnerability Assessment Rule Baseline.
:param pulumi.Input[str] rule_name: The ID of the vulnerability assessment rule.
:param pulumi.Input[str] sql_pool_vulnerability_assessment_id: The ID of the Synapse SQL Pool Vulnerability Assessment. Changing this forces a new Synapse SQL Pool Vulnerability Assessment Rule Baseline to be created.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SqlPoolVulnerabilityAssessmentBaselineArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a Synapse SQL Pool Vulnerability Assessment Rule Baseline.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_account = azure.storage.Account("exampleAccount",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
account_kind="BlobStorage",
account_tier="Standard",
account_replication_type="LRS")
example_data_lake_gen2_filesystem = azure.storage.DataLakeGen2Filesystem("exampleDataLakeGen2Filesystem", storage_account_id=example_account.id)
example_workspace = azure.synapse.Workspace("exampleWorkspace",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
storage_data_lake_gen2_filesystem_id=example_data_lake_gen2_filesystem.id,
sql_administrator_login="sqladminuser",
sql_administrator_login_password="H@Sh1CoR3!")
example_sql_pool = azure.synapse.SqlPool("exampleSqlPool",
synapse_workspace_id=example_workspace.id,
sku_name="DW100c",
create_mode="Default")
example_storage_account_account = azure.storage.Account("exampleStorageAccountAccount",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
account_kind="BlobStorage",
account_tier="Standard",
account_replication_type="LRS")
example_container = azure.storage.Container("exampleContainer",
storage_account_name=example_account.name,
container_access_type="private")
example_sql_pool_security_alert_policy = azure.synapse.SqlPoolSecurityAlertPolicy("exampleSqlPoolSecurityAlertPolicy",
sql_pool_id=example_sql_pool.id,
policy_state="Enabled",
storage_endpoint=example_account.primary_blob_endpoint,
storage_account_access_key=example_account.primary_access_key)
example_sql_pool_vulnerability_assessment = azure.synapse.SqlPoolVulnerabilityAssessment("exampleSqlPoolVulnerabilityAssessment",
sql_pool_security_alert_policy_id=example_sql_pool_security_alert_policy.id,
storage_container_path=pulumi.Output.all(example_account.primary_blob_endpoint, example_container.name).apply(lambda args: f"{args[0]}{args[1]}/"),
storage_account_access_key=example_account.primary_access_key)
example_sql_pool_vulnerability_assessment_baseline = azure.synapse.SqlPoolVulnerabilityAssessmentBaseline("exampleSqlPoolVulnerabilityAssessmentBaseline",
rule_name="VA1017",
sql_pool_vulnerability_assessment_id=example_sql_pool_vulnerability_assessment.id,
baselines=[
azure.synapse.SqlPoolVulnerabilityAssessmentBaselineBaselineArgs(
results=[
"userA",
"SELECT",
],
),
azure.synapse.SqlPoolVulnerabilityAssessmentBaselineBaselineArgs(
results=[
"userB",
"SELECT",
],
),
])
```
## Import
Synapse SQL Pool Vulnerability Assessment Rule Baselines can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:synapse/sqlPoolVulnerabilityAssessmentBaseline:SqlPoolVulnerabilityAssessmentBaseline example /subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Synapse/workspaces/workspace1/sqlPools/sqlPool1/vulnerabilityAssessments/default/rules/rule1/baselines/baseline1
```
:param str resource_name: The name of the resource.
:param SqlPoolVulnerabilityAssessmentBaselineArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SqlPoolVulnerabilityAssessmentBaselineArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
baselines: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SqlPoolVulnerabilityAssessmentBaselineBaselineArgs']]]]] = None,
name: Optional[pulumi.Input[str]] = None,
rule_name: Optional[pulumi.Input[str]] = None,
sql_pool_vulnerability_assessment_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SqlPoolVulnerabilityAssessmentBaselineArgs.__new__(SqlPoolVulnerabilityAssessmentBaselineArgs)
__props__.__dict__["baselines"] = baselines
__props__.__dict__["name"] = name
if rule_name is None and not opts.urn:
raise TypeError("Missing required property 'rule_name'")
__props__.__dict__["rule_name"] = rule_name
if sql_pool_vulnerability_assessment_id is None and not opts.urn:
raise TypeError("Missing required property 'sql_pool_vulnerability_assessment_id'")
__props__.__dict__["sql_pool_vulnerability_assessment_id"] = sql_pool_vulnerability_assessment_id
super(SqlPoolVulnerabilityAssessmentBaseline, __self__).__init__(
'azure:synapse/sqlPoolVulnerabilityAssessmentBaseline:SqlPoolVulnerabilityAssessmentBaseline',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
baselines: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SqlPoolVulnerabilityAssessmentBaselineBaselineArgs']]]]] = None,
name: Optional[pulumi.Input[str]] = None,
rule_name: Optional[pulumi.Input[str]] = None,
sql_pool_vulnerability_assessment_id: Optional[pulumi.Input[str]] = None) -> 'SqlPoolVulnerabilityAssessmentBaseline':
"""
Get an existing SqlPoolVulnerabilityAssessmentBaseline resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SqlPoolVulnerabilityAssessmentBaselineBaselineArgs']]]] baselines: One or more `baseline` blocks as defined below.
:param pulumi.Input[str] name: The name which should be used for this Synapse SQL Pool Vulnerability Assessment Rule Baseline.
:param pulumi.Input[str] rule_name: The ID of the vulnerability assessment rule.
:param pulumi.Input[str] sql_pool_vulnerability_assessment_id: The ID of the Synapse SQL Pool Vulnerability Assessment. Changing this forces a new Synapse SQL Pool Vulnerability Assessment Rule Baseline to be created.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _SqlPoolVulnerabilityAssessmentBaselineState.__new__(_SqlPoolVulnerabilityAssessmentBaselineState)
__props__.__dict__["baselines"] = baselines
__props__.__dict__["name"] = name
__props__.__dict__["rule_name"] = rule_name
__props__.__dict__["sql_pool_vulnerability_assessment_id"] = sql_pool_vulnerability_assessment_id
return SqlPoolVulnerabilityAssessmentBaseline(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def baselines(self) -> pulumi.Output[Optional[Sequence['outputs.SqlPoolVulnerabilityAssessmentBaselineBaseline']]]:
"""
One or more `baseline` blocks as defined below.
"""
return pulumi.get(self, "baselines")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name which should be used for this Synapse SQL Pool Vulnerability Assessment Rule Baseline.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="ruleName")
def rule_name(self) -> pulumi.Output[str]:
"""
The ID of the vulnerability assessment rule.
"""
return pulumi.get(self, "rule_name")
@property
@pulumi.getter(name="sqlPoolVulnerabilityAssessmentId")
def sql_pool_vulnerability_assessment_id(self) -> pulumi.Output[str]:
"""
The ID of the Synapse SQL Pool Vulnerability Assessment. Changing this forces a new Synapse SQL Pool Vulnerability Assessment Rule Baseline to be created.
"""
return pulumi.get(self, "sql_pool_vulnerability_assessment_id")
avg_line_length: 52.853828 | max_line_length: 334 | alphanum_fraction: 0.697717 | (remaining per-file quality-signal columns)
47017f157cf3ac5f7a3398e0f52a084d702c3645 | 8,922 | py | Python | opac/tests/test_interface_TOC.py | joffilyfe/opac | f852df96f31ecbedb037631f44f227d58f029b80 | ["BSD-2-Clause"] | stars: 1 (2019-10-07T00:25:39.000Z to 2019-10-07T00:25:39.000Z) | issues: null | forks: null
# coding: utf-8
import flask
from flask import url_for
from .base import BaseTestCase
from . import utils
class TOCTestCase(BaseTestCase):
# TOC
def test_the_title_of_the_article_list_when_language_pt(self):
"""
Check that the TOC interface returns the article title in Portuguese.
"""
journal = utils.makeOneJournal()
with self.client as c:
# Create a collection so the ``g`` object is available in the interface
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = [
{'name': "Artigo Com Título Em Português", 'language': 'pt'},
{'name': "Título Del Artículo En Portugués", 'language': 'es'},
{'name': "Article Title In Portuguese", 'language': 'en'}
]
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
header = {
'Referer': url_for(
'main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment)
}
set_locale_url = url_for('main.set_locale', lang_code='pt_BR')
response = c.get(set_locale_url, headers=header, follow_redirects=True)
self.assertEqual(200, response.status_code)
self.assertEqual(flask.session['lang'], 'pt_BR')
self.assertIn("Artigo Com Título Em Português", response.data.decode('utf-8'))
def test_the_title_of_the_article_list_when_language_es(self):
"""
Check that the TOC interface returns the article title in Spanish.
"""
journal = utils.makeOneJournal()
with self.client as c:
# Create a collection so the ``g`` object is available in the interface
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = [
{'name': "Artigo Com Título Em Português", 'language': 'pt'},
{'name': "Título Del Artículo En Portugués", 'language': 'es'},
{'name': "Article Title In Portuguese", 'language': 'en'}
]
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
header = {
'Referer': url_for(
'main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment)}
set_locale_url = url_for('main.set_locale', lang_code='es')
response = c.get(set_locale_url, headers=header, follow_redirects=True)
self.assertEqual(200, response.status_code)
self.assertEqual(flask.session['lang'], 'es')
self.assertIn("Título Del Artículo En Portugués",
response.data.decode('utf-8'))
def test_the_title_of_the_article_list_when_language_en(self):
"""
Check that the TOC interface returns the article title in English.
"""
journal = utils.makeOneJournal()
with self.client as c:
# Create a collection so the ``g`` object is available in the interface
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = [
{'name': "Artigo Com Título Em Português", 'language': 'pt'},
{'name': "Título Del Artículo En Portugués", 'language': 'es'},
{'name': "Article Title In Portuguese", 'language': 'en'}
]
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
header = {
'Referer': url_for(
'main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment)
}
set_locale_url = url_for('main.set_locale', lang_code='en')
response = c.get(set_locale_url, headers=header, follow_redirects=True)
self.assertEqual(200, response.status_code)
self.assertEqual(flask.session['lang'], 'en')
self.assertIn("Article Title In Portuguese", response.data.decode('utf-8'))
def test_the_title_of_the_article_list_without_translated(self):
"""
Check that the TOC interface falls back to the original title when the
article has no translated titles.
"""
journal = utils.makeOneJournal()
with self.client as c:
# Create a collection so the ``g`` object is available in the interface
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = []
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
header = {
'Referer': url_for(
'main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment)
}
set_locale_url = url_for('main.set_locale', lang_code='en')
response = c.get(set_locale_url, headers=header, follow_redirects=True)
self.assertEqual(200, response.status_code)
self.assertEqual(flask.session['lang'], 'en')
self.assertIn("Article Y", response.data.decode('utf-8'))
def test_the_title_of_the_article_list_without_unknown_language_for_article(self):
"""
Check that the TOC interface falls back to the original title when the
requested language is not available for the article.
"""
journal = utils.makeOneJournal()
with self.client as c:
# Create a collection so the ``g`` object is available in the interface
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = []
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
header = {
'Referer': url_for(
'main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment)
}
set_locale_url = url_for('main.set_locale', lang_code='es')
response = c.get(set_locale_url, headers=header, follow_redirects=True)
self.assertEqual(200, response.status_code)
self.assertEqual(flask.session['lang'], 'es')
self.assertIn("Article Y", response.data.decode('utf-8'))
def test_the_title_of_the_article_list_with_and_without_translated(self):
"""
Check that the TOC interface returns the original title for articles
without translations and the translated title for articles that have
one.
"""
journal = utils.makeOneJournal()
with self.client as c:
# Create a collection so the ``g`` object is available in the interface
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = [
{'name': "Artigo Com Título Em Português", 'language': 'pt'},
{'name': "Título Del Artículo En Portugués", 'language': 'es'},
{'name': "Article Title In Portuguese", 'language': 'en'}
]
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': []
})
header = {
'Referer': url_for(
'main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment)
}
set_locale_url = url_for('main.set_locale', lang_code='es')
response = c.get(set_locale_url, headers=header, follow_redirects=True)
self.assertEqual(200, response.status_code)
self.assertEqual(flask.session['lang'], 'es')
self.assertIn("Article Y", response.data.decode('utf-8'))
self.assertIn("Título Del Artículo En Portugués", response.data.decode('utf-8'))
avg_line_length: 34.315385 | max_line_length: 92 | alphanum_fraction: 0.556153 | (remaining per-file quality-signal columns)
5b376a33e3fddd9dae05b462dc83f752be94f9e5 | 51 | py | Python | sample/__init__.py | lshang0311/python-sample-project | 10270699d10e6f04b7c4400574cb005c2ce00f6a | ["BSD-2-Clause"] | stars: null | issues: null | forks: null
from .core import hmm
from .core import simple_sum
avg_line_length: 17 | max_line_length: 28 | alphanum_fraction: 0.803922 | (remaining per-file quality-signal columns)
5b750f323a23631f89ff45ff4e581312e6e96300 | 209 | py | Python | lumin/optimisation/__init__.py | choisant/lumin | c039136eb096e8f3800f13925f9325b99cf7e76b | ["Apache-2.0"] | stars: 43 (2019-02-11T16:16:42.000Z to 2021-12-13T15:35:20.000Z) | issues: 48 (2020-05-21T02:40:50.000Z to 2021-08-10T11:07:08.000Z) | forks: 14 (2019-05-02T15:09:41.000Z to 2022-01-12T21:13:34.000Z)
# from .features import * # noqa F304
# from .hyper_param import * # noqa F304
# from .threshold import * # noqa F304
# __all__ = [*features.__all__, *hyper_param.__all__, *threshold.__all__] # noqa F405
avg_line_length: 34.833333 | max_line_length: 86 | alphanum_fraction: 0.698565 | (remaining per-file quality-signal columns)
5bb10a6f002de7790ec61b8b98eed9be29902a77 | 13,534 | py | Python | models/utils.py | shuaiqi361/a-PyTorch-Tutorial-to-Object-Detection | 5706b82ff67911864967aa72adf7e4a994c7ec89 | ["MIT"] | stars: null | issues: null | forks: null
import torch
import torch.nn as nn
from dataset.transforms import *
import torch.nn.functional as F
from torchvision.ops import nms
def conv3x3(in_planes, out_planes, stride=1):
"""
3x3 convolution with padding; with the default stride of 1 the spatial size is unchanged
"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
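# Hypothetical sketch: a strided BasicBlock with a 1x1-conv shortcut, the
# standard ResNet pattern for halving resolution while changing channel count
# (the 64/128 channel sizes and input shape below are illustrative).
_downsample = nn.Sequential(
    nn.Conv2d(64, 128, kernel_size=1, stride=2, bias=False),
    nn.BatchNorm2d(128))
_block = BasicBlock(64, 128, stride=2, downsample=_downsample)
_y = _block(torch.randn(1, 64, 32, 32))  # shape: (1, 128, 16, 16)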
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
def detect(predicted_locs, predicted_scores, min_score, max_overlap, top_k, priors_cxcy,
config, prior_positives_idx=None):
"""
Decipher the 22536 locations and class scores (output of the SSD300) to detect objects.
For each class, perform Non-Maximum Suppression (NMS) on boxes that are above a minimum threshold.
:param prior_positives_idx:
:param config:
:param priors_cxcy:
:param predicted_locs: predicted locations/boxes w.r.t the 22536 prior boxes, a tensor of dimensions (N, 22536, 4)
:param predicted_scores: class scores for each of the encoded locations/boxes, a tensor of dimensions (N, 22536, n_classes)
:param min_score: minimum threshold for a box to be considered a match for a certain class
:param max_overlap: maximum overlap two boxes can have so that the one with the lower score is not suppressed via NMS
:param top_k: if there are a lot of resulting detections across all classes, keep only the top 'k'
:return: detections (boxes, labels, and scores), lists of length batch_size
"""
if isinstance(priors_cxcy, list):
priors_cxcy = torch.cat(priors_cxcy, dim=0)
box_type = config.model['box_type']
device = config.device
focal_type = config['focal_type']
batch_size = predicted_locs.size(0)
n_priors = priors_cxcy.size(0)
n_classes = predicted_scores.size(2)
reg_type = config['reg_loss']
if focal_type.lower() == 'sigmoid':
predicted_scores = predicted_scores.sigmoid()
else:
predicted_scores = predicted_scores.softmax(dim=2) # softmax activation
# Lists to store final predicted boxes, labels, and scores for all images
all_images_boxes = list()
all_images_labels = list()
all_images_scores = list()
for i in range(batch_size):
# Decode object coordinates from the form we regressed predicted boxes to
if box_type == 'offset':
decoded_locs = cxcy_to_xy(
gcxgcy_to_cxcy(predicted_locs[i], priors_cxcy)).clamp_(0, 1)
elif box_type == 'center':
decoded_locs = cxcy_to_xy(predicted_locs[i]).clamp_(0, 1)
else:
decoded_locs = predicted_locs[i].clamp_(0, 1)
# Lists to store boxes and scores for this image
image_boxes = list()
image_labels = list()
image_scores = list()
# max_scores, best_label = predicted_scores[i].max(dim=1) # (22536)
if prior_positives_idx is not None:
class_scores_all = torch.index_select(predicted_scores[i], dim=0,
index=prior_positives_idx[i].nonzero().squeeze(-1))
decoded_locs_all = torch.index_select(decoded_locs, dim=0,
index=prior_positives_idx[i].nonzero().squeeze(-1))
else:
class_scores_all = predicted_scores[i, :, :]
decoded_locs_all = decoded_locs
# Check for each class, excluding the background class 0
for c in range(1, n_classes):
# Keep only predicted boxes and scores where scores for this class are above the minimum score
class_scores = class_scores_all[:, c]
score_above_min_score = (class_scores > min_score).long() # for indexing
# print(c, score_above_min_score.size())
# exit()
n_above_min_score = torch.sum(score_above_min_score).item()
if n_above_min_score == 0:
continue
class_scores = torch.index_select(class_scores, dim=0,
index=torch.nonzero(score_above_min_score).squeeze(dim=1))
class_decoded_locs = torch.index_select(decoded_locs_all, dim=0,
index=torch.nonzero(score_above_min_score).squeeze(dim=1))
# if reg_type.lower() == 'iou':
# anchor_nms_idx, _ = diounms(class_decoded_locs, class_scores, max_overlap)
# else:
# anchor_nms_idx = nms(class_decoded_locs, class_scores, max_overlap)
anchor_nms_idx = nms(class_decoded_locs, class_scores, max_overlap)
# Store only unsuppressed boxes for this class
image_boxes.append(class_decoded_locs[anchor_nms_idx, :])
image_labels.append(torch.LongTensor(anchor_nms_idx.size(0) * [c]).to(device))
image_scores.append(class_scores[anchor_nms_idx])
# If no object in any class is found, store a placeholder for 'background'
if len(image_boxes) == 0:
image_boxes.append(torch.FloatTensor([[0., 0., 1., 1.]]).to(device))
image_labels.append(torch.LongTensor([0]).to(device))
image_scores.append(torch.FloatTensor([0.]).to(device))
# Concatenate into single tensors
image_boxes = torch.cat(image_boxes, dim=0) # (n_objects, 4)
image_labels = torch.cat(image_labels, dim=0) # (n_objects)
image_scores = torch.cat(image_scores, dim=0) # (n_objects)
n_objects = image_scores.size(0)
# Keep only the top k objects
if n_objects > top_k:
image_scores, sort_ind = image_scores.sort(dim=0, descending=True)
image_scores = image_scores[:top_k]
image_boxes = image_boxes[sort_ind][:top_k]
image_labels = image_labels[sort_ind][:top_k]
# Append to lists that store predicted boxes and scores for all images
all_images_boxes.append(image_boxes)
all_images_labels.append(image_labels)
all_images_scores.append(image_scores)
return all_images_boxes, all_images_labels, all_images_scores
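# Standalone sketch of the per-class "threshold then NMS" step that detect()
# applies above (the dummy boxes and scores are assumptions; real inputs come
# from the SSD head). Boxes are (x1, y1, x2, y2), as torchvision.ops.nms expects.
_boxes = torch.tensor([[0.00, 0.00, 0.50, 0.50],
                       [0.02, 0.02, 0.52, 0.52],  # near-duplicate of box 0
                       [0.60, 0.60, 0.90, 0.90]])
_scores = torch.tensor([0.90, 0.80, 0.70])
_keep = _scores > 0.5                                 # min_score filter
_kept_idx = nms(_boxes[_keep], _scores[_keep], 0.45)  # max_overlap threshold
# _kept_idx retains boxes 0 and 2; the near-duplicate (IoU ~ 0.85) is suppressed.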
def detect_focal(predicted_locs, predicted_scores, min_score, max_overlap, top_k, priors_cxcy,
config, prior_positives_idx=None):
"""
Decipher the 22536 locations and class scores (output of the SSD300) to detect objects.
For each class, perform Non-Maximum Suppression (NMS) on boxes that are above a minimum threshold.
:param prior_positives_idx:
:param config:
:param priors_cxcy:
:param predicted_locs: predicted locations/boxes w.r.t the 22536 prior boxes, a tensor of dimensions (N, 22536, 4)
:param predicted_scores: class scores for each of the encoded locations/boxes, a tensor of dimensions (N, 22536, n_classes)
:param min_score: minimum threshold for a box to be considered a match for a certain class
:param max_overlap: maximum overlap two boxes can have so that the one with the lower score is not suppressed via NMS
    :param top_k: if there are a lot of resulting detections across all classes, keep only the top 'k'
:return: detections (boxes, labels, and scores), lists of length batch_size
"""
if isinstance(priors_cxcy, list):
priors_cxcy = torch.cat(priors_cxcy, dim=0)
box_type = config.model['box_type']
device = config.device
focal_type = config['focal_type']
reg_type = config['reg_loss']
batch_size = predicted_locs.size(0)
n_classes = predicted_scores.size(2)
if focal_type.lower() == 'sigmoid':
predicted_scores = predicted_scores.sigmoid()
else:
predicted_scores = predicted_scores.softmax(dim=2) # softmax activation
# Lists to store final predicted boxes, labels, and scores for all images
all_images_boxes = list()
all_images_labels = list()
all_images_scores = list()
for i in range(batch_size):
# Decode object coordinates from the form we regressed predicted boxes to
if box_type == 'offset':
decoded_locs = cxcy_to_xy(
gcxgcy_to_cxcy(predicted_locs[i], priors_cxcy)).clamp_(0, 1)
elif box_type == 'center':
decoded_locs = cxcy_to_xy(predicted_locs[i]).clamp_(0, 1)
else:
decoded_locs = predicted_locs[i].clamp_(0, 1)
# Lists to store boxes and scores for this image
image_boxes = list()
image_labels = list()
image_scores = list()
# max_scores, best_label = predicted_scores[i].max(dim=1) # (22536)
if prior_positives_idx is not None:
class_scores_all = torch.index_select(predicted_scores[i], dim=0,
index=prior_positives_idx[i].nonzero().squeeze(-1))
decoded_locs_all = torch.index_select(decoded_locs, dim=0,
index=prior_positives_idx[i].nonzero().squeeze(-1))
else:
class_scores_all = predicted_scores[i, :, :]
decoded_locs_all = decoded_locs
# Check for each class
for c in range(n_classes): # n_classes = 20 for VOC and 80 for COCO
# Keep only predicted boxes and scores where scores for this class are above the minimum score
class_scores = class_scores_all[:, c]
            # Cap the candidates per class at 5000; use a per-class threshold so the
            # global min_score is not overwritten between classes
            top_k_scores, _ = torch.topk(class_scores, min(5000, class_scores.size(0)), dim=0)
            class_min_score = max(min_score, top_k_scores.min().item())
            score_above_min_score = (class_scores >= class_min_score).long()  # for indexing
n_above_min_score = torch.sum(score_above_min_score).item()
if n_above_min_score == 0:
continue
class_scores = torch.index_select(class_scores, dim=0,
index=torch.nonzero(score_above_min_score).squeeze(dim=1))
class_decoded_locs = torch.index_select(decoded_locs_all, dim=0,
index=torch.nonzero(score_above_min_score).squeeze(dim=1))
anchor_nms_idx = nms(class_decoded_locs, class_scores, max_overlap)
image_boxes.append(class_decoded_locs[anchor_nms_idx, :])
image_labels.append(torch.LongTensor(anchor_nms_idx.size(0) * [c + 1]).to(device))
image_scores.append(class_scores[anchor_nms_idx])
# If no object in any class is found, store a placeholder for 'background'
if len(image_boxes) == 0:
image_boxes.append(torch.FloatTensor([[0., 0., 1., 1.]]).to(device))
image_labels.append(torch.LongTensor([0]).to(device))
image_scores.append(torch.FloatTensor([0.]).to(device))
# Concatenate into single tensors
image_boxes = torch.cat(image_boxes, dim=0) # (n_objects, 4)
image_labels = torch.cat(image_labels, dim=0) # (n_objects)
image_scores = torch.cat(image_scores, dim=0) # (n_objects)
n_objects = image_scores.size(0)
# Keep only the top k objects
if n_objects > top_k:
image_scores, sort_ind = image_scores.sort(dim=0, descending=True)
image_scores = image_scores[:top_k] # (top_k)
image_boxes = image_boxes[sort_ind][:top_k] # (top_k, 4)
image_labels = image_labels[sort_ind][:top_k] # (top_k)
# Append to lists that store predicted boxes and scores for all images
all_images_boxes.append(image_boxes)
all_images_labels.append(image_labels)
all_images_scores.append(image_scores)
return all_images_boxes, all_images_labels, all_images_scores
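# A minimal usage sketch (hypothetical tensors and config; shapes follow the
# docstring above):
#
#   boxes, labels, scores = detect_focal(predicted_locs, predicted_scores,
#                                        min_score=0.05, max_overlap=0.45,
#                                        top_k=200, priors_cxcy=priors_cxcy,
#                                        config=config)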
| 42.559748 | 127 | 0.641052 | [remaining per-file quality-signal values omitted] | 0 | 7 |
| 5be24ad411bcfc5d0513f8008febdf21195cf1bf | 76 | py | Python | scripts/help_funcs.py | Vokda/master_thesis | 7fee16ec31c2b10592cbb525b643d241bc526165 | ["MIT"] | null | null | null |
import numpy as np
def sum_mean(data):
return sum(data), np.mean(data)
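# A minimal usage sketch (illustrative values):
#   total, average = sum_mean([1, 2, 3])   # -> (6, 2.0)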
| 15.2 | 35 | 0.697368 | [remaining per-file quality-signal values omitted] | 0 | 7 |
| 5bf0e7ee7ab263b70a1e674d089af0f483404847 | 5,113 | py | Python | tests/alerters/gitter_test.py | vbisserie/elastalert2 | 982115f0de055595fa452c425b6a15bedc3161cf | ["Apache-2.0"] | null | null | null |
import json
import mock
import pytest
from requests import RequestException
from elastalert.alerters.gitter import GitterAlerter
from elastalert.loaders import FileRulesLoader
from elastalert.util import EAException
def test_gitter_msg_level_default():
rule = {
'name': 'Test Gitter Rule',
'type': 'any',
'gitter_webhook_url': 'https://webhooks.gitter.im/e/xxxxx',
'alert': []
}
rules_loader = FileRulesLoader({})
rules_loader.load_modules(rule)
alert = GitterAlerter(rule)
match = {
'@timestamp': '2021-01-01T00:00:00',
'somefield': 'foobarbaz'
}
with mock.patch('requests.post') as mock_post_request:
alert.alert([match])
expected_data = {
'message': 'Test Gitter Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: foobarbaz\n',
'level': 'error'
}
mock_post_request.assert_called_once_with(
rule['gitter_webhook_url'],
mock.ANY,
headers={'content-type': 'application/json'},
proxies=None
)
actual_data = json.loads(mock_post_request.call_args_list[0][0][1])
assert expected_data == actual_data
assert 'error' in actual_data['level']
def test_gitter_msg_level_info():
rule = {
'name': 'Test Gitter Rule',
'type': 'any',
'gitter_webhook_url': 'https://webhooks.gitter.im/e/xxxxx',
'gitter_msg_level': 'info',
'alert': []
}
rules_loader = FileRulesLoader({})
rules_loader.load_modules(rule)
alert = GitterAlerter(rule)
match = {
'@timestamp': '2021-01-01T00:00:00',
'somefield': 'foobarbaz'
}
with mock.patch('requests.post') as mock_post_request:
alert.alert([match])
expected_data = {
'message': 'Test Gitter Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: foobarbaz\n',
'level': 'info'
}
mock_post_request.assert_called_once_with(
rule['gitter_webhook_url'],
mock.ANY,
headers={'content-type': 'application/json'},
proxies=None
)
actual_data = json.loads(mock_post_request.call_args_list[0][0][1])
assert expected_data == actual_data
assert 'info' in actual_data['level']
def test_gitter_msg_level_error():
rule = {
'name': 'Test Gitter Rule',
'type': 'any',
'gitter_webhook_url': 'https://webhooks.gitter.im/e/xxxxx',
'gitter_msg_level': 'error',
'alert': []
}
rules_loader = FileRulesLoader({})
rules_loader.load_modules(rule)
alert = GitterAlerter(rule)
match = {
'@timestamp': '2021-01-01T00:00:00',
'somefield': 'foobarbaz'
}
with mock.patch('requests.post') as mock_post_request:
alert.alert([match])
expected_data = {
'message': 'Test Gitter Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: foobarbaz\n',
'level': 'error'
}
mock_post_request.assert_called_once_with(
rule['gitter_webhook_url'],
mock.ANY,
headers={'content-type': 'application/json'},
proxies=None
)
actual_data = json.loads(mock_post_request.call_args_list[0][0][1])
assert expected_data == actual_data
assert 'error' in actual_data['level']
def test_gitter_proxy():
rule = {
'name': 'Test Gitter Rule',
'type': 'any',
'gitter_webhook_url': 'https://webhooks.gitter.im/e/xxxxx',
'gitter_msg_level': 'error',
'gitter_proxy': 'http://proxy.url',
'alert': []
}
rules_loader = FileRulesLoader({})
rules_loader.load_modules(rule)
alert = GitterAlerter(rule)
match = {
'@timestamp': '2021-01-01T00:00:00',
'somefield': 'foobarbaz'
}
with mock.patch('requests.post') as mock_post_request:
alert.alert([match])
expected_data = {
'message': 'Test Gitter Rule\n\n@timestamp: 2021-01-01T00:00:00\nsomefield: foobarbaz\n',
'level': 'error'
}
mock_post_request.assert_called_once_with(
rule['gitter_webhook_url'],
mock.ANY,
headers={'content-type': 'application/json'},
proxies={'https': 'http://proxy.url'}
)
actual_data = json.loads(mock_post_request.call_args_list[0][0][1])
assert expected_data == actual_data
assert 'error' in actual_data['level']
def test_gitter_ea_exception():
    rule = {
        'name': 'Test Gitter Rule',
        'type': 'any',
        'gitter_webhook_url': 'https://webhooks.gitter.im/e/xxxxx',
        'gitter_msg_level': 'error',
        'gitter_proxy': 'http://proxy.url',
        'alert': []
    }
    rules_loader = FileRulesLoader({})
    rules_loader.load_modules(rule)
    alert = GitterAlerter(rule)
    match = {
        '@timestamp': '2021-01-01T00:00:00',
        'somefield': 'foobarbaz'
    }
    # The alerter is expected to wrap the underlying RequestException in an
    # EAException, so assert on that directly instead of swallowing it in a
    # try/except block that passes unconditionally.
    mock_run = mock.MagicMock(side_effect=RequestException)
    with mock.patch('requests.post', mock_run), pytest.raises(EAException):
        alert.alert([match])
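# The three msg-level tests above differ only in the configured level. A
# hedged sketch of how they could be collapsed with pytest parametrization
# (the extra_rule/expected_level names below are illustrative, not part of
# the original suite):
@pytest.mark.parametrize('extra_rule, expected_level', [
    ({}, 'error'),                             # default level
    ({'gitter_msg_level': 'info'}, 'info'),
    ({'gitter_msg_level': 'error'}, 'error'),
])
def test_gitter_msg_level_parametrized(extra_rule, expected_level):
    rule = {
        'name': 'Test Gitter Rule',
        'type': 'any',
        'gitter_webhook_url': 'https://webhooks.gitter.im/e/xxxxx',
        'alert': []
    }
    rule.update(extra_rule)
    rules_loader = FileRulesLoader({})
    rules_loader.load_modules(rule)
    alert = GitterAlerter(rule)
    match = {'@timestamp': '2021-01-01T00:00:00', 'somefield': 'foobarbaz'}
    with mock.patch('requests.post') as mock_post_request:
        alert.alert([match])
    actual_data = json.loads(mock_post_request.call_args_list[0][0][1])
    assert actual_data['level'] == expected_level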
| 29.726744 | 97 | 0.613925 | [remaining per-file quality-signal values omitted] | 0 | 7 |
| 753b2cddad8d46203f0caead94cd9f5db5abe1ba | 13,903 | py | Python | 6.0/rubik_v2.py | vezril/IEEEXtreme | b0952cbe4a47a00f387f9f849bd6b632d6507126 | ["MIT"] | null | null | null |
#!/usr/bin/env python2.6
import copy
class Cube():
'''
Left - left
Face - front
Right - Side 3
Back - Side 4
Top - Side 5
Bottom - bottom
'''
def __init__(self):
self.left = [['g','g','g'],['g','g','g'],['g','g','g']]
self.front = [['p','p','p'],['p','p','p'],['p','p','p']]
self.right = [['b','b','b'],['b','b','b'],['b','b','b']]
self.back = [['x','x','x'],['x','x','x'],['x','x','x']]
self.up = [['r','r','r'],['r','r','r'],['r','r','r']]
self.bottom = [['o','o','o'],['o','o','o'],['o','o','o']]
self.buffer1 = self.left
self.buffer2 = self.front
###################################################################################
def rotate_left(self,direction):
# Modification of side, 2,4,5 and 6
if(direction == 'reverse'):
self.buffer1 = copy.deepcopy(self.up)
self.up[0][0] = self.front[0][0]
self.up[1][0] = self.front[1][0]
self.up[2][0] = self.front[2][0]
self.buffer2 = copy.deepcopy(self.back)
self.back[0][0] = self.buffer1[2][0]
self.back[1][0] = self.buffer1[1][0]
self.back[2][0] = self.buffer1[0][0]
self.buffer1 = copy.deepcopy(self.bottom)
self.bottom[2][0] = self.buffer2[0][0]
self.bottom[1][0] = self.buffer2[1][0]
self.bottom[0][0] = self.buffer2[2][0]
self.front[0][0] = self.buffer1[0][0]
self.front[1][0] = self.buffer1[1][0]
self.front[2][0] = self.buffer1[2][0]
# Rotation of side
self.buffer1 = copy.deepcopy(self.left)
self.left[0][0] = self.buffer1[2][0]
self.left[0][1] = self.buffer1[1][0]
self.left[0][2] = self.buffer1[0][0]
self.left[1][0] = self.buffer1[2][1]
self.left[1][2] = self.buffer1[0][1]
self.left[2][0] = self.buffer1[2][2]
self.left[2][1] = self.buffer1[1][2]
self.left[2][2] = self.buffer1[0][2]
elif(direction == 'normal'):
self.buffer1 = copy.deepcopy(self.bottom)
self.bottom[0][0] = self.front[0][0]
self.bottom[1][0] = self.front[1][0]
self.bottom[2][0] = self.front[2][0]
self.buffer2 = copy.deepcopy(self.back)
self.back[0][0] = self.buffer1[2][0]
self.back[1][0] = self.buffer1[1][0]
self.back[2][0] = self.buffer1[0][0]
self.buffer1 = copy.deepcopy(self.up)
self.up[2][0] = self.buffer2[0][0]
self.up[1][0] = self.buffer2[1][0]
self.up[0][0] = self.buffer2[2][0]
self.front[0][0] = self.buffer1[0][0]
self.front[1][0] = self.buffer1[1][0]
self.front[2][0] = self.buffer1[2][0]
# Rotation of side
self.buffer1 = copy.deepcopy(self.left)
self.left[0][0] = self.buffer1[0][2]
self.left[0][1] = self.buffer1[1][2]
self.left[0][2] = self.buffer1[2][2]
self.left[1][0] = self.buffer1[0][1]
self.left[1][2] = self.buffer1[2][1]
self.left[2][0] = self.buffer1[0][0]
self.left[2][1] = self.buffer1[1][0]
self.left[2][2] = self.buffer1[2][0]
else:
print "Invalid direction in rotate_left() method"
###################################################################################
    def rotate_top(self, direction):
if(direction == 'normal'):
self.buffer1 = copy.deepcopy(self.front)
self.front[2][0] = self.left[2][0]
self.front[2][1] = self.left[2][1]
self.front[2][2] = self.left[2][2]
self.buffer2 = copy.deepcopy(self.right)
self.right[2][0] = self.buffer1[2][0]
self.right[2][1] = self.buffer1[2][1]
self.right[2][2] = self.buffer1[2][2]
self.buffer1 = copy.deepcopy(self.back)
self.back[2][0] = self.buffer2[2][2]
self.back[2][1] = self.buffer2[2][1]
self.back[2][2] = self.buffer2[2][0]
self.left[2][0] = self.buffer1[2][2]
self.left[2][1] = self.buffer1[2][1]
self.left[2][2] = self.buffer1[2][0]
self.buffer1 = copy.deepcopy(self.up)
self.up[0][0] = self.buffer1[2][0]
self.up[0][1] = self.buffer1[1][0]
self.up[0][2] = self.buffer1[0][0]
self.up[1][0] = self.buffer1[2][1]
self.up[1][2] = self.buffer1[0][1]
self.up[2][0] = self.buffer1[2][2]
self.up[2][1] = self.buffer1[1][2]
self.up[2][2] = self.buffer1[0][2]
elif(direction == 'left'):
self.buffer1 = copy.deepcopy(self.back)
self.back[2][0] = self.left[2][2]
self.back[2][1] = self.left[2][1]
self.back[2][2] = self.left[2][0]
self.buffer2 = copy.deepcopy(self.right)
self.right[2][0] = self.buffer1[2][2]
self.right[2][1] = self.buffer1[2][1]
self.right[2][2] = self.buffer1[2][0]
self.buffer1 = copy.deepcopy(self.front)
self.front[2][0] = self.buffer2[2][2]
self.front[2][1] = self.buffer2[2][1]
self.front[2][2] = self.buffer2[2][0]
self.left[2][0] = self.buffer1[2][0]
self.left[2][1] = self.buffer1[2][1]
self.left[2][2] = self.buffer1[2][2]
self.buffer1 = copy.deepcopy(self.up)
self.up[0][0] = self.buffer1[2][0]
self.up[0][1] = self.buffer1[1][0]
self.up[0][2] = self.buffer1[0][0]
self.up[1][0] = self.buffer1[2][1]
self.up[1][2] = self.buffer1[0][1]
self.up[2][0] = self.buffer1[2][2]
self.up[2][1] = self.buffer1[1][2]
self.up[2][2] = self.buffer1[0][2]
else:
print "Invalid direction in rotate_top() method"
###################################################################################
def rotate_right(self,direction):
        # Moves the right-hand column (index 2) of front/up/back/bottom and
        # rotates the right face, mirroring the convention used in rotate_left
        # (the original body duplicated rotate_left's column-0 logic)
        if(direction == 'up'):
            self.buffer1 = copy.deepcopy(self.up)
            self.up[0][2] = self.front[0][2]
            self.up[1][2] = self.front[1][2]
            self.up[2][2] = self.front[2][2]
            self.buffer2 = copy.deepcopy(self.back)
            self.back[0][2] = self.buffer1[2][2]
            self.back[1][2] = self.buffer1[1][2]
            self.back[2][2] = self.buffer1[0][2]
            self.buffer1 = copy.deepcopy(self.bottom)
            self.bottom[2][2] = self.buffer2[0][2]
            self.bottom[1][2] = self.buffer2[1][2]
            self.bottom[0][2] = self.buffer2[2][2]
            self.front[0][2] = self.buffer1[0][2]
            self.front[1][2] = self.buffer1[1][2]
            self.front[2][2] = self.buffer1[2][2]
            # Rotation of side
            self.buffer1 = copy.deepcopy(self.right)
            self.right[0][0] = self.buffer1[2][0]
            self.right[0][1] = self.buffer1[1][0]
            self.right[0][2] = self.buffer1[0][0]
            self.right[1][0] = self.buffer1[2][1]
            self.right[1][2] = self.buffer1[0][1]
            self.right[2][0] = self.buffer1[2][2]
            self.right[2][1] = self.buffer1[1][2]
            self.right[2][2] = self.buffer1[0][2]
        elif(direction == 'down'):
            self.buffer1 = copy.deepcopy(self.bottom)
            self.bottom[0][2] = self.front[0][2]
            self.bottom[1][2] = self.front[1][2]
            self.bottom[2][2] = self.front[2][2]
            self.buffer2 = copy.deepcopy(self.back)
            self.back[0][2] = self.buffer1[2][2]
            self.back[1][2] = self.buffer1[1][2]
            self.back[2][2] = self.buffer1[0][2]
            self.buffer1 = copy.deepcopy(self.up)
            self.up[2][2] = self.buffer2[0][2]
            self.up[1][2] = self.buffer2[1][2]
            self.up[0][2] = self.buffer2[2][2]
            self.front[0][2] = self.buffer1[0][2]
            self.front[1][2] = self.buffer1[1][2]
            self.front[2][2] = self.buffer1[2][2]
            # Rotation of side
            self.buffer1 = copy.deepcopy(self.right)
            self.right[0][0] = self.buffer1[0][2]
            self.right[0][1] = self.buffer1[1][2]
            self.right[0][2] = self.buffer1[2][2]
            self.right[1][0] = self.buffer1[0][1]
            self.right[1][2] = self.buffer1[2][1]
            self.right[2][0] = self.buffer1[0][0]
            self.right[2][1] = self.buffer1[1][0]
            self.right[2][2] = self.buffer1[2][0]
else:
print "Invalid direction in rotate_right() method"
###################################################################################
def rotate_bottom(self,direction):
if(direction == 'right'):
self.buffer1 = copy.deepcopy(self.front)
self.front[0][0] = self.left[0][0]
self.front[0][1] = self.left[0][1]
self.front[0][2] = self.left[0][2]
self.buffer2 = copy.deepcopy(self.right)
self.right[0][0] = self.buffer1[0][0]
self.right[0][1] = self.buffer1[0][1]
self.right[0][2] = self.buffer1[0][2]
self.buffer1 = copy.deepcopy(self.back)
self.back[0][0] = self.buffer2[0][2]
self.back[0][1] = self.buffer2[0][1]
self.back[0][2] = self.buffer2[0][0]
self.left[0][0] = self.buffer1[0][2]
self.left[0][1] = self.buffer1[0][1]
self.left[0][2] = self.buffer1[0][0]
self.buffer1 = copy.deepcopy(self.bottom)
self.bottom[0][0] = self.buffer1[0][2]
self.bottom[0][1] = self.buffer1[1][2]
self.bottom[0][2] = self.buffer1[2][2]
self.bottom[1][0] = self.buffer1[0][1]
self.bottom[1][2] = self.buffer1[2][1]
self.bottom[2][0] = self.buffer1[0][0]
self.bottom[2][1] = self.buffer1[1][0]
self.bottom[2][2] = self.buffer1[2][0]
elif(direction == 'left'):
self.buffer1 = copy.deepcopy(self.back)
self.back[0][0] = self.left[0][2]
self.back[0][1] = self.left[0][1]
self.back[0][2] = self.left[0][0]
self.buffer2 = copy.deepcopy(self.right)
self.right[0][0] = self.buffer1[0][2]
self.right[0][1] = self.buffer1[0][1]
self.right[0][2] = self.buffer1[0][0]
self.buffer1 = copy.deepcopy(self.front)
self.front[0][0] = self.buffer2[0][2]
self.front[0][1] = self.buffer2[0][1]
self.front[0][2] = self.buffer2[0][0]
self.left[0][0] = self.buffer1[0][0]
self.left[0][1] = self.buffer1[0][1]
self.left[0][2] = self.buffer1[0][2]
self.buffer1 = copy.deepcopy(self.bottom)
self.bottom[0][0] = self.buffer1[2][0]
self.bottom[0][1] = self.buffer1[1][0]
self.bottom[0][2] = self.buffer1[0][0]
self.bottom[1][0] = self.buffer1[2][1]
self.bottom[1][2] = self.buffer1[0][1]
self.bottom[2][0] = self.buffer1[2][2]
self.bottom[2][1] = self.buffer1[1][2]
self.bottom[2][2] = self.buffer1[0][2]
else:
print "Invalid direction in rotate_bottom() method"
###################################################################################
def print_cube(self):
print " \t" + self.up[2][0] + " " + self.up[2][1] + " " + self.up[2][2] + "\n"
print " \t" + self.up[1][0] + " " + self.up[1][1] + " " + self.up[1][2] + "\n"
print " \t" + self.up[0][0] + " " + self.up[0][1] + " " + self.up[0][2] + "\n\n"
print self.left[2][0] + " " + self.left[2][1] + " " + self.left[2][2] + "\t" + self.front[2][0] + " " + self.front[2][1] + " " + self.front[2][2] + "\t" + self.right[2][0] + " " + self.right[2][1] + " " + self.right[2][2] + "\t" + self.back[2][0] + " " + self.back[2][1] + " " + self.back[2][2] + "\n"
print self.left[1][0] + " " + self.left[1][1] + " " + self.left[1][2] + "\t" + self.front[1][0] + " " + self.front[1][1] + " " + self.front[1][2] + "\t" + self.right[1][0] + " " + self.right[1][1] + " " + self.right[1][2] + "\t" + self.back[1][0] + " " + self.back[1][1] + " " + self.back[1][2] + "\n"
print self.left[0][0] + " " + self.left[0][1] + " " + self.left[0][2] + "\t" + self.front[0][0] + " " + self.front[0][1] + " " + self.front[0][2] + "\t" + self.right[0][0] + " " + self.right[0][1] + " " + self.right[0][2] + "\t" + self.back[0][0] + " " + self.back[0][1] + " " + self.back[0][2] + "\n\n"
print " \t" + self.bottom[2][0] + " " + self.bottom[2][1] + " " + self.bottom[2][2] + "\n"
print " \t" + self.bottom[1][0] + " " + self.bottom[1][1] + " " + self.bottom[1][2] + "\n"
print " \t" + self.bottom[0][0] + " " + self.bottom[0][1] + " " + self.bottom[0][2] + "\n\n"
if __name__ == "__main__":
r = Cube()
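    # A minimal usage sketch (illustrative moves): exercise one turn of each
    # implemented layer and show the resulting cube state
    r.rotate_left('normal')
    r.rotate_top('left')
    r.rotate_bottom('right')
    r.print_cube()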
| 42.51682 | 311 | 0.439258 | [remaining per-file quality-signal values omitted] | 0 | 7 |
| f3367625810756a77cedcf8682935e87f67d0a2b | 283 | py | Python | openml-generic-python/src/test/resources/valid_classifier/ClassifierApi/classifier.py | henriquevcosta/feedzai-openml-python | fd24644230121a7579bea414d46996de4dd29273 | ["Apache-2.0"] | 6 | 2018-06-12T10:32:37.000Z | 2020-03-25T13:24:32.000Z | 39 | 2018-06-12T10:32:38.000Z | 2021-04-07T13:53:49.000Z | 6 | 2018-06-15T14:27:41.000Z | 2020-11-24T15:29:52.000Z |
class ClassifierBase(object):
def getClassDistribution(self, instance):
raise NotImplementedError("This must be implemented by a concrete adapter.")
def classify(self, instance):
raise NotImplementedError("This must be implemented by a concrete adapter.")
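# A minimal sketch of a concrete adapter (hypothetical class and values, not
# part of the original resource): a constant classifier for a two-class problem.
class ConstantClassifier(ClassifierBase):
    def getClassDistribution(self, instance):
        # Fixed scores for a hypothetical binary problem
        return [0.9, 0.1]

    def classify(self, instance):
        # Index of the highest-scoring class
        return 0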
| 31.444444 | 84 | 0.738516 | [remaining per-file quality-signal values omitted] | 0 | 8 |
| f39c43446140e6cf23408395b93de1d2e61a7999 | 22,189 | py | Python | tests/test_engine.py | PrabhuLoganathan/Pytest--Play | 7e3ec041cf5949f3f73cb2dcf315d9b894c2558f | ["Apache-2.0"] | null | null | null |
import pytest
import mock
from datetime import (
datetime,
timedelta,
)
def test_play_engine_constructor(bdd_vars, parametrizer_class):
from pytest_play.engine import PlayEngine
executor = PlayEngine(None, bdd_vars, parametrizer_class)
assert executor.parametrizer_class is parametrizer_class
assert executor.navigation is None
assert executor.variables == bdd_vars
def test_splinter_executor_parametrizer(dummy_executor):
assert dummy_executor.parametrizer.parametrize('$foo') == 'bar'
def test_splinter_execute(dummy_executor):
execute_command_mock = mock.MagicMock()
dummy_executor.execute_command = execute_command_mock
json_data = {
'steps': [
{'type': 'get', 'url': 'http://1'},
{'type': 'get', 'url': 'http://2'}
]
}
dummy_executor.execute(json_data)
calls = [
mock.call(json_data['steps'][0]),
mock.call(json_data['steps'][1]),
]
assert dummy_executor.execute_command.assert_has_calls(
calls, any_order=False) is None
def test_execute_bad_type(dummy_executor):
command = {'typeXX': 'get', 'url': 'http://1'}
with pytest.raises(KeyError):
dummy_executor.execute_command(command)
def test_execute_bad_command(dummy_executor):
command = {'type': 'get', 'urlXX': 'http://1'}
with pytest.raises(KeyError):
dummy_executor.execute_command(command)
def test_execute_not_implemented_command(dummy_executor):
command = {'type': 'new_command', 'urlXX': 'http://1'}
dummy_executor.COMMANDS = ['new_command']
with pytest.raises(NotImplementedError):
dummy_executor.execute_command(command)
def test_execute_condition_true(dummy_executor):
command = {'type': 'get',
'url': 'http://1',
'condition': '"$foo" === "bar"'}
dummy_executor.navigation.page.driver.evaluate_script.return_value = True
dummy_executor.execute_command(command)
dummy_executor \
.navigation \
.page \
.driver \
.evaluate_script \
.assert_called_once_with('"bar" === "bar"') is None
dummy_executor \
.navigation \
.page \
.driver_adapter \
.open \
.assert_called_once_with(command['url']) is None
def test_execute_condition_false(dummy_executor):
command = {'type': 'get',
'url': 'http://1',
'condition': '"$foo" === "bar1"'}
dummy_executor.navigation.page.driver.evaluate_script.return_value = False
dummy_executor.execute_command(command)
dummy_executor \
.navigation \
.page \
.driver \
.evaluate_script \
.assert_called_once_with('"bar" === "bar1"') is None
    assert dummy_executor \
        .navigation \
        .page \
        .driver_adapter \
        .open \
        .called is False
def test_execute_get(dummy_executor):
command = {'type': 'get', 'url': 'http://1'}
dummy_executor.execute_command(command)
dummy_executor \
.navigation \
.page \
.driver_adapter \
.open \
.assert_called_once_with(command['url']) is None
def test_execute_get_basestring(dummy_executor):
command = """{"type": "get", "url": "http://1"}"""
dummy_executor.execute_command(command)
dummy_executor \
.navigation \
.page \
.driver_adapter \
.open \
.assert_called_once_with('http://1') is None
def test_execute_get_basestring_param(dummy_executor):
command = """{"type": "get", "url": "http://$foo"}"""
dummy_executor.execute_command(command)
dummy_executor \
.navigation \
.page \
.driver_adapter \
.open \
.assert_called_once_with('http://bar') is None
def test_execute_click(dummy_executor):
command = {
'type': 'clickElement',
'locator': {
'type': 'css',
'value': 'body'
}
}
dummy_executor.execute_command(command)
dummy_executor \
.navigation \
.page \
.find_element \
.assert_called_once_with('css', 'body') is None
dummy_executor \
.navigation \
.page \
.find_element \
.return_value \
.click \
.assert_called_once_with() is None
assert dummy_executor.navigation.page.wait.until.called is True
def test_execute_fill(dummy_executor):
command = {
'type': 'setElementText',
'locator': {
'type': 'css',
'value': 'body'
},
'text': 'text value',
}
dummy_executor.execute_command(command)
dummy_executor \
.navigation \
.page \
.find_element \
.assert_called_once_with('css', 'body') is None
dummy_executor \
.navigation \
.page \
.find_element \
.return_value \
.fill \
.assert_called_once_with('text value') is None
def test_execute_select_text(dummy_executor):
command = {
'type': 'select',
'locator': {
'type': 'css',
'value': 'body'
},
'text': 'text value',
}
dummy_executor.execute_command(command)
dummy_executor \
.navigation \
.page \
.find_element \
.assert_called_once_with('css', 'body') is None
dummy_executor \
.navigation \
.page \
.find_element \
.assert_called_once_with('css', 'body') is None
dummy_executor \
.navigation \
.page \
.find_element \
.return_value \
._element \
.find_element_by_xpath \
.assert_called_once_with(
'./option[text()="{0}"]'.format('text value')) is None
dummy_executor \
.navigation \
.page \
.find_element \
.return_value \
._element \
.find_element_by_xpath \
.return_value \
.click \
.assert_called_once_with() is None
def test_execute_select_value(dummy_executor):
command = {
'type': 'select',
'locator': {
'type': 'css',
'value': 'body'
},
'value': '1',
}
dummy_executor.execute_command(command)
dummy_executor \
.navigation \
.page \
.find_element \
.assert_called_once_with('css', 'body') is None
dummy_executor \
.navigation \
.page \
.find_element \
.assert_called_once_with('css', 'body') is None
dummy_executor \
.navigation \
.page \
.find_element \
.return_value \
._element \
.find_element_by_xpath \
.assert_called_once_with(
'./option[@value="{0}"]'.format('1')) is None
dummy_executor \
.navigation \
.page \
.find_element \
.return_value \
._element \
.find_element_by_xpath \
.return_value \
.click \
.assert_called_once_with() is None
def test_execute_select_bad(dummy_executor):
command = {
'type': 'select',
'locator': {
'type': 'css',
'value': 'body'
},
'value': '1',
'text': 'text',
}
with pytest.raises(ValueError):
dummy_executor.execute_command(command)
def test_execute_assert_element_present_default(dummy_executor):
command = {
'type': 'assertElementPresent',
'locator': {
'type': 'css',
'value': 'body'
},
}
dummy_executor.execute_command(command)
dummy_executor \
.navigation \
.page \
.find_element \
.assert_called_once_with('css', 'body') is None
def test_execute_assert_element_present_negated(dummy_executor):
command = {
'type': 'assertElementPresent',
'locator': {
'type': 'css',
'value': 'body'
},
'negated': False,
}
dummy_executor.execute_command(command)
dummy_executor \
.navigation \
.page \
.find_element \
.assert_called_once_with('css', 'body') is None
def test_execute_assert_element_present_negated_false(dummy_executor):
command = {
'type': 'assertElementPresent',
'locator': {
'type': 'css',
'value': 'body'
},
'negated': False,
}
dummy_executor.navigation.page.find_element.return_value = None
with pytest.raises(AssertionError):
dummy_executor.execute_command(command)
dummy_executor \
.navigation \
.page \
.find_element \
.assert_called_once_with('css', 'body') is None
def test_execute_assert_element_present_negated_true(dummy_executor):
command = {
'type': 'assertElementPresent',
'locator': {
'type': 'css',
'value': 'body'
},
'negated': True,
}
dummy_executor.navigation.page.find_element.return_value = 1
with pytest.raises(AssertionError):
dummy_executor.execute_command(command)
dummy_executor \
.navigation \
.page \
.find_element \
.assert_called_once_with('css', 'body') is None
def test_execute_assert_element_visible_default(dummy_executor):
command = {
'type': 'assertElementVisible',
'locator': {
'type': 'css',
'value': 'body'
},
}
dummy_executor.navigation.page.find_element.return_value.visible = True
dummy_executor.execute_command(command)
dummy_executor \
.navigation \
.page \
.find_element \
.assert_called_once_with('css', 'body') is None
def test_execute_assert_element_visible_negated(dummy_executor):
command = {
'type': 'assertElementVisible',
'locator': {
'type': 'css',
'value': 'body'
},
'negated': False,
}
dummy_executor.navigation.page.find_element.return_value.visible = True
dummy_executor.execute_command(command)
dummy_executor \
.navigation \
.page \
.find_element \
.assert_called_once_with('css', 'body') is None
def test_execute_assert_element_visible_negated_false(dummy_executor):
command = {
'type': 'assertElementVisible',
'locator': {
'type': 'css',
'value': 'body'
},
'negated': False,
}
dummy_executor.navigation.page.find_element.return_value.visible = False
with pytest.raises(AssertionError):
dummy_executor.execute_command(command)
dummy_executor \
.navigation \
.page \
.find_element \
.assert_called_once_with('css', 'body') is None
def test_execute_assert_element_visible_negated_true(dummy_executor):
command = {
'type': 'assertElementVisible',
'locator': {
'type': 'css',
'value': 'body'
},
'negated': True,
}
dummy_executor.navigation.page.find_element.return_value.visible = True
with pytest.raises(AssertionError):
dummy_executor.execute_command(command)
dummy_executor \
.navigation \
.page \
.find_element \
.assert_called_once_with('css', 'body') is None
def test_execute_send_keys(dummy_executor):
from selenium.webdriver.common.keys import Keys
command = {
'type': 'sendKeysToElement',
'locator': {
'type': 'css',
'value': 'body'
},
'text': 'ENTER',
}
dummy_executor.execute_command(command)
dummy_executor \
.navigation \
.page \
.find_element \
.assert_called_once_with('css', 'body') is None
dummy_executor \
.navigation \
.page \
.find_element \
.return_value \
._element \
.send_keys \
.assert_called_once_with(getattr(Keys, 'ENTER'))
def test_execute_send_keys_bad(dummy_executor):
command = {
'type': 'sendKeysToElement',
'locator': {
'type': 'css',
'value': 'body'
},
'text': 'ENTERxxx',
}
with pytest.raises(ValueError):
dummy_executor.execute_command(command)
def test_execute_pause(dummy_executor):
command = {
'type': 'pause',
'waitTime': '1500',
}
now = datetime.now()
dummy_executor.execute_command(command)
now_now = datetime.now()
future_date = now + timedelta(milliseconds=1500)
assert now_now >= future_date
def test_execute_pause_int(dummy_executor):
command = {
'type': 'pause',
'waitTime': 1500,
}
now = datetime.now()
dummy_executor.execute_command(command)
now_now = datetime.now()
future_date = now + timedelta(milliseconds=1500)
assert now_now >= future_date
def test_execute_pause_bad(dummy_executor):
command = {
'type': 'pause',
'waitTime': 'adsf',
}
with pytest.raises(ValueError):
dummy_executor.execute_command(command)
def test_execute_store_eval(dummy_executor):
command = {
'type': 'storeEval',
'variable': 'TAG_NAME',
'script': 'document.body.tagName',
}
assert 'TAG_NAME' not in dummy_executor.variables
dummy_executor \
.navigation \
.page \
.driver \
.evaluate_script \
.return_value = 'BODY'
dummy_executor.execute_command(command)
assert dummy_executor.variables['TAG_NAME'] == 'BODY'
def test_execute_store_eval_param(dummy_executor):
command = {
'type': 'storeEval',
'variable': 'DYNAMIC',
'script': '"$foo" + "$foo"',
}
assert 'DYNAMIC' not in dummy_executor.variables
assert 'foo' in dummy_executor.variables
assert dummy_executor.variables['foo'] == 'bar'
dummy_executor.execute_command(command)
dummy_executor \
.navigation \
.page \
.driver \
.evaluate_script \
.assert_called_once_with('"bar" + "bar"')
def test_execute_eval(dummy_executor):
command = {
'type': 'eval',
'script': '"$foo" + "$foo"',
}
assert dummy_executor.variables['foo'] == 'bar'
dummy_executor.execute_command(command)
dummy_executor \
.navigation \
.page \
.driver \
.evaluate_script \
.assert_called_once_with('"bar" + "bar"')
def test_execute_verify_eval(dummy_executor):
command = {
'type': 'verifyEval',
'value': 'result',
'script': '"res" + "ult"',
}
dummy_executor \
.navigation \
.page \
.driver \
.evaluate_script \
.return_value = 'result'
dummy_executor.execute_command(command)
dummy_executor \
.navigation \
.page \
.driver \
.evaluate_script \
.assert_called_once_with('"res" + "ult"')
def test_execute_verify_eval_false(dummy_executor):
command = {
'type': 'verifyEval',
'value': 'result',
'script': '"res" + "ult"',
}
dummy_executor \
.navigation \
.page \
.driver \
.evaluate_script \
.return_value = 'resultXXX'
with pytest.raises(AssertionError):
dummy_executor.execute_command(command)
def test_execute_verify_eval_param(dummy_executor):
command = {
'type': 'verifyEval',
'value': 'resultbar',
'script': '"res" + "ult" + "$foo"',
}
dummy_executor \
.navigation \
.page \
.driver \
.evaluate_script \
.return_value = 'resultbar'
dummy_executor.execute_command(command)
dummy_executor \
.navigation \
.page \
.driver \
.evaluate_script \
.assert_called_once_with('"res" + "ult" + "bar"')
def test_execute_wait_until_condition(dummy_executor):
command = {
'type': 'waitUntilCondition',
'script': "document.body.getAttribute('id')",
}
dummy_executor.execute_command(command)
    assert dummy_executor \
        .navigation \
        .page \
        .wait \
        .until \
        .called
def test_execute_wait_for_element_present(dummy_executor):
command = {
'type': 'waitForElementPresent',
'locator': {
'type': 'css',
'value': 'body'
},
}
def _until(func):
func(dummy_executor.navigation.page.driver)
dummy_executor \
.navigation \
.page \
.find_element \
.return_value \
.visible = True
dummy_executor \
.navigation \
.page \
.wait \
.until \
.side_effect = _until
dummy_executor.execute_command(command)
    assert dummy_executor \
        .navigation \
        .page \
        .wait \
        .until \
        .called
dummy_executor \
.navigation \
.page \
.find_element \
.assert_called_once_with('css', 'body') is None
def test_execute_wait_for_element_visible(dummy_executor):
command = {
'type': 'waitForElementVisible',
'locator': {
'type': 'css',
'value': 'body'
},
}
def _until(func):
func(dummy_executor.navigation.page.driver)
dummy_executor \
.navigation \
.page \
.find_element \
.return_value \
.visible = True
dummy_executor \
.navigation \
.page \
.wait \
.until \
.side_effect = _until
dummy_executor.execute_command(command)
    assert dummy_executor \
        .navigation \
        .page \
        .wait \
        .until \
        .called
dummy_executor \
.navigation \
.page \
.find_element \
.assert_called_once_with('css', 'body') is None
def test_execute_verify_text_default(dummy_executor):
command = {
'type': 'verifyText',
'locator': {
'type': 'css',
'value': '.my-item'
},
'text': 'a text',
}
dummy_executor \
.navigation \
.page \
.find_element \
.return_value \
.text = 'hi, this is a text!'
dummy_executor.execute_command(command)
def test_execute_verify_text(dummy_executor):
command = {
'type': 'verifyText',
'locator': {
'type': 'css',
'value': '.my-item'
},
'text': 'a text',
'negated': False
}
dummy_executor \
.navigation \
.page \
.find_element \
.return_value \
.text = 'hi, this is a text!'
dummy_executor.execute_command(command)
def test_execute_verify_text_negated(dummy_executor):
command = {
'type': 'verifyText',
'locator': {
'type': 'css',
'value': '.my-item'
},
'text': 'a text',
'negated': True
}
dummy_executor \
.navigation \
.page \
.find_element \
.return_value \
.text = 'hi, this is a text!'
with pytest.raises(AssertionError):
dummy_executor.execute_command(command)
def test_execute_verify_text_false(dummy_executor):
command = {
'type': 'verifyText',
'locator': {
'type': 'css',
'value': '.my-item'
},
'text': 'a text',
}
dummy_executor \
.navigation \
.page \
.find_element \
.return_value \
.text = 'hi, this is another text!'
with pytest.raises(AssertionError):
dummy_executor.execute_command(command)
def test_new_provider_custom_command(dummy_executor):
command = {'type': 'newCommand', 'provider': 'newprovider'}
dummy_provider = mock.MagicMock()
with pytest.raises(ValueError):
dummy_executor.execute_command(command)
dummy_executor.register_command_provider(
dummy_provider, 'newprovider')
# execute new custom command
dummy_executor.execute_command(command)
assert dummy_provider.assert_called_once_with(dummy_executor) is None
assert dummy_provider \
.return_value \
.command_newCommand \
.assert_called_once_with(command) is None
def test_splinter_execute_includes(dummy_executor):
execute_command_mock = mock.MagicMock()
dummy_executor.execute_command = execute_command_mock
json_data = {
'steps': [
{'type': 'include', 'provider': 'login.json'},
{'type': 'get', 'url': 'http://2'}
]
}
dummy_executor.execute(json_data)
calls = [
mock.call(json_data['steps'][0]),
mock.call(json_data['steps'][1]),
]
assert dummy_executor.execute_command.assert_has_calls(
calls, any_order=False) is None
def test_include(play_json, test_run_identifier):
json_data = {
"steps": [
{"provider": "included-scenario.json", "type": "include"},
{"type": "get", "url": "http://2"},
{"type": "get", "url": "http://{0}".format(test_run_identifier)}
]
}
play_json.execute(json_data)
calls = [
mock.call('http://'),
mock.call('http://2'),
mock.call('http://{0}'.format(test_run_identifier)),
]
assert play_json \
.navigation \
.page \
.driver_adapter \
.open \
.assert_has_calls(
calls, any_order=False) is None
def test_include_string(play_json, test_run_identifier):
json_data = """
{
"steps": [
{"provider": "included-scenario.json", "type": "include"},
{"type": "get", "url": "http://2"},
{"type": "get", "url": "http://$test_run_identifier"}
]
}
"""
play_json.execute(json_data)
calls = [
mock.call('http://'),
mock.call('http://2'),
mock.call('http://{0}'.format(test_run_identifier)),
]
assert play_json \
.navigation \
.page \
.driver_adapter \
.open \
.assert_has_calls(
calls, any_order=False) is None
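# The fixtures used above (bdd_vars, parametrizer_class, dummy_executor,
# play_json, test_run_identifier) are defined in the suite's conftest.py.
# A hedged sketch of a minimal dummy_executor fixture (the real one may
# differ; PlayEngine's signature matches its use in the constructor test):
#
#   @pytest.fixture
#   def dummy_executor(bdd_vars, parametrizer_class):
#       from pytest_play.engine import PlayEngine
#       navigation = mock.MagicMock()  # stand-in for the splinter navigation
#       return PlayEngine(navigation, bdd_vars, parametrizer_class)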
| 25.214773 | 78 | 0.572987 | [remaining per-file quality-signal values omitted] | 0 | 7 |
| f3bb965bda607f68aa2976348e5aeb2c00347d49 | 20,018 | py | Python | ninja_extra/controllers/route/__init__.py | eadwinCode/django-ninja-extra | 16246c466ab8895ba1bf29d69f3d3e9337031edd | ["MIT"] | 43 | 2021-09-09T14:20:59.000Z | 2022-03-28T00:38:52.000Z | 6 | 2022-01-04T10:53:11.000Z | 2022-03-28T19:53:46.000Z | null | null | null |
import inspect
from typing import Any, List, Optional, Type, Union, cast
from ninja.constants import NOT_SET
from ninja.signature import is_async
from ninja.types import TCallable
from ninja_extra.controllers.response import ControllerResponse
from ninja_extra.permissions import BasePermission
from ninja_extra.schemas import RouteParameter
from .route_functions import AsyncRouteFunction, RouteFunction
POST = "POST"
PUT = "PUT"
PATCH = "PATCH"
DELETE = "DELETE"
GET = "GET"
ROUTE_METHODS = [POST, PUT, PATCH, DELETE, GET]
class RouteInvalidParameterException(Exception):
pass
def http_get(
path: str = "",
*,
auth: Any = NOT_SET,
response: Union[Any, List[Any]] = NOT_SET,
operation_id: Optional[str] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
tags: Optional[List[str]] = None,
deprecated: Optional[bool] = None,
by_alias: bool = False,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
url_name: Optional[str] = None,
include_in_schema: bool = True,
permissions: Optional[List[Type[BasePermission]]] = None,
) -> "Route":
"""
A GET Operation method decorator
    e.g.
```python
@http_get()
def get_operation(self):
...
```
    :param path: unique endpoint path string
    :param auth: endpoint authentication method. default: `NOT_SET`
    :param response: `dict[status_code, schema]` or `Schema` used to validate the returned response. default: `None`
    :param operation_id: unique id that distinguishes the `operation` in a path view. default: `None`
    :param summary: describes your endpoint. default: `None`
    :param description: additional description of your endpoint. default: `None`
    :param tags: list of strings for grouping endpoints, used only for documentation purposes. default: `None`
    :param deprecated: declares an endpoint deprecated. default: `None`
    :param by_alias: pydantic schema filter applied to the `response` schema object. default: `False`
    :param exclude_unset: pydantic schema filter applied to the `response` schema object. default: `False`
    :param exclude_defaults: pydantic schema filter applied to the `response` schema object. default: `False`
    :param exclude_none: pydantic schema filter applied to the `response` schema object. default: `False`
    :param url_name: a name for the endpoint that can be resolved via django's `reverse` function. default: `None`
    :param include_in_schema: indicates whether the endpoint should appear in the swagger documentation
    :param permissions: collection of permission classes. default: `None`
:return: Route[GET]
"""
return Route(
path,
[GET],
auth=auth,
response=response,
operation_id=operation_id,
summary=summary,
description=description,
tags=tags,
deprecated=deprecated,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
url_name=url_name,
include_in_schema=include_in_schema,
permissions=permissions,
)
def http_post(
path: str = "",
*,
auth: Any = NOT_SET,
response: Union[Any, List[Any]] = NOT_SET,
operation_id: Optional[str] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
tags: Optional[List[str]] = None,
deprecated: Optional[bool] = None,
by_alias: bool = False,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
url_name: Optional[str] = None,
include_in_schema: bool = True,
permissions: Optional[List[Type[BasePermission]]] = None,
) -> "Route":
"""
A POST Operation method decorator
    e.g.
```python
@http_post()
def post_operation(self, create_schema: Schema):
...
```
    :param path: unique endpoint path string
    :param auth: endpoint authentication method. default: `NOT_SET`
    :param response: `dict[status_code, schema]` or `Schema` used to validate the returned response. default: `None`
    :param operation_id: unique id that distinguishes the `operation` in a path view. default: `None`
    :param summary: describes your endpoint. default: `None`
    :param description: additional description of your endpoint. default: `None`
    :param tags: list of strings for grouping endpoints, used only for documentation purposes. default: `None`
    :param deprecated: declares an endpoint deprecated. default: `None`
    :param by_alias: pydantic schema filter applied to the `response` schema object. default: `False`
    :param exclude_unset: pydantic schema filter applied to the `response` schema object. default: `False`
    :param exclude_defaults: pydantic schema filter applied to the `response` schema object. default: `False`
    :param exclude_none: pydantic schema filter applied to the `response` schema object. default: `False`
    :param url_name: a name for the endpoint that can be resolved via django's `reverse` function. default: `None`
    :param include_in_schema: indicates whether the endpoint should appear in the swagger documentation
    :param permissions: collection of permission classes. default: `None`
:return: Route[POST]
"""
return Route(
path,
[POST],
auth=auth,
response=response,
operation_id=operation_id,
summary=summary,
description=description,
tags=tags,
deprecated=deprecated,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
url_name=url_name,
include_in_schema=include_in_schema,
permissions=permissions,
)
def http_delete(
path: str = "",
*,
auth: Any = NOT_SET,
response: Union[Any, List[Any]] = NOT_SET,
operation_id: Optional[str] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
tags: Optional[List[str]] = None,
deprecated: Optional[bool] = None,
by_alias: bool = False,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
url_name: Optional[str] = None,
include_in_schema: bool = True,
permissions: Optional[List[Type[BasePermission]]] = None,
) -> "Route":
"""
A DELETE Operation method decorator
    e.g.
```python
@http_delete('/{int:some_id}')
def delete_operation(self, some_id: int):
...
```
    :param path: unique endpoint path string
    :param auth: endpoint authentication method. default: `NOT_SET`
    :param response: `dict[status_code, schema]` or `Schema` used to validate the returned response. default: `None`
    :param operation_id: unique id that distinguishes the `operation` in a path view. default: `None`
    :param summary: describes your endpoint. default: `None`
    :param description: additional description of your endpoint. default: `None`
    :param tags: list of strings for grouping endpoints, used only for documentation purposes. default: `None`
    :param deprecated: declares an endpoint deprecated. default: `None`
    :param by_alias: pydantic schema filter applied to the `response` schema object. default: `False`
    :param exclude_unset: pydantic schema filter applied to the `response` schema object. default: `False`
    :param exclude_defaults: pydantic schema filter applied to the `response` schema object. default: `False`
    :param exclude_none: pydantic schema filter applied to the `response` schema object. default: `False`
    :param url_name: a name for the endpoint that can be resolved via django's `reverse` function. default: `None`
    :param include_in_schema: indicates whether the endpoint should appear in the swagger documentation
    :param permissions: collection of permission classes. default: `None`
:return: Route[DELETE]
"""
return Route(
path,
[DELETE],
auth=auth,
response=response,
operation_id=operation_id,
summary=summary,
description=description,
tags=tags,
deprecated=deprecated,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
url_name=url_name,
include_in_schema=include_in_schema,
permissions=permissions,
)
def http_patch(
path: str = "",
*,
auth: Any = NOT_SET,
response: Union[Any, List[Any]] = NOT_SET,
operation_id: Optional[str] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
tags: Optional[List[str]] = None,
deprecated: Optional[bool] = None,
by_alias: bool = False,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
url_name: Optional[str] = None,
include_in_schema: bool = True,
permissions: Optional[List[Type[BasePermission]]] = None,
) -> "Route":
"""
A PATCH Operation method decorator
    e.g.
```python
@http_patch('/{int:some_id}')
def patch_operation(self, some_id: int):
...
```
    :param path: unique endpoint path string
    :param auth: endpoint authentication method. default: `NOT_SET`
    :param response: `dict[status_code, schema]` or `Schema` used to validate the returned response. default: `None`
    :param operation_id: unique id that distinguishes the `operation` in a path view. default: `None`
    :param summary: describes your endpoint. default: `None`
    :param description: additional description of your endpoint. default: `None`
    :param tags: list of strings for grouping endpoints, used only for documentation purposes. default: `None`
    :param deprecated: declares an endpoint deprecated. default: `None`
    :param by_alias: pydantic schema filter applied to the `response` schema object. default: `False`
    :param exclude_unset: pydantic schema filter applied to the `response` schema object. default: `False`
    :param exclude_defaults: pydantic schema filter applied to the `response` schema object. default: `False`
    :param exclude_none: pydantic schema filter applied to the `response` schema object. default: `False`
    :param url_name: a name for the endpoint that can be resolved via django's `reverse` function. default: `None`
    :param include_in_schema: indicates whether the endpoint should appear in the swagger documentation
    :param permissions: collection of permission classes. default: `None`
:return: Route[PATCH]
"""
return Route(
path,
[PATCH],
auth=auth,
response=response,
operation_id=operation_id,
summary=summary,
description=description,
tags=tags,
deprecated=deprecated,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
url_name=url_name,
include_in_schema=include_in_schema,
permissions=permissions,
)
def http_put(
path: str = "",
*,
auth: Any = NOT_SET,
response: Union[Any, List[Any]] = NOT_SET,
operation_id: Optional[str] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
tags: Optional[List[str]] = None,
deprecated: Optional[bool] = None,
by_alias: bool = False,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
url_name: Optional[str] = None,
include_in_schema: bool = True,
permissions: Optional[List[Type[BasePermission]]] = None,
) -> "Route":
"""
A PUT Operation method decorator
    e.g.
```python
@http_put('/{int:some_id}')
def put_operation(self, some_id: int):
...
```
    :param path: unique endpoint path string
    :param auth: endpoint authentication method. default: `NOT_SET`
    :param response: `dict[status_code, schema]` or `Schema` used to validate the returned response. default: `None`
    :param operation_id: unique id that distinguishes the `operation` in a path view. default: `None`
    :param summary: describes your endpoint. default: `None`
    :param description: additional description of your endpoint. default: `None`
    :param tags: list of strings for grouping endpoints, used only for documentation purposes. default: `None`
    :param deprecated: declares an endpoint deprecated. default: `None`
    :param by_alias: pydantic schema filter applied to the `response` schema object. default: `False`
    :param exclude_unset: pydantic schema filter applied to the `response` schema object. default: `False`
    :param exclude_defaults: pydantic schema filter applied to the `response` schema object. default: `False`
    :param exclude_none: pydantic schema filter applied to the `response` schema object. default: `False`
    :param url_name: a name for the endpoint that can be resolved via django's `reverse` function. default: `None`
    :param include_in_schema: indicates whether the endpoint should appear in the swagger documentation
    :param permissions: collection of permission classes. default: `None`
:return: Route[PUT]
"""
return Route(
path,
[PUT],
auth=auth,
response=response,
operation_id=operation_id,
summary=summary,
description=description,
tags=tags,
deprecated=deprecated,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
url_name=url_name,
include_in_schema=include_in_schema,
permissions=permissions,
)
def http_generic(
path: str = "",
*,
methods: List[str],
auth: Any = NOT_SET,
response: Union[Any, List[Any]] = NOT_SET,
operation_id: Optional[str] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
tags: Optional[List[str]] = None,
deprecated: Optional[bool] = None,
by_alias: bool = False,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
url_name: Optional[str] = None,
include_in_schema: bool = True,
permissions: Optional[List[Type[BasePermission]]] = None,
) -> "Route":
"""
    A custom operation method decorator for creating a route with more than one operation
    e.g.
```python
@http_generic('', methods=['POST', 'GET'])
def list_create(self, some_schema: Optional[Schema] = None):
...
```
    :param path: unique endpoint path string
    :param methods: list of operations `GET, PUT, PATCH, DELETE, POST`
    :param auth: endpoint authentication method. default: `NOT_SET`
    :param response: `dict[status_code, schema]` or `Schema` used to validate the returned response. default: `None`
    :param operation_id: unique id that distinguishes the `operation` in a path view. default: `None`
    :param summary: describes your endpoint. default: `None`
    :param description: additional description of your endpoint. default: `None`
    :param tags: list of strings for grouping endpoints, used only for documentation purposes. default: `None`
    :param deprecated: declares an endpoint deprecated. default: `None`
    :param by_alias: pydantic schema filter applied to the `response` schema object. default: `False`
    :param exclude_unset: pydantic schema filter applied to the `response` schema object. default: `False`
    :param exclude_defaults: pydantic schema filter applied to the `response` schema object. default: `False`
    :param exclude_none: pydantic schema filter applied to the `response` schema object. default: `False`
    :param url_name: a name for the endpoint that can be resolved via django's `reverse` function. default: `None`
    :param include_in_schema: indicates whether the endpoint should appear in the swagger documentation
    :param permissions: collection of permission classes. default: `None`
    :return: Route with the given `methods`
"""
return Route(
path,
methods,
auth=auth,
response=response,
operation_id=operation_id,
summary=summary,
description=description,
tags=tags,
deprecated=deprecated,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
url_name=url_name,
include_in_schema=include_in_schema,
permissions=permissions,
)
class Route(object):
"""
    APIController route definition method decorator
    that converts class instance methods to `RouteFunction(s)` / `AsyncRouteFunction(s)`.
"""
permissions: Optional[Optional[List[Type[BasePermission]]]] = None
get = http_get
patch = http_patch
put = http_put
delete = http_delete
post = http_post
generic = http_generic
def __init__(
self,
path: str,
methods: List[str],
*,
auth: Any = NOT_SET,
response: Union[Any, List[Any]] = NOT_SET,
operation_id: Optional[str] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
tags: Optional[List[str]] = None,
deprecated: Optional[bool] = None,
by_alias: bool = False,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
url_name: Optional[str] = None,
include_in_schema: bool = True,
permissions: Optional[List[Type[BasePermission]]] = None,
) -> None:
if not isinstance(methods, list):
raise RouteInvalidParameterException("methods must be a list")
methods = list(map(lambda m: m.upper(), methods))
not_valid_methods = list(set(methods) - set(ROUTE_METHODS))
if not_valid_methods:
raise RouteInvalidParameterException(
f"Method {','.join(not_valid_methods)} not allowed"
)
_response = response
if (
inspect.isclass(response)
and issubclass(response, ControllerResponse) # type:ignore
) or isinstance(response, ControllerResponse):
response = cast(ControllerResponse, response)
_response = {response.status_code: response.get_schema()}
elif isinstance(response, list):
_response_computed = dict()
for item in response:
if (
inspect.isclass(item) and issubclass(item, ControllerResponse)
) or isinstance(item, ControllerResponse):
_response_computed.update({item.status_code: item.get_schema()})
elif isinstance(item, dict):
_response_computed.update(item)
elif isinstance(item, tuple):
_response_computed.update({item[0]: item[1]})
if not _response_computed:
raise RouteInvalidParameterException(
f"Invalid response configuration: {response}"
)
_response = _response_computed
ninja_route_params = RouteParameter(
path=path,
methods=methods,
auth=auth,
response=_response,
operation_id=operation_id,
summary=summary,
description=description,
tags=tags,
deprecated=deprecated,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
url_name=url_name,
include_in_schema=include_in_schema,
)
self.route_params = ninja_route_params
self.is_async = False
self.permissions = permissions
def __call__(self, view_func: TCallable) -> RouteFunction:
route_function_class = RouteFunction
if is_async(view_func):
route_function_class = AsyncRouteFunction
self.view_func = view_func
return route_function_class(route=self)
route = Route
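# --- Usage sketch (illustrative addition, not part of the original file) ---
# A minimal, hedged example of how these route decorators are typically applied
# on a controller class. `UserController`, `APIController` and `UserSchema` are
# hypothetical names assumed purely for illustration.
#
# class UserController(APIController):
#     @route.get("/users/{user_id}", response=UserSchema)
#     def get_user(self, user_id: int):
#         ...
#
#     @route.generic("/users", methods=["GET", "POST"])
#     def list_or_create(self, payload: Optional[UserSchema] = None):
#         ...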
| 38.869903
| 117
| 0.673694
| 2,309
| 20,018
| 5.696405
| 0.074058
| 0.040143
| 0.051091
| 0.051091
| 0.820117
| 0.817532
| 0.803847
| 0.800882
| 0.800882
| 0.800882
| 0
| 0.000131
| 0.234389
| 20,018
| 514
| 118
| 38.945525
| 0.858084
| 0.442801
| 0
| 0.721875
| 0
| 0
| 0.015506
| 0.002759
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025
| false
| 0.003125
| 0.028125
| 0
| 0.103125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f3c9446e4dde13e8b75b75b094a48c7a9f2f6aa4
| 94
|
py
|
Python
|
Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-003/pg-3.5-ex-string.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-003/pg-3.5-ex-string.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-003/pg-3.5-ex-string.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
s = '100'
print(s)
s = 'abc1234-09232<>?323'
print(s)
s = 'abc 123'
print(s)
s = ' '
print(s)
| 10.444444
| 25
| 0.553191
| 18
| 94
| 2.888889
| 0.444444
| 0.461538
| 0.403846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.236842
| 0.191489
| 94
| 8
| 26
| 11.75
| 0.447368
| 0
| 0
| 0.5
| 0
| 0
| 0.319149
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
3441f211c43eefe838e3160a105247d063a0e2f4
| 6,371
|
py
|
Python
|
hivemind_bus_client/decorators.py
|
emphasize/hivemind_websocket_client
|
a6d6d11d2e5d28dd71433ffdb10e6363d3a6eb60
|
[
"Apache-2.0"
] | null | null | null |
hivemind_bus_client/decorators.py
|
emphasize/hivemind_websocket_client
|
a6d6d11d2e5d28dd71433ffdb10e6363d3a6eb60
|
[
"Apache-2.0"
] | 1
|
2022-01-20T16:09:06.000Z
|
2022-01-20T17:34:16.000Z
|
hivemind_bus_client/decorators.py
|
emphasize/hivemind_websocket_client
|
a6d6d11d2e5d28dd71433ffdb10e6363d3a6eb60
|
[
"Apache-2.0"
] | 2
|
2022-01-17T15:50:04.000Z
|
2022-02-02T00:23:52.000Z
|
from hivemind_bus_client.message import HiveMessageType
class HiveMessageListener:
def __init__(self, bus, message_type):
self.bus = bus
self.message_type = message_type
self._handlers = []
def _handler(self, message):
"""Receive response data."""
for handler in self._handlers:
handler(message)
self.bus.once(self.message_type, self._handler)
def listen(self):
self.bus.once(self.message_type, self._handler)
return self
def add_handler(self, handler):
self._handlers.append(handler)
def clear_handlers(self):
self._handlers = []
def shutdown(self):
self.bus.remove(self.message_type, self._handler)
class HivePayloadListener(HiveMessageListener):
def __init__(self, payload_type=HiveMessageType.THIRDPRTY, *args,
**kwargs):
super().__init__(*args, **kwargs)
self.payload_type = payload_type
def _handler(self, message):
"""Receive response data."""
if message.payload.msg_type == self.payload_type:
for handler in self._handlers:
handler(message.payload)
self.bus.once(self.message_type, self._handler)
def on_hive_message(message_type, bus):
# Begin wrapper
def wrapped_handler(func):
bus.on(message_type, func)
return func
return wrapped_handler
def on_mycroft_message(payload_type, bus):
# Begin wrapper
def wrapped_handler(func):
waiter = HivePayloadListener(bus=bus, payload_type=payload_type,
message_type=HiveMessageType.BUS)
waiter.add_handler(func)
waiter.listen()
func.shutdown = waiter.shutdown
return func
return wrapped_handler
def on_shared_bus(payload_type, bus):
# Begin wrapper
def wrapped_handler(func):
waiter = HivePayloadListener(bus=bus, payload_type=payload_type,
message_type=HiveMessageType.SHARED_BUS)
waiter.add_handler(func)
waiter.listen()
func.shutdown = waiter.shutdown
return func
return wrapped_handler
def on_broadcast(payload_type, bus):
# Begin wrapper
def wrapped_handler(func):
waiter = HivePayloadListener(bus=bus, payload_type=payload_type,
message_type=HiveMessageType.BROADCAST)
waiter.add_handler(func)
waiter.listen()
func.shutdown = waiter.shutdown
return func
return wrapped_handler
def on_ping(payload_type, bus):
# Begin wrapper
def wrapped_handler(func):
waiter = HivePayloadListener(bus=bus, payload_type=payload_type,
message_type=HiveMessageType.PING)
waiter.add_handler(func)
waiter.listen()
func.shutdown = waiter.shutdown
return func
return wrapped_handler
def on_propagate(payload_type, bus):
# Begin wrapper
def wrapped_handler(func):
waiter = HivePayloadListener(bus=bus, payload_type=payload_type,
message_type=HiveMessageType.PROPAGATE)
waiter.add_handler(func)
waiter.listen()
func.shutdown = waiter.shutdown
return func
return wrapped_handler
def on_escalate(payload_type, bus):
# Begin wrapper
def wrapped_handler(func):
waiter = HivePayloadListener(bus=bus, payload_type=payload_type,
message_type=HiveMessageType.ESCALATE)
waiter.add_handler(func)
waiter.listen()
func.shutdown = waiter.shutdown
return func
return wrapped_handler
def on_handshake(payload_type, bus):
# Begin wrapper
def wrapped_handler(func):
waiter = HivePayloadListener(bus=bus, payload_type=payload_type,
message_type=HiveMessageType.HANDSHAKE)
waiter.add_handler(func)
waiter.listen()
func.shutdown = waiter.shutdown
return func
return wrapped_handler
def on_hello(payload_type, bus):
# Begin wrapper
def wrapped_handler(func):
waiter = HivePayloadListener(bus=bus, payload_type=payload_type,
message_type=HiveMessageType.HELLO)
waiter.add_handler(func)
waiter.listen()
func.shutdown = waiter.shutdown
return func
return wrapped_handler
def on_cascade(payload_type, bus):
# Begin wrapper
def wrapped_handler(func):
waiter = HivePayloadListener(bus=bus, payload_type=payload_type,
message_type=HiveMessageType.CASCADE)
waiter.add_handler(func)
waiter.listen()
func.shutdown = waiter.shutdown
return func
return wrapped_handler
def on_rendezvous(payload_type, bus):
# Begin wrapper
def wrapped_handler(func):
waiter = HivePayloadListener(bus=bus, payload_type=payload_type,
message_type=HiveMessageType.RENDEZVOUS)
waiter.add_handler(func)
waiter.listen()
func.shutdown = waiter.shutdown
return func
return wrapped_handler
def on_registry_opcode(payload_type, bus):
# Begin wrapper
def wrapped_handler(func):
waiter = HivePayloadListener(bus=bus, payload_type=payload_type,
message_type=HiveMessageType.THIRDPRTY)
waiter.add_handler(func)
waiter.listen()
func.shutdown = waiter.shutdown
return func
return wrapped_handler
# low level
def on_third_party(bus):
# Begin wrapper
def wrapped_handler(func):
waiter = HiveMessageListener(bus=bus,
message_type=HiveMessageType.THIRDPRTY)
waiter.add_handler(func)
waiter.listen()
func.shutdown = waiter.shutdown
return func
return wrapped_handler
def on_payload(message_type, payload_type, bus):
# Begin wrapper
def wrapped_handler(func):
waiter = HivePayloadListener(bus=bus, payload_type=payload_type,
message_type=message_type)
waiter.add_handler(func)
waiter.listen()
func.shutdown = waiter.shutdown
return func
return wrapped_handler
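# --- Usage sketch (illustrative addition, not part of the original file) ---
# A hedged example of how these decorator factories are typically wired up.
# `bus` is assumed to be an already-connected client exposing on()/once()/remove(),
# and the payload type below is hypothetical.
#
# @on_mycroft_message(payload_type="speak", bus=bus)
# def handle_speak(payload):
#     print(payload)
#
# # Every factory attaches a listener and exposes its shutdown hook:
# handle_speak.shutdown()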
| 28.828054
| 77
| 0.63742
| 671
| 6,371
| 5.824143
| 0.087928
| 0.11259
| 0.113101
| 0.064483
| 0.80911
| 0.802457
| 0.802457
| 0.753582
| 0.723132
| 0.702661
| 0
| 0
| 0.288181
| 6,371
| 220
| 78
| 28.959091
| 0.861742
| 0.039554
| 0
| 0.688742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.238411
| false
| 0
| 0.006623
| 0
| 0.450331
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1b1e910859429ebd5ef2f3706111c1272d3b9847
| 93
|
py
|
Python
|
skgpytorch/metrics/__init__.py
|
palak-purohit/skgpytorch
|
f1143a0f6a4858be4485ff465b3d6da7b28067f0
|
[
"MIT"
] | 5
|
2022-01-16T00:12:48.000Z
|
2022-03-04T12:59:26.000Z
|
skgpytorch/metrics/__init__.py
|
palak-purohit/skgpytorch
|
f1143a0f6a4858be4485ff465b3d6da7b28067f0
|
[
"MIT"
] | 3
|
2022-02-25T10:52:46.000Z
|
2022-03-18T12:30:51.000Z
|
skgpytorch/metrics/__init__.py
|
palak-purohit/skgpytorch
|
f1143a0f6a4858be4485ff465b3d6da7b28067f0
|
[
"MIT"
] | null | null | null |
from .metrics import negative_log_predictive_density
from .metrics import mean_squared_error
| 31
| 52
| 0.892473
| 13
| 93
| 6
| 0.769231
| 0.282051
| 0.435897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086022
| 93
| 2
| 53
| 46.5
| 0.917647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
1b6b0c7fb12cc41bda9a47aa1261c9944ec7b130
| 6,535
|
py
|
Python
|
tests/unit/protein_merger_test.py
|
MassDynamics/protein-inference
|
05cc9738a3fcd074d8e6789bb24979a9837082cf
|
[
"MIT"
] | 4
|
2020-11-25T03:08:07.000Z
|
2020-11-25T23:28:06.000Z
|
tests/unit/protein_merger_test.py
|
MassDynamics/protein-inference
|
05cc9738a3fcd074d8e6789bb24979a9837082cf
|
[
"MIT"
] | null | null | null |
tests/unit/protein_merger_test.py
|
MassDynamics/protein-inference
|
05cc9738a3fcd074d8e6789bb24979a9837082cf
|
[
"MIT"
] | 1
|
2020-11-25T04:52:04.000Z
|
2020-11-25T04:52:04.000Z
|
import unittest
from protein_inference.problem_network import ProblemNetwork
from protein_inference.inference.protein_merger import ProteinMerger
import networkx as nx
from copy import deepcopy
class ProteinMergerTest(unittest.TestCase):
def test_get_mapping_basic(self):
g = nx.Graph()
g.add_nodes_from(["1"], protein = 0)
g.add_nodes_from(["2"], protein = 0)
g.add_nodes_from(["4"], protein = 1)
g.add_nodes_from(["5"], protein = 1)
g.add_edges_from([("1","4"),("2","4")], score = 1)
g.add_edges_from([("2","5")], score = 10)
pn = ProblemNetwork(g)
df = ProteinMerger().get_mapping(pn)
df = df.sort_values("protein")
self.assertEqual(df["peptides"][0],["1","2"])
self.assertEqual(df["peptides"][1],["2"])
def test_get_mapping_indistinguishable(self):
g = nx.Graph()
g.add_nodes_from(["1"], protein = 0)
g.add_nodes_from(["2"], protein = 0)
g.add_nodes_from(["4"], protein = 1)
g.add_nodes_from(["5"], protein = 1)
g.add_edges_from([("1","4"),("2","4")], score = 1)
g.add_edges_from([("2","5"),("1","5")], score = 10)
pn = ProblemNetwork(g)
df = ProteinMerger().get_mapping(pn)
df = df.sort_values("protein")
self.assertEqual(df["peptides"][0],["1","2"])
self.assertEqual(df["protein"][0],["4","5"])
def test_get_named_proteins_basic(self):
g = nx.Graph()
g.add_nodes_from(["1"], protein = 0)
g.add_nodes_from(["2"], protein = 0)
g.add_nodes_from(["4"], protein = 1)
g.add_nodes_from(["5"], protein = 1)
g.add_edges_from([("1","4"),("2","4")], score = 1)
g.add_edges_from([("2","5")], score = 10)
g.nodes["4"]["score"] = 2
g.nodes["5"]["score"] = 0
pn = ProblemNetwork(g)
df = ProteinMerger().get_named_proteins(pn)
df = df.sort_values("protein")
self.assertEqual(df["named"][0],"4")
self.assertEqual(df["named"][1],"5")
def test_get_named_proteins_indistinguishable(self):
g = nx.Graph()
g.add_nodes_from(["1"], protein = 0)
g.add_nodes_from(["2"], protein = 0)
g.add_nodes_from(["4"], protein = 1)
g.add_nodes_from(["5"], protein = 1)
g.add_edges_from([("1","4"),("2","4")], score = 1)
g.add_edges_from([("2","5"),("1","5")], score = 10)
g.nodes["4"]["score"] = 2
g.nodes["5"]["score"] = 0
pn = ProblemNetwork(g)
df = ProteinMerger().get_named_proteins(pn)
df = df.sort_values("protein")
self.assertEqual(df["named"][0],"4")
def test_get_named_proteins_indistinguishable_tie(self):
g = nx.Graph()
g.add_nodes_from(["1"], protein = 0)
g.add_nodes_from(["2"], protein = 0)
g.add_nodes_from(["4"], protein = 1)
g.add_nodes_from(["5"], protein = 1)
g.add_edges_from([("1","4"),("2","4")], score = 1)
g.add_edges_from([("2","5"),("1","5")], score = 10)
g.nodes["4"]["score"] = 2
g.nodes["5"]["score"] = 2
pn = ProblemNetwork(g)
df = ProteinMerger().get_named_proteins(pn)
df = df.sort_values("protein")
self.assertEqual(df["named"][0],"4")
def test_run_no_merges(self):
g = nx.Graph()
g.add_nodes_from(["1"], protein = 0)
g.add_nodes_from(["2"], protein = 0)
g.add_nodes_from(["4"], protein = 1)
g.add_nodes_from(["5"], protein = 1)
g.add_edges_from([("1","4"),("2","4")], score = 1)
g.add_edges_from([("2","5")], score = 10)
g.nodes["4"]["score"] = 2
g.nodes["5"]["score"] = 0
pn = ProblemNetwork(g)
pn = ProteinMerger().run(pn)
self.assertEqual(len(pn.get_proteins()),2)
def test_run_a_merge(self):
g = nx.Graph()
g.add_nodes_from(["1"], protein = 0)
g.add_nodes_from(["2"], protein = 0)
g.add_nodes_from(["4"], protein = 1)
g.add_nodes_from(["5"], protein = 1)
g.add_edges_from([("1","4"),("2","4")], score = 1)
g.add_edges_from([("2","5"),("1","5")], score = 10)
g.nodes["4"]["score"] = 2
g.nodes["5"]["score"] = 0
pn = ProblemNetwork(g)
pn = ProteinMerger().run(pn)
self.assertEqual(len(pn.get_proteins()),1)
self.assertEqual(pn.get_proteins(),["4"])
def test_run_indistinguishable_label(self):
g = nx.Graph()
g.add_nodes_from(["1"], protein = 0)
g.add_nodes_from(["2"], protein = 0)
g.add_nodes_from(["4"], protein = 1)
g.add_nodes_from(["5"], protein = 1)
g.add_edges_from([("1","4"),("2","4")], score = 1)
g.add_edges_from([("2","5"),("1","5")], score = 10)
g.nodes["4"]["score"] = 2
g.nodes["5"]["score"] = 0
pn = ProblemNetwork(g)
pn = ProteinMerger().run(pn)
self.assertEqual(pn.network.nodes["4"]["indistinguishable"],["5"])
def test_run_isomorphic(self):
g = nx.Graph()
g.add_nodes_from(["1"], protein = 0)
g.add_nodes_from(["2"], protein = 0)
g.add_nodes_from(["4"], protein = 1)
g.add_nodes_from(["5"], protein = 1)
g.add_edges_from([("1","4"),("2","4")], score = 1)
g.add_edges_from([("2","5"),("1","5")], score = 10)
g.nodes["4"]["score"] = 2
g.nodes["5"]["score"] = 0
pn = ProblemNetwork(g)
pn2 = deepcopy(pn)
pn1 = ProteinMerger().run(pn)
pn2 = ProteinMerger().run(pn2)
self.assertTrue(nx.is_isomorphic(pn1.network,pn2.network))
def test_run_system_wide(self):
g = nx.Graph()
g.add_nodes_from(["1"], protein = 0)
g.add_nodes_from(["2"], protein = 0)
g.add_nodes_from(["4"], protein = 1)
g.add_nodes_from(["5"], protein = 1)
g.add_edges_from([("1","4"),("2","4")], score = 1)
g.add_edges_from([("2","5"),("1","5")], score = 10)
g.nodes["4"]["score"] = 2
g.nodes["5"]["score"] = 0
pn = ProblemNetwork(g)
pn2 = deepcopy(pn)
pns = []
pns.append(ProteinMerger().run(pn))
pns.append(ProteinMerger().run(pn2))
pn1_non_par = ProteinMerger().run(pn)
pn2_non_par = ProteinMerger().run(pn2)
self.assertTrue(nx.is_isomorphic(pns[0].network, pn1_non_par.network))
self.assertTrue(nx.is_isomorphic(pns[1].network, pn2_non_par.network))
| 33.341837
| 78
| 0.537261
| 920
| 6,535
| 3.616304
| 0.069565
| 0.072137
| 0.108206
| 0.156297
| 0.808837
| 0.808837
| 0.774271
| 0.774271
| 0.746017
| 0.746017
| 0
| 0.05234
| 0.254476
| 6,535
| 196
| 79
| 33.341837
| 0.630542
| 0
| 0
| 0.771812
| 0
| 0
| 0.050031
| 0
| 0
| 0
| 0
| 0
| 0.100671
| 1
| 0.067114
| false
| 0
| 0.033557
| 0
| 0.107383
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1b90f14795f0e8e4e538d25be033dbfebd2717ec
| 3,583
|
py
|
Python
|
tests/utils/test_cached_method.py
|
thatoneolib/senko
|
686d768f8bc0c69a874dba180abb85049ff473b9
|
[
"MIT"
] | null | null | null |
tests/utils/test_cached_method.py
|
thatoneolib/senko
|
686d768f8bc0c69a874dba180abb85049ff473b9
|
[
"MIT"
] | null | null | null |
tests/utils/test_cached_method.py
|
thatoneolib/senko
|
686d768f8bc0c69a874dba180abb85049ff473b9
|
[
"MIT"
] | null | null | null |
import time
import asyncio
import pytest
from utils import caching
# helpers
def nullkey(*args, **kwargs):
return None
# sync tests
def test_cached_method():
class Object:
def __init__(self, value):
self.value = value
self.cache = caching.Cache(128)
@caching.cached_method(lambda self: self.cache)
def get(self):
return self.value
o1 = Object(1)
assert o1.get() == 1
o1.value = 2
assert o1.get() == 1
o2 = Object(2)
assert o2.get() == 2
o2.value = 3
assert o2.get() == 2
def test_cached_method_args():
class Object:
def __init__(self, value):
self.value = value
self.cache = caching.Cache(128)
@caching.cached_method(lambda self: self.cache)
def get(self, arg):
return self.value
o1 = Object(1)
assert o1.get(1) == 1
o1.value = 2
assert o1.get(1) == 1
assert o1.get(2) == 2
o2 = Object(2)
assert o2.get(1) == 2
o2.value = 3
assert o2.get(1) == 2
assert o2.get(2) == 3
def test_cached_method_ignore_args():
class Object:
def __init__(self, value):
self.value = value
self.cache = caching.Cache(128)
@caching.cached_method(lambda self: self.cache, key=nullkey)
def get(self, arg):
return self.value
o1 = Object(1)
assert o1.get(1) == 1
assert o1.get(2) == 1
o1.value = 2
assert o1.get(1) == 1
assert o1.get(2) == 1
o2 = Object(1)
assert o2.get(1) == 1
assert o2.get(2) == 1
o2.value = 2
assert o2.get(1) == 1
assert o2.get(2) == 1
# async tests
def test_async_cached_method(event_loop):
class Object:
def __init__(self, value):
self.value = value
self.cache = caching.Cache(128)
@caching.cached_method(lambda self: self.cache)
async def get(self):
return self.value
o1 = Object(1)
assert asyncio.run(o1.get()) == 1
o1.value = 2
assert asyncio.run(o1.get()) == 1
o2 = Object(2)
assert asyncio.run(o2.get()) == 2
o2.value = 3
assert asyncio.run(o2.get()) == 2
def test_async_cached_method_args(event_loop):
class Object:
def __init__(self, value):
self.value = value
self.cache = caching.Cache(128)
@caching.cached_method(lambda self: self.cache)
async def get(self, arg):
return self.value
o1 = Object(1)
assert asyncio.run(o1.get(1)) == 1
o1.value = 2
assert asyncio.run(o1.get(1)) == 1
assert asyncio.run(o1.get(2)) == 2
o2 = Object(2)
assert asyncio.run(o2.get(1)) == 2
o2.value = 3
assert asyncio.run(o2.get(1)) == 2
assert asyncio.run(o2.get(2)) == 3
def test_async_cached_method_ignore_args(event_loop):
class Object:
def __init__(self, value):
self.value = value
self.cache = caching.Cache(128)
@caching.cached_method(lambda self: self.cache, key=nullkey)
async def get(self, arg):
return self.value
o1 = Object(1)
assert asyncio.run(o1.get(1)) == 1
assert asyncio.run(o1.get(2)) == 1
o1.value = 2
assert asyncio.run(o1.get(1)) == 1
assert asyncio.run(o1.get(2)) == 1
o2 = Object(1)
assert asyncio.run(o2.get(1)) == 1
assert asyncio.run(o2.get(2)) == 1
o2.value = 2
assert asyncio.run(o2.get(1)) == 1
assert asyncio.run(o2.get(2)) == 1
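# --- Illustrative sketch (addition, not part of the original test module) ---
# A minimal cached_method consistent with the behaviour the tests above
# exercise, assuming the Cache object supports `in`, `__getitem__` and
# `__setitem__`. The real utils.caching implementation may differ; this only
# restates the contract the tests pin down.
import functools
import inspect


def cached_method_sketch(cache_getter, key=lambda *a, **kw: (a, tuple(sorted(kw.items())))):
    def decorator(func):
        if inspect.iscoroutinefunction(func):
            @functools.wraps(func)
            async def async_wrapper(self, *args, **kwargs):
                cache = cache_getter(self)
                k = key(*args, **kwargs)
                if k not in cache:
                    # compute once, then serve later calls from the cache
                    cache[k] = await func(self, *args, **kwargs)
                return cache[k]
            return async_wrapper

        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            cache = cache_getter(self)
            k = key(*args, **kwargs)
            if k not in cache:
                cache[k] = func(self, *args, **kwargs)
            return cache[k]
        return wrapper
    return decorator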
| 23.418301
| 68
| 0.560982
| 513
| 3,583
| 3.812866
| 0.079922
| 0.0409
| 0.147239
| 0.056237
| 0.91411
| 0.872699
| 0.860941
| 0.825665
| 0.747444
| 0.716769
| 0
| 0.067152
| 0.310075
| 3,583
| 153
| 69
| 23.418301
| 0.72411
| 0.008373
| 0
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.315789
| 1
| 0.140351
| false
| 0
| 0.035088
| 0.035088
| 0.289474
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1b9b43557398b392d4eb10d015b3c6c49ad56a9d
| 154
|
py
|
Python
|
python/testData/completion/heavyStarPropagation/lib/_pkg1/_pkg1_1/_pkg1_1_1/_pkg1_1_1_0/_pkg1_1_1_0_0/__init__.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/completion/heavyStarPropagation/lib/_pkg1/_pkg1_1/_pkg1_1_1/_pkg1_1_1_0/_pkg1_1_1_0_0/__init__.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/completion/heavyStarPropagation/lib/_pkg1/_pkg1_1/_pkg1_1_1/_pkg1_1_1_0/_pkg1_1_1_0_0/__init__.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
from ._mod1_1_1_0_0_0 import *
from ._mod1_1_1_0_0_1 import *
from ._mod1_1_1_0_0_2 import *
from ._mod1_1_1_0_0_3 import *
from ._mod1_1_1_0_0_4 import *
| 30.8
| 30
| 0.811688
| 40
| 154
| 2.375
| 0.2
| 0.126316
| 0.473684
| 0.526316
| 0.884211
| 0.884211
| 0.757895
| 0
| 0
| 0
| 0
| 0.222222
| 0.123377
| 154
| 5
| 31
| 30.8
| 0.481481
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 11
|
94173f5016afc56d1826d076a947f15f20e6efe2
| 85
|
py
|
Python
|
__init__.py
|
acidbutter96/challenge_backend
|
d064d358b9c0bdaaec379d4a79f171324dcd22a0
|
[
"MIT"
] | null | null | null |
__init__.py
|
acidbutter96/challenge_backend
|
d064d358b9c0bdaaec379d4a79f171324dcd22a0
|
[
"MIT"
] | null | null | null |
__init__.py
|
acidbutter96/challenge_backend
|
d064d358b9c0bdaaec379d4a79f171324dcd22a0
|
[
"MIT"
] | null | null | null |
from carteiraglobal.app.request.request_from_site import request_from_site as request
| 85
| 85
| 0.905882
| 13
| 85
| 5.615385
| 0.538462
| 0.30137
| 0.410959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 85
| 1
| 85
| 85
| 0.9125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
9447c280bf646274e8349aa3e97c11ebd03dbb62
| 29,352
|
py
|
Python
|
IA/comportamiento.py
|
Gusta2307/Football-Simulation-IA-SIM-COM-
|
8c29c5b1ef61708a4f8b34f5e0e00990aeecfacd
|
[
"MIT"
] | null | null | null |
IA/comportamiento.py
|
Gusta2307/Football-Simulation-IA-SIM-COM-
|
8c29c5b1ef61708a4f8b34f5e0e00990aeecfacd
|
[
"MIT"
] | null | null | null |
IA/comportamiento.py
|
Gusta2307/Football-Simulation-IA-SIM-COM-
|
8c29c5b1ef61708a4f8b34f5e0e00990aeecfacd
|
[
"MIT"
] | 1
|
2022-02-07T04:47:15.000Z
|
2022-02-07T04:47:15.000Z
|
import random
from classes.jugador import Jugador
from classes.portero import Portero
from config import Config
_config = Config()
#### DEFENSE ####
# Players advance or fall back depending on the zone where the throw-in is taken
def comportamiento_defensa_detenido_BB(partido, equipo):
zona_actual = partido.ultima_accion.agente.ubc
jugador_acciones = []
for j in equipo.jugadores_en_campo:
        if isinstance(j, Portero):
jugador_acciones.append(
[
j,
[
['MANTENER_POS', 1]
]
]
)
continue
if zona_actual == _config.IA.Zona.DEFENSA:
if j.ubc == _config.IA.Zona.ATAQUE:
                if j.posicion == _config.POSICIONES[0]: # Forward
jugador_acciones.append(
[
j,
[
['RETRASAR_POS', 0.8],
['MANTENER_POS', 0.2]
]
]
)
                elif j.posicion == _config.POSICIONES[1]: # Midfielder
jugador_acciones.append(
[
j,
[
['RETRASAR_POS', 0.65],
['MANTENER_POS', 0.35]
]
]
)
                elif j.posicion == _config.POSICIONES[2]: # Defender
jugador_acciones.append(
[
j,
[
['RETRASAR_POS', 0.95],
['MANTENER_POS', 0.05]
]
]
)
elif j.ubc == _config.IA.Zona.CENTRO:
                if j.posicion == _config.POSICIONES[0]: # Forward
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.2],
['RETRASAR_POS', 0.2],
['MANTENER_POS', 0.6]
]
]
)
                elif j.posicion == _config.POSICIONES[1]: # Midfielder
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.1],
['RETRASAR_POS', 0.35],
['MANTENER_POS', 0.55]
]
]
)
                elif j.posicion == _config.POSICIONES[2]: # Defender
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.05],
['RETRASAR_POS', 0.65],
['MANTENER_POS', 0.35]
]
]
)
elif j.ubc == _config.IA.Zona.DEFENSA:
                if j.posicion == _config.POSICIONES[0]: # Forward
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.75],
['MANTENER_POS', 0.25]
]
]
)
                elif j.posicion == _config.POSICIONES[1]: # Midfielder
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.45],
['MANTENER_POS', 0.55]
]
]
)
                elif j.posicion == _config.POSICIONES[2]: # Defender
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.05],
['MANTENER_POS', 0.95]
]
]
)
elif zona_actual == _config.IA.Zona.CENTRO:
if j.ubc == _config.IA.Zona.ATAQUE:
                if j.posicion == _config.POSICIONES[0]: # Forward
jugador_acciones.append(
[
j,
[
['RETRASAR_POS', 0.35],
['MANTENER_POS', 0.65]
]
]
)
                elif j.posicion == _config.POSICIONES[1]: # Midfielder
jugador_acciones.append(
[
j,
[
['RETRASAR_POS', 0.65],
['MANTENER_POS', 0.35]
]
]
)
                elif j.posicion == _config.POSICIONES[2]: # Defender
jugador_acciones.append(
[
j,
[
['RETRASAR_POS', 0.85],
['MANTENER_POS', 0.15]
]
]
)
elif j.ubc == _config.IA.Zona.CENTRO:
                if j.posicion == _config.POSICIONES[0]: # Forward
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.2],
['RETRASAR_POS', 0.05],
['MANTENER_POS', 0.75]
]
]
)
                elif j.posicion == _config.POSICIONES[1]: # Midfielder
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.1],
['RETRASAR_POS', 0.1],
['MANTENER_POS', 0.8]
]
]
)
                elif j.posicion == _config.POSICIONES[2]: # Defender
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.05],
['RETRASAR_POS', 0.65],
['MANTENER_POS', 0.3]
]
]
)
elif j.ubc == _config.IA.Zona.DEFENSA:
                if j.posicion == _config.POSICIONES[0]: # Forward
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.85],
['MANTENER_POS', 0.15]
]
]
)
                elif j.posicion == _config.POSICIONES[1]: # Midfielder
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.65],
['MANTENER_POS', 0.35]
]
]
)
                elif j.posicion == _config.POSICIONES[2]: # Defender
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.25],
['MANTENER_POS', 0.75]
]
]
)
elif zona_actual == _config.IA.Zona.ATAQUE:
if j.ubc == _config.IA.Zona.ATAQUE:
                if j.posicion == _config.POSICIONES[0]: # Forward
jugador_acciones.append(
[
j,
[
['RETRASAR_POS', 0.05],
['MANTENER_POS', 0.95]
]
]
)
                elif j.posicion == _config.POSICIONES[1]: # Midfielder
jugador_acciones.append(
[
j,
[
['RETRASAR_POS', 0.35],
['MANTENER_POS', 0.65]
]
]
)
                elif j.posicion == _config.POSICIONES[2]: # Defender
jugador_acciones.append(
[
j,
[
['RETRASAR_POS', 0.65],
['MANTENER_POS', 0.35]
]
]
)
elif j.ubc == _config.IA.Zona.CENTRO:
                if j.posicion == _config.POSICIONES[0]: # Forward
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.65],
['RETRASAR_POS', 0.05],
['MANTENER_POS', 0.3]
]
]
)
                elif j.posicion == _config.POSICIONES[1]: # Midfielder
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.25],
['RETRASAR_POS', 0.05],
['MANTENER_POS', 0.7]
]
]
)
                elif j.posicion == _config.POSICIONES[2]: # Defender
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.05],
['RETRASAR_POS', 0.65],
['MANTENER_POS', 0.3]
]
]
)
elif j.ubc == _config.IA.Zona.DEFENSA:
                if j.posicion == _config.POSICIONES[0]: # Forward
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.95],
['MANTENER_POS', 0.05]
]
]
)
                elif j.posicion == _config.POSICIONES[1]: # Midfielder
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.65],
['MANTENER_POS', 0.35]
]
]
)
                elif j.posicion == _config.POSICIONES[2]: # Defender
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.35],
['MANTENER_POS', 0.65]
]
]
)
return jugador_acciones
def comportamiento_defensa_detenido_BLF(partido, equipo):
jugador_acciones = []
for j in equipo.jugadores_en_campo:
if j.ubc == _config.IA.Zona.ATAQUE:
if j.posicion == _config.POSICIONES[0]: # DEL
jugador_acciones.append(
[
j,
[
['RETRASAR_POS', 0.65],
['RETRAZAR_POS_DEF', 0.3],
['MANTENER_POS', 0.05]
]
]
)
elif j.posicion == _config.POSICIONES[1]: # MED
jugador_acciones.append(
[
j,
[
['RETRASAR_POS', 0.3],
['RETRAZAR_POS_DEF', 0.65],
['MANTENER_POS', 0.05]
]
]
)
elif j.posicion == _config.POSICIONES[2]: # DEF
jugador_acciones.append(
[
j,
[
['RETRASAR_POS', 0.1],
['RETRAZAR_POS_DEF', 0.85],
['MANTENER_POS', 0.05]
]
]
)
elif j.ubc == _config.IA.Zona.CENTRO:
if j.posicion == _config.POSICIONES[0]: # DEL
jugador_acciones.append(
[
j,
[
['RETRASAR_POS', 0.3],
['AVANZAR_POS', 0.05],
['MANTENER_POS', 0.65]
]
]
)
elif j.posicion == _config.POSICIONES[1]: # MED
jugador_acciones.append(
[
j,
[
['RETRASAR_POS', 0.75],
['AVANZAR_POS', 0.05],
['MANTENER_POS', 0.2]
]
]
)
elif j.posicion == _config.POSICIONES[2]: # DEF
jugador_acciones.append(
[
j,
[
['RETRASAR_POS', 0.85],
['AVANZAR_POS', 0.05],
['MANTENER_POS', 0.1]
]
]
)
elif j.ubc == _config.IA.Zona.DEFENSA:
if j.posicion == _config.POSICIONES[0]: # DEL
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.55],
['MANTENER_POS', 0.45]
]
]
)
elif j.posicion == _config.POSICIONES[1]: # MED
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.35],
['MANTENER_POS', 0.65]
]
]
)
elif j.posicion == _config.POSICIONES[2]: # DEF
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.05],
['MANTENER_POS', 0.95]
]
]
)
return jugador_acciones
def comportamiento_defensa_detenido_CF_ZA(partido, equipo):
jugador_acciones = []
for j in equipo.jugadores_en_campo:
if j.ubc == _config.IA.Zona.ATAQUE:
if j.posicion == _config.POSICIONES[0]: #DEL
jugador_acciones.append(
[
j,
[
['RETRASAR_POS', 0.15],
['MANTENER_POS', 0.85]
]
]
)
elif j.posicion == _config.POSICIONES[1]: #MED
jugador_acciones.append(
[
j,
[
['RETRASAR_POS', 0.45],
['MANTENER_POS', 0.55]
]
]
)
elif j.posicion == _config.POSICIONES[2]: #DEF
jugador_acciones.append(
[
j,
[
['RETRASAR_POS', 0.45],
['REGRESAR_POS', 0.35],
['MANTENER_POS', 0.2]
]
]
)
elif j.ubc == _config.IA.Zona.CENTRO:
if j.posicion == _config.POSICIONES[0]: #DEL
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.65],
['RETRASAR_POS', 0.05],
['MANTENER_POS', 0.3]
]
]
)
elif j.posicion == _config.POSICIONES[1]: #MED
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.25],
['RETRASAR_POS', 0.05],
['MANTENER_POS', 0.7]
]
]
)
elif j.posicion == _config.POSICIONES[2]: #DEF
jugador_acciones.append(
[
j,
[
['RETRASAR_POS', 0.55],
['AVANZAR_POS', 0.1],
['MANTENER_POS', 0.35]
]
]
)
elif j.ubc == _config.IA.Zona.DEFENSA:
if j.posicion == _config.POSICIONES[0]: #DEL
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.45],
['REGRESAR_POS', 0.55],
['MANTENER_POS', 0.05]
]
]
)
elif j.posicion == _config.POSICIONES[1]: #MED
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.55],
['AVANZAR_POS_DEL', 0.3],
['MANTENER_POS', 0.15]
]
]
)
elif j.posicion == _config.POSICIONES[2]: #DEF
jugador_acciones.append(
[
j,
[
['AVANZAR_POS_DEL', 0.05],
['AVANZAR_POS', 0.3],
['MANTENER_POS', 0.65]
]
]
)
return jugador_acciones
def comportamiento_defensa_detenido_CF_ZC(partido, equipo):
jugador_acciones = []
for j in equipo.jugadores_en_campo:
if j.ubc == _config.IA.Zona.ATAQUE:
if j.posicion == _config.POSICIONES[0]: #DEL
jugador_acciones.append(
[
j,
[
['RETRASAR_POS_DEF', 0.15],
['RETRASAR_POS', 0.25],
['MANTENER_POS', 0.6]
]
]
)
elif j.posicion == _config.POSICIONES[1]: #MED
jugador_acciones.append(
[
j,
[
['RETRASAR_POS', 0.65],
['MANTENER_POS', 0.3],
['RETRASAR_POS_DEF', 0.05]
]
]
)
elif j.posicion == _config.POSICIONES[2]: #DEF
jugador_acciones.append(
[
j,
[
['RETRASAR_POS', 0.3],
['REGRESAR_POS', 0.65],
['MANTENER_POS', 0.05]
]
]
)
elif j.ubc == _config.IA.Zona.CENTRO:
if j.posicion == _config.POSICIONES[0]: #DEL
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.25],
['RETRASAR_POS', 0.05],
['MANTENER_POS', 0.7]
]
]
)
elif j.posicion == _config.POSICIONES[1]: #MED
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.15],
['RETRASAR_POS', 0.25],
['MANTENER_POS', 0.6]
]
]
)
elif j.posicion == _config.POSICIONES[2]: #DEF
jugador_acciones.append(
[
j,
[
['RETRASAR_POS', 0.65],
['AVANZAR_POS', 0.05],
['MANTENER_POS', 0.3]
]
]
)
elif j.ubc == _config.IA.Zona.DEFENSA:
if j.posicion == _config.POSICIONES[0]: #DEL
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.45],
['REGRESAR_POS', 0.2],
['MANTENER_POS', 0.35]
]
]
)
elif j.posicion == _config.POSICIONES[1]: #MED
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.55],
['AVANZAR_POS_DEL', 0.05],
['MANTENER_POS', 0.4]
]
]
)
elif j.posicion == _config.POSICIONES[2]: #DEF
jugador_acciones.append(
[
j,
[
['AVANZAR_POS_DEL', 0.05],
['AVANZAR_POS', 0.3],
['MANTENER_POS', 0.65]
]
]
)
return jugador_acciones
def comportamiento_defensa_detenido_CF_ZD(partido, equipo):
jugador_acciones = []
for j in equipo.jugadores_en_campo:
if j.ubc == _config.IA.Zona.ATAQUE:
if j.posicion == _config.POSICIONES[0]: #DEL
jugador_acciones.append(
[
j,
[
['RETRASAR_POS_DEF', 0.25],
['RETRASAR_POS', 0.6],
['MANTENER_POS', 0.05]
]
]
)
elif j.posicion == _config.POSICIONES[1]: #MED
jugador_acciones.append(
[
j,
[
['RETRASAR_POS', 0.5],
['MANTENER_POS', 0.05],
['RETRASAR_POS_DEF', 0.45]
]
]
)
elif j.posicion == _config.POSICIONES[2]: #DEF
jugador_acciones.append(
[
j,
[
['RETRASAR_POS', 0.2],
['REGRESAR_POS', 0.75],
['MANTENER_POS', 0.05]
]
]
)
elif j.ubc == _config.IA.Zona.CENTRO:
if j.posicion == _config.POSICIONES[0]: #DEL
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.1],
['RETRASAR_POS', 0.2],
['MANTENER_POS', 0.7]
]
]
)
elif j.posicion == _config.POSICIONES[1]: #MED
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.1],
['RETRASAR_POS', 0.3],
['MANTENER_POS', 0.6]
]
]
)
elif j.posicion == _config.POSICIONES[2]: #DEF
jugador_acciones.append(
[
j,
[
['RETRASAR_POS', 0.65],
['AVANZAR_POS', 0.05],
['MANTENER_POS', 0.3]
]
]
)
elif j.ubc == _config.IA.Zona.DEFENSA:
if j.posicion == _config.POSICIONES[0]: #DEL
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.35],
['REGRESAR_POS', 0.1],
['MANTENER_POS', 0.55]
]
]
)
elif j.posicion == _config.POSICIONES[1]: #MED
jugador_acciones.append(
[
j,
[
['AVANZAR_POS', 0.55],
['AVANZAR_POS_DEL', 0.05],
['MANTENER_POS', 0.4]
]
]
)
elif j.posicion == _config.POSICIONES[2]: #DEF
jugador_acciones.append(
[
j,
[
['AVANZAR_POS_DEL', 0.05],
['AVANZAR_POS', 0.2],
['MANTENER_POS', 0.75]
]
]
)
return jugador_acciones
#### ATTACK ####
# The ball goes out over the touchline
def comportamiento_ataque_detenido_BB(partido, equipo):
jugador_acciones = []
zona_actual = partido.ultima_accion.agente.ubc
    jugadores_pos_actual = [x for x in equipo.jugadores_en_campo if x.ubc == zona_actual]
    ###
    # I THINK WE NEED TO VERIFY THAT THE LENGTH OF jugadores_pos_actual IS > 0
    ###
    jugador_saque_bb = jugadores_pos_actual[random.randint(0, len(jugadores_pos_actual) - 1)]
for j in equipo.jugadores_en_campo:
if j == jugador_saque_bb:
continue
def comportamiento_ataque_detenido_CF_ZA(partido, equipo):
pass
def comportamiento_ataque_detenido_CF_ZC(partido, equipo):
pass
def comportamiento_ataque_detenido_CF_ZD(partido, equipo):
pass
# The goalkeeper takes the goal kick and the other players return to their respective positions
def comportamiento_ataque_detenido_BLF_1(partido, equipo):
jugador_acciones = []
for j in equipo.jugadores_en_campo:
        if isinstance(j, Portero):
jugador_acciones.append(
[
j,
[
['SAQUE_PORTERIA', 1]
]
]
)
else:
jugador_acciones.append(
[
j,
[
['REGRESAR_POS', 1]
]
]
)
return jugador_acciones
def comportamiento_ataque_detenido_BLF_2(partido, equipo):
pass
##############################################################################################
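# --- Illustrative sketch (addition, not part of the original file) ---
# Each comportamiento_* function returns pairs [player, [[action, probability], ...]]
# whose probabilities sum to 1. A consumer could pick one action per player via
# weighted sampling; `elegir_acciones` is a hypothetical helper name.
def elegir_acciones(jugador_acciones):
    seleccion = []
    for jugador, acciones in jugador_acciones:
        nombres = [accion for accion, _ in acciones]
        pesos = [peso for _, peso in acciones]
        # random.choices draws a weighted sample; random is imported at the top
        elegida = random.choices(nombres, weights=pesos, k=1)[0]
        seleccion.append((jugador, elegida))
    return seleccion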
| 34.942857
| 95
| 0.292723
| 1,867
| 29,352
| 4.373862
| 0.059989
| 0.074945
| 0.169728
| 0.17781
| 0.896155
| 0.870806
| 0.847294
| 0.828802
| 0.804678
| 0.786799
| 0
| 0.046764
| 0.620435
| 29,352
| 839
| 96
| 34.984505
| 0.686204
| 0.023133
| 0
| 0.494792
| 0
| 0
| 0.071429
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014323
| false
| 0.005208
| 0.005208
| 0
| 0.027344
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ca3ea1f4cdf81178733240eae31bcd00ff276891
| 318
|
py
|
Python
|
src/prism-fruit/Games-DQL/examples/games/car/networkx/linalg/__init__.py
|
kushgrover/apt-vs-dift
|
250f64e6c442f6018cab65ec6979d9568a842f57
|
[
"MIT"
] | null | null | null |
src/prism-fruit/Games-DQL/examples/games/car/networkx/linalg/__init__.py
|
kushgrover/apt-vs-dift
|
250f64e6c442f6018cab65ec6979d9568a842f57
|
[
"MIT"
] | null | null | null |
src/prism-fruit/Games-DQL/examples/games/car/networkx/linalg/__init__.py
|
kushgrover/apt-vs-dift
|
250f64e6c442f6018cab65ec6979d9568a842f57
|
[
"MIT"
] | null | null | null |
from networkx.linalg.attrmatrix import *
import networkx.linalg.attrmatrix
from networkx.linalg.spectrum import *
import networkx.linalg.spectrum
from networkx.linalg.graphmatrix import *
import networkx.linalg.graphmatrix
from networkx.linalg.laplacianmatrix import *
import networkx.linalg.laplacianmatrix
| 31.8
| 46
| 0.830189
| 36
| 318
| 7.333333
| 0.222222
| 0.424242
| 0.272727
| 0.393939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106918
| 318
| 9
| 47
| 35.333333
| 0.929577
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ca778292d2704f1df99c5dcc4a14348fd60b0d65
| 4,240
|
py
|
Python
|
genomics_data_index/test/unit/variant/model/db/test_NucleotideVariantsSamples.py
|
apetkau/thesis-index
|
6c96e9ed75d8e661437effe62a939727a0b473fc
|
[
"Apache-2.0"
] | 1
|
2021-04-21T00:19:49.000Z
|
2021-04-21T00:19:49.000Z
|
genomics_data_index/test/unit/variant/model/db/test_NucleotideVariantsSamples.py
|
apetkau/thesis-index
|
6c96e9ed75d8e661437effe62a939727a0b473fc
|
[
"Apache-2.0"
] | null | null | null |
genomics_data_index/test/unit/variant/model/db/test_NucleotideVariantsSamples.py
|
apetkau/thesis-index
|
6c96e9ed75d8e661437effe62a939727a0b473fc
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from genomics_data_index.storage.SampleSet import SampleSet
from genomics_data_index.storage.model.db import NucleotideVariantsSamples, MLSTAllelesSamples
def test_update_sample_ids_both_overlap():
s1 = SampleSet([1, 2])
v1 = NucleotideVariantsSamples(spdi='ref:10:1:A', var_type='SNP', sample_ids=s1)
s2 = SampleSet([2, 3])
v2 = NucleotideVariantsSamples(spdi='ref:10:1:A', var_type='SNP', sample_ids=s2)
v1.update_sample_ids(v2)
# v1 should only have sample_ids updated
assert v1.id == 'ref:10:1:A'
assert v1.spdi == 'ref:10:1:A'
assert v1.var_type == 'SNP'
assert set(v1.sample_ids) == {1, 2, 3}
# v2 should not be changed
assert v2.id == 'ref:10:1:A'
assert v2.var_type == 'SNP'
assert set(v2.sample_ids) == {2, 3}
def test_update_sample_ids_both_empty():
s1 = SampleSet.create_empty()
v1 = NucleotideVariantsSamples(spdi='ref:10:1:A', var_type='SNP', sample_ids=s1)
s2 = SampleSet.create_empty()
v2 = NucleotideVariantsSamples(spdi='ref:10:1:A', var_type='SNP', sample_ids=s2)
v1.update_sample_ids(v2)
# v1 should only have sample_ids updated
assert v1.id == 'ref:10:1:A'
assert v1.spdi == 'ref:10:1:A'
assert v1.var_type == 'SNP'
assert set(v1.sample_ids) == set()
# v2 should not be changed
assert v2.id == 'ref:10:1:A'
assert v2.var_type == 'SNP'
assert set(v2.sample_ids) == set()
def test_update_sample_ids_left_empty():
s1 = SampleSet.create_empty()
v1 = NucleotideVariantsSamples(spdi='ref:10:1:A', var_type='SNP', sample_ids=s1)
s2 = SampleSet([1, 2])
v2 = NucleotideVariantsSamples(spdi='ref:10:1:A', var_type='SNP', sample_ids=s2)
v1.update_sample_ids(v2)
# v1 should only have sample_ids updated
assert v1.id == 'ref:10:1:A'
assert v1.spdi == 'ref:10:1:A'
assert v1.var_type == 'SNP'
assert set(v1.sample_ids) == {1, 2}
# v2 should not be changed
assert v2.id == 'ref:10:1:A'
assert v2.var_type == 'SNP'
assert set(v2.sample_ids) == {1, 2}
def test_update_sample_ids_right_empty():
s1 = SampleSet([1, 2])
v1 = NucleotideVariantsSamples(spdi='ref:10:1:A', var_type='SNP', sample_ids=s1)
s2 = SampleSet.create_empty()
v2 = NucleotideVariantsSamples(spdi='ref:10:1:A', var_type='SNP', sample_ids=s2)
v1.update_sample_ids(v2)
# v1 should only have sample_ids updated
assert v1.id == 'ref:10:1:A'
assert v1.spdi == 'ref:10:1:A'
assert v1.var_type == 'SNP'
assert set(v1.sample_ids) == {1, 2}
# v2 should not be changed
assert v2.id == 'ref:10:1:A'
assert v2.var_type == 'SNP'
assert set(v2.sample_ids) == set()
def test_update_sample_ids_disjoint():
s1 = SampleSet([1, 2])
v1 = NucleotideVariantsSamples(spdi='ref:10:1:A', var_type='SNP', sample_ids=s1)
s2 = SampleSet([3, 4])
v2 = NucleotideVariantsSamples(spdi='ref:10:1:A', var_type='SNP', sample_ids=s2)
v1.update_sample_ids(v2)
# v1 should only have sample_ids updated
assert v1.id == 'ref:10:1:A'
assert v1.spdi == 'ref:10:1:A'
assert v1.var_type == 'SNP'
assert set(v1.sample_ids) == {1, 2, 3, 4}
# v2 should not be changed
assert v2.id == 'ref:10:1:A'
assert v2.var_type == 'SNP'
assert set(v2.sample_ids) == {3, 4}
def test_update_sample_ids_feature_mismatch():
s1 = SampleSet([1, 2])
v1 = NucleotideVariantsSamples(spdi='ref:10:1:A', var_type='SNP', sample_ids=s1)
s2 = SampleSet([2, 3])
v2 = MLSTAllelesSamples(sla='ecoli:abc:1', sample_ids=s2)
with pytest.raises(Exception) as execinfo:
v1.update_sample_ids(v2)
assert 'Cannot merge other' in str(execinfo.value)
assert 'since it is not of type' in str(execinfo.value)
def test_update_sample_ids_feature_id_mismatch():
s1 = SampleSet([1, 2])
v1 = NucleotideVariantsSamples(spdi='ref:10:1:A', var_type='SNP', sample_ids=s1)
s2 = SampleSet([2, 3])
v2 = NucleotideVariantsSamples(spdi='ref:10:2:A', var_type='SNP', sample_ids=s2)
with pytest.raises(Exception) as execinfo:
v1.update_sample_ids(v2)
assert 'Cannot merge other' in str(execinfo.value)
assert 'since identifiers are not equal' in str(execinfo.value)
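# --- Illustrative sketch (addition, not part of the original test module) ---
# The behaviour these tests pin down, restated as a minimal update_sample_ids.
# The real implementation lives in genomics_data_index and may differ; SampleSet
# is assumed here to be constructible from an iterable of ids.
#
# def update_sample_ids(self, other):
#     if not isinstance(other, self.__class__):
#         raise Exception(f'Cannot merge other [{other}] since it is not of type [{self.__class__}]')
#     if self.id != other.id:
#         raise Exception(f'Cannot merge other [{other}] since identifiers are not equal')
#     self.sample_ids = SampleSet(set(self.sample_ids) | set(other.sample_ids))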
| 30.724638
| 94
| 0.666274
| 672
| 4,240
| 4.041667
| 0.107143
| 0.142489
| 0.059647
| 0.069588
| 0.912003
| 0.883284
| 0.842784
| 0.838733
| 0.838733
| 0.838733
| 0
| 0.064788
| 0.188208
| 4,240
| 137
| 95
| 30.948905
| 0.724288
| 0.075236
| 0
| 0.744186
| 0
| 0
| 0.11509
| 0
| 0
| 0
| 0
| 0
| 0.453488
| 1
| 0.081395
| false
| 0
| 0.034884
| 0
| 0.116279
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0460966d4052a20fc80bb8c174f9b3077b8d69de
| 80
|
py
|
Python
|
models/__init__.py
|
LyapunovStability/BRITS
|
92a889dd5946aae215d61b1854d9767c6f7fcf2c
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
LyapunovStability/BRITS
|
92a889dd5946aae215d61b1854d9767c6f7fcf2c
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
LyapunovStability/BRITS
|
92a889dd5946aae215d61b1854d9767c6f7fcf2c
|
[
"MIT"
] | null | null | null |
from models.brits import *
from models.rits import *
from models.param import *
| 20
| 26
| 0.775
| 12
| 80
| 5.166667
| 0.5
| 0.483871
| 0.516129
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 80
| 3
| 27
| 26.666667
| 0.911765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
04f27dddd31a3487f82c2724f2a1ac77e8cc411a
| 2,947
|
py
|
Python
|
parser/team03/parse/functions/functions_string.py
|
susanliss/tytus
|
a613a2352cf4a1d0e90ce27bb346ab60ed8039cc
|
[
"MIT"
] | null | null | null |
parser/team03/parse/functions/functions_string.py
|
susanliss/tytus
|
a613a2352cf4a1d0e90ce27bb346ab60ed8039cc
|
[
"MIT"
] | null | null | null |
parser/team03/parse/functions/functions_string.py
|
susanliss/tytus
|
a613a2352cf4a1d0e90ce27bb346ab60ed8039cc
|
[
"MIT"
] | null | null | null |
import sys
import base64
from hashlib import md5, sha256
sys.path.insert(0, '..')
from ast_node import ASTNode
# From here on, classes describing aggregate functions
# TODO: Convert, SetByte, Substr
class Convert(ASTNode):
def __init__(self, exp, line, column):
ASTNode.__init__(self, line, column)
self.exp = exp
def execute(self, table, tree):
super().execute(table, tree)
return True
class Decode(ASTNode):
def __init__(self, exp, line, column):
ASTNode.__init__(self, line, column)
self.exp = exp
def execute(self, table, tree):
super().execute(table, tree)
        # Python 3 removed the str 'base64' codec; decode via the base64 module instead
        return base64.b64decode(self.exp).decode('utf-8', 'strict')
class Encode(ASTNode):
def __init__(self, exp, line, column):
ASTNode.__init__(self, line, column)
self.exp = exp
def execute(self, table, tree):
super().execute(table, tree)
        # Python 3 removed the str 'base64' codec; encode via the base64 module instead
        return base64.b64encode(self.exp.encode('utf-8')).decode('ascii')
class GetByte(ASTNode):
def __init__(self, exp, line, column):
ASTNode.__init__(self, line, column)
self.exp = exp
def execute(self, table, tree):
super().execute(table, tree)
return bytes(self.exp, 'utf-8')
class Length(ASTNode):
def __init__(self, exp, line, column):
ASTNode.__init__(self, line, column)
self.exp = exp
def execute(self, table, tree):
super().execute(table, tree)
return len(self.exp)
class Md5(ASTNode):
def __init__(self, exp, line, column):
ASTNode.__init__(self, line, column)
self.exp = exp
def execute(self, table, tree):
super().execute(table, tree)
return md5(self.exp.encode())
class SetByte(ASTNode):
def __init__(self, exp, line, column):
ASTNode.__init__(self, line, column)
self.exp = exp
def execute(self, table, tree):
super().execute(table, tree)
return True
class Sha256(ASTNode):
def __init__(self, exp, line, column):
ASTNode.__init__(self, line, column)
self.exp = exp
def execute(self, table, tree):
super().execute(table, tree)
        # sha256 requires bytes input, mirroring the Md5 node above
        return sha256(self.exp.encode())
class Substr(ASTNode):
def __init__(self, exp, line, column):
ASTNode.__init__(self, line, column)
self.exp = exp
def execute(self, table, tree):
super().execute(table, tree)
return len(self.exp)
class Substring(ASTNode):
def __init__(self, exp, start, end, line, column):
ASTNode.__init__(self, line, column)
self.exp = exp
self.start = start
self.end = end
def execute(self, table, tree):
super().execute(table, tree)
return self.exp[self.start: self.end]
class Trim(ASTNode):
def __init__(self, exp, line, column):
ASTNode.__init__(self, line, column)
self.exp = exp
def execute(self, table, tree):
super().execute(table, tree)
return self.exp.strip()
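# --- Usage sketch (illustrative addition, not part of the original file) ---
# How these AST nodes evaluate once constructed. The table/tree arguments are
# whatever the surrounding interpreter threads through execute(); None serves
# purely as a placeholder here.
#
# Substring('tytus', 0, 2, line=1, column=1).execute(None, None)   # -> 'ty'
# Trim('  db  ', line=1, column=1).execute(None, None)             # -> 'db'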
| 24.558333
| 54
| 0.617238
| 371
| 2,947
| 4.663073
| 0.134771
| 0.125434
| 0.089017
| 0.114451
| 0.781503
| 0.769364
| 0.769364
| 0.769364
| 0.769364
| 0.769364
| 0
| 0.008227
| 0.25755
| 2,947
| 120
| 55
| 24.558333
| 0.78245
| 0.028164
| 0
| 0.698795
| 0
| 0
| 0.010832
| 0
| 0
| 0
| 0
| 0.008333
| 0
| 1
| 0.26506
| false
| 0
| 0.036145
| 0
| 0.566265
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
04fbe3a3cce87096e9b783c3ad7646a8d723441a
| 98
|
py
|
Python
|
bitio/src/microbit/repl/__init__.py
|
hungjuchen/Atmosmakers
|
4e8e64fba3d7a31840f69a5aa3823247aa5dca02
|
[
"MIT"
] | 85
|
2017-06-09T20:53:46.000Z
|
2022-03-09T21:35:05.000Z
|
bitio/src/microbit/repl/__init__.py
|
hungjuchen/Atmosmakers
|
4e8e64fba3d7a31840f69a5aa3823247aa5dca02
|
[
"MIT"
] | 34
|
2017-06-09T20:52:05.000Z
|
2021-02-19T19:49:45.000Z
|
bitio/src/microbit/repl/__init__.py
|
hungjuchen/Atmosmakers
|
4e8e64fba3d7a31840f69a5aa3823247aa5dca02
|
[
"MIT"
] | 32
|
2017-06-09T10:15:19.000Z
|
2021-11-20T09:08:08.000Z
|
# repl/__init__.py
try:
from repl import *
except ImportError:
from .repl import *
# END
| 12.25
| 23
| 0.663265
| 13
| 98
| 4.692308
| 0.692308
| 0.262295
| 0.459016
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.244898
| 98
| 7
| 24
| 14
| 0.824324
| 0.204082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b6e18b81d4a0b197c2238603a7e13af80bcf7420
| 86
|
py
|
Python
|
{{cookiecutter.repo_name}}/tests/test_main.py
|
adamtupper/cookiecutter-lvsn-workflow
|
d2344ed1767e9eb6a566c32729b7a7e013693f30
|
[
"MIT"
] | null | null | null |
{{cookiecutter.repo_name}}/tests/test_main.py
|
adamtupper/cookiecutter-lvsn-workflow
|
d2344ed1767e9eb6a566c32729b7a7e013693f30
|
[
"MIT"
] | 11
|
2021-06-09T17:24:21.000Z
|
2021-07-26T14:33:28.000Z
|
{{cookiecutter.repo_name}}/tests/test_main.py
|
adamtupper/cookiecutter-lvsn-workflow
|
d2344ed1767e9eb6a566c32729b7a7e013693f30
|
[
"MIT"
] | null | null | null |
import pytest # noqa: F401
def test_dummy():
"""A dummy test case."""
pass
| 12.285714
| 28
| 0.593023
| 12
| 86
| 4.166667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 0.267442
| 86
| 6
| 29
| 14.333333
| 0.746032
| 0.348837
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
8e1b2123f0cf419cea41aec03b3077c77826a13f
| 182
|
py
|
Python
|
src/unittest/specification/someClass.py
|
mrdulin/python-codelab
|
3d960a14a96b3a673b7dc2277d202069b1f8e778
|
[
"MIT"
] | null | null | null |
src/unittest/specification/someClass.py
|
mrdulin/python-codelab
|
3d960a14a96b3a673b7dc2277d202069b1f8e778
|
[
"MIT"
] | null | null | null |
src/unittest/specification/someClass.py
|
mrdulin/python-codelab
|
3d960a14a96b3a673b7dc2277d202069b1f8e778
|
[
"MIT"
] | 3
|
2020-02-19T08:02:04.000Z
|
2021-06-08T13:27:51.000Z
|
class SomeClass:
def __init__(self, name='lin'):
self.name = name
# def get_name(self):
# return self.name
    def get_name_new(self):
        return self.name
| 18.2
| 35
| 0.582418
| 24
| 182
| 4.125
| 0.416667
| 0.323232
| 0.20202
| 0.282828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.307692
| 182
| 9
| 36
| 20.222222
| 0.785714
| 0.21978
| 0
| 0
| 0
| 0
| 0.021583
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
edcb999cd749afecb86fbcfb4607f2dc5b8eb8ce
| 16,478
|
py
|
Python
|
tests/orchestrator/test_sequential_orchestrator_with_retry.py
|
Azure/azure-functions-durable-python
|
41b7d88d38bfc19cea6249e08fd240362976374f
|
[
"MIT"
] | 78
|
2020-03-30T19:05:23.000Z
|
2022-03-30T06:55:47.000Z
|
tests/orchestrator/test_sequential_orchestrator_with_retry.py
|
Azure/azure-functions-durable-python
|
41b7d88d38bfc19cea6249e08fd240362976374f
|
[
"MIT"
] | 180
|
2020-04-01T22:25:59.000Z
|
2022-03-29T14:23:16.000Z
|
tests/orchestrator/test_sequential_orchestrator_with_retry.py
|
Azure/azure-functions-durable-python
|
41b7d88d38bfc19cea6249e08fd240362976374f
|
[
"MIT"
] | 40
|
2020-03-31T19:52:31.000Z
|
2022-02-06T05:52:44.000Z
|
from typing import List, Union
from azure.durable_functions.models.ReplaySchema import ReplaySchema
from .orchestrator_test_utils \
import get_orchestration_state_result, assert_orchestration_state_equals, assert_valid_schema
from tests.test_utils.ContextBuilder import ContextBuilder
from azure.durable_functions.models.OrchestratorState import OrchestratorState
from azure.durable_functions.models.RetryOptions import RetryOptions
from azure.durable_functions.models.actions.CallActivityWithRetryAction \
import CallActivityWithRetryAction
RETRY_OPTIONS = RetryOptions(5000, 3)
def generator_function(context):
outputs = []
retry_options = RETRY_OPTIONS
task1 = yield context.call_activity_with_retry(
"Hello", retry_options, "Tokyo")
task2 = yield context.call_activity_with_retry(
"Hello", retry_options, "Seattle")
task3 = yield context.call_activity_with_retry(
"Hello", retry_options, "London")
outputs.append(task1)
outputs.append(task2)
outputs.append(task3)
return outputs
def generator_function_concurrent_retries(context):
outputs = []
retry_options = RETRY_OPTIONS
task1 = context.call_activity_with_retry(
"Hello", retry_options, "Tokyo")
task2 = context.call_activity_with_retry(
"Hello", retry_options, "Seattle")
task3 = context.call_activity_with_retry(
"Hello", retry_options, "London")
outputs = yield context.task_all([task1, task2, task3])
return outputs
def generator_function_two_concurrent_retries_when_all(context):
outputs = []
retry_options = RETRY_OPTIONS
task1 = context.call_activity_with_retry(
"Hello", retry_options, "Tokyo")
task2 = context.call_activity_with_retry(
"Hello", retry_options, "Seattle")
outputs = yield context.task_all([task1, task2])
return outputs
def generator_function_two_concurrent_retries_when_any(context):
outputs = []
retry_options = RETRY_OPTIONS
task1 = context.call_activity_with_retry(
"Hello", retry_options, "Tokyo")
task2 = context.call_activity_with_retry(
"Hello", retry_options, "Seattle")
outputs = yield context.task_any([task1, task2])
return outputs.result
def base_expected_state(output=None, replay_schema: ReplaySchema = ReplaySchema.V1) -> OrchestratorState:
return OrchestratorState(is_done=False, actions=[], output=output, replay_schema=replay_schema.value)
def add_hello_action(state: OrchestratorState, input_: Union[List[str], str]):
retry_options = RETRY_OPTIONS
actions = []
inputs = input_
if not isinstance(input_, list):
inputs = [input_]
for input_ in inputs:
action = CallActivityWithRetryAction(
function_name='Hello', retry_options=retry_options, input_=input_)
actions.append(action)
state._actions.append(actions)
def add_hello_failed_events(
context_builder: ContextBuilder, id_: int, reason: str, details: str):
context_builder.add_task_scheduled_event(name='Hello', id_=id_)
context_builder.add_orchestrator_completed_event()
context_builder.add_orchestrator_started_event()
context_builder.add_task_failed_event(
id_=id_, reason=reason, details=details)
def add_hello_completed_events(
context_builder: ContextBuilder, id_: int, result: str):
context_builder.add_task_scheduled_event(name='Hello', id_=id_)
context_builder.add_orchestrator_completed_event()
context_builder.add_orchestrator_started_event()
context_builder.add_task_completed_event(id_=id_, result=result)
def add_retry_timer_events(context_builder: ContextBuilder, id_: int):
fire_at = context_builder.add_timer_created_event(id_)
context_builder.add_orchestrator_completed_event()
context_builder.add_orchestrator_started_event()
context_builder.add_timer_fired_event(id_=id_, fire_at=fire_at)
def add_two_retriable_events_completing_out_of_order(context_builder: ContextBuilder,
failed_reason, failed_details):
## Schedule tasks
context_builder.add_task_scheduled_event(name='Hello', id_=0) # Tokyo task
context_builder.add_task_scheduled_event(name='Hello', id_=1) # Seattle task
context_builder.add_orchestrator_completed_event()
context_builder.add_orchestrator_started_event()
## Task failures and timer-scheduling
# tasks fail "out of order"
context_builder.add_task_failed_event(
id_=1, reason=failed_reason, details=failed_details) # Seattle task
fire_at_1 = context_builder.add_timer_created_event(2) # Seattle timer
context_builder.add_orchestrator_completed_event()
context_builder.add_orchestrator_started_event()
context_builder.add_task_failed_event(
id_=0, reason=failed_reason, details=failed_details) # Tokyo task
fire_at_2 = context_builder.add_timer_created_event(3) # Tokyo timer
context_builder.add_orchestrator_completed_event()
context_builder.add_orchestrator_started_event()
## fire timers
context_builder.add_timer_fired_event(id_=2, fire_at=fire_at_1) # Seattle timer
context_builder.add_timer_fired_event(id_=3, fire_at=fire_at_2) # Tokyo timer
## Complete events
context_builder.add_task_scheduled_event(name='Hello', id_=4) # Seattle task
context_builder.add_task_scheduled_event(name='Hello', id_=5) # Tokyo task
context_builder.add_orchestrator_completed_event()
context_builder.add_orchestrator_started_event()
context_builder.add_task_completed_event(id_=4, result="\"Hello Seattle!\"")
context_builder.add_task_completed_event(id_=5, result="\"Hello Tokyo!\"")
def test_initial_orchestration_state():
context_builder = ContextBuilder('test_simple_function')
result = get_orchestration_state_result(
context_builder, generator_function)
expected_state = base_expected_state()
add_hello_action(expected_state, 'Tokyo')
expected = expected_state.to_json()
assert_valid_schema(result)
assert_orchestration_state_equals(expected, result)
def test_tokyo_state():
context_builder = ContextBuilder('test_simple_function')
add_hello_completed_events(context_builder, 0, "\"Hello Tokyo!\"")
result = get_orchestration_state_result(
context_builder, generator_function)
expected_state = base_expected_state()
add_hello_action(expected_state, 'Tokyo')
add_hello_action(expected_state, 'Seattle')
expected = expected_state.to_json()
assert_valid_schema(result)
assert_orchestration_state_equals(expected, result)
def test_failed_tokyo_with_retry():
failed_reason = 'Reasons'
failed_details = 'Stuff and Things'
context_builder = ContextBuilder('test_simple_function')
add_hello_failed_events(context_builder, 0, failed_reason, failed_details)
result = get_orchestration_state_result(
context_builder, generator_function)
expected_state = base_expected_state()
add_hello_action(expected_state, 'Tokyo')
expected = expected_state.to_json()
assert_valid_schema(result)
assert_orchestration_state_equals(expected, result)
def test_failed_tokyo_with_timer_entry():
failed_reason = 'Reasons'
failed_details = 'Stuff and Things'
context_builder = ContextBuilder('test_simple_function')
add_hello_failed_events(context_builder, 0, failed_reason, failed_details)
add_retry_timer_events(context_builder, 1)
result = get_orchestration_state_result(
context_builder, generator_function)
expected_state = base_expected_state()
add_hello_action(expected_state, 'Tokyo')
expected = expected_state.to_json()
assert_valid_schema(result)
assert_orchestration_state_equals(expected, result)
def test_failed_tokyo_with_failed_retry():
failed_reason = 'Reasons'
failed_details = 'Stuff and Things'
context_builder = ContextBuilder('test_simple_function')
add_hello_failed_events(context_builder, 0, failed_reason, failed_details)
add_retry_timer_events(context_builder, 1)
add_hello_failed_events(context_builder, 2, failed_reason, failed_details)
result = get_orchestration_state_result(
context_builder, generator_function)
expected_state = base_expected_state()
add_hello_action(expected_state, 'Tokyo')
expected = expected_state.to_json()
assert_valid_schema(result)
assert_orchestration_state_equals(expected, result)
def test_failed_tokyo_with_failed_retry_timer_added():
failed_reason = 'Reasons'
failed_details = 'Stuff and Things'
context_builder = ContextBuilder('test_simple_function')
add_hello_failed_events(context_builder, 0, failed_reason, failed_details)
add_retry_timer_events(context_builder, 1)
add_hello_failed_events(context_builder, 2, failed_reason, failed_details)
add_retry_timer_events(context_builder, 3)
result = get_orchestration_state_result(
context_builder, generator_function)
expected_state = base_expected_state()
add_hello_action(expected_state, 'Tokyo')
expected = expected_state.to_json()
assert_valid_schema(result)
assert_orchestration_state_equals(expected, result)
def test_successful_tokyo_with_failed_retry_timer_added():
failed_reason = 'Reasons'
failed_details = 'Stuff and Things'
context_builder = ContextBuilder('test_simple_function')
add_hello_failed_events(context_builder, 0, failed_reason, failed_details)
add_retry_timer_events(context_builder, 1)
add_hello_completed_events(context_builder, 2, "\"Hello Tokyo!\"")
result = get_orchestration_state_result(
context_builder, generator_function)
expected_state = base_expected_state()
add_hello_action(expected_state, 'Tokyo')
add_hello_action(expected_state, 'Seattle')
expected = expected_state.to_json()
assert_valid_schema(result)
assert_orchestration_state_equals(expected, result)
def test_failed_tokyo_hit_max_attempts():
failed_reason = 'Reasons'
failed_details = 'Stuff and Things'
context_builder = ContextBuilder('test_simple_function')
add_hello_failed_events(context_builder, 0, failed_reason, failed_details)
add_retry_timer_events(context_builder, 1)
add_hello_failed_events(context_builder, 2, failed_reason, failed_details)
add_retry_timer_events(context_builder, 3)
add_hello_failed_events(context_builder, 4, failed_reason, failed_details)
add_retry_timer_events(context_builder, 5)
try:
result = get_orchestration_state_result(
context_builder, generator_function)
# expected an exception
assert False
except Exception as e:
error_label = "\n\n$OutOfProcData$:"
error_str = str(e)
expected_state = base_expected_state()
add_hello_action(expected_state, 'Tokyo')
error_msg = f'{failed_reason} \n {failed_details}'
expected_state._error = error_msg
state_str = expected_state.to_json_string()
expected_error_str = f"{error_msg}{error_label}{state_str}"
assert expected_error_str == error_str
def test_concurrent_retriable_results():
failed_reason = 'Reasons'
failed_details = 'Stuff and Things'
context_builder = ContextBuilder('test_concurrent_retriable')
add_hello_failed_events(context_builder, 0, failed_reason, failed_details)
add_hello_failed_events(context_builder, 1, failed_reason, failed_details)
add_hello_failed_events(context_builder, 2, failed_reason, failed_details)
add_retry_timer_events(context_builder, 3)
add_retry_timer_events(context_builder, 4)
add_retry_timer_events(context_builder, 5)
add_hello_completed_events(context_builder, 6, "\"Hello Tokyo!\"")
add_hello_completed_events(context_builder, 7, "\"Hello Seattle!\"")
add_hello_completed_events(context_builder, 8, "\"Hello London!\"")
result = get_orchestration_state_result(
context_builder, generator_function_concurrent_retries)
expected_state = base_expected_state()
add_hello_action(expected_state, ['Tokyo', 'Seattle', 'London'])
expected_state._output = ["Hello Tokyo!", "Hello Seattle!", "Hello London!"]
expected_state._is_done = True
expected = expected_state.to_json()
assert_valid_schema(result)
assert_orchestration_state_equals(expected, result)
def test_concurrent_retriable_results_unordered_arrival():
failed_reason = 'Reasons'
failed_details = 'Stuff and Things'
context_builder = ContextBuilder('test_concurrent_retriable_unordered_results')
add_hello_failed_events(context_builder, 0, failed_reason, failed_details)
add_hello_failed_events(context_builder, 1, failed_reason, failed_details)
add_hello_failed_events(context_builder, 2, failed_reason, failed_details)
add_retry_timer_events(context_builder, 3)
add_retry_timer_events(context_builder, 4)
add_retry_timer_events(context_builder, 5)
# completion events arrive out of order
add_hello_completed_events(context_builder, 8, "\"Hello London!\"")
add_hello_completed_events(context_builder, 6, "\"Hello Tokyo!\"")
add_hello_completed_events(context_builder, 7, "\"Hello Seattle!\"")
result = get_orchestration_state_result(
context_builder, generator_function_concurrent_retries)
expected_state = base_expected_state()
add_hello_action(expected_state, ['Tokyo', 'Seattle', 'London'])
expected_state._output = ["Hello Tokyo!", "Hello Seattle!", "Hello London!"]
expected_state._is_done = True
expected = expected_state.to_json()
assert_valid_schema(result)
assert_orchestration_state_equals(expected, result)
def test_concurrent_retriable_results_mixed_arrival():
failed_reason = 'Reasons'
failed_details = 'Stuff and Things'
context_builder = ContextBuilder('test_concurrent_retriable_unordered_results')
# one task succeeds, the other two fail at first, and succeed on retry
add_hello_failed_events(context_builder, 1, failed_reason, failed_details)
add_hello_completed_events(context_builder, 0, "\"Hello Tokyo!\"")
add_hello_failed_events(context_builder, 2, failed_reason, failed_details)
add_retry_timer_events(context_builder, 3)
add_retry_timer_events(context_builder, 4)
add_hello_completed_events(context_builder, 6, "\"Hello London!\"")
add_hello_completed_events(context_builder, 5, "\"Hello Seattle!\"")
result = get_orchestration_state_result(
context_builder, generator_function_concurrent_retries)
expected_state = base_expected_state()
add_hello_action(expected_state, ['Tokyo', 'Seattle', 'London'])
expected_state._output = ["Hello Tokyo!", "Hello Seattle!", "Hello London!"]
expected_state._is_done = True
expected = expected_state.to_json()
assert_valid_schema(result)
assert_orchestration_state_equals(expected, result)
def test_concurrent_retriable_results_alternating_taskIDs_when_all():
failed_reason = 'Reasons'
failed_details = 'Stuff and Things'
context_builder = ContextBuilder('test_concurrent_retriable_unordered_results')
add_two_retriable_events_completing_out_of_order(context_builder, failed_reason, failed_details)
result = get_orchestration_state_result(
context_builder, generator_function_two_concurrent_retries_when_all)
expected_state = base_expected_state()
add_hello_action(expected_state, ['Tokyo', 'Seattle'])
expected_state._output = ["Hello Tokyo!", "Hello Seattle!"]
expected_state._is_done = True
expected = expected_state.to_json()
assert_valid_schema(result)
assert_orchestration_state_equals(expected, result)
def test_concurrent_retriable_results_alternating_taskIDs_when_any():
failed_reason = 'Reasons'
failed_details = 'Stuff and Things'
context_builder = ContextBuilder('test_concurrent_retriable_unordered_results')
add_two_retriable_events_completing_out_of_order(context_builder, failed_reason, failed_details)
result = get_orchestration_state_result(
context_builder, generator_function_two_concurrent_retries_when_any)
expected_state = base_expected_state()
add_hello_action(expected_state, ['Tokyo', 'Seattle'])
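# Seattle wins the task_any race here: in the shared history its retried task (id 4)
# completes before Tokyo's (id 5), so only "Hello Seattle!" is expected.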
expected_state._output = "Hello Seattle!"
expected_state._is_done = True
expected = expected_state.to_json()
assert_valid_schema(result)
assert_orchestration_state_equals(expected, result)
| 39.047393
| 105
| 0.771453
| 2,017
| 16,478
| 5.826971
| 0.078334
| 0.129839
| 0.083383
| 0.044669
| 0.858504
| 0.837573
| 0.799966
| 0.773164
| 0.762188
| 0.72841
| 0
| 0.006193
| 0.147469
| 16,478
| 422
| 106
| 39.047393
| 0.830439
| 0.022211
| 0
| 0.705128
| 0
| 0
| 0.071287
| 0.014419
| 0
| 0
| 0
| 0
| 0.086538
| 1
| 0.073718
| false
| 0
| 0.022436
| 0.003205
| 0.112179
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
61118be652ccce475b83031eec628fa91e9d4b50
| 1,804
|
py
|
Python
|
tests/test_organisations.py
|
nestauk/gtr
|
5a7fe88c8429fa78199fb2da42123a7079a5f8ab
|
[
"Apache-2.0"
] | 6
|
2016-06-08T11:41:45.000Z
|
2018-09-12T09:54:08.000Z
|
tests/test_organisations.py
|
nestauk/gtr
|
5a7fe88c8429fa78199fb2da42123a7079a5f8ab
|
[
"Apache-2.0"
] | 2
|
2018-02-14T19:34:57.000Z
|
2018-02-14T19:46:02.000Z
|
tests/test_organisations.py
|
nestauk/gtr
|
5a7fe88c8429fa78199fb2da42123a7079a5f8ab
|
[
"Apache-2.0"
] | 2
|
2017-11-07T15:38:39.000Z
|
2018-02-14T19:10:36.000Z
|
import responses
import gtr
@responses.activate
def test_org():
"Searching by org id works"
with open("tests/results.json") as results:
body = results.read()
responses.add(
responses.GET,
"http://gtr.rcuk.ac.uk/gtr/api/organisations/test",
match_querystring=True,
status=200,
body=body,
content_type="application/json")
res = gtr.Organisations().org("test")
assert res.status_code == 200
assert sorted(res.json().keys()) == ["a",
"b",
"c",
"d"]
@responses.activate
def test_orgs():
"Searching for organisations works"
with open("tests/results.json") as results:
body = results.read()
responses.add(
responses.GET,
"http://gtr.rcuk.ac.uk/gtr/api/organisations?q=test&f=org.pro.t",
match_querystring=True,
status=200,
body=body,
content_type="application/json")
res = gtr.Organisations().orgs("test", field="title")
assert res.status_code == 200
assert sorted(res.json().keys()) == ["a",
"b",
"c",
"d"]
responses.add(
responses.GET,
"http://gtr.rcuk.ac.uk/gtr/api/organisations?q=test&f=org.n",
match_querystring=True,
status=200,
body=body,
content_type="application/json")
res = gtr.Organisations().orgs("test")
assert res.status_code == 200
assert sorted(res.json().keys()) == ["a",
"b",
"c",
"d"]
| 28.1875
| 73
| 0.478936
| 183
| 1,804
| 4.661202
| 0.289617
| 0.042204
| 0.073857
| 0.084408
| 0.84408
| 0.84408
| 0.84408
| 0.84408
| 0.84408
| 0.84408
| 0
| 0.016334
| 0.389135
| 1,804
| 64
| 74
| 28.1875
| 0.757713
| 0.032705
| 0
| 0.764706
| 0
| 0.039216
| 0.187812
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 1
| 0.039216
| false
| 0
| 0.039216
| 0
| 0.078431
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6115bd1aef82af1a73b40d490a82308bf768e176
| 2,041
|
py
|
Python
|
tests/timesheet/test_ignored_entries.py
|
simonbru/taxi
|
3940f520b6d61b5ac7c851c38dfd05da2f65b647
|
[
"WTFPL"
] | 17
|
2016-02-02T14:10:49.000Z
|
2021-11-30T00:04:29.000Z
|
tests/timesheet/test_ignored_entries.py
|
simonbru/taxi
|
3940f520b6d61b5ac7c851c38dfd05da2f65b647
|
[
"WTFPL"
] | 70
|
2015-01-08T17:02:42.000Z
|
2021-09-21T20:08:07.000Z
|
tests/timesheet/test_ignored_entries.py
|
simonbru/taxi
|
3940f520b6d61b5ac7c851c38dfd05da2f65b647
|
[
"WTFPL"
] | 8
|
2015-08-23T12:50:36.000Z
|
2021-11-26T10:33:45.000Z
|
import datetime
from . import create_timesheet
def test_entry_with_question_mark_description_is_ignored():
t = create_timesheet('10.10.2012\nfoo 2 ?')
assert list(t.entries.values())[0][0].ignored
def test_entry_with_question_mark_in_alias_is_ignored():
t = create_timesheet('10.10.2012\nfoo? 2 Foo')
assert list(t.entries.values())[0][0].ignored
def test_entry_without_question_mark_in_alias_is_not_ignored():
t = create_timesheet('10.10.2012\nfoo 2 Foo')
assert not list(t.entries.values())[0][0].ignored
def test_add_ignored_flag_to_alias_makes_entry_ignored():
t = create_timesheet('10.10.2012\nfoo 2 Foo')
t.entries[datetime.date(2012, 10, 10)][0].ignored = True
assert list(t.entries.values())[0][0].ignored
def test_entry_without_start_time_following_duration_is_ignored():
contents = """10.10.2012
foo 0900-1000 baz
bar 2 bar
foo -1200 bar"""
t = create_timesheet(contents)
assert list(t.entries.values())[0][2].ignored
def test_entry_without_start_time_without_previous_entry_is_ignored():
contents = """10.10.2012
foo -1000 baz"""
t = create_timesheet(contents)
assert list(t.entries.values())[0][0].ignored
def test_entry_without_start_time_after_previous_entry_without_end_time_is_ignored():
contents = """10.10.2012
foo 0900-1000 baz
bar 1000-? bar
foo -1200 bar"""
t = create_timesheet(contents)
assert list(t.entries.values())[0][2].ignored
def test_entry_without_end_time_is_ignored():
contents = "10.10.2012\nfoo 1400-? Foo"
t = create_timesheet(contents)
assert list(t.entries.values())[0][0].ignored
def test_add_ignored_flag_to_alias_makes_to_lines_output_question_mark():
t = create_timesheet('10.10.2012\nfoo 2 Foo')
t.entries[datetime.date(2012, 10, 10)][0].alias = 'foo?'
assert t.entries.to_lines()[-1] == 'foo? 2 Foo'
def test_entry_with_zero_duration_is_ignored():
contents = "10.10.2012\nfoo 0 Foo"
t = create_timesheet(contents)
assert list(t.entries.values())[0][0].ignored
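# For reference, the timesheet snippets above use one "DD.MM.YYYY" date line followed
# by one entry per line in the form "<alias> <duration or HHMM-HHMM range> <description>";
# a "?" in the alias or description, or a time range whose start or end cannot be
# resolved, marks the entry as ignored.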
| 30.014706
| 85
| 0.729544
| 325
| 2,041
| 4.286154
| 0.163077
| 0.034458
| 0.11486
| 0.116296
| 0.847093
| 0.826992
| 0.795406
| 0.733668
| 0.733668
| 0.733668
| 0
| 0.091322
| 0.136208
| 2,041
| 67
| 86
| 30.462687
| 0.698809
| 0
| 0
| 0.5
| 0
| 0
| 0.149927
| 0
| 0
| 0
| 0
| 0
| 0.217391
| 1
| 0.217391
| false
| 0
| 0.043478
| 0
| 0.26087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6128e85ec4fb129f97269ce10781864322218473
| 726
|
py
|
Python
|
Day4/Clonig_list.py
|
tushartrip1010/100_days_code_py
|
ee74b429e98cdd8bdf8661cf987da67c9fee5a3e
|
[
"Apache-2.0"
] | null | null | null |
Day4/Clonig_list.py
|
tushartrip1010/100_days_code_py
|
ee74b429e98cdd8bdf8661cf987da67c9fee5a3e
|
[
"Apache-2.0"
] | null | null | null |
Day4/Clonig_list.py
|
tushartrip1010/100_days_code_py
|
ee74b429e98cdd8bdf8661cf987da67c9fee5a3e
|
[
"Apache-2.0"
] | null | null | null |
# Approach 1: slice copy, which builds a new (shallow) copy of the list
def Cloning_List(Given_List):
Result = Given_List[:]
return Result
Given_List = [4, 5, 7, 8, 9, 6, 10, 15]
print(Cloning_List(Given_List))
# Approach 2: extend an empty list with the original's elements (shallow copy)
def Cloning_List(Given_List):
Result = []
Result.extend(Given_List)
return Result
Given_List = [4, 5, 7, 8, 9, 6, 10, 15]
print(Cloning_List(Given_List))
# Approach 3: the list() constructor (shallow copy)
def Cloning_List(Given_List):
Result = list(Given_List)
return Result
Given_List = [4, 5, 7, 8, 9, 6, 10, 15]
print(Cloning_List(Given_List))
# Approach 4: NOT a clone; this only rebinds a second name to the same list object
def Cloning_List(Given_List):
Result = Given_List
return Result
Given_List = [4, 5, 7, 8, 9, 6, 10, 15]
print(Cloning_List(Given_List))
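# A minimal check (illustrative, not part of the original file) showing why Approach 4
# is aliasing rather than cloning: mutating the "copy" also mutates the original,
# unlike the slice copy from Approach 1.
Original_List = [1, 2, 3]
Alias = Original_List          # Approach 4: both names point at the same list object
Slice_Copy = Original_List[:]  # Approach 1: a genuinely new list object
Alias.append(4)
print(Original_List)  # [1, 2, 3, 4]: the alias mutated the original
print(Slice_Copy)     # [1, 2, 3]: the slice copy is unaffected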
| 16.133333
| 40
| 0.62259
| 111
| 726
| 3.855856
| 0.18018
| 0.336449
| 0.273364
| 0.373832
| 0.934579
| 0.934579
| 0.799065
| 0.799065
| 0.799065
| 0.799065
| 0
| 0.081181
| 0.253444
| 726
| 44
| 41
| 16.5
| 0.708487
| 0.064738
| 0
| 0.761905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.190476
| false
| 0
| 0
| 0
| 0.380952
| 0.190476
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
b6782d80f5548f37bf9ad97bbbab0dbf90509601
| 53,990
|
py
|
Python
|
process_results/process_results_superpatches.py
|
ShahiraAbousamra/til_classification
|
cede5453cb46b9c168a1f50f76ded43f8ca3fcbe
|
[
"BSD-3-Clause"
] | 2
|
2022-03-25T15:58:09.000Z
|
2022-03-26T11:28:44.000Z
|
process_results/process_results_superpatches.py
|
ShahiraAbousamra/til_classification
|
cede5453cb46b9c168a1f50f76ded43f8ca3fcbe
|
[
"BSD-3-Clause"
] | null | null | null |
process_results/process_results_superpatches.py
|
ShahiraAbousamra/til_classification
|
cede5453cb46b9c168a1f50f76ded43f8ca3fcbe
|
[
"BSD-3-Clause"
] | 2
|
2022-03-16T00:45:08.000Z
|
2022-03-23T17:28:39.000Z
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as CM
import numpy as np;
import pickle;
import os;
import sys;
#import matplotlib.pyplot as plt;
import seaborn as sns
def process_results_hist(in_dir, out_dir, model_prefix, dataset_name, threshold):
result_files_prefix = os.path.join(in_dir, model_prefix);
out_files_prefix = os.path.join(in_dir, model_prefix);
lbl = np.load(result_files_prefix + '_individual_labels.npy', allow_pickle=True);
pred = np.load(result_files_prefix + '_pred_new.npy', allow_pickle=True);
# label values are 1, 2, 3, or 4 (or empty); treat 4 and empty entries as unscored
lbl[np.where(lbl==4)] = 0 ;
# to get the average score label, first count how many scores each super patch actually has
b = lbl>0;
n = b.sum(axis = 1);
n[np.where(n ==0)] = -1; # set zero counts to -1 to avoid division by zero
# average score label: sum each patch's scores, divide by the count, then round
lbl2 = np.divide(lbl.sum(axis = 1), n);
lbl2 = np.round(lbl2);
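# Worked example (illustrative values, not from the data): a super patch scored
# [1, 3, 0, 0] gives b = [True, True, False, False], n = 2, sum = 4, and
# 4 / 2 = 2.0, which rounds to average-label category 2; a fully unscored row
# gives n = -1 and an average of 0, so it falls outside categories 1-3 used below.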
# get the sub patches that are predicted positive according to threshold
# the pred is super patch -> sub patch -> logit neg, logit pos
pred= pred.squeeze();
pred = pred[:,:,1] ;
pred_b = pred > threshold ;
# get the number of subpatches predicted positive in each superpatch
pred_n = pred_b.sum(axis = 1)
# get the number of subpatches predicted positive in each superpatch in each score label category 1,2,3
pred_n1 = pred_n[np.where(lbl2 == 1)]
pred_n2 = pred_n[np.where(lbl2 == 2)]
pred_n3 = pred_n[np.where(lbl2 == 3)]
# Calculate the histogram of the pos count in each label category
hist1 = np.histogram(pred_n1, bins=np.arange(0,65, 5))
hist1[0].dump(out_files_prefix + '_' + dataset_name + '_hist1y_step5.npy');
hist1[1].dump(out_files_prefix + '_' + dataset_name + '_hist1x_step5.npy');
hist2 = np.histogram(pred_n2, bins=np.arange(0,65, 5))
hist2[0].dump(out_files_prefix + '_' + dataset_name + '_hist2y_step5.npy');
hist2[1].dump(out_files_prefix + '_' + dataset_name + '_hist2x_step5.npy');
hist3 = np.histogram(pred_n3, bins=np.arange(0,65, 5))
hist3[0].dump(out_files_prefix + '_' + dataset_name + '_hist3y_step5.npy');
hist3[1].dump(out_files_prefix + '_' + dataset_name + '_hist3x_step5.npy');
# Visualize the histograms
for i in range(1,4):
histy = np.load(out_files_prefix + '_' + dataset_name + '_hist'+str(i)+'y_step5.npy', allow_pickle=True)
histx = np.load(out_files_prefix + '_' + dataset_name + '_hist'+str(i)+'x_step5.npy', allow_pickle=True)
plt.bar(histx[1:], histy)
#plt.plot(histx[1:], histy, label="inc")
plt.plot(histx[1:], histy)
plt.legend()
plt.xticks(np.arange(0, histx[-1] + 1, 5))
plt.show();
return;
def process_le_results_violin_use_anno_csv_outraw_merged(out_dir, anno_filepath_csv, dataset_name, title_line=None):
#result_files_prefix = os.path.join(in_dir, model_prefix );
anno_arr = np.loadtxt(anno_filepath_csv, delimiter=',', dtype=str)
anno_arr = np.delete(anno_arr , 0, axis=0)
anno_ctype = anno_arr[:, 0]
anno_filepath = anno_arr[:, 1:3]
anno_lbl = anno_arr[:, 3:-1]
anno_arr_lbl_full = anno_arr[:, 3:-1]
print('anno_lbl',anno_lbl.shape)
anno_lbl[anno_lbl=='']='0'
anno_lbl[anno_lbl==' ']='0'
anno_lbl = anno_lbl.astype(int)
anno_lbl[np.where(anno_lbl==4)] = 0 ;
# to get the average score label, first count how many scores each super patch actually has
b = anno_lbl>0;
n = b.sum(axis = 1);
n[np.where(n ==0)] = -1; # set zero counts to -1 to avoid division by zero
# average score label: sum each patch's scores, divide by the count, then round
anno_lbl2 = np.divide(anno_lbl.sum(axis = 1), n);
anno_lbl2 = np.round(anno_lbl2);
ctypes = np.unique(anno_ctype)
pred_patch_count = anno_arr[:,-1]
pred_patch_count_n1 = pred_patch_count[np.where(anno_lbl2== 1)]
pred_patch_count_n2 = pred_patch_count[np.where(anno_lbl2== 2)]
pred_patch_count_n3 = pred_patch_count[np.where(anno_lbl2== 3)]
fig,ax = plt.subplots(1)
sns.set(style="whitegrid")
ax = sns.violinplot(data=[pred_patch_count_n1,pred_patch_count_n2,pred_patch_count_n3], cut=0, ax=ax, palette='tab10')
ax.set(xticklabels=['low', 'medium', 'high'])
fig.savefig(os.path.join(out_dir, 'violin'+'_patch_count_'+'all'+'.png'));
fig,ax = plt.subplots(1)
sns.set(style="whitegrid")
ax = sns.violinplot(data=[pred_patch_count_n1,pred_patch_count_n2,pred_patch_count_n3], cut=0, ax=ax, palette='tab10', scale='width')
ax.set(xticklabels=['low', 'medium', 'high'])
fig.savefig(os.path.join(out_dir, 'violin'+'_patch_count_'+'all'+'_samewidth'+'.png'));
if(title_line is None):
out_filepath_all = os.path.join(out_dir, 'superpatches' + '_lbl_n_pred_'+'all'+'_full_label.txt')
else:
out_filepath_all = os.path.join(out_dir, 'superpatches' + '_lbl_n_pred_'+'all'+'_full_label_wtitle.txt')
if((not(title_line is None)) and (not os.path.exists(out_filepath_all))):
with open(out_filepath_all, 'a') as file:
file.write(title_line + '\n')
ctypes = np.unique(anno_ctype)
for ctype in ctypes:
anno_lbl2_ctype = anno_lbl2[np.where(anno_ctype == ctype)]
anno_arr_lbl_full_ctype = anno_arr_lbl_full[np.where(anno_ctype == ctype)]
#pred_cell_count_ctype = pred_cell_count[np.where(anno_ctype == ctype)]
pred_patch_count_ctype = pred_patch_count[np.where(anno_ctype == ctype)]
anno_arr_ctype = anno_arr[np.where(anno_ctype == ctype)]
#pred_cell_count_n1 = pred_cell_count_ctype[np.where(anno_lbl2_ctype == 1)]
#pred_cell_count_n2 = pred_cell_count_ctype[np.where(anno_lbl2_ctype == 2)]
#pred_cell_count_n3 = pred_cell_count_ctype[np.where(anno_lbl2_ctype == 3)]
pred_patch_count_n1 = pred_patch_count_ctype[np.where(anno_lbl2_ctype == 1)]
pred_patch_count_n2 = pred_patch_count_ctype[np.where(anno_lbl2_ctype == 2)]
pred_patch_count_n3 = pred_patch_count_ctype[np.where(anno_lbl2_ctype == 3)]
##fig,ax = plt.subplots(1)
##sns.set(style="whitegrid")
##ax = sns.violinplot(data=[pred_cell_count_n1,pred_cell_count_n2,pred_cell_count_n3], cut=0, ax=ax, palette='tab10')
##ax.set(xticklabels=['low', 'medium', 'high'])
##fig.savefig(os.path.join(out_dir, 'violin'+'_cell_count_'+ctype+'.png'));
#fig,ax = plt.subplots(1)
#sns.set(style="whitegrid")
#ax = sns.violinplot(data=[pred_cell_count_n1,pred_cell_count_n2,pred_cell_count_n3], cut=0, ax=ax, palette='tab10', scale='width')
#ax.set(xticklabels=['low', 'medium', 'high'])
#fig.savefig(os.path.join(out_dir, 'violin'+'_cell_count_'+ctype+'_samewidth'+'.png'));
fig,ax = plt.subplots(1)
sns.set(style="whitegrid")
ax = sns.violinplot(data=[pred_patch_count_n1,pred_patch_count_n2,pred_patch_count_n3], cut=0, ax=ax, palette='tab10')
ax.set(xticklabels=['low', 'medium', 'high'])
fig.savefig(os.path.join(out_dir, 'violin'+'_patch_count_'+ctype+'.png'));
fig,ax = plt.subplots(1)
sns.set(style="whitegrid")
ax = sns.violinplot(data=[pred_patch_count_n1,pred_patch_count_n2,pred_patch_count_n3], cut=0, ax=ax, palette='tab10', scale='width')
ax.set(xticklabels=['low', 'medium', 'high'])
fig.savefig(os.path.join(out_dir, 'violin'+'_patch_count_'+ctype+'_samewidth'+'.png'));
if(title_line is None):
out_filepath = os.path.join(out_dir, 'superpatches' + '_lbl_n_pred_'+ctype+'_full_label.txt')
else:
out_filepath = os.path.join(out_dir, 'superpatches' + '_lbl_n_pred_'+ctype+'_full_label_wtitle.txt')
with open(out_filepath, 'w') as file:
if(not(title_line is None)):
file.write(title_line + '\n')
for i in range(anno_arr_ctype.shape[0]):
#file.write(anno_arr_ctype[i,1]+'_'+anno_arr_ctype[i,2] + ','+str(int(anno_lbl2_ctype[i]))+ ',' + ctype + ',' + str(pred_cell_count_ctype[i]) + ',' + str(pred_patch_count_ctype[i]) +'\n') ;
#file.write(anno_arr_ctype[i,1]+'_'+anno_arr_ctype[i,2] + ','+str(int(anno_lbl2_ctype[i]))+ ',' + ctype + ',' + str(pred_cell_count_ctype[i]) + ',' + str(pred_patch_count_ctype[i]) ) ;
file.write(anno_arr_ctype[i,1]+'_'+anno_arr_ctype[i,2] + ','+str(int(anno_lbl2_ctype[i]))+ ',' + ctype + ',' + str(pred_patch_count_ctype[i]) ) ;
for j in range(anno_arr_lbl_full_ctype.shape[-1]):
file.write(',' + anno_arr_lbl_full_ctype[i,j]) ;
file.write('\n') ;
# all ctypes together in one file
with open(out_filepath_all, 'a') as file:
for i in range(anno_arr_ctype.shape[0]):
#file.write(anno_arr_ctype[i,1]+'_'+anno_arr_ctype[i,2] + ','+str(int(anno_lbl2_ctype[i]))+ ',' + ctype + ',' + str(pred_cell_count_ctype[i]) + ',' + str(pred_patch_count_ctype[i]) +'\n') ;
#file.write(anno_arr_ctype[i,1]+'_'+anno_arr_ctype[i,2] + ','+str(int(anno_lbl2_ctype[i]))+ ',' + ctype + ',' + str(pred_cell_count_ctype[i]) + ',' + str(pred_patch_count_ctype[i]) ) ;
file.write(anno_arr_ctype[i,1]+'_'+anno_arr_ctype[i,2] + ','+str(int(anno_lbl2_ctype[i]))+ ',' + ctype + ',' + str(pred_patch_count_ctype[i]) ) ;
for j in range(anno_arr_lbl_full_ctype.shape[-1]):
file.write(',' + anno_arr_lbl_full_ctype[i,j]) ;
file.write('\n') ;
def process_results_violin_use_anno_csv_outraw_merged(in_dir, out_dir, anno_filepath_csv, model_prefix, dataset_name, threshold, title_line=None):
result_files_prefix = os.path.join(in_dir, model_prefix );
anno_arr = np.loadtxt(anno_filepath_csv, delimiter=',', dtype=str)
anno_arr = np.delete(anno_arr , 0, axis=0)
anno_ctype = anno_arr[:, 0]
anno_filepath = anno_arr[:, 1:3]
anno_lbl = anno_arr[:, 3:-1]
anno_arr_lbl_full = anno_arr[:, 3:-1]
print('anno_lbl',anno_lbl.shape)
anno_lbl[anno_lbl=='']='0'
anno_lbl[anno_lbl==' ']='0'
anno_lbl = anno_lbl.astype(int)
anno_lbl[np.where(anno_lbl==4)] = 0 ;
# to get the average score label, first count how many scores each super patch actually has
b = anno_lbl>0;
n = b.sum(axis = 1);
n[np.where(n ==0)] = -1; # set zero counts to -1 to avoid division by zero
# average score label: sum each patch's scores, divide by the count, then round
anno_lbl2 = np.divide(anno_lbl.sum(axis = 1), n);
anno_lbl2 = np.round(anno_lbl2);
ctypes = np.unique(anno_ctype)
#pred = np.load(result_files_prefix + '_pred_new.npy');
if(os.path.isfile(result_files_prefix + '_pred_new.npy')):
pred = np.load(result_files_prefix + '_pred_new.npy', allow_pickle=True);
elif(os.path.isfile(result_files_prefix + '_pred_prob.npy')):
pred = np.load(result_files_prefix + '_pred_prob.npy', allow_pickle=True);
filenames = np.array(pickle.load(open(result_files_prefix + '_filename.pkl', 'rb')));
print(filenames, filenames.shape)
#print('pred.shape = ', pred.shape)
pred= pred.squeeze();
print('pred.shape = ', pred.shape)
#pred = pred[:,:,1] ;
if(len(pred.shape) > 2 and pred.shape[2]>1):
pred = pred[:,:,1];
elif(len(pred.shape) > 2 and pred.shape[2]==1):
pred = pred[:,:,0];
elif(len(pred.shape) == 2):
pred = pred;
pred_b = pred > threshold ;
# get the number of subpatches predicted positive in each superpatch
pred_n = pred_b.sum(axis = 1)
pred_patch_count = np.zeros(anno_lbl2.shape)
for i in range(anno_arr.shape[0]):
filename = anno_filepath[i,0] + '_' + anno_filepath[i,1] + '.png'
print(filename)
print(pred_n[filenames==filename])
pred_patch_count[i] = pred_n[filenames==filename]
pred_patch_count_n1 = pred_patch_count[np.where(anno_lbl2== 1)]
pred_patch_count_n2 = pred_patch_count[np.where(anno_lbl2== 2)]
pred_patch_count_n3 = pred_patch_count[np.where(anno_lbl2== 3)]
fig,ax = plt.subplots(1)
sns.set(style="whitegrid")
ax = sns.violinplot(data=[pred_patch_count_n1,pred_patch_count_n2,pred_patch_count_n3], cut=0, ax=ax, palette='tab10')
ax.set(xticklabels=['low', 'medium', 'high'])
fig.savefig(os.path.join(out_dir, 'violin'+'_patch_count_'+'all'+'.png'));
fig,ax = plt.subplots(1)
sns.set(style="whitegrid")
ax = sns.violinplot(data=[pred_patch_count_n1,pred_patch_count_n2,pred_patch_count_n3], cut=0, ax=ax, palette='tab10', scale='width')
ax.set(xticklabels=['low', 'medium', 'high'])
fig.savefig(os.path.join(out_dir, 'violin'+'_patch_count_'+'all'+'_samewidth'+'.png'));
if(title_line is None):
out_filepath_all = os.path.join(out_dir, 'superpatches' + '_lbl_n_pred_'+'all'+'_full_label.txt')
else:
out_filepath_all = os.path.join(out_dir, 'superpatches' + '_lbl_n_pred_'+'all'+'_full_label_wtitle.txt')
if((not(title_line is None)) and (not os.path.exists(out_filepath_all))):
with open(out_filepath_all, 'a') as file:
file.write(title_line + '\n')
ctypes = np.unique(anno_ctype)
for ctype in ctypes:
anno_lbl2_ctype = anno_lbl2[np.where(anno_ctype == ctype)]
anno_arr_lbl_full_ctype = anno_arr_lbl_full[np.where(anno_ctype == ctype)]
#pred_cell_count_ctype = pred_cell_count[np.where(anno_ctype == ctype)]
pred_patch_count_ctype = pred_patch_count[np.where(anno_ctype == ctype)]
anno_arr_ctype = anno_arr[np.where(anno_ctype == ctype)]
#pred_cell_count_n1 = pred_cell_count_ctype[np.where(anno_lbl2_ctype == 1)]
#pred_cell_count_n2 = pred_cell_count_ctype[np.where(anno_lbl2_ctype == 2)]
#pred_cell_count_n3 = pred_cell_count_ctype[np.where(anno_lbl2_ctype == 3)]
pred_patch_count_n1 = pred_patch_count_ctype[np.where(anno_lbl2_ctype == 1)]
pred_patch_count_n2 = pred_patch_count_ctype[np.where(anno_lbl2_ctype == 2)]
pred_patch_count_n3 = pred_patch_count_ctype[np.where(anno_lbl2_ctype == 3)]
##fig,ax = plt.subplots(1)
##sns.set(style="whitegrid")
##ax = sns.violinplot(data=[pred_cell_count_n1,pred_cell_count_n2,pred_cell_count_n3], cut=0, ax=ax, palette='tab10')
##ax.set(xticklabels=['low', 'medium', 'high'])
##fig.savefig(os.path.join(out_dir, 'violin'+'_cell_count_'+ctype+'.png'));
#fig,ax = plt.subplots(1)
#sns.set(style="whitegrid")
#ax = sns.violinplot(data=[pred_cell_count_n1,pred_cell_count_n2,pred_cell_count_n3], cut=0, ax=ax, palette='tab10', scale='width')
#ax.set(xticklabels=['low', 'medium', 'high'])
#fig.savefig(os.path.join(out_dir, 'violin'+'_cell_count_'+ctype+'_samewidth'+'.png'));
fig,ax = plt.subplots(1)
sns.set(style="whitegrid")
ax = sns.violinplot(data=[pred_patch_count_n1,pred_patch_count_n2,pred_patch_count_n3], cut=0, ax=ax, palette='tab10')
ax.set(xticklabels=['low', 'medium', 'high'])
fig.savefig(os.path.join(out_dir, 'violin'+'_patch_count_'+ctype+'.png'));
fig,ax = plt.subplots(1)
sns.set(style="whitegrid")
ax = sns.violinplot(data=[pred_patch_count_n1,pred_patch_count_n2,pred_patch_count_n3], cut=0, ax=ax, palette='tab10', scale='width')
ax.set(xticklabels=['low', 'medium', 'high'])
fig.savefig(os.path.join(out_dir, 'violin'+'_patch_count_'+ctype+'_samewidth'+'.png'));
if(title_line is None):
out_filepath = os.path.join(out_dir, 'superpatches' + '_lbl_n_pred_'+ctype+'_full_label.txt')
else:
out_filepath = os.path.join(out_dir, 'superpatches' + '_lbl_n_pred_'+ctype+'_full_label_wtitle.txt')
with open(out_filepath, 'w') as file:
if(not(title_line is None)):
file.write(title_line + '\n')
for i in range(anno_arr_ctype.shape[0]):
#file.write(anno_arr_ctype[i,1]+'_'+anno_arr_ctype[i,2] + ','+str(int(anno_lbl2_ctype[i]))+ ',' + ctype + ',' + str(pred_cell_count_ctype[i]) + ',' + str(pred_patch_count_ctype[i]) +'\n') ;
#file.write(anno_arr_ctype[i,1]+'_'+anno_arr_ctype[i,2] + ','+str(int(anno_lbl2_ctype[i]))+ ',' + ctype + ',' + str(pred_cell_count_ctype[i]) + ',' + str(pred_patch_count_ctype[i]) ) ;
file.write(anno_arr_ctype[i,1]+'_'+anno_arr_ctype[i,2] + ','+str(int(anno_lbl2_ctype[i]))+ ',' + ctype + ',' + str(pred_patch_count_ctype[i]) ) ;
for j in range(anno_arr_lbl_full_ctype.shape[-1]):
file.write(',' + anno_arr_lbl_full_ctype[i,j]) ;
file.write('\n') ;
# all ctypes together in one file
with open(out_filepath_all, 'a') as file:
for i in range(anno_arr_ctype.shape[0]):
#file.write(anno_arr_ctype[i,1]+'_'+anno_arr_ctype[i,2] + ','+str(int(anno_lbl2_ctype[i]))+ ',' + ctype + ',' + str(pred_cell_count_ctype[i]) + ',' + str(pred_patch_count_ctype[i]) +'\n') ;
#file.write(anno_arr_ctype[i,1]+'_'+anno_arr_ctype[i,2] + ','+str(int(anno_lbl2_ctype[i]))+ ',' + ctype + ',' + str(pred_cell_count_ctype[i]) + ',' + str(pred_patch_count_ctype[i]) ) ;
file.write(anno_arr_ctype[i,1]+'_'+anno_arr_ctype[i,2] + ','+str(int(anno_lbl2_ctype[i]))+ ',' + ctype + ',' + str(pred_patch_count_ctype[i]) ) ;
for j in range(anno_arr_lbl_full_ctype.shape[-1]):
file.write(',' + anno_arr_lbl_full_ctype[i,j]) ;
file.write('\n') ;
def process_results_violin_use_anno_csv_outraw_reviewed(in_dir, out_dir, anno_filepath_csv, model_prefix, dataset_name, threshold, title_line=None):
result_files_prefix = os.path.join(in_dir, model_prefix );
anno_arr = np.loadtxt(anno_filepath_csv, delimiter=',', dtype=str)
anno_arr = np.delete(anno_arr , 0, axis=0)
anno_ctype = anno_arr[:, 0]
anno_filepath = anno_arr[:, 1]
anno_lbl = anno_arr[:, 2:5]
anno_arr_lbl_full = anno_arr[:, 2:5]
print('anno_lbl',anno_lbl.shape)
anno_lbl[anno_lbl=='']='0'
anno_lbl[anno_lbl==' ']='0'
anno_lbl = anno_lbl.astype(int)
anno_lbl[np.where(anno_lbl==4)] = 0 ;
# to get the average score label, first count how many scores each super patch actually has
b = anno_lbl>0;
n = b.sum(axis = 1);
n[np.where(n ==0)] = -1; # set zero counts to -1 to avoid division by zero
# average score label: sum each patch's scores, divide by the count, then round
anno_lbl2 = np.divide(anno_lbl.sum(axis = 1), n);
anno_lbl2 = np.round(anno_lbl2);
ctypes = np.unique(anno_ctype)
#pred = np.load(result_files_prefix + '_pred_new.npy');
if(os.path.isfile(result_files_prefix + '_pred_new.npy')):
pred = np.load(result_files_prefix + '_pred_new.npy', allow_pickle=True);
elif(os.path.isfile(result_files_prefix + '_pred_prob.npy')):
pred = np.load(result_files_prefix + '_pred_prob.npy', allow_pickle=True);
filenames = np.array(pickle.load(open(result_files_prefix + '_filename.pkl', 'rb')));
print(filenames, filenames.shape)
#print('pred.shape = ', pred.shape)
pred= pred.squeeze();
print('pred.shape = ', pred.shape)
#pred = pred[:,:,1] ;
if(len(pred.shape) > 2 and pred.shape[2]>1):
pred = pred[:,:,1];
elif(len(pred.shape) > 2 and pred.shape[2]==1):
pred = pred[:,:,0];
elif(len(pred.shape) == 2):
pred = pred;
pred_b = pred > threshold ;
# get the number of subpatches predicted positive in each superpatch
pred_n = pred_b.sum(axis = 1)
pred_patch_count = np.zeros(anno_lbl2.shape)
for i in range(anno_arr.shape[0]):
filename = anno_filepath[i]
print(filename)
print(pred_n[filenames==filename])
pred_patch_count[i] = pred_n[filenames==filename]
pred_patch_count_n1 = pred_patch_count[np.where(anno_lbl2== 1)]
pred_patch_count_n2 = pred_patch_count[np.where(anno_lbl2== 2)]
pred_patch_count_n3 = pred_patch_count[np.where(anno_lbl2== 3)]
fig,ax = plt.subplots(1)
sns.set(style="whitegrid")
ax = sns.violinplot(data=[pred_patch_count_n1,pred_patch_count_n2,pred_patch_count_n3], cut=0, ax=ax, palette='tab10')
ax.set(xticklabels=['low', 'medium', 'high'])
fig.savefig(os.path.join(out_dir, 'violin'+'_patch_count_'+'all'+'.png'));
fig,ax = plt.subplots(1)
sns.set(style="whitegrid")
ax = sns.violinplot(data=[pred_patch_count_n1,pred_patch_count_n2,pred_patch_count_n3], cut=0, ax=ax, palette='tab10', scale='width')
ax.set(xticklabels=['low', 'medium', 'high'])
fig.savefig(os.path.join(out_dir, 'violin'+'_patch_count_'+'all'+'_samewidth'+'.png'));
if(title_line is None):
out_filepath_all = os.path.join(out_dir, 'superpatches' + '_lbl_n_pred_'+'all'+'_full_label.txt')
else:
out_filepath_all = os.path.join(out_dir, 'superpatches' + '_lbl_n_pred_'+'all'+'_full_label_wtitle.txt')
if((not(title_line is None)) and (not os.path.exists(out_filepath_all))):
with open(out_filepath_all, 'a') as file:
file.write(title_line + '\n')
ctypes = np.unique(anno_ctype)
for ctype in ctypes:
anno_lbl2_ctype = anno_lbl2[np.where(anno_ctype == ctype)]
anno_arr_lbl_full_ctype = anno_arr_lbl_full[np.where(anno_ctype == ctype)]
#pred_cell_count_ctype = pred_cell_count[np.where(anno_ctype == ctype)]
pred_patch_count_ctype = pred_patch_count[np.where(anno_ctype == ctype)]
anno_arr_ctype = anno_arr[np.where(anno_ctype == ctype)]
#pred_cell_count_n1 = pred_cell_count_ctype[np.where(anno_lbl2_ctype == 1)]
#pred_cell_count_n2 = pred_cell_count_ctype[np.where(anno_lbl2_ctype == 2)]
#pred_cell_count_n3 = pred_cell_count_ctype[np.where(anno_lbl2_ctype == 3)]
pred_patch_count_n1 = pred_patch_count_ctype[np.where(anno_lbl2_ctype == 1)]
pred_patch_count_n2 = pred_patch_count_ctype[np.where(anno_lbl2_ctype == 2)]
pred_patch_count_n3 = pred_patch_count_ctype[np.where(anno_lbl2_ctype == 3)]
##fig,ax = plt.subplots(1)
##sns.set(style="whitegrid")
##ax = sns.violinplot(data=[pred_cell_count_n1,pred_cell_count_n2,pred_cell_count_n3], cut=0, ax=ax, palette='tab10')
##ax.set(xticklabels=['low', 'medium', 'high'])
##fig.savefig(os.path.join(out_dir, 'violin'+'_cell_count_'+ctype+'.png'));
#fig,ax = plt.subplots(1)
#sns.set(style="whitegrid")
#ax = sns.violinplot(data=[pred_cell_count_n1,pred_cell_count_n2,pred_cell_count_n3], cut=0, ax=ax, palette='tab10', scale='width')
#ax.set(xticklabels=['low', 'medium', 'high'])
#fig.savefig(os.path.join(out_dir, 'violin'+'_cell_count_'+ctype+'_samewidth'+'.png'));
fig,ax = plt.subplots(1)
sns.set(style="whitegrid")
ax = sns.violinplot(data=[pred_patch_count_n1,pred_patch_count_n2,pred_patch_count_n3], cut=0, ax=ax, palette='tab10')
ax.set(xticklabels=['low', 'medium', 'high'])
fig.savefig(os.path.join(out_dir, 'violin'+'_patch_count_'+ctype+'.png'));
fig,ax = plt.subplots(1)
sns.set(style="whitegrid")
ax = sns.violinplot(data=[pred_patch_count_n1,pred_patch_count_n2,pred_patch_count_n3], cut=0, ax=ax, palette='tab10', scale='width')
ax.set(xticklabels=['low', 'medium', 'high'])
fig.savefig(os.path.join(out_dir, 'violin'+'_patch_count_'+ctype+'_samewidth'+'.png'));
if(title_line is None):
out_filepath = os.path.join(out_dir, 'superpatches' + '_lbl_n_pred_'+ctype+'_full_label.txt')
else:
out_filepath = os.path.join(out_dir, 'superpatches' + '_lbl_n_pred_'+ctype+'_full_label_wtitle.txt')
with open(out_filepath, 'w') as file:
if(not(title_line is None)):
file.write(title_line + '\n')
for i in range(anno_arr_ctype.shape[0]):
#file.write(anno_arr_ctype[i,1]+'_'+anno_arr_ctype[i,2] + ','+str(int(anno_lbl2_ctype[i]))+ ',' + ctype + ',' + str(pred_cell_count_ctype[i]) + ',' + str(pred_patch_count_ctype[i]) +'\n') ;
#file.write(anno_arr_ctype[i,1]+'_'+anno_arr_ctype[i,2] + ','+str(int(anno_lbl2_ctype[i]))+ ',' + ctype + ',' + str(pred_cell_count_ctype[i]) + ',' + str(pred_patch_count_ctype[i]) ) ;
file.write(anno_arr_ctype[i,1]+'_'+anno_arr_ctype[i,2] + ','+str(int(anno_lbl2_ctype[i]))+ ',' + ctype + ',' + str(pred_patch_count_ctype[i]) ) ;
for j in range(anno_arr_lbl_full_ctype.shape[-1]):
file.write(',' + anno_arr_lbl_full_ctype[i,j]) ;
file.write('\n') ;
# all ctypes together in one file
with open(out_filepath_all, 'a') as file:
for i in range(anno_arr_ctype.shape[0]):
#file.write(anno_arr_ctype[i,1]+'_'+anno_arr_ctype[i,2] + ','+str(int(anno_lbl2_ctype[i]))+ ',' + ctype + ',' + str(pred_cell_count_ctype[i]) + ',' + str(pred_patch_count_ctype[i]) +'\n') ;
#file.write(anno_arr_ctype[i,1]+'_'+anno_arr_ctype[i,2] + ','+str(int(anno_lbl2_ctype[i]))+ ',' + ctype + ',' + str(pred_cell_count_ctype[i]) + ',' + str(pred_patch_count_ctype[i]) ) ;
file.write(anno_arr_ctype[i,1]+'_'+anno_arr_ctype[i,2] + ','+str(int(anno_lbl2_ctype[i]))+ ',' + ctype + ',' + str(pred_patch_count_ctype[i]) ) ;
for j in range(anno_arr_lbl_full_ctype.shape[-1]):
file.write(',' + anno_arr_lbl_full_ctype[i,j]) ;
file.write('\n') ;
def process_results_violin_use_anno_csv(in_dir, out_dir, csv_path, model_prefix, dataset_name, threshold, plot_type=1, exclude_ctype=None, include_ctype=None):
anno_arr = np.loadtxt(csv_path, delimiter=',', dtype=str)
result_files_prefix = os.path.join(in_dir, model_prefix );
out_files_prefix = os.path.join(in_dir, model_prefix + '_'+dataset_name);
#lbl = np.load(result_files_prefix + '_individual_labels.npy', allow_pickle=True);
anno_lbl = anno_arr[:, 2:]
print('anno_lbl',anno_lbl.shape)
anno_lbl[anno_lbl=='']='0'
anno_lbl[anno_lbl==' ']='0'
anno_lbl = anno_lbl.astype(int)
#pred = np.load(result_files_prefix + '_pred_new.npy');
if(os.path.isfile(result_files_prefix + '_pred_new.npy')):
pred = np.load(result_files_prefix + '_pred_new.npy', allow_pickle=True);
elif(os.path.isfile(result_files_prefix + '_pred_prob.npy')):
pred = np.load(result_files_prefix + '_pred_prob.npy', allow_pickle=True);
filenames = pickle.load(open(result_files_prefix + '_filename.pkl', 'rb'));
#print('pred.shape = ', pred.shape)
pred= pred.squeeze();
print('pred.shape = ', pred.shape)
#pred = pred[:,:,1] ;
if(len(pred.shape) > 2 and pred.shape[2]>1):
pred = pred[:,:,1];
elif(len(pred.shape) > 2 and pred.shape[2]==1):
pred = pred[:,:,0];
elif(len(pred.shape) == 2):
pred = pred;
lbl = np.zeros((pred.shape[0], anno_lbl.shape[-1]))
for i in range (len(filenames)):
f = filenames[i]
#anno_row = anno_arr[np.where(anno_arr[:,1]==f)]
lbl[i] = anno_lbl[np.where(anno_arr[:,1]==f)]
ctype = pickle.load(open(result_files_prefix + '_cancer_type.pkl', 'rb'));
ctype = np.array(ctype);
if(not (exclude_ctype is None)):
ctype = pickle.load(open(result_files_prefix + '_cancer_type.pkl', 'rb'));
ctype = np.array(ctype);
pred = pred[np.where(ctype!=exclude_ctype)]
lbl = lbl[np.where(ctype!=exclude_ctype)]
ctype = ctype[np.where(ctype!=exclude_ctype)]
print('include_ctype=',include_ctype)
if(not (include_ctype is None)):
ctype = pickle.load(open(result_files_prefix + '_cancer_type.pkl', 'rb'));
ctype = np.array(ctype);
pred = pred[np.where(ctype==include_ctype)]
lbl = lbl[np.where(ctype==include_ctype)]
ctype = ctype[np.where(ctype==include_ctype)]
print(np.where(ctype==include_ctype)[0])
filenames = np.array(filenames)[np.where(ctype==include_ctype)]
print(filenames)
# label values are 1, 2, 3, or 4 (or empty); treat 4 and empty entries as unscored
lbl[np.where(lbl==4)] = 0 ;
# to get the average score label, first count how many scores each super patch actually has
b = lbl>0;
n = b.sum(axis = 1);
n[np.where(n ==0)] = -1; # set zero counts to -1 to avoid division by zero
# average score label: sum each patch's scores, divide by the count, then round
lbl2 = np.divide(lbl.sum(axis = 1), n);
lbl2 = np.round(lbl2);
print('lbl=1', len(np.where(lbl2 == 1)[0])) # 23
print('lbl=2', len(np.where(lbl2 == 2)[0])) # 29
print('lbl=3', len(np.where(lbl2 == 3)[0])) # 11
# get the sub patches that are predicted positive according to threshold
# the pred is super patch -> sub patch -> logit neg, logit pos
pred_b = pred > threshold ;
# get the number of subpatches predicted positive in each superpatch
pred_n = pred_b.sum(axis = 1)
print('np.unique(pred_n)',np.unique(pred_n))
# get the number of subpatches predicted positive in each superpatch in each score label category 1,2,3
pred_n1 = pred_n[np.where(lbl2 == 1)]
pred_n2 = pred_n[np.where(lbl2 == 2)]
pred_n3 = pred_n[np.where(lbl2 == 3)]
ctype_name = ''
if(not (include_ctype is None)):
ctype_name = '_'+include_ctype
with open(os.path.join(out_dir, model_prefix +ctype_name+ '_lbl.txt'), 'w') as file:
for i in range(len(lbl)):
file.write(filenames[i] + ',' + str(pred_n[i]) + ','+str(int(lbl2[i]))+ ',' + str(ctype[i])+'\n');
#print(pred_n1) ;
#print(np.where(lbl2 == 1)) ;
#print(lbl[np.where(lbl2 == 1)]) ;
if(plot_type == 0 or plot_type == 1 or plot_type == 2):
if(not(0 in pred_n1)):
pred_n1 = np.concatenate((pred_n1, [0]))
if(not(64 in pred_n1)):
pred_n1 = np.concatenate((pred_n1, [64]))
if(not(0 in pred_n2)):
pred_n2 = np.concatenate((pred_n2, [0]))
if(not(64 in pred_n2)):
pred_n2 = np.concatenate((pred_n2, [64]))
if(not(0 in pred_n3)):
pred_n3 = np.concatenate((pred_n3, [0]))
if(not(64 in pred_n3)):
pred_n3 = np.concatenate((pred_n3, [64]))
fig,ax = plt.subplots(1)
sns.set(style="whitegrid")
#data = {'pred_n':pred_n}
#sns.violinplot(y=pred_n3, bw=1) # multiplies bw by the std to control smoothness
#sns.violinplot(y=pred_n3, bw=1, cut=0) # cut =0 means do not extend beyond data range default is 2
#sns.violinplot(y=pred_n3, bw=1, cut=0, scale='count') # scale reflects the relative shapes of the different violins 'width:same width, area:same area, count:width relative to count in category'
#sns.violinplot(y=pred_n3, bw=1, cut=0, scale='width')
#sns.violinplot(y=pred_n3, bw=1, cut=0, width=0.5) # the width of the violin default is 0.8
if(plot_type == 0):
ax = sns.violinplot(data=[pred_n1,pred_n2,pred_n3], cut=0, width=0.7, scale='width', ax=ax)
elif(plot_type == 1):
ax = sns.violinplot(data=[pred_n1,pred_n2,pred_n3], bw=1, cut=0, width=0.5, scale='width', ax=ax) # original (1)
elif(plot_type == 2 or plot_type == 3):
ax = sns.violinplot(data=[pred_n1,pred_n2,pred_n3], cut=0, ax=ax)
ax.set(xticklabels=['low', 'medium', 'high'])
#plt.show();
fig.savefig(os.path.join(out_dir, model_prefix +'_' +dataset_name+'_violin'+'_type'+str(plot_type)+'_review.png'));
return;
def process_results_violin(in_dir, out_dir, model_prefix, dataset_name, threshold, plot_type=1, exclude_ctype=None, include_ctype=None):
result_files_prefix = os.path.join(in_dir, model_prefix );
out_files_prefix = os.path.join(in_dir, model_prefix + '_'+dataset_name);
lbl = np.load(result_files_prefix + '_individual_labels.npy');
#pred = np.load(result_files_prefix + '_pred_new.npy');
if(os.path.isfile(result_files_prefix + '_pred_new.npy')):
pred = np.load(result_files_prefix + '_pred_new.npy', allow_pickle=True);
elif(os.path.isfile(result_files_prefix + '_pred_prob.npy')):
pred = np.load(result_files_prefix + '_pred_prob.npy', allow_pickle=True);
filenames = pickle.load(open(result_files_prefix + '_filename.pkl', 'rb'));
#print('pred.shape = ', pred.shape)
pred= pred.squeeze();
print('pred.shape = ', pred.shape)
#pred = pred[:,:,1] ;
if(len(pred.shape) > 2 and pred.shape[2]>1):
pred = pred[:,:,1];
elif(len(pred.shape) > 2 and pred.shape[2]==1):
pred = pred[:,:,0];
elif(len(pred.shape) == 2):
pred = pred;
ctype = pickle.load(open(result_files_prefix + '_cancer_type.pkl', 'rb'));
ctype = np.array(ctype);
if(not (exclude_ctype is None)):
ctype = pickle.load(open(result_files_prefix + '_cancer_type.pkl', 'rb'));
ctype = np.array(ctype);
pred = pred[np.where(ctype!=exclude_ctype)]
lbl = lbl[np.where(ctype!=exclude_ctype)]
ctype = ctype[np.where(ctype!=exclude_ctype)]
print('include_ctype=',include_ctype)
if(not (include_ctype is None)):
ctype = pickle.load(open(result_files_prefix + '_cancer_type.pkl', 'rb'));
ctype = np.array(ctype);
pred = pred[np.where(ctype==include_ctype)]
lbl = lbl[np.where(ctype==include_ctype)]
ctype = ctype[np.where(ctype==include_ctype)]
print(np.where(ctype==include_ctype)[0])
filenames = np.array(filenames)[np.where(ctype==include_ctype)]
print(filenames)
# label values are 1, 2, 3, or 4 (or empty); treat 4 and empty entries as unscored
lbl[np.where(lbl==4)] = 0 ;
# to get the average score label, first count how many scores each super patch actually has
b = lbl>0;
n = b.sum(axis = 1);
n[np.where(n ==0)] = -1; # set zero counts to -1 to avoid division by zero
# average score label: sum each patch's scores, divide by the count, then round
lbl2 = np.divide(lbl.sum(axis = 1), n);
lbl2 = np.round(lbl2);
print('lbl=1', len(np.where(lbl2 == 1)[0])) # 23
print('lbl=2', len(np.where(lbl2 == 2)[0])) # 29
print('lbl=3', len(np.where(lbl2 == 3)[0])) # 11
# get the sub patches that are predicted positive according to threshold
# the pred is super patch -> sub patch -> logit neg, logit pos
pred_b = pred > threshold ;
# get the number of subpatches predicted positive in each superpatch
pred_n = pred_b.sum(axis = 1)
print('np.unique(pred_n)',np.unique(pred_n))
# get the number of subpatches predicted positive in each superpatch in each score label category 1,2,3
pred_n1 = pred_n[np.where(lbl2 == 1)]
pred_n2 = pred_n[np.where(lbl2 == 2)]
pred_n3 = pred_n[np.where(lbl2 == 3)]
ctype_name = ''
if(not (include_ctype is None)):
ctype_name = '_'+include_ctype
with open(os.path.join(out_dir, model_prefix +ctype_name+ '_lbl.txt'), 'w') as file:
for i in range(len(lbl)):
file.write(filenames[i] + ',' + str(pred_n[i]) + ','+str(int(lbl2[i])) + ',' + str(ctype[i])+'\n');
#print(pred_n1) ;
#print(np.where(lbl2 == 1)) ;
#print(lbl[np.where(lbl2 == 1)]) ;
if(plot_type == 0 or plot_type == 1 or plot_type == 2):
if(not(0 in pred_n1)):
pred_n1 = np.concatenate((pred_n1, [0]))
if(not(64 in pred_n1)):
pred_n1 = np.concatenate((pred_n1, [64]))
if(not(0 in pred_n2)):
pred_n2 = np.concatenate((pred_n2, [0]))
if(not(64 in pred_n2)):
pred_n2 = np.concatenate((pred_n2, [64]))
if(not(0 in pred_n3)):
pred_n3 = np.concatenate((pred_n3, [0]))
if(not(64 in pred_n3)):
pred_n3 = np.concatenate((pred_n3, [64]))
fig,ax = plt.subplots(1)
sns.set(style="whitegrid")
#data = {'pred_n':pred_n}
#sns.violinplot(y=pred_n3, bw=1) # multiplies bw by the std to control smoothness
#sns.violinplot(y=pred_n3, bw=1, cut=0) # cut =0 means do not extend beyond data range default is 2
#sns.violinplot(y=pred_n3, bw=1, cut=0, scale='count') # scale reflects the relative shapes of the different violins 'width:same width, area:same area, count:width relative to count in category'
#sns.violinplot(y=pred_n3, bw=1, cut=0, scale='width')
#sns.violinplot(y=pred_n3, bw=1, cut=0, width=0.5) # the width of the violin default is 0.8
if(plot_type == 0):
ax = sns.violinplot(data=[pred_n1,pred_n2,pred_n3], cut=0, width=0.7, scale='width', ax=ax)
elif(plot_type == 1):
ax = sns.violinplot(data=[pred_n1,pred_n2,pred_n3], bw=1, cut=0, width=0.5, scale='width', ax=ax) # original (1)
elif(plot_type == 2 or plot_type == 3):
ax = sns.violinplot(data=[pred_n1,pred_n2,pred_n3], cut=0, ax=ax)
ax.set(xticklabels=['low', 'medium', 'high'])
#plt.show();
fig.savefig(os.path.join(out_dir, model_prefix +'_' +dataset_name+'_violin'+'_type'+str(plot_type)+'.png'));
return;
def save_n_pred_pos(in_dir, model_prefix, threshold):
result_files_prefix = os.path.join(in_dir, model_prefix );
#pred = np.load(result_files_prefix + '_pred_new.npy');
if(os.path.isfile(result_files_prefix + '_pred_new.npy')):
pred = np.load(result_files_prefix + '_pred_new.npy', allow_pickle=True);
elif(os.path.isfile(result_files_prefix + '_pred_prob.npy')):
pred = np.load(result_files_prefix + '_pred_prob.npy', allow_pickle=True);
# get the sub patches that are predicted positive according to threshold
# the pred is super patch -> sub patch -> logit neg, logit pos
pred= pred.squeeze();
#pred = pred[:,:,1] ;
if(len(pred.shape) > 2 and pred.shape[2]>1):
pred = pred[:,:,1];
elif(len(pred.shape) > 2 and pred.shape[2]==1):
pred = pred[:,:,0];
elif(len(pred.shape) == 2):
pred = pred;
pred_b = pred > threshold ;
# get the number of subpatches predicted positive in each superpatch
pred_n = pred_b.sum(axis = 1)
pred_n.dump(result_files_prefix + '_pred_n.npy');
return;
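# The squeeze-and-slice normalization used above is repeated in several functions in
# this file; a shared helper (a sketch, not part of the original code) could replace it:
def _positive_class_scores(pred):
    """Reduce a raw prediction array to per-subpatch positive-class scores."""
    pred = pred.squeeze()
    if len(pred.shape) > 2 and pred.shape[2] > 1:
        return pred[:, :, 1]  # (neg, pos) columns: keep the positive one
    if len(pred.shape) > 2 and pred.shape[2] == 1:
        return pred[:, :, 0]  # a single score column per subpatch
    return pred               # already one score per subpatch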
def process_results_violin_old_model(in_dir, out_dir, model_prefix, dataset_name, plot_type=1, exclude_ctype=None):
result_files_prefix = os.path.join(in_dir, model_prefix);
out_files_prefix = os.path.join(in_dir, model_prefix);
lbl = np.load(result_files_prefix + '_individual_labels.npy', allow_pickle=True);
pred_n_str = np.load(result_files_prefix + '_pred_old.npy', allow_pickle=True);
if(not (exclude_ctype is None)):
ctype = pickle.load(open(result_files_prefix + '_cancer_type.pkl', 'rb'));
ctype = np.array(ctype);
pred_n_str = pred_n_str[np.where(ctype!=exclude_ctype)]
lbl = lbl[np.where(ctype!=exclude_ctype)]
# label values are 1, 2, 3, or 4 (or empty); treat 4 and empty entries as unscored
lbl[np.where(lbl==4)] = 0 ;
# to get the average score label, first count how many scores each super patch actually has
b = lbl>0;
n = b.sum(axis = 1);
n[np.where(n ==0)] = -1; # set zero counts to -1 to avoid division by zero
# average score label: sum each patch's scores, divide by the count, then round
lbl2 = np.divide(lbl.sum(axis = 1), n);
lbl2 = np.round(lbl2);
## get the sub patches that are predicted positive according to threshold
## the pred is super patch -> sub patch -> logit neg, logit pos
#pred= pred.squeeze();
#pred = pred[:,:,1] ;
#pred_b = pred > threshold ;
# get the number of subpatches predicted positive in each superpatch
#pred_n = pred_b.sum(axis = 1)
pred_n = pred_n_str.astype(int)
# get the number of subpatches predicted positive in each superpatch in each score label category 1,2,3
pred_n1 = pred_n[np.where(lbl2 == 1)]
pred_n2 = pred_n[np.where(lbl2 == 2)]
pred_n3 = pred_n[np.where(lbl2 == 3)]
if(plot_type == 0 or plot_type == 1 or plot_type == 2):
if(not(0 in pred_n1)):
pred_n1 = np.concatenate((pred_n1, [0]))
if(not(64 in pred_n1)):
pred_n1 = np.concatenate((pred_n1, [64]))
if(not(0 in pred_n2)):
pred_n2 = np.concatenate((pred_n2, [0]))
if(not(64 in pred_n2)):
pred_n2 = np.concatenate((pred_n2, [64]))
if(not(0 in pred_n3)):
pred_n3 = np.concatenate((pred_n3, [0]))
if(not(64 in pred_n3)):
pred_n3 = np.concatenate((pred_n3, [64]))
fig,ax = plt.subplots(1)
sns.set(style="whitegrid")
#ax = sns.violinplot(data=[pred_n1,pred_n2,pred_n3], bw=0.5, cut=0, width=0.5, scale='width', ax=ax)
#ax = sns.violinplot(data=[pred_n1,pred_n2,pred_n3], cut=0, width=0.5, scale='width', ax=ax)
if(plot_type == 0):
ax = sns.violinplot(data=[pred_n1,pred_n2,pred_n3], cut=0, width=0.7, scale='width', ax=ax)
elif(plot_type == 1):
ax = sns.violinplot(data=[pred_n1,pred_n2,pred_n3], bw=1, cut=0, width=0.5, scale='width', ax=ax) # original (1)
elif(plot_type == 2 or plot_type == 3):
ax = sns.violinplot(data=[pred_n1,pred_n2,pred_n3], cut=0, ax=ax)
ax.set(xticklabels=['low', 'medium', 'high'])
#plt.show();
fig.savefig('baseline.png');
return;
def process_results_violin_old_model_w_thresh26(csv_path, plot_type=1, exclude_ctype=None):
cancer_type_list = [];
filename_list = [];
individual_labels_list = []
avg_label_list = []
pred_old_list = []
# read csv file
with open(csv_path, 'r') as label_file:
line = label_file.readline(); # skip title line
line = label_file.readline();
while(line):
c, s, p, i1, i2, i3, i4, i5, i6, pred_old, pred_thresh23_nec, pred_thresh23, pred_thresh26_nec, pred_thresh26= line.split(',');
if (i1.strip()==""):
i1 = 0;
if (i2.strip()==""):
i2 = 0;
if (i3.strip()==""):
i3 = 0;
if (i4.strip()==""):
i4 = 0;
if (i5.strip()==""):
i5 = 0;
if (i6.strip()==""):
i6 = 0;
cancer_type_list.append(c);
filename_list.append(s+'_'+p+'.png');
individual_labels_list.append([int(i1), int(i2), int(i3), int(i4), int(i5), int(i6)]);
avg_label_list.append(np.mean(np.array([float(i1), float(i2), float(i3), float(i4), float(i5), float(i6)])));
#pred_old_list.append(pred_old);
pred_old_list.append(pred_thresh26);
line = label_file.readline();
lbl = np.array(individual_labels_list);
pred_n_str = np.array(pred_old_list);
if(not (exclude_ctype is None)):
ctype = np.array(cancer_type_list);
pred_n_str = pred_n_str[np.where(ctype!=exclude_ctype)]
lbl = lbl[np.where(ctype!=exclude_ctype)]
    # label values are 1, 2, 3, 4, or empty; treat 4 and empty entries as missing
    lbl[np.where(lbl == 4)] = 0;
    # count how many scores are available for each super patch
    b = lbl > 0;
    n = b.sum(axis=1);
    n[np.where(n == 0)] = -1; # set zero counts to -1 to avoid division by zero
    # average score label: sum the available scores, divide by the count, then round
    lbl2 = np.divide(lbl.sum(axis=1), n);
lbl2 = np.round(lbl2);
## get the sub patches that are predicted positive according to threshold
## the pred is super patch -> sub patch -> logit neg, logit pos
#pred= pred.squeeze();
#pred = pred[:,:,1] ;
#pred_b = pred > threshold ;
# get the number of subpatches predicted positive in each superpatch
#pred_n = pred_b.sum(axis = 1)
    pred_n = pred_n_str.astype(int) # np.int was removed from NumPy; the builtin int is equivalent here
# get the number of subpatches predicted positive in each superpatch in each score label category 1,2,3
pred_n1 = pred_n[np.where(lbl2 == 1)]
pred_n2 = pred_n[np.where(lbl2 == 2)]
pred_n3 = pred_n[np.where(lbl2 == 3)]
if(plot_type == 0 or plot_type == 1 or plot_type == 2):
if(not(0 in pred_n1)):
pred_n1 = np.concatenate((pred_n1, [0]))
if(not(64 in pred_n1)):
pred_n1 = np.concatenate((pred_n1, [64]))
if(not(0 in pred_n2)):
pred_n2 = np.concatenate((pred_n2, [0]))
if(not(64 in pred_n2)):
pred_n2 = np.concatenate((pred_n2, [64]))
if(not(0 in pred_n3)):
pred_n3 = np.concatenate((pred_n3, [0]))
if(not(64 in pred_n3)):
pred_n3 = np.concatenate((pred_n3, [64]))
fig,ax = plt.subplots(1)
sns.set(style="whitegrid")
#ax = sns.violinplot(data=[pred_n1,pred_n2,pred_n3], bw=0.5, cut=0, width=0.5, scale='width')
#ax = sns.violinplot(data=[pred_n1,pred_n2,pred_n3], cut=0, width=0.5, scale='width')
if(plot_type == 0):
ax = sns.violinplot(data=[pred_n1,pred_n2,pred_n3], cut=0, width=0.7, scale='width', ax=ax)
elif(plot_type == 1):
ax = sns.violinplot(data=[pred_n1,pred_n2,pred_n3], bw=1, cut=0, width=0.5, scale='width', ax=ax) # original (1)
elif(plot_type == 2 or plot_type == 3):
ax = sns.violinplot(data=[pred_n1,pred_n2,pred_n3], cut=0, ax=ax)
ax.set(xticklabels=['low', 'medium', 'high'])
#plt.show();
fig.savefig('baseline_th26.png');
return;
def process_results_violin_han(label_filepath, pred_filepath, out_dir, model_prefix, dataset_name, plot_type=3, ctype='brca'):
cancer_type_list = [];
filename_list = [];
individual_labels_list = []
with open(os.path.join(label_filepath), 'r') as label_file:
line = label_file.readline();
line = label_file.readline();
while(line):
c, s, p, i1, i2, i3, i4, i5, i6, pred_old= line.split(',');
print(c,s,p)
if(not (ctype is None) and not (c.strip() == ctype)):
line = label_file.readline();
continue;
            # replace empty rating fields with 0
            i1, i2, i3, i4, i5, i6 = [0 if v.strip() == "" else v for v in (i1, i2, i3, i4, i5, i6)]
cancer_type_list.append(c);
filename_list.append(s+'_'+p+'.png');
individual_labels_list.append([int(i1), int(i2), int(i3), int(i4), int(i5), int(i6)]);
line = label_file.readline();
pred_filename_list = [];
pred_n_list = [];
pred_individual_labels = [];
with open(os.path.join(pred_filepath), 'r') as file:
line = file.readline();
while(line):
s, pred = line.split(',');
print(s, pred)
pred_filename_list.append(s);
pred_n_list.append(int(pred));
line = file.readline();
pred_n = np.array(pred_n_list);
for i in range(len(pred_filename_list)):
patch_filename = pred_filename_list[i].strip();
print(patch_filename )
for j in range(len(filename_list)):
if(filename_list[j].strip() == patch_filename):
print('found')
pred_individual_labels.append(individual_labels_list[j]);
break;
lbl = np.array(pred_individual_labels);
    # label values are 1, 2, 3, 4, or empty; treat 4 and empty entries as missing
    lbl[np.where(lbl == 4)] = 0;
    # count how many scores are available for each super patch
    b = lbl > 0;
    n = b.sum(axis=1);
    n[np.where(n == 0)] = -1; # set zero counts to -1 to avoid division by zero
    # average score label: sum the available scores, divide by the count, then round
    lbl2 = np.divide(lbl.sum(axis=1), n);
lbl2 = np.round(lbl2);
## get the sub patches that are predicted positive according to threshold
## the pred is super patch -> sub patch -> logit neg, logit pos
#pred= pred.squeeze();
#pred = pred[:,:,1] ;
#pred_b = pred > threshold ;
# get the number of subpatches predicted positive in each superpatch
#pred_n = pred_b.sum(axis = 1)
#pred_n = pred_n_str.astype(np.int)
# get the number of subpatches predicted positive in each superpatch in each score label category 1,2,3
pred_n1 = pred_n[np.where(lbl2 == 1)]
pred_n2 = pred_n[np.where(lbl2 == 2)]
pred_n3 = pred_n[np.where(lbl2 == 3)]
print('lbl=1', len(np.where(lbl2 == 1)[0])) # 23
print('lbl=2', len(np.where(lbl2 == 2)[0])) # 29
print('lbl=3', len(np.where(lbl2 == 3)[0])) # 11
if(plot_type == 0 or plot_type == 1 or plot_type == 2):
if(not(0 in pred_n1)):
pred_n1 = np.concatenate((pred_n1, [0]))
if(not(64 in pred_n1)):
pred_n1 = np.concatenate((pred_n1, [64]))
if(not(0 in pred_n2)):
pred_n2 = np.concatenate((pred_n2, [0]))
if(not(64 in pred_n2)):
pred_n2 = np.concatenate((pred_n2, [64]))
if(not(0 in pred_n3)):
pred_n3 = np.concatenate((pred_n3, [0]))
if(not(64 in pred_n3)):
pred_n3 = np.concatenate((pred_n3, [64]))
fig,ax = plt.subplots(1)
sns.set(style="whitegrid")
#ax = sns.violinplot(data=[pred_n1,pred_n2,pred_n3], bw=0.5, cut=0, width=0.5, scale='width', ax=ax)
#ax = sns.violinplot(data=[pred_n1,pred_n2,pred_n3], cut=0, width=0.5, scale='width', ax=ax)
if(plot_type == 0):
ax = sns.violinplot(data=[pred_n1,pred_n2,pred_n3], cut=0, width=0.7, scale='width', ax=ax)
elif(plot_type == 1):
ax = sns.violinplot(data=[pred_n1,pred_n2,pred_n3], bw=1, cut=0, width=0.5, scale='width', ax=ax) # original (1)
elif(plot_type == 2 or plot_type == 3):
ax = sns.violinplot(data=[pred_n1,pred_n2,pred_n3], cut=0, ax=ax)
ax.set(xticklabels=['low', 'medium', 'high'])
#plt.show();
fig.savefig(os.path.join(out_dir, model_prefix +'_' +dataset_name+'_violin'+'_type'+str(plot_type)+'.png'));
return;
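# Hedged refactor sketch (hypothetical helper, not in the original file): the
# grouping/padding/plotting tail that the three functions above share.
def plot_violin_by_label(pred_n, lbl2, out_path, plot_type=1):
    groups = [pred_n[np.where(lbl2 == k)] for k in (1, 2, 3)]
    if plot_type in (0, 1, 2):
        # pad with 0 and 64 so every violin spans the full sub-patch range
        padded = []
        for g in groups:
            for v in (0, 64):
                if v not in g:
                    g = np.concatenate((g, [v]))
            padded.append(g)
        groups = padded
    fig, ax = plt.subplots(1)
    sns.set(style="whitegrid")
    if plot_type == 0:
        sns.violinplot(data=groups, cut=0, width=0.7, scale='width', ax=ax)
    elif plot_type == 1:
        sns.violinplot(data=groups, bw=1, cut=0, width=0.5, scale='width', ax=ax)
    else:
        sns.violinplot(data=groups, cut=0, ax=ax)
    ax.set(xticklabels=['low', 'medium', 'high'])
    fig.savefig(out_path)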
if __name__ == "__main__":
#model_prefix = "tcga_incv4_mix_new3";
#threshold = 0.41;
#in_dir = "/home/shahira/TIL_classification/superpatch_merge";
#out_dir = "/home/shahira/TIL_classification/eval_superpatch_merge/incep_poly_th0.4"
#model_prefix = "tcga_vgg16_mix_new3";
#threshold = 0.4;
#in_dir = "/home/shahira/TIL_classification/eval_superpatch_merge"
#out_dir = "/home/shahira/TIL_classification/eval_superpatch_merge/vgg_poly_th0.4"
model_prefix = ""
threshold = 0.56
in_dir = "/home/shahira/TIL_classification/eval_superpatch_merge/resnet34_e12"
out_dir = "/home/shahira/TIL_classification/eval_superpatch_merge/resnet_poly_th0.56"
dataset_name = "superpatches_merged"
csv_path = '/home/shahira/TIL_classification/superpatch_merge/super-patches-label_m.csv'
title_line = 'filename,label,ctype,patch_count,Anne1,Anne2,Raj1,Raj2,Rebecca1,Rebecca2'
process_results_violin_use_anno_csv_outraw_merged(in_dir, out_dir, csv_path, model_prefix, dataset_name, threshold, title_line);
#out_dir = "/home/shahira/TIL_classification/eval_superpatch_merge/le_poly";
#process_le_results_violin_use_anno_csv_outraw_merged(out_dir, csv_path, dataset_name, title_line)
#############################################################################################################
#model_prefix = "tcga_incv4_mix_new3";
#threshold = 0.41;
#in_dir = "/home/shahira/TIL_classification/superpatches_anno/superpatches_eval";
#out_dir = "/home/shahira/TIL_classification/superpatches_anno/superpatches_eval/poly_th0.4"
#model_prefix = "tcga_vgg16_mix_new3";
#threshold = 0.4;
#in_dir = "/home/shahira/TIL_classification/superpatches_anno/superpatches_eval";
#out_dir = "/home/shahira/TIL_classification/superpatches_anno/superpatches_eval/vgg_poly_th0.4"
#model_prefix = "";
#threshold = 0.56;
#in_dir = "/home/shahira/TIL_classification/superpatches_anno/superpatches_eval/resnet34_e12";
#out_dir = "/home/shahira/TIL_classification/superpatches_anno/superpatches_eval/resnet34_e12/poly_th0.56"
#dataset_name = "superpatches_review"
#csv_path = '/home/shahira/TIL_classification/superpatches_anno/anno_reviewed_individual.csv'
#title_line = 'filename,label,ctype,patch_count,John,Anne,Rebecca'
#process_results_violin_use_anno_csv_outraw_reviewed(in_dir, out_dir, csv_path, model_prefix, dataset_name, threshold, title_line);
| 49.990741
| 209
| 0.621411
| 8,125
| 53,990
| 3.873231
| 0.037169
| 0.036543
| 0.044932
| 0.023546
| 0.943756
| 0.937051
| 0.921385
| 0.911026
| 0.900095
| 0.893613
| 0
| 0.030051
| 0.222782
| 53,990
| 1,079
| 210
| 50.037071
| 0.719913
| 0.267142
| 0
| 0.864979
| 0
| 0
| 0.074302
| 0.01272
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014065
| false
| 0
| 0.011252
| 0
| 0.025316
| 0.049226
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b6960366388b6c286abc095820a9a39836a969f8
| 49
|
py
|
Python
|
recipes/recipes_emscripten/traits/test_import_traits.py
|
emscripten-forge/recipes
|
62cb3e146abc8945ac210f38e4e47c080698eae5
|
[
"MIT"
] | 1
|
2022-03-10T16:50:56.000Z
|
2022-03-10T16:50:56.000Z
|
recipes/recipes_emscripten/traits/test_import_traits.py
|
emscripten-forge/recipes
|
62cb3e146abc8945ac210f38e4e47c080698eae5
|
[
"MIT"
] | 9
|
2022-03-18T09:26:38.000Z
|
2022-03-29T09:21:51.000Z
|
recipes/recipes_emscripten/traits/test_import_traits.py
|
emscripten-forge/recipes
|
62cb3e146abc8945ac210f38e4e47c080698eae5
|
[
"MIT"
] | null | null | null |
def test_import_traits():
import traits
| 12.25
| 25
| 0.673469
| 6
| 49
| 5.166667
| 0.666667
| 0.774194
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.265306
| 49
| 4
| 26
| 12.25
| 0.861111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 1
| 0
| 1.5
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
b6b0778eeb7bd57e9a131e6f282405cd201a11f1
| 180
|
py
|
Python
|
python/common/perforce/p426/__init__.py
|
CountZer0/PipelineConstructionSet
|
0aa73a8a63c72989b2d1c677efd78dad4388d335
|
[
"BSD-3-Clause"
] | 21
|
2015-04-27T05:01:36.000Z
|
2021-11-22T13:45:14.000Z
|
python/common/perforce/p426/__init__.py
|
0xb1dd1e/PipelineConstructionSet
|
621349da1b6d1437e95d0c9e48ee9f36d59f19fd
|
[
"BSD-3-Clause"
] | null | null | null |
python/common/perforce/p426/__init__.py
|
0xb1dd1e/PipelineConstructionSet
|
621349da1b6d1437e95d0c9e48ee9f36d59f19fd
|
[
"BSD-3-Clause"
] | 7
|
2015-04-11T11:37:19.000Z
|
2020-05-22T09:49:04.000Z
|
'''
Author: Jason.Parks
Created: Jan 17, 2012
Module: THQ_common.thq_perforce.p426.__init__
Purpose: to import p426
'''
print "THQ_common.thq_perforce.p426.__init__ imported"
| 18
| 55
| 0.766667
| 26
| 180
| 4.846154
| 0.692308
| 0.142857
| 0.190476
| 0.31746
| 0.444444
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0.094937
| 0.122222
| 180
| 9
| 56
| 20
| 0.702532
| 0
| 0
| 0
| 0
| 0
| 0.754098
| 0.606557
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 1
| null | null | 1
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 9
|
fcad336cce91328fc795cea5b4ca292fffeb683b
| 1,756
|
py
|
Python
|
executive/cloudcontrol/ajax.py
|
b800h/vcloudexecutive
|
aa556664b454ba5d5112fa6c07dde8db8a7dfab4
|
[
"BSD-3-Clause"
] | 1
|
2019-03-14T11:13:00.000Z
|
2019-03-14T11:13:00.000Z
|
executive/cloudcontrol/ajax.py
|
bmcollier/vcloudexecutive
|
aa556664b454ba5d5112fa6c07dde8db8a7dfab4
|
[
"BSD-3-Clause"
] | null | null | null |
executive/cloudcontrol/ajax.py
|
bmcollier/vcloudexecutive
|
aa556664b454ba5d5112fa6c07dde8db8a7dfab4
|
[
"BSD-3-Clause"
] | null | null | null |
from django.http import HttpResponse
from django.utils.html import strip_tags
import json
import requests
class storeout:
def __init__(self, key):
self.actions = {}
self.key = key
def save(self, value):
self.actions[self.key] = value
def start_server(request):
server_name = request.GET.get('server_name')
server_response = '{"percent_complete": "' + '10' + '","status":"' + 'ok' + '"}'
return HttpResponse(server_response, content_type='application/json')
def suspend_server(request):
    server_name = request.GET.get('server_name')
    # assumed payload: the original referenced server_response without defining it
    server_response = '{"percent_complete": "' + '100' + '","status":"' + 'ok' + '"}'
    return HttpResponse(server_response, content_type='application/json')
def stop_server(request):
server_name = request.GET.get('server_name')
server_response = '{"percent_complete": "' + '100' + '","status":"' + 'ok' + '"}' # Hardcoded to stop immediately with 100
return HttpResponse(server_response, content_type='application/json')
def boost_server(request):
server_name = request.GET.get('server_name')
response = requests.get('http://localhost:8888/boost/vm-b8e95c38-b899-496e-bd6b-bcfec39fc52e', data=None)
json_data = json.loads(response.text)
server_response = '{"percent_complete": "' + str(json_data['progress']) + '","status":"' + 'ok' + '"}'
return HttpResponse(server_response, content_type='application/json')
def deboost_server(request):
server_name = request.GET.get('server_name')
response = requests.get('http://localhost:8888/deboost/vm-b8e95c38-b899-496e-bd6b-bcfec39fc52e', data=None)
json_data = json.loads(response.text)
server_response = '{"percent_complete": "' + str(json_data['progress']) + '","status":"' + 'ok' + '"}'
return HttpResponse(server_response, content_type='application/json')
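# Hedged alternative sketch (hypothetical helper, not part of the original app):
# json.dumps builds the same payload as the views above without the brittle
# string concatenation.
def _json_progress(percent, status='ok'):
    return HttpResponse(json.dumps({'percent_complete': str(percent), 'status': status}),
                        content_type='application/json')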
| 45.025641
| 126
| 0.701595
| 213
| 1,756
| 5.586854
| 0.2723
| 0.084034
| 0.079832
| 0.096639
| 0.776471
| 0.776471
| 0.776471
| 0.776471
| 0.776471
| 0.635294
| 0
| 0.031809
| 0.140661
| 1,756
| 39
| 127
| 45.025641
| 0.756793
| 0.02164
| 0
| 0.424242
| 0
| 0
| 0.258591
| 0
| 0.121212
| 0
| 0
| 0
| 0
| 1
| 0.212121
| false
| 0
| 0.121212
| 0
| 0.515152
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
1e963c29fdf6967bfd0ce2c4a369a568569efb05
| 25,863
|
py
|
Python
|
code/nn_models.py
|
nateGeorge/stock_prediction
|
e7520c24d1174b197188f198d5e2e9487b7a2d0c
|
[
"Apache-2.0"
] | null | null | null |
code/nn_models.py
|
nateGeorge/stock_prediction
|
e7520c24d1174b197188f198d5e2e9487b7a2d0c
|
[
"Apache-2.0"
] | null | null | null |
code/nn_models.py
|
nateGeorge/stock_prediction
|
e7520c24d1174b197188f198d5e2e9487b7a2d0c
|
[
"Apache-2.0"
] | null | null | null |
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, LSTM, Flatten, Embedding, GlobalMaxPooling1D
from keras.regularizers import l2
from keras.layers.core import Reshape
from keras.layers.wrappers import TimeDistributed
from keras.layers.convolutional import Conv1D
from keras.layers.pooling import MaxPooling1D
from keras.initializers import glorot_normal
from keras.layers.pooling import GlobalAveragePooling1D
from keras.optimizers import RMSprop
from keras.layers.normalization import BatchNormalization
from keras.callbacks import History
import numpy as np
from keras.layers.advanced_activations import LeakyReLU
from keras_tqdm import TQDMNotebookCallback
import plotly
plotly.offline.init_notebook_mode()
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import plotly.graph_objs as go
import math
import pandas as pd
# hyperparameters
EPOCHS = 200
BATCH = 100
def create_nn_data(train_fs, test_fs):
# NOTE: to use keras's RNN LSTM module our input must be reshaped to [samples, stepsize, window size]
# our stepsize is 1 because we increment the time by 1 for each sample
# window size is 30 currently
X_trains = {}
X_tests = {}
for s in train_fs.keys():
X_trains[s] = np.asarray(np.reshape(train_fs[s], (train_fs[s].shape[0], 1, train_fs[s].shape[1])))
X_tests[s] = np.asarray(np.reshape(test_fs[s], (test_fs[s].shape[0], 1, test_fs[s].shape[1])))
return X_trains, X_tests
def create_nn_data_pcts(train, test):
# NOTE: to use keras's RNN LSTM module our input must be reshaped to [samples, stepsize, window size]
# our stepsize is 1 because we increment the time by 1 for each sample
# window size is 30 currently
X_train = np.asarray(np.reshape(train, (train.shape[0], 1, train.shape[1])))
X_test = np.asarray(np.reshape(test, (test.shape[0], 1, test.shape[1])))
return X_train, X_test
def create_nn_data4conv1d(train_fs, test_fs):
# NOTE: to use keras's RNN LSTM module our input must be reshaped to [samples, stepsize, window size]
# our stepsize is 1 because we increment the time by 1 for each sample
# window size is 30 currently
X_trains = {}
X_tests = {}
for s in train_fs.keys():
X_trains[s] = np.asarray(np.reshape(train_fs[s], (train_fs[s].shape[0], train_fs[s].shape[1], 1)))
X_tests[s] = np.asarray(np.reshape(test_fs[s], (test_fs[s].shape[0], test_fs[s].shape[1], 1)))
return X_trains, X_tests
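# Hedged micro-example of the reshapes above (shapes are illustrative only):
# a (200, 30) feature matrix becomes (200, 1, 30) for the LSTM models and
# (200, 30, 1) for the Conv1D model.
_demo = np.zeros((200, 30))
_lstm_in = _demo.reshape(_demo.shape[0], 1, _demo.shape[1])   # -> (200, 1, 30)
_conv_in = _demo.reshape(_demo.shape[0], _demo.shape[1], 1)   # -> (200, 30, 1)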
def create_model_1(X_train):
"""
Found that this is overfitting because the test data (val) loss
goes down and then way up.
"""
model = Sequential()
model.add(LSTM(256, input_shape=X_train.shape[1:], activation=None, return_sequences=True))
    model.add(Activation('elu')) # the original passed the undefined name elu; Activation('elu') is the likely intent
model.add(Dropout(0.5))
model.add(LSTM(256, activation=None))
model.add(LeakyReLU())
model.add(Dropout(0.5))
model.add(Dense(1))
# build model using keras documentation recommended optimizer initialization
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
# compile the model
model.compile(loss='mean_squared_error', optimizer=optimizer)
return model
def create_model_complex(X_train):
"""
adding 2 more dense layers with dropout
"""
model = Sequential()
model.add(LSTM(256,
input_shape=X_train.shape[1:],
activation=None,
kernel_initializer='glorot_normal',
kernel_regularizer=l2(0.01),
bias_regularizer=l2(0.01),
return_sequences=True))
model.add(LeakyReLU())
# model.add(Dropout(0.5))
model.add(LSTM(256,
activation=None,
kernel_initializer='glorot_normal',
kernel_regularizer=l2(0.01),
bias_regularizer=l2(0.01)))
model.add(LeakyReLU())
model.add(Dense(256, kernel_initializer='glorot_normal'))
model.add(BatchNormalization())
model.add(LeakyReLU())
model.add(Dropout(0.5))
model.add(Reshape((-1, 1)))
model.add(Conv1D(64,
30,
strides=1,
kernel_initializer='glorot_normal',
padding='valid',
activation=None))
model.add(BatchNormalization())
model.add(LeakyReLU())
# https://github.com/fchollet/keras/issues/4403 note on TimeDistributed
model.add(MaxPooling1D(pool_size=2,
strides=2,
padding='valid'))
model.add(Conv1D(128,
30,
strides=1,
kernel_initializer='glorot_normal',
padding='valid',
activation=None))
model.add(BatchNormalization())
model.add(LeakyReLU())
# https://github.com/fchollet/keras/issues/4403 note on TimeDistributed
model.add(MaxPooling1D(pool_size=2,
strides=2,
padding='valid'))
model.add(Flatten())
model.add(Dense(1024, kernel_initializer='glorot_normal'))
model.add(BatchNormalization())
    model.add(LeakyReLU())
model.add(Dropout(0.5))
model.add(Dense(512, kernel_initializer='glorot_normal'))
model.add(BatchNormalization())
model.add(LeakyReLU())
model.add(Dropout(0.5))
model.add(Dense(128, kernel_initializer='glorot_normal'))
model.add(BatchNormalization())
model.add(LeakyReLU())
model.add(Dense(1, kernel_initializer='glorot_normal'))
# build model using keras documentation recommended optimizer initialization
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
# compile the model
model.compile(loss='mean_squared_error', optimizer=optimizer)
return model
def create_model(X_train):
"""
loss of 0.11 with 90 days history and 5 days prediction
"""
model = Sequential()
model.add(LSTM(256,
input_shape=X_train.shape[1:],
activation=None,
kernel_regularizer=l2(0.01),
bias_regularizer=l2(0.01),
return_sequences=True))
model.add(LeakyReLU())
# model.add(Dropout(0.5))
model.add(LSTM(256,
activation=None,
kernel_regularizer=l2(0.01),
bias_regularizer=l2(0.01)))
model.add(LeakyReLU())
model.add(Dense(256))
model.add(Dropout(0.5))
model.add(Reshape((-1, 1)))
model.add(Conv1D(64,
15,
strides=1,
padding='valid',
activation=None))
# https://github.com/fchollet/keras/issues/4403 note on TimeDistributed
model.add(MaxPooling1D(pool_size=2,
strides=2,
padding='valid'))
model.add(Conv1D(128,
15,
strides=1,
padding='valid',
activation=None))
# https://github.com/fchollet/keras/issues/4403 note on TimeDistributed
model.add(MaxPooling1D(pool_size=2,
strides=2,
padding='valid'))
model.add(Flatten())
model.add(Dense(64))
# model.add(Dropout(0.5))
model.add(Dense(1))
# build model using keras documentation recommended optimizer initialization
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
# compile the model
model.compile(loss='mean_squared_error', optimizer=optimizer)
return model
def create_conv1d_model(X_train):
"""
"""
model = Sequential()
# example here: https://gist.github.com/jkleint/1d878d0401b28b281eb75016ed29f2ee
model.add(Conv1D(64,
30,
strides=1,
padding='valid',
kernel_initializer='glorot_normal',
activation=None,
input_shape=(X_train.shape[1], 1)
))
model.add(BatchNormalization())
model.add(LeakyReLU())
# https://github.com/fchollet/keras/issues/4403 note on TimeDistributed
model.add(MaxPooling1D(pool_size=2,
strides=2,
padding='valid'))
model.add(Conv1D(256,
30,
strides=1,
padding='valid',
kernel_initializer='glorot_normal',
activation=None,
input_shape=(X_train.shape[1], 1)
))
model.add(BatchNormalization())
model.add(LeakyReLU())
# https://github.com/fchollet/keras/issues/4403 note on TimeDistributed
model.add(MaxPooling1D(pool_size=2,
strides=2,
padding='valid'))
# model.add(Flatten()) # dimensions were too big with this
model.add(GlobalAveragePooling1D())
model.add(Dense(256, kernel_initializer='glorot_normal'))
model.add(BatchNormalization())
model.add(LeakyReLU())
model.add(Dropout(0.5))
model.add(Dense(128, kernel_initializer='glorot_normal'))
model.add(BatchNormalization())
model.add(LeakyReLU())
model.add(Dropout(0.5))
model.add(Dense(1))
# build model using keras documentation recommended optimizer initialization
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
# compile the model
model.compile(loss='mean_squared_error', optimizer=optimizer)
return model
def create_model_lstm(X_train):
"""
"""
model = Sequential()
model.add(LSTM(256,
input_shape=X_train.shape[1:],
activation=None,
kernel_regularizer=l2(0.01),
bias_regularizer=l2(0.01),
return_sequences=True))
model.add(LeakyReLU())
# model.add(Dropout(0.5))
model.add(LSTM(256,
activation=None,
kernel_regularizer=l2(0.01),
bias_regularizer=l2(0.01)))
model.add(LeakyReLU())
model.add(Dense(256))
model.add(Dropout(0.5))
model.add(Reshape((-1, 1)))
model.add(Conv1D(64,
15,
strides=1,
padding='valid',
activation=None))
# https://github.com/fchollet/keras/issues/4403 note on TimeDistributed
model.add(MaxPooling1D(pool_size=2,
strides=2,
padding='valid'))
model.add(Conv1D(128,
15,
strides=1,
padding='valid',
activation=None))
# https://github.com/fchollet/keras/issues/4403 note on TimeDistributed
model.add(MaxPooling1D(pool_size=2,
strides=2,
padding='valid'))
model.add(Flatten())
model.add(Dense(64))
model.add(Dropout(0.5))
model.add(BatchNormalization())
model.add(LeakyReLU())
model.add(Dense(1))
# compile the model
model.compile(optimizer='adam', loss='mean_squared_error')
return model
def embed_model(X_train):
model = Sequential()
max_features = math.ceil(X_train.ravel().max())
print('max_features for embed layer: ', max_features)
embedding_dims = 50
model.add(Embedding(max_features,
embedding_dims,
input_length=X_train.shape[1],
embeddings_regularizer=l2(1e-4)))
model.add(Dropout(0.2))
model.add(Conv1D(32, 3, padding='valid', activation='relu', strides=1))
model.add(BatchNormalization())
model.add(LeakyReLU())
model.add(MaxPooling1D(pool_size=2,
strides=2,
padding='valid'))
model.add(Conv1D(64, 3, padding='valid', activation='relu', strides=1))
model.add(BatchNormalization())
model.add(LeakyReLU())
model.add(GlobalMaxPooling1D())
model.add(Dense(100))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(LeakyReLU())
model.add(Dense(1))
model.compile(optimizer='adam', loss='mean_squared_error')
print('Complete.')
return model
def fit_model_nb(model, X_train, train_t, X_test, test_t):
# for fitting the model in a jupyter notebook
history = History()
model.fit(X_train,
train_t,
epochs=EPOCHS,
batch_size=BATCH,
validation_data=[X_test, test_t],
verbose=0,
callbacks=[TQDMNotebookCallback(), history])
return history
def fit_model(model, X_train, train_t, X_test, test_t):
history = History()
model.fit(X_train,
train_t,
epochs=EPOCHS,
batch_size=BATCH,
validation_data=[X_test, test_t],
verbose=1,
callbacks=[history])
return history
def fit_model_silent(model, X_train, train_t, X_test, test_t, epochs=EPOCHS):
history = History()
model.fit(X_train,
train_t,
epochs=epochs,
batch_size=BATCH,
validation_data=[X_test, test_t],
verbose=0,
callbacks=[history])
return history
def plot_losses(history):
"""
Plots train and val losses from neural net training.
"""
trace0 = go.Scatter(
x = history.epoch,
y = history.history['loss'],
mode = 'lines+markers',
name = 'loss'
)
trace1 = go.Scatter(
x = history.epoch,
y = history.history['val_loss'],
mode = 'lines+markers',
name = 'test loss'
)
f = iplot({'data':[trace0, trace1]})
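# Hedged usage sketch (hypothetical helper, not in the original module): wires
# the model builders and fit helpers above together on random data; call
# _demo_fit() manually to run it.
def _demo_fit():
    X_train = np.random.rand(100, 1, 30)
    X_test = np.random.rand(20, 1, 30)
    train_t = np.random.rand(100)
    test_t = np.random.rand(20)
    model = create_model_1(X_train)
    history = fit_model_silent(model, X_train, train_t, X_test, test_t, epochs=1)
    plot_losses(history)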
def plot_data_preds_scaled(model, stock, scaled_ts, scaled_fs, dates, train_test='all', train_frac=0.85, future_days=5):
    # note: the 'train' and 'test' branches below depend on a module-level dfs dict of DataFrames that is not passed in
if train_test == 'all':
# vertical line should be on the first testing set point
dates = dates[stock]
train_size = int(train_frac * dates.shape[0])
print(train_size)
feats = scaled_fs[stock]
print(feats.shape)
for_pred = feats.reshape(feats.shape[0],
1, feats.shape[1])
preds = model.predict(for_pred).ravel()
print(max([max(scaled_ts[stock].ravel()), max(preds)]))
layout = {'shapes': [
{
'type': 'rect',
# stupid hack to deal with pandas issue
'x0': dates[train_size].date().strftime('%Y-%m-%d'),
'y0': 1.1 * min([min(scaled_ts[stock].ravel()), min(preds)]),
'x1': dates[-1].date().strftime('%Y-%m-%d'),
'y1': 1.1 * max([max(scaled_ts[stock].ravel()), max(preds)]),
'line': {
'color': 'rgb(255, 0, 0)',
'width': 2,
},
'fillcolor': 'rgba(128, 0, 128, 0.05)',
},
{
'type': 'line',
# first line is just before first point of test set
'x0': dates[train_size+future_days].date().strftime('%Y-%m-%d'),
'y0': 1.1 * min([min(scaled_ts[stock].ravel()), min(preds)]),
'x1': dates[train_size+future_days].date().strftime('%Y-%m-%d'),
'y1': 1.1 * max([max(scaled_ts[stock].ravel()), max(preds)]),
'line': {
'color': 'rgb(0, 255, 0)',
'width': 2,
}
}]}
trace0 = go.Scatter(
x = dates,
y = scaled_ts[stock].ravel(),
mode = 'lines+markers',
name = 'actual'
)
trace1 = go.Scatter(
x = dates,
y = preds,
mode = 'lines+markers',
name = 'predictions'
)
f = iplot({'data':[trace0, trace1], 'layout':layout})
elif train_test == 'train':
train_size = int(train_frac * dfs[stock].shape[0])
feats = scaled_fs[stock][:train_size]
for_pred = feats.reshape(feats.shape[0],
1, feats.shape[1])
trace0 = go.Scatter(
x = dfs[stock].iloc[:train_size].index,
y = scaled_ts[stock].ravel(),
mode = 'lines+markers',
name = 'actual'
)
trace1 = go.Scatter(
x = dfs[stock].iloc[:train_size].index,
y = model.predict(for_pred).ravel(),
mode = 'lines+markers',
name = 'predictions'
)
f = iplot([trace0, trace1])
elif train_test == 'test':
train_size = int(train_frac * dfs[stock].shape[0])
feats = scaled_fs[stock][train_size:]
for_pred = feats.reshape(feats.shape[0],
1, feats.shape[1])
trace0 = go.Scatter(
x = dfs[stock].iloc[train_size:].index,
y = scaled_ts[stock].ravel(),
mode = 'lines+markers',
name = 'actual'
)
trace1 = go.Scatter(
x = dfs[stock].iloc[train_size:].index,
y = model.predict(for_pred).ravel(),
mode = 'lines+markers',
name = 'predictions'
)
f = iplot([trace0, trace1])
else:
print('error! You have to supply train_test as \'all\', \'train\', or \'test\'')
def plot_data_preds_unscaled(model, stock, t_scalers, scaled_ts, scaled_fs, targs, dates, datapoints=300, train_frac=0.85, future_days=5):
dates = dates[stock]
train_size = int(train_frac * dates.shape[0])
for_preds = scaled_fs[stock].reshape(scaled_fs[stock].shape[0],
1, scaled_fs[stock].shape[1])
preds = model.predict(for_preds).ravel()
unscaled_preds = t_scalers[stock].reform_data(preds, orig=True)
if datapoints == 'all':
datapoints = dates.shape[0]
layout = {'shapes': [
{
'type': 'rect',
# first line is just before first point of test set
'x0': dates[train_size].date().strftime('%Y-%m-%d'),
'y0': 1.1 * min([min(targs[stock][-datapoints:]), min(unscaled_preds.ravel()[-datapoints:])]),
'x1': dates[-1].date().strftime('%Y-%m-%d'),
'y1': 1.1 * max([max(targs[stock][-datapoints:]), max(unscaled_preds.ravel()[-datapoints:])]),
'line': {
'color': 'rgb(255, 0, 0)',
'width': 2,
},
'fillcolor': 'rgba(128, 0, 128, 0.05)',
},
{
'type': 'line',
# first line is just before first point of test set
'x0': dates[train_size+future_days].date().strftime('%Y-%m-%d'),
'y0': 1.1 * min([min(targs[stock][-datapoints:]), min(unscaled_preds.ravel()[-datapoints:])]),
'x1': dates[train_size+future_days].date().strftime('%Y-%m-%d'),
'y1': 1.1 * max([max(targs[stock][-datapoints:]), max(unscaled_preds.ravel()[-datapoints:])]),
'line': {
'color': 'rgb(0, 255, 0)',
'width': 2,
}
}],
'yaxis': {'title': 'GLD price'}}
trace0 = go.Scatter(
x = dates[-datapoints:],
y = targs[stock][-datapoints:],
mode = 'lines+markers',
name = 'actual'
)
trace1 = go.Scatter(
x = dates[-datapoints:],
y = unscaled_preds.ravel()[-datapoints:],
mode = 'lines+markers',
name = 'predictions'
)
f = iplot({'data':[trace0, trace1], 'layout':layout})
def plot_data_preds_unscaled_future(model, stock, t_scalers, scaled_ts, scaled_fs, targs, dates, datapoints=300, future_days=20):
    """
    plots training data and future prices of unseen data
    """
    # note: pred_dates below depends on a module-level dfs dict of DataFrames that is not passed in
for_preds = scaled_fs[stock].reshape(scaled_fs[stock].shape[0],
1, scaled_fs[stock].shape[1])
preds = model.predict(for_preds).ravel()
unscaled_preds = t_scalers[stock].reform_data(preds, orig=True)
if datapoints == 'all':
datapoints = dates[stock].shape[0]
# need to generate more dates for the unseen data
pred_dates = dfs[stock].index + pd.Timedelta(str(future_days) + ' days')
trace0 = go.Scatter(
x = pred_dates[-datapoints:],
y = targs[stock][-datapoints:],
mode = 'lines+markers',
name = 'actual'
)
trace1 = go.Scatter(
x = pred_dates[-datapoints:],
y = unscaled_preds.ravel()[-datapoints:],
mode = 'lines+markers',
name = 'predictions'
)
f = iplot({'data':[trace0, trace1]})
def plot_data_preds_unscaled_embed(model, stock, dfs, t_scalers, scaled_ts, scaled_fs, targs, datapoints=300, train_frac=0.85):
train_size = int(train_frac * dfs[stock].shape[0])
for_preds = scaled_fs[stock]
preds = model.predict(for_preds).ravel()
unscaled_preds = t_scalers[stock].reform_data(preds, orig=True)
if datapoints == 'all':
datapoints = dfs[stock].shape[0]
layout = {'shapes': [
{
'type': 'rect',
# stupid hack to deal with pandas issue
'x0': dfs[stock].iloc[train_size:train_size + 1].index[0].date().strftime('%Y-%m-%d'),
'y0': 1.1 * min([min(targs[stock][-datapoints:]), min(unscaled_preds.ravel()[-datapoints:])]),
'x1': dfs[stock].iloc[-2:-1].index[0].date().strftime('%Y-%m-%d'),
'y1': 1.1 * max([max(targs[stock][-datapoints:]), max(unscaled_preds.ravel()[-datapoints:])]),
'line': {
'color': 'rgb(255, 0, 0)',
'width': 2,
},
'fillcolor': 'rgba(128, 0, 128, 0.05)',
}]}
trace0 = go.Scatter(
x = dfs[stock].index[-datapoints:],
y = targs[stock][-datapoints:],
mode = 'lines+markers',
name = 'actual'
)
trace1 = go.Scatter(
x = dfs[stock].index[-datapoints:],
y = unscaled_preds.ravel()[-datapoints:],
mode = 'lines+markers',
name = 'predictions'
)
f = iplot({'data':[trace0, trace1], 'layout':layout})
def plot_data_preds_scaled_conv1d(model, stock, dfs, scaled_ts, scaled_fs, train_test='all', train_frac=0.85):
if train_test == 'all':
# vertical line should be on the first testing set point
train_size = int(train_frac * dfs[stock].shape[0])
print(train_size)
feats = scaled_fs[stock]
for_pred = feats.reshape(feats.shape[0],
feats.shape[1],
1)
preds = model.predict(for_pred).ravel()
print(max([max(scaled_ts[stock].ravel()), max(preds)]))
layout = {'shapes': [
{
'type': 'rect',
# stupid hack to deal with pandas issue
'x0': dfs[stock].iloc[train_size:train_size + 1].index[0].date().strftime('%Y-%m-%d'),
'y0': 1.1 * min([min(scaled_ts[stock].ravel()), min(preds)]),
'x1': dfs[stock].iloc[-2:-1].index[0].date().strftime('%Y-%m-%d'),
'y1': 1.1 * max([max(scaled_ts[stock].ravel()), max(preds)]),
'line': {
'color': 'rgb(255, 0, 0)',
'width': 2,
},
'fillcolor': 'rgba(128, 0, 128, 0.05)',
}]}
trace0 = go.Scatter(
x = dfs[stock].index,
y = scaled_ts[stock].ravel(),
mode = 'lines+markers',
name = 'actual'
)
trace1 = go.Scatter(
x = dfs[stock].index,
y = preds,
mode = 'lines+markers',
name = 'predictions'
)
f = iplot({'data':[trace0, trace1], 'layout':layout})
elif train_test == 'train':
train_size = int(train_frac * dfs[stock].shape[0])
feats = scaled_fs[stock][:train_size]
for_pred = feats.reshape(feats.shape[0],
feats.shape[1],
1)
trace0 = go.Scatter(
x = dfs[stock].iloc[:train_size].index,
y = scaled_ts[stock].ravel(),
mode = 'lines+markers',
name = 'actual'
)
trace1 = go.Scatter(
x = dfs[stock].iloc[:train_size].index,
y = model.predict(for_pred).ravel(),
mode = 'lines+markers',
name = 'predictions'
)
f = iplot([trace0, trace1])
elif train_test == 'test':
train_size = int(train_frac * dfs[stock].shape[0])
feats = scaled_fs[stock][train_size:]
for_pred = feats.reshape(feats.shape[0],
feats.shape[1],
1)
trace0 = go.Scatter(
x = dfs[stock].iloc[train_size:].index,
y = scaled_ts[stock].ravel(),
mode = 'lines+markers',
name = 'actual'
)
trace1 = go.Scatter(
x = dfs[stock].iloc[train_size:].index,
y = model.predict(for_pred).ravel(),
mode = 'lines+markers',
name = 'predictions'
)
f = iplot([trace0, trace1])
else:
print('error! You have to supply train_test as \'all\', \'train\', or \'test\'')
def plot_data_preds_unscaled_conv1d(model, stock, dfs, t_scalers, scaled_fs, targs):
for_preds = scaled_fs[stock].reshape(scaled_fs[stock].shape[0],
scaled_fs[stock].shape[1],
1)
preds = model.predict(for_preds).ravel()
unscaled_preds = t_scalers[stock].reform_data(preds, orig=True)
datapoints = 300
trace0 = go.Scatter(
x = dfs[stock].index[-datapoints:],
y = targs[stock][-datapoints:],
mode = 'lines+markers',
name = 'actual'
)
trace1 = go.Scatter(
x = dfs[stock].index[-datapoints:],
y = unscaled_preds.ravel()[-datapoints:],
mode = 'lines+markers',
name = 'predictions'
)
f = iplot([trace0, trace1])
| 35.331967
| 138
| 0.560221
| 3,078
| 25,863
| 4.589669
| 0.097791
| 0.060027
| 0.015573
| 0.031146
| 0.862462
| 0.832307
| 0.820344
| 0.805196
| 0.787924
| 0.768033
| 0
| 0.036105
| 0.3007
| 25,863
| 731
| 139
| 35.380301
| 0.744996
| 0.098712
| 0
| 0.790816
| 0
| 0
| 0.070485
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032313
| false
| 0
| 0.034014
| 0
| 0.086735
| 0.015306
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bf493de91258eed58d9e37170ba202439cdc59db
| 2,782
|
py
|
Python
|
tests/images/test_models.py
|
jeanmask/opps
|
031c6136c38d43aa6d1ccb25a94f7bcd65ccbf87
|
[
"MIT"
] | 159
|
2015-01-03T16:36:35.000Z
|
2022-03-29T20:50:13.000Z
|
tests/images/test_models.py
|
jeanmask/opps
|
031c6136c38d43aa6d1ccb25a94f7bcd65ccbf87
|
[
"MIT"
] | 81
|
2015-01-02T21:26:16.000Z
|
2021-05-29T12:24:52.000Z
|
tests/images/test_models.py
|
jeanmask/opps
|
031c6136c38d43aa6d1ccb25a94f7bcd65ccbf87
|
[
"MIT"
] | 75
|
2015-01-23T13:41:03.000Z
|
2021-09-24T03:45:23.000Z
|
# -*- encoding: utf-8 -*-
from django.test import TestCase
from django.db import models
from opps.images.models import Cropping, HALIGN_CHOICES, VALIGN_CHOICES
class CroppingFields(TestCase):
    def test_crop_example(self):
        field = Cropping._meta.get_field_by_name('crop_example')[0]
        # assertEqual performs the intended class check; assertTrue(a, b)
        # treats b as a failure message and always passes
        self.assertEqual(field.__class__, models.CharField)
        self.assertTrue(field.blank)
        self.assertTrue(field.null)

    def test_crop_x1(self):
        field = Cropping._meta.get_field_by_name('crop_x1')[0]
        self.assertEqual(field.__class__, models.PositiveSmallIntegerField)
        self.assertTrue(field.blank)
        self.assertTrue(field.null)

    def test_crop_x2(self):
        field = Cropping._meta.get_field_by_name('crop_x2')[0]
        self.assertEqual(field.__class__, models.PositiveSmallIntegerField)
        self.assertTrue(field.blank)
        self.assertTrue(field.null)

    def test_crop_y1(self):
        field = Cropping._meta.get_field_by_name('crop_y1')[0]
        self.assertEqual(field.__class__, models.PositiveSmallIntegerField)
        self.assertTrue(field.blank)
        self.assertTrue(field.null)

    def test_crop_y2(self):
        field = Cropping._meta.get_field_by_name('crop_y2')[0]
        self.assertEqual(field.__class__, models.PositiveSmallIntegerField)
        self.assertTrue(field.blank)
        self.assertTrue(field.null)

    def test_flip(self):
        field = Cropping._meta.get_field_by_name('flip')[0]
        self.assertEqual(field.__class__, models.BooleanField)
        self.assertFalse(field.default)

    def test_flop(self):
        field = Cropping._meta.get_field_by_name('flop')[0]
        self.assertEqual(field.__class__, models.BooleanField)
        self.assertFalse(field.default)

    def test_halign(self):
        field = Cropping._meta.get_field_by_name('halign')[0]
        self.assertEqual(field.__class__, models.CharField)
        self.assertFalse(field.default)
        self.assertTrue(field.null)
        self.assertTrue(field.blank)
        self.assertEqual(field.choices, HALIGN_CHOICES)

    def test_valign(self):
        field = Cropping._meta.get_field_by_name('valign')[0]
        self.assertEqual(field.__class__, models.CharField)
        self.assertFalse(field.default)
        self.assertTrue(field.null)
        self.assertTrue(field.blank)
        self.assertEqual(field.choices, VALIGN_CHOICES)

    def test_fit_in(self):
        field = Cropping._meta.get_field_by_name('fit_in')[0]
        self.assertEqual(field.__class__, models.BooleanField)
        self.assertFalse(field.default)

    def test_smart(self):
        field = Cropping._meta.get_field_by_name('smart')[0]
        self.assertEqual(field.__class__, models.BooleanField)
        self.assertFalse(field.default)
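# Hedged alternative sketch (hypothetical extra test, not in the original
# suite): assertIsInstance states the intended class check more directly.
class CroppingFieldTypes(TestCase):
    def test_crop_example_is_charfield(self):
        field = Cropping._meta.get_field_by_name('crop_example')[0]
        self.assertIsInstance(field, models.CharField)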
| 37.594595
| 74
| 0.699137
| 339
| 2,782
| 5.39823
| 0.135693
| 0.191257
| 0.259563
| 0.12623
| 0.840437
| 0.840437
| 0.840437
| 0.840437
| 0.703825
| 0.597268
| 0
| 0.008885
| 0.19087
| 2,782
| 73
| 75
| 38.109589
| 0.804087
| 0.008267
| 0
| 0.525424
| 0
| 0
| 0.025753
| 0
| 0
| 0
| 0
| 0
| 0.559322
| 1
| 0.186441
| false
| 0
| 0.050847
| 0
| 0.254237
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bfc7971399e6d3ff4d23cb457f9016b6b6faf515
| 130
|
py
|
Python
|
mrinversion/kernel/__init__.py
|
DeepanshS/mrinversion
|
b1060f3150a5bf04162dfed499221f040b3bae21
|
[
"BSD-3-Clause"
] | 1
|
2020-10-27T14:48:50.000Z
|
2020-10-27T14:48:50.000Z
|
mrinversion/kernel/__init__.py
|
deepanshs/mrinversion
|
b1060f3150a5bf04162dfed499221f040b3bae21
|
[
"BSD-3-Clause"
] | 13
|
2021-06-07T00:59:53.000Z
|
2022-03-02T16:31:54.000Z
|
mrinversion/kernel/__init__.py
|
deepanshs/mrinversion
|
b1060f3150a5bf04162dfed499221f040b3bae21
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from mrinversion.kernel.relaxation import T1 # NOQA
from mrinversion.kernel.relaxation import T2 # NOQA
| 32.5
| 52
| 0.738462
| 17
| 130
| 5.647059
| 0.647059
| 0.3125
| 0.4375
| 0.645833
| 0.770833
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027027
| 0.146154
| 130
| 3
| 53
| 43.333333
| 0.837838
| 0.238462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
44c7923cb1769183184e091d94f3b369f1f0a57f
| 4,801
|
py
|
Python
|
bert_spd/replacement_scheduler.py
|
shaoyiHusky/SparseProgressiveDistillation
|
88f6c8e7b5a8fef4359d44b0dae6f6d66292f748
|
[
"MIT"
] | 2
|
2022-01-03T05:24:50.000Z
|
2022-03-05T22:06:20.000Z
|
bert_spd/replacement_scheduler.py
|
shaoyiHusky/SparseProgressiveDistillation
|
88f6c8e7b5a8fef4359d44b0dae6f6d66292f748
|
[
"MIT"
] | null | null | null |
bert_spd/replacement_scheduler.py
|
shaoyiHusky/SparseProgressiveDistillation
|
88f6c8e7b5a8fef4359d44b0dae6f6d66292f748
|
[
"MIT"
] | null | null | null |
from bert_spd import BertEncoder
class ConstantReplacementScheduler:
def __init__(self, bert_encoder: BertEncoder, replacing_rate, replacing_steps=None):
self.bert_encoder = bert_encoder
self.replacing_rate = replacing_rate
self.replacing_steps = replacing_steps
self.step_counter = 0
self.bert_encoder.set_replacing_rate(replacing_rate)
def step(self):
self.step_counter += 1
if self.replacing_steps is None or self.replacing_rate == 1.0:
return self.replacing_rate
else:
if self.step_counter >= self.replacing_steps:
self.bert_encoder.set_replacing_rate(1.0)
self.replacing_rate = 1.0
return self.replacing_rate
class LinearReplacementScheduler:
def __init__(self, bert_encoder: BertEncoder, base_replacing_rate, k):
self.bert_encoder = bert_encoder
self.base_replacing_rate = base_replacing_rate
self.step_counter = 0
self.k = k
self.bert_encoder.set_replacing_rate(base_replacing_rate)
def step(self):
self.step_counter += 1
current_replacing_rate = min(self.k * self.step_counter + self.base_replacing_rate, 1.0)
print('step_counter: ', self.step_counter, 'replacing_rate: ', current_replacing_rate)
self.bert_encoder.set_replacing_rate(current_replacing_rate)
return current_replacing_rate
class MixedReplacementScheduler:
def __init__(self, bert_encoder: BertEncoder, replacing_rate, k, replacing_steps=None):
self.bert_encoder = bert_encoder
self.replacing_rate = replacing_rate
self.replacing_steps = replacing_steps
self.step_counter = 0
self.k = k
self.bert_encoder.set_replacing_rate(replacing_rate)
def step(self):
self.step_counter += 1
if self.step_counter < self.replacing_steps or self.replacing_rate == 1.0:
print('step_counter: ', self.step_counter, 'replacing_rate: ', self.replacing_rate)
return self.replacing_rate
else:
if self.step_counter >= self.replacing_steps:
current_replacing_rate = min(self.k * (self.step_counter - self.replacing_steps) + self.replacing_rate, 1.0)
self.bert_encoder.set_replacing_rate(current_replacing_rate)
print('step_counter: ', self.step_counter, 'current_replacing_rate: ', current_replacing_rate)
return current_replacing_rate
class ConstantThenLinearReplacementScheduler:
def __init__(self, bert_encoder: BertEncoder, replacing_rate, base_replacing_rate, k, replacing_steps=None):
self.bert_encoder = bert_encoder
self.replacing_rate = replacing_rate
self.base_replacing_rate = base_replacing_rate
self.replacing_steps = replacing_steps
self.step_counter = 0
self.k = k
self.bert_encoder.set_replacing_rate(replacing_rate)
def step(self):
self.step_counter += 1
if self.step_counter < self.replacing_steps or self.replacing_rate == 1.0:
print('step_counter: ', self.step_counter, 'replacing_rate: ', self.replacing_rate)
return self.replacing_rate
else:
if self.step_counter >= self.replacing_steps:
current_replacing_rate = min(self.k * (self.step_counter - self.replacing_steps) + self.base_replacing_rate, 1.0)
self.bert_encoder.set_replacing_rate(current_replacing_rate)
print('step_counter: ', self.step_counter, 'current_replacing_rate: ', current_replacing_rate)
return current_replacing_rate
class CustomizedLinearReplacementScheduler:
def __init__(self, bert_encoder: BertEncoder, replacing_rate, k, constant_replacing_rate, constant_replacing_step):
self.bert_encoder = bert_encoder
self.constant_replacing_rate = constant_replacing_rate
self.constant_replacing_step = constant_replacing_step
self.base_replacing_rate = replacing_rate
self.step_counter = 0
self.k = k
self.bert_encoder.set_replacing_rate(self.constant_replacing_rate)
def step(self):
self.step_counter += 1
if self.step_counter < self.constant_replacing_step:
print('step_counter: ', self.step_counter, 'replacing_rate: ', self.constant_replacing_rate)
return self.constant_replacing_rate
else:
current_replacing_rate = min(self.k * (self.step_counter - self.constant_replacing_step) + self.base_replacing_rate, 1.0)
print('step_counter: ', self.step_counter, 'replacing_rate: ', current_replacing_rate)
self.bert_encoder.set_replacing_rate(current_replacing_rate)
return current_replacing_rate
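# Hedged usage sketch: _StubEncoder is a hypothetical stand-in for BertEncoder,
# used only to demonstrate the scheduler API above; call _demo_scheduler() to run.
class _StubEncoder:
    def set_replacing_rate(self, rate):
        self.rate = rate

def _demo_scheduler():
    scheduler = LinearReplacementScheduler(_StubEncoder(), base_replacing_rate=0.3, k=0.05)
    for _ in range(3):
        scheduler.step()  # rate climbs 0.35, 0.40, 0.45, capped at 1.0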
| 46.163462
| 133
| 0.700687
| 583
| 4,801
| 5.380789
| 0.060034
| 0.310807
| 0.129104
| 0.05738
| 0.913293
| 0.877909
| 0.828499
| 0.800446
| 0.756455
| 0.683456
| 0
| 0.007515
| 0.223912
| 4,801
| 103
| 134
| 46.61165
| 0.834407
| 0
| 0
| 0.704545
| 0
| 0
| 0.047074
| 0.009581
| 0
| 0
| 0
| 0
| 0
| 1
| 0.113636
| false
| 0
| 0.011364
| 0
| 0.284091
| 0.079545
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
44d50ef4374eea69494bf91544eaa1221461fb68
| 785
|
py
|
Python
|
pava/implementation/natives/com/sun/org/apache/xalan/internal/xsltc/compiler/Mode.py
|
laffra/pava
|
54d10cf7f8def2f96e254c0356623d08f221536f
|
[
"MIT"
] | 4
|
2017-03-30T16:51:16.000Z
|
2020-10-05T12:25:47.000Z
|
pava/implementation/natives/com/sun/org/apache/xalan/internal/xsltc/compiler/Mode.py
|
laffra/pava
|
54d10cf7f8def2f96e254c0356623d08f221536f
|
[
"MIT"
] | null | null | null |
pava/implementation/natives/com/sun/org/apache/xalan/internal/xsltc/compiler/Mode.py
|
laffra/pava
|
54d10cf7f8def2f96e254c0356623d08f221536f
|
[
"MIT"
] | null | null | null |
def add_native_methods(clazz):
def flattenAlternative__com_sun_org_apache_xalan_internal_xsltc_compiler_Pattern__com_sun_org_apache_xalan_internal_xsltc_compiler_Template__java_util_Map_java_lang_String__com_sun_org_apache_xalan_internal_xsltc_compiler_Key___(a0, a1, a2, a3, a4):
raise NotImplementedError()
clazz.flattenAlternative__com_sun_org_apache_xalan_internal_xsltc_compiler_Pattern__com_sun_org_apache_xalan_internal_xsltc_compiler_Template__java_util_Map_java_lang_String__com_sun_org_apache_xalan_internal_xsltc_compiler_Key___ = flattenAlternative__com_sun_org_apache_xalan_internal_xsltc_compiler_Pattern__com_sun_org_apache_xalan_internal_xsltc_compiler_Template__java_util_Map_java_lang_String__com_sun_org_apache_xalan_internal_xsltc_compiler_Key___
| 112.142857
| 461
| 0.942675
| 116
| 785
| 5.327586
| 0.25
| 0.087379
| 0.131068
| 0.218447
| 0.893204
| 0.893204
| 0.893204
| 0.893204
| 0.893204
| 0.893204
| 0
| 0.006631
| 0.03949
| 785
| 6
| 462
| 130.833333
| 0.812997
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
44d74d8f340c7a5f753743f6a5015a8c69ee7cb5
| 5,265
|
py
|
Python
|
backup/torchray4mmf/multimodal_extremal_perturbation_test.py
|
yongkangzzz/mmfgroup
|
098a78c83e1c2973dc895d1dc7fd30d7d3668143
|
[
"MIT"
] | null | null | null |
backup/torchray4mmf/multimodal_extremal_perturbation_test.py
|
yongkangzzz/mmfgroup
|
098a78c83e1c2973dc895d1dc7fd30d7d3668143
|
[
"MIT"
] | null | null | null |
backup/torchray4mmf/multimodal_extremal_perturbation_test.py
|
yongkangzzz/mmfgroup
|
098a78c83e1c2973dc895d1dc7fd30d7d3668143
|
[
"MIT"
] | null | null | null |
from multimodal_extremal_perturbation import multi_extremal_perturbation
from torchray.attribution.extremal_perturbation import contrastive_reward
import torch
import matplotlib.pyplot as plt
def testMultiExtremalPerturbationStandardCase():
from mmf.models.mmbt import MMBT
from custom_mmbt import MMBTGridHMInterfaceOnlyImage
device = torch.device(
"cuda:0" if torch.cuda.is_available() else "cpu")
text = "How I want to say hello to Asian people"
model = MMBTGridHMInterfaceOnlyImage(
MMBT.from_pretrained("mmbt.hateful_memes.images"), text)
model = model.to(device)
image_path = "https://img.17qq.com/images/ghhngkfnkwy.jpeg"
image_tensor = model.imageToTensor(image_path)
# if device has some error just comment it
image_tensor = image_tensor.to(device)
_out, out, = multi_extremal_perturbation(model,
torch.unsqueeze(image_tensor, 0),
image_path,
text,
0,
reward_func=contrastive_reward,
debug=True,
max_iter=200,
areas=[0.12],
show_text_result=True)
def testMultiExtremalPerturbationWithFloatMaskArea():
from mmf.models.mmbt import MMBT
from custom_mmbt import MMBTGridHMInterfaceOnlyImage
device = torch.device(
"cuda:0" if torch.cuda.is_available() else "cpu")
text = "How I want to say hello to Asian people"
model = MMBTGridHMInterfaceOnlyImage(
MMBT.from_pretrained("mmbt.hateful_memes.images"), text)
model = model.to(device)
image_path = "https://img.17qq.com/images/ghhngkfnkwy.jpeg"
image_tensor = model.imageToTensor(image_path)
# if device has some error just comment it
image_tensor = image_tensor.to(device)
_out, out, = multi_extremal_perturbation(model,
torch.unsqueeze(image_tensor, 0),
image_path,
text,
0,
reward_func=contrastive_reward,
debug=True,
max_iter=200,
areas=0.12,
show_text_result=True)
def testMultiExtremalPerturbationWithDeleteVarient():
from mmf.models.mmbt import MMBT
from custom_mmbt import MMBTGridHMInterfaceOnlyImage
device = torch.device(
"cuda:0" if torch.cuda.is_available() else "cpu")
text = "How I want to say hello to Asian people"
model = MMBTGridHMInterfaceOnlyImage(
MMBT.from_pretrained("mmbt.hateful_memes.images"), text)
model = model.to(device)
image_path = "https://img.17qq.com/images/ghhngkfnkwy.jpeg"
image_tensor = model.imageToTensor(image_path)
# if device has some error just comment it
image_tensor = image_tensor.to(device)
_out, out, = multi_extremal_perturbation(model,
torch.unsqueeze(image_tensor, 0),
image_path,
text,
0,
reward_func=contrastive_reward,
debug=True,
max_iter=200,
areas=0.12,
variant="delete",
show_text_result=True)
def testMultiExtremalPerturbationWithSmoothMask():
from mmf.models.mmbt import MMBT
from custom_mmbt import MMBTGridHMInterfaceOnlyImage
device = torch.device(
"cuda:0" if torch.cuda.is_available() else "cpu")
text = "How I want to say hello to Asian people"
model = MMBTGridHMInterfaceOnlyImage(
MMBT.from_pretrained("mmbt.hateful_memes.images"), text)
model = model.to(device)
image_path = "https://img.17qq.com/images/ghhngkfnkwy.jpeg"
image_tensor = model.imageToTensor(image_path)
# if device has some error just comment it
image_tensor = image_tensor.to(device)
_out, out, = multi_extremal_perturbation(model,
torch.unsqueeze(image_tensor, 0),
image_path,
text,
0,
reward_func=contrastive_reward,
debug=True,
max_iter=200,
areas=[0.12],
smooth=0.5,
show_text_result=True)
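# Hedged refactor sketch (hypothetical helper, not in the original file): the
# four tests above repeat the same model/image setup; every name reused here
# comes from the module itself.
def _build_model_and_image():
    from mmf.models.mmbt import MMBT
    from custom_mmbt import MMBTGridHMInterfaceOnlyImage
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    text = "How I want to say hello to Asian people"
    model = MMBTGridHMInterfaceOnlyImage(
        MMBT.from_pretrained("mmbt.hateful_memes.images"), text).to(device)
    image_tensor = model.imageToTensor(
        "https://img.17qq.com/images/ghhngkfnkwy.jpeg").to(device)
    return model, image_tensor, text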
| 41.132813
| 78
| 0.505223
| 464
| 5,265
| 5.556034
| 0.170259
| 0.06827
| 0.048487
| 0.026377
| 0.849884
| 0.841738
| 0.841738
| 0.841738
| 0.841738
| 0.841738
| 0
| 0.015364
| 0.431339
| 5,265
| 128
| 79
| 41.132813
| 0.845691
| 0.030959
| 0
| 0.893617
| 0
| 0
| 0.092978
| 0.019616
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042553
| false
| 0
| 0.12766
| 0
| 0.170213
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|