hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
953c1796ae0166ed754566691bfcac7af760d53c | 112 | py | Python | win_app_packager/__main__.py | barry-scott/PythonWinAppPackager | 82a67aa87ee1bd3fafc9b6056960161da8c02d41 | [
"Apache-2.0"
] | 3 | 2016-01-20T10:22:22.000Z | 2020-02-18T05:31:29.000Z | win_app_packager/__main__.py | barry-scott/PythonWinAppPackager | 82a67aa87ee1bd3fafc9b6056960161da8c02d41 | [
"Apache-2.0"
] | 1 | 2016-04-19T00:40:49.000Z | 2016-05-10T03:07:38.000Z | win_app_packager/__main__.py | barry-scott/PythonWinAppPackager | 82a67aa87ee1bd3fafc9b6056960161da8c02d41 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
import sys
import win_app_packager
# Delegate to the package's command dispatcher and propagate its return
# value as the process exit status.
sys.exit( win_app_packager.dispatchCommand( sys.argv ) )
| 18.666667 | 56 | 0.794643 | 17 | 112 | 5 | 0.647059 | 0.141176 | 0.329412 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009901 | 0.098214 | 112 | 5 | 57 | 22.4 | 0.831683 | 0.151786 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.666667 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
9541f2ebedddad4915dc548f023d13ce17054c93 | 4,767 | py | Python | regress/tests-flood.py | fp7-ofelia/VeRTIGO | 11f39f819196c8352611852435dea17bc6a2292f | [
"BSD-3-Clause"
] | 2 | 2016-10-12T08:20:00.000Z | 2017-05-09T13:13:18.000Z | regress/tests-flood.py | fp7-ofelia/VeRTIGO | 11f39f819196c8352611852435dea17bc6a2292f | [
"BSD-3-Clause"
] | null | null | null | regress/tests-flood.py | fp7-ofelia/VeRTIGO | 11f39f819196c8352611852435dea17bc6a2292f | [
"BSD-3-Clause"
] | 1 | 2020-10-01T07:57:34.000Z | 2020-10-01T07:57:34.000Z | #!/usr/bin/python
from fvregress import *
import string # really? you have to do this?
import sys
# Flood-behaviour regression tests: two guest controllers ("alice" and
# "bob") share one FlowVisor-virtualised switch.  OpenFlow messages are
# replayed through the slicer and the rewritten output is compared
# byte-for-byte against expected hex dumps.
#
# NOTE(review): indentation was reconstructed for this script; the exact
# nesting of h.lamePause() (inside vs. after the else branch) should be
# confirmed against the original source.
wantPause = True
#################################### Start Tests
try:
    h = FvRegress()
    port=16633
    h.addController("alice", 54321)
    h.addController("bob", 54322)
    if len(sys.argv) > 1 :
        # A port argument means a FlowVisor instance is already running:
        # attach to it and allow a generous timeout for manual debugging.
        wantPause = False
        port=int(sys.argv[1])
        timeout=60
        h.useAlreadyRunningFlowVisor(port)
    else:
        # Otherwise spawn a fresh FlowVisor from the flood test config.
        wantPause = False
        timeout=5
        h.spawnFlowVisor(configFile="tests-flood.xml")
    h.lamePause()
    h.addSwitch(name='switch1',port=port)
    if wantPause:
        doPause("start tests")
    ############################################################
    # A features request from a guest must be re-stamped with a new xid
    # before being forwarded to the switch.
    feature_request = FvRegress.OFVERSION + '05 0008 2d47 c5eb'
    feature_request_after = FvRegress.OFVERSION + '05 0008 0000 0102'
    h.runTest(name="feature_request",timeout=timeout, events= [
        TestEvent( "send","guest","alice", feature_request),
        TestEvent( "recv","switch","switch1", feature_request_after,strict=True),
    ])
    ############################################################
    feature_reply = FvRegress.OFVERSION + '''06 00e0 0000 0102 0000 76a9
d40d 2548 0000 0100 0200 0000 0000 001f
0000 03ff 0000 1ac1 51ff ef8a 7665 7468
3100 0000 0000 0000 0000 0000 0000 0000
0000 0000 0000 00c0 0000 0000 0000 0000
0000 0000 0001 ce2f a287 f670 7665 7468
3300 0000 0000 0000 0000 0000 0000 0000
0000 0000 0000 00c0 0000 0000 0000 0000
0000 0000 0002 ca8a 1ef3 77ef 7665 7468
3500 0000 0000 0000 0000 0000 0000 0000
0000 0000 0000 00c0 0000 0000 0000 0000
0000 0000 0003 fabc 778d 7e0b 7665 7468
3700 0000 0000 0000 0000 0000 0000 0000
0000 0000 0000 00c0 0000 0000 0000 0000
0000 0000'''
    # this reply should strip the STP bit, and trim the ports down to the allowable set
    feature_reply_after =FvRegress.OFVERSION + '''06 00b0 2d47 c5eb 0000 76a9 d40d 2548
0000 0100 0200 0000 0000 001f 0000 03ff
0000 1ac1 51ff ef8a 7665 7468 3100 0000
0000 0000 0000 0000 0000 0000 0000 0000
0000 00c0 0000 0000 0000 0000 0000 0000
0002 ca8a 1ef3 77ef 7665 7468 3500 0000
0000 0000 0000 0000 0000 0000 0000 0000
0000 00c0 0000 0000 0000 0000 0000 0000
0003fabc778d7e0b766574683700000000000000000000000000000000000000000000c0000000000000000000000000'''
    h.runTest(name="feature_reply", timeout=timeout, events= [
        TestEvent( "send","switch","switch1", feature_reply),
        TestEvent( "recv","guest","alice", feature_reply_after),
    ])
    ####################################################################################
    # A packet_out to the FLOOD pseudo-port from alice must be expanded
    # into the explicit list of ports her slice is allowed to reach.
    packet_out_flood = FvRegress.OFVERSION + '''0d 0058 0000 abcd ffff ffff
ffff 0008 0000 0008 fffb 0080 0000 0000
0001 0000 0000 0002 0800 4500 0032 0000
4000 4011 2868 c0a8 c800 c0a8 c901 0001
0000 001e d7c3 cdc0 251b e6dc ea0c 726d
973f 2b71 c2e4 1b6f bc11 8250'''
    packet_out_flood_aftr = FvRegress.OFVERSION + '''0d 00 68 09 01 00 00 ff ff ff ff ff ff 00 18
00 00 00 08 00 00 00 80 00 00 00 08 00 02 00 80
00 00 00 08 00 03 00 80 00 00 00 00 00 01 00 00
00 00 00 02 08 00 45 00 00 32 00 00 40 00 40 11
28 68 c0 a8 c8 00 c0 a8 c9 01 00 01 00 00 00 1e
d7 c3 cd c0 25 1b e6 dc ea 0c 72 6d 97 3f 2b 71
c2 e4 1b 6f bc 11 82 50'''
    packet_out2_flood = FvRegress.OFVERSION + '''0d 0058 0000 abcd ffff ffff
ffff 0008 0000 0008 fffb 0080 0000 0000
0002 0000 0000 0001 0800 4500 0032 0000
4000 4011 2868 c0a8 c800 c0a8 c901 0001
0000 001e d7c3 cdc0 251b e6dc ea0c 726d
973f 2b71 c2e4 1b6f bc11 8250'''
    # bob's expected output is identical to his input: per the test name,
    # his flood passes through natively, unrewritten.
    packet_out2_flood_aftr = packet_out2_flood
    h.runTest(name="packet_out native flood for bob", timeout=timeout, events= [
        TestEvent( "send","guest",'alice', packet_out_flood),
        TestEvent( "recv","switch",'switch1', packet_out_flood_aftr),
        TestEvent( "send","guest",'bob', packet_out2_flood),
        TestEvent( "recv","switch",'switch1', packet_out2_flood_aftr),
    ])
    #########################################
    # more tests for this setup HERE
    #################################### End Tests
finally:
    # Always tear down the FlowVisor / switch connections, optionally
    # pausing first so a developer can inspect state.
    if wantPause:
        doPause("start cleanup")
    h.cleanup()
| 43.336364 | 115 | 0.563038 | 609 | 4,767 | 4.34647 | 0.313629 | 0.272006 | 0.326407 | 0.362675 | 0.479411 | 0.445032 | 0.417076 | 0.374008 | 0.374008 | 0.374008 | 0 | 0.364497 | 0.313405 | 4,767 | 109 | 116 | 43.733945 | 0.444241 | 0.037969 | 0 | 0.267442 | 0 | 0 | 0.607411 | 0.022514 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.034884 | 0 | 0.034884 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
958247dbda1306c6b7a629364a2c9cab15ddea37 | 934 | py | Python | torchrecsys/embeddings/init_embeddings.py | FrancescoI/torchrecsys | 4da133c7d1c5223c8d386571701425122b741543 | [
"MIT"
] | 3 | 2022-02-08T13:42:49.000Z | 2022-02-23T17:37:41.000Z | torchrecsys/embeddings/init_embeddings.py | FrancescoI/torchrecsys | 4da133c7d1c5223c8d386571701425122b741543 | [
"MIT"
] | null | null | null | torchrecsys/embeddings/init_embeddings.py | FrancescoI/torchrecsys | 4da133c7d1c5223c8d386571701425122b741543 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import torch
class ScaledEmbedding(torch.nn.Embedding):
    """
    Embedding layer whose weights are drawn from a normal
    distribution scaled by the inverse of the embedding dimension.
    """

    def reset_parameters(self):
        """
        Initialize parameters.
        """
        # Scaling the standard deviation by 1 / embedding_dim keeps the
        # expected magnitude of each embedding vector roughly constant
        # regardless of the embedding size.
        weight = self.weight.data
        weight.normal_(0, 1.0 / self.embedding_dim)
        if self.padding_idx is not None:
            # The padding row must carry no signal.
            weight[self.padding_idx].fill_(0)
class ZeroEmbedding(torch.nn.Embedding):
    """
    Embedding layer that initialises all of its values to zero.

    Used for bias terms, which should start with no contribution to
    the model's output.
    """
    # NOTE: the original docstring was copy-pasted from ScaledEmbedding
    # and wrongly claimed a scaled-normal initialisation.

    def reset_parameters(self):
        """
        Initialize parameters.
        """
        self.weight.data.zero_()
        if self.padding_idx is not None:
            # Redundant after zero_(), but kept for symmetry with
            # ScaledEmbedding and for safety if the init ever changes.
            self.weight.data[self.padding_idx].fill_(0)
959a094b44eee7cc42c187871e1b108169fd394b | 27,727 | py | Python | tests/molecular/molecules/test_building_block.py | fiszczyp/stk | 56e75c493a472d98ccbf3af14cc9ce7f12cbe3d7 | [
"MIT"
] | null | null | null | tests/molecular/molecules/test_building_block.py | fiszczyp/stk | 56e75c493a472d98ccbf3af14cc9ce7f12cbe3d7 | [
"MIT"
] | null | null | null | tests/molecular/molecules/test_building_block.py | fiszczyp/stk | 56e75c493a472d98ccbf3af14cc9ce7f12cbe3d7 | [
"MIT"
] | null | null | null | import os
import numpy as np
import stk
import itertools as it
from collections import Counter
from os.path import join
import rdkit.Chem.AllChem as rdkit
# Ensure the output directory for these tests exists.  makedirs with
# exist_ok avoids the check-then-create race of the original
# os.path.exists() + os.mkdir() pair.
os.makedirs('building_block_tests_output', exist_ok=True)
def test_init_rdkit():
    """
    Check ``BuildingBlock.init_from_rdkit_mol``.

    Verifies functional group perception, atom/bond bookkeeping,
    cache semantics of ``use_cache`` and handling of charged atoms.
    """
    rdkit_mol = rdkit.AddHs(rdkit.MolFromSmiles('NCCCN'))
    rdkit.EmbedMolecule(rdkit_mol, rdkit.ETKDGv2())
    mol0 = stk.BuildingBlock.init_from_rdkit_mol(rdkit_mol, ['amine'])

    # Test that all values are initialized correctly.
    assert len(mol0.func_groups) == 2
    fg_types = stk.dedupe(fg.fg_type.name for fg in mol0.func_groups)
    assert sum(1 for _ in fg_types) == 1
    assert len(mol0.atoms) == 15
    assert len(mol0.bonds) == 14
    atom_count = {
        (stk.H, 0): 10,
        (stk.N, 0): 2,
        (stk.C, 0): 3
    }
    assert atom_count == Counter(
        (a.__class__, a.charge) for a in mol0.atoms
    )
    expected_bonds = {
        frozenset({stk.N, stk.C}): 2,
        frozenset({stk.C}): 2,
        frozenset({stk.H, stk.N}): 4,
        frozenset({stk.H, stk.C}): 6
    }
    assert expected_bonds == Counter(
        frozenset({b.atom1.__class__, b.atom2.__class__})
        for b in mol0.bonds
    )

    # Test that caching is working properly.
    mol1 = stk.BuildingBlock.init_from_rdkit_mol(rdkit_mol, ['amine'])
    assert mol0 is not mol1
    mol2 = stk.BuildingBlock.init_from_rdkit_mol(
        mol=rdkit_mol,
        functional_groups=['amine'],
        use_cache=True
    )
    mol3 = stk.BuildingBlock.init_from_rdkit_mol(
        mol=rdkit_mol,
        functional_groups=['amine'],
        use_cache=True
    )
    assert mol0 is not mol2 and mol1 is not mol2
    assert mol2 is mol3
    # A different functional group choice must produce a new cache entry.
    mol4 = stk.BuildingBlock.init_from_rdkit_mol(
        mol=rdkit_mol,
        functional_groups=['aldehyde'],
        use_cache=True
    )
    assert mol3 is not mol4

    # Make sure that charged molecules are handled correctly.
    negative_carbon = rdkit.AddHs(rdkit.MolFromSmiles('NC[C-]CN'))
    rdkit.EmbedMolecule(negative_carbon, rdkit.ETKDGv2())
    mol5 = stk.BuildingBlock.init_from_rdkit_mol(
        mol=negative_carbon,
        functional_groups=['amine'],
        use_cache=True
    )
    assert mol5 is not mol0
    # Test that all values are initialized correctly.
    assert len(mol5.func_groups) == 2
    fg_types = stk.dedupe(fg.fg_type.name for fg in mol5.func_groups)
    assert sum(1 for _ in fg_types) == 1
    assert len(mol5.atoms) == 13
    assert len(mol5.bonds) == 12
    atom_count = {
        (stk.C, 0): 2,
        (stk.C, -1): 1,
        (stk.N, 0): 2,
        (stk.H, 0): 8,
    }
    assert atom_count == Counter(
        (a.__class__, a.charge) for a in mol5.atoms
    )
    expected_bonds = {
        frozenset({stk.N, stk.C}): 2,
        frozenset({stk.C}): 2,
        frozenset({stk.H, stk.N}): 4,
        frozenset({stk.H, stk.C}): 4
    }
    assert expected_bonds == Counter(
        frozenset({b.atom1.__class__, b.atom2.__class__})
        for b in mol5.bonds
    )

    negative_nitrogen = rdkit.AddHs(rdkit.MolFromSmiles('[N-]CCCN'))
    rdkit.EmbedMolecule(negative_nitrogen, rdkit.ETKDGv2())
    mol6 = stk.BuildingBlock.init_from_rdkit_mol(
        mol=negative_nitrogen,
        functional_groups=['amine'],
        use_cache=True
    )
    assert mol6 is not mol5 and mol6 is not mol0
    # Test that all values are initialized correctly.
    # The charged nitrogen is not perceived as an amine, hence only 1 fg.
    assert len(mol6.func_groups) == 1
    fg_types = stk.dedupe(fg.fg_type.name for fg in mol6.func_groups)
    assert sum(1 for _ in fg_types) == 1
    assert len(mol6.atoms) == 13
    assert len(mol6.bonds) == 12
    atom_count = {
        (stk.C, 0): 3,
        (stk.N, 0): 1,
        (stk.N, -1): 1,
        (stk.H, 0): 8,
    }
    assert atom_count == Counter(
        (a.__class__, a.charge) for a in mol6.atoms
    )
    expected_bonds = {
        frozenset({stk.N, stk.C}): 2,
        frozenset({stk.C}): 2,
        frozenset({stk.H, stk.N}): 2,
        frozenset({stk.H, stk.C}): 6
    }
    assert expected_bonds == Counter(
        frozenset({b.atom1.__class__, b.atom2.__class__})
        for b in mol6.bonds
    )
def test_init_mol(bb_dir):
    """
    Check ``BuildingBlock.init_from_file`` with ``.mol`` files.

    ``bb_dir`` is presumably a pytest fixture pointing at the directory
    holding the test structure files — TODO confirm against conftest.
    """
    mol0 = stk.BuildingBlock.init_from_file(
        path=join(bb_dir, 'neutral.mol'),
        functional_groups=['amine']
    )
    # Test that all values are initialized correctly.
    assert len(mol0.func_groups) == 2
    fg_types = stk.dedupe(fg.fg_type.name for fg in mol0.func_groups)
    assert sum(1 for _ in fg_types) == 1
    assert len(mol0.atoms) == 15
    assert len(mol0.bonds) == 14
    atom_count = {
        (stk.H, 0): 10,
        (stk.N, 0): 2,
        (stk.C, 0): 3
    }
    assert atom_count == Counter(
        (a.__class__, a.charge) for a in mol0.atoms
    )
    expected_bonds = {
        frozenset({stk.N, stk.C}): 2,
        frozenset({stk.C}): 2,
        frozenset({stk.H, stk.N}): 4,
        frozenset({stk.H, stk.C}): 6
    }
    assert expected_bonds == Counter(
        frozenset({b.atom1.__class__, b.atom2.__class__})
        for b in mol0.bonds
    )

    # Test that caching is working properly.
    mol1 = stk.BuildingBlock.init_from_file(
        path=join(bb_dir, 'neutral.mol'),
        functional_groups=['amine']
    )
    assert mol0 is not mol1
    mol2 = stk.BuildingBlock.init_from_file(
        path=join(bb_dir, 'neutral.mol'),
        functional_groups=['amine'],
        use_cache=True
    )
    mol3 = stk.BuildingBlock.init_from_file(
        path=join(bb_dir, 'neutral.mol'),
        functional_groups=['amine'],
        use_cache=True
    )
    assert mol0 is not mol2 and mol1 is not mol2
    assert mol2 is mol3
    # A different functional group choice must produce a new cache entry.
    mol4 = stk.BuildingBlock.init_from_file(
        path=join(bb_dir, 'neutral.mol'),
        functional_groups=['aldehyde'],
        use_cache=True
    )
    assert mol3 is not mol4

    # Make sure that charged molecules are handled correctly.
    mol5 = stk.BuildingBlock.init_from_file(
        path=join(bb_dir, 'negative_carbon.mol'),
        functional_groups=['amine'],
        use_cache=True
    )
    assert mol5 is not mol0
    # Test that all values are initialized correctly.
    assert len(mol5.func_groups) == 2
    fg_types = stk.dedupe(fg.fg_type.name for fg in mol5.func_groups)
    assert sum(1 for _ in fg_types) == 1
    assert len(mol5.atoms) == 13
    assert len(mol5.bonds) == 12
    atom_count = {
        (stk.C, 0): 2,
        (stk.C, -1): 1,
        (stk.N, 0): 2,
        (stk.H, 0): 8,
    }
    assert atom_count == Counter(
        (a.__class__, a.charge) for a in mol5.atoms
    )
    expected_bonds = {
        frozenset({stk.N, stk.C}): 2,
        frozenset({stk.C}): 2,
        frozenset({stk.H, stk.N}): 4,
        frozenset({stk.H, stk.C}): 4
    }
    assert expected_bonds == Counter(
        frozenset({b.atom1.__class__, b.atom2.__class__})
        for b in mol5.bonds
    )

    mol6 = stk.BuildingBlock.init_from_file(
        path=join(bb_dir, 'negative_nitrogen.mol'),
        functional_groups=['amine'],
        use_cache=True
    )
    assert mol6 is not mol5 and mol6 is not mol0
    # Test that all values are initialized correctly.
    assert len(mol6.func_groups) == 1
    fg_types = stk.dedupe(fg.fg_type.name for fg in mol6.func_groups)
    assert sum(1 for _ in fg_types) == 1
    assert len(mol6.atoms) == 13
    assert len(mol6.bonds) == 12
    atom_count = {
        (stk.C, 0): 3,
        (stk.N, 0): 1,
        (stk.N, -1): 1,
        (stk.H, 0): 8,
    }
    assert atom_count == Counter(
        (a.__class__, a.charge) for a in mol6.atoms
    )
    expected_bonds = {
        frozenset({stk.N, stk.C}): 2,
        frozenset({stk.C}): 2,
        frozenset({stk.H, stk.N}): 2,
        frozenset({stk.H, stk.C}): 6
    }
    assert expected_bonds == Counter(
        frozenset({b.atom1.__class__, b.atom2.__class__})
        for b in mol6.bonds
    )
def test_init_pdb(bb_dir):
    """
    Check ``BuildingBlock.init_from_file`` with ``.pdb`` files.

    Mirrors ``test_init_mol`` but loads the PDB versions of the same
    structures from ``bb_dir``.
    """
    mol0 = stk.BuildingBlock.init_from_file(
        path=join(bb_dir, 'neutral.pdb'),
        functional_groups=['amine']
    )
    # Test that all values are initialized correctly.
    assert len(mol0.func_groups) == 2
    fg_types = stk.dedupe(fg.fg_type.name for fg in mol0.func_groups)
    assert sum(1 for _ in fg_types) == 1
    assert len(mol0.atoms) == 15
    assert len(mol0.bonds) == 14
    atom_count = {
        (stk.H, 0): 10,
        (stk.N, 0): 2,
        (stk.C, 0): 3
    }
    assert atom_count == Counter(
        (a.__class__, a.charge) for a in mol0.atoms
    )
    expected_bonds = {
        frozenset({stk.N, stk.C}): 2,
        frozenset({stk.C}): 2,
        frozenset({stk.H, stk.N}): 4,
        frozenset({stk.H, stk.C}): 6
    }
    assert expected_bonds == Counter(
        frozenset({b.atom1.__class__, b.atom2.__class__})
        for b in mol0.bonds
    )

    # Test that caching is working properly.
    mol1 = stk.BuildingBlock.init_from_file(
        path=join(bb_dir, 'neutral.pdb'),
        functional_groups=['amine']
    )
    assert mol0 is not mol1
    mol2 = stk.BuildingBlock.init_from_file(
        path=join(bb_dir, 'neutral.pdb'),
        functional_groups=['amine'],
        use_cache=True
    )
    mol3 = stk.BuildingBlock.init_from_file(
        path=join(bb_dir, 'neutral.pdb'),
        functional_groups=['amine'],
        use_cache=True
    )
    assert mol0 is not mol2 and mol1 is not mol2
    assert mol2 is mol3
    # A different functional group choice must produce a new cache entry.
    mol4 = stk.BuildingBlock.init_from_file(
        path=join(bb_dir, 'neutral.pdb'),
        functional_groups=['aldehyde'],
        use_cache=True
    )
    assert mol3 is not mol4

    # Make sure that charged molecules are handled correctly.
    mol5 = stk.BuildingBlock.init_from_file(
        path=join(bb_dir, 'negative_carbon.pdb'),
        functional_groups=['amine'],
        use_cache=True
    )
    assert mol5 is not mol0
    # Test that all values are initialized correctly.
    assert len(mol5.func_groups) == 2
    fg_types = stk.dedupe(fg.fg_type.name for fg in mol5.func_groups)
    assert sum(1 for _ in fg_types) == 1
    assert len(mol5.atoms) == 13
    assert len(mol5.bonds) == 12
    atom_count = {
        (stk.C, 0): 2,
        (stk.C, -1): 1,
        (stk.N, 0): 2,
        (stk.H, 0): 8,
    }
    assert atom_count == Counter(
        (a.__class__, a.charge) for a in mol5.atoms
    )
    expected_bonds = {
        frozenset({stk.N, stk.C}): 2,
        frozenset({stk.C}): 2,
        frozenset({stk.H, stk.N}): 4,
        frozenset({stk.H, stk.C}): 4
    }
    assert expected_bonds == Counter(
        frozenset({b.atom1.__class__, b.atom2.__class__})
        for b in mol5.bonds
    )

    mol6 = stk.BuildingBlock.init_from_file(
        path=join(bb_dir, 'negative_nitrogen.pdb'),
        functional_groups=['amine'],
        use_cache=True
    )
    assert mol6 is not mol5 and mol6 is not mol0
    # Test that all values are initialized correctly.
    assert len(mol6.func_groups) == 1
    fg_types = stk.dedupe(fg.fg_type.name for fg in mol6.func_groups)
    assert sum(1 for _ in fg_types) == 1
    assert len(mol6.atoms) == 13
    assert len(mol6.bonds) == 12
    atom_count = {
        (stk.C, 0): 3,
        (stk.N, 0): 1,
        (stk.N, -1): 1,
        (stk.H, 0): 8,
    }
    assert atom_count == Counter(
        (a.__class__, a.charge) for a in mol6.atoms
    )
    expected_bonds = {
        frozenset({stk.N, stk.C}): 2,
        frozenset({stk.C}): 2,
        frozenset({stk.H, stk.N}): 2,
        frozenset({stk.H, stk.C}): 6
    }
    assert expected_bonds == Counter(
        frozenset({b.atom1.__class__, b.atom2.__class__})
        for b in mol6.bonds
    )
def test_init_from_random_file(bb_dir):
    """
    Check ``BuildingBlock.init_from_random_file``.

    Each glob here matches exactly one file, so the "random" pick is
    deterministic and the same structural assertions as in
    ``test_init_mol`` apply.
    """
    mol0 = stk.BuildingBlock.init_from_random_file(
        file_glob=join(bb_dir, 'neutral.mol'),
        functional_groups=['amine']
    )
    # Test that all values are initialized correctly.
    assert len(mol0.func_groups) == 2
    fg_types = stk.dedupe(fg.fg_type.name for fg in mol0.func_groups)
    assert sum(1 for _ in fg_types) == 1
    assert len(mol0.atoms) == 15
    assert len(mol0.bonds) == 14
    atom_count = {
        (stk.H, 0): 10,
        (stk.N, 0): 2,
        (stk.C, 0): 3
    }
    assert atom_count == Counter(
        (a.__class__, a.charge) for a in mol0.atoms
    )
    expected_bonds = {
        frozenset({stk.N, stk.C}): 2,
        frozenset({stk.C}): 2,
        frozenset({stk.H, stk.N}): 4,
        frozenset({stk.H, stk.C}): 6
    }
    assert expected_bonds == Counter(
        frozenset({b.atom1.__class__, b.atom2.__class__})
        for b in mol0.bonds
    )

    # Test that caching is working properly.
    mol1 = stk.BuildingBlock.init_from_random_file(
        file_glob=join(bb_dir, 'neutral.mol'),
        functional_groups=['amine']
    )
    assert mol0 is not mol1
    mol2 = stk.BuildingBlock.init_from_random_file(
        file_glob=join(bb_dir, 'neutral.mol'),
        functional_groups=['amine'],
        use_cache=True
    )
    mol3 = stk.BuildingBlock.init_from_random_file(
        file_glob=join(bb_dir, 'neutral.mol'),
        functional_groups=['amine'],
        use_cache=True
    )
    assert mol0 is not mol2 and mol1 is not mol2
    assert mol2 is mol3
    # A different functional group choice must produce a new cache entry.
    mol4 = stk.BuildingBlock.init_from_random_file(
        file_glob=join(bb_dir, 'neutral.mol'),
        functional_groups=['aldehyde'],
        use_cache=True
    )
    assert mol3 is not mol4

    # Make sure that charged molecules are handled correctly.
    mol5 = stk.BuildingBlock.init_from_random_file(
        file_glob=join(bb_dir, 'negative_carbon.mol'),
        functional_groups=['amine'],
        use_cache=True
    )
    assert mol5 is not mol0
    # Test that all values are initialized correctly.
    assert len(mol5.func_groups) == 2
    fg_types = stk.dedupe(fg.fg_type.name for fg in mol5.func_groups)
    assert sum(1 for _ in fg_types) == 1
    assert len(mol5.atoms) == 13
    assert len(mol5.bonds) == 12
    atom_count = {
        (stk.C, 0): 2,
        (stk.C, -1): 1,
        (stk.N, 0): 2,
        (stk.H, 0): 8,
    }
    assert atom_count == Counter(
        (a.__class__, a.charge) for a in mol5.atoms
    )
    expected_bonds = {
        frozenset({stk.N, stk.C}): 2,
        frozenset({stk.C}): 2,
        frozenset({stk.H, stk.N}): 4,
        frozenset({stk.H, stk.C}): 4
    }
    assert expected_bonds == Counter(
        frozenset({b.atom1.__class__, b.atom2.__class__})
        for b in mol5.bonds
    )

    mol6 = stk.BuildingBlock.init_from_random_file(
        file_glob=join(bb_dir, 'negative_nitrogen.mol'),
        functional_groups=['amine'],
        use_cache=True
    )
    assert mol6 is not mol5 and mol6 is not mol0
    # Test that all values are initialized correctly.
    assert len(mol6.func_groups) == 1
    fg_types = stk.dedupe(fg.fg_type.name for fg in mol6.func_groups)
    assert sum(1 for _ in fg_types) == 1
    assert len(mol6.atoms) == 13
    assert len(mol6.bonds) == 12
    atom_count = {
        (stk.C, 0): 3,
        (stk.N, 0): 1,
        (stk.N, -1): 1,
        (stk.H, 0): 8,
    }
    assert atom_count == Counter(
        (a.__class__, a.charge) for a in mol6.atoms
    )
    expected_bonds = {
        frozenset({stk.N, stk.C}): 2,
        frozenset({stk.C}): 2,
        frozenset({stk.H, stk.N}): 2,
        frozenset({stk.H, stk.C}): 6
    }
    assert expected_bonds == Counter(
        frozenset({b.atom1.__class__, b.atom2.__class__})
        for b in mol6.bonds
    )
def test_init_from_smiles():
    """
    Check the ``BuildingBlock`` SMILES constructor.

    Same structural and caching assertions as the file-based tests,
    but the molecules are built directly from SMILES strings.
    """
    mol0 = stk.BuildingBlock('NCCCN', ['amine'])
    # Test that all values are initialized correctly.
    assert len(mol0.func_groups) == 2
    fg_types = stk.dedupe(fg.fg_type.name for fg in mol0.func_groups)
    assert sum(1 for _ in fg_types) == 1
    assert len(mol0.atoms) == 15
    assert len(mol0.bonds) == 14
    atom_count = {
        (stk.H, 0): 10,
        (stk.N, 0): 2,
        (stk.C, 0): 3
    }
    assert atom_count == Counter(
        (a.__class__, a.charge) for a in mol0.atoms
    )
    expected_bonds = {
        frozenset({stk.N, stk.C}): 2,
        frozenset({stk.C}): 2,
        frozenset({stk.H, stk.N}): 4,
        frozenset({stk.H, stk.C}): 6
    }
    assert expected_bonds == Counter(
        frozenset({b.atom1.__class__, b.atom2.__class__})
        for b in mol0.bonds
    )

    # Test that caching is working properly.
    mol1 = stk.BuildingBlock('NCCCN', ['amine'])
    assert mol0 is not mol1
    mol2 = stk.BuildingBlock(
        smiles='NCCCN',
        functional_groups=['amine'],
        use_cache=True
    )
    mol3 = stk.BuildingBlock(
        smiles='NCCCN',
        functional_groups=['amine'],
        use_cache=True
    )
    assert mol0 is not mol2 and mol1 is not mol2
    assert mol2 is mol3
    # A different functional group choice must produce a new cache entry.
    mol4 = stk.BuildingBlock(
        smiles='NCCCN',
        functional_groups=['aldehyde'],
        use_cache=True
    )
    assert mol3 is not mol4

    # Make sure that charged molecules are handled correctly.
    mol5 = stk.BuildingBlock(
        smiles='NC[C-]CN',
        functional_groups=['amine'],
        use_cache=True
    )
    assert mol5 is not mol0
    # Test that all values are initialized correctly.
    assert len(mol5.func_groups) == 2
    fg_types = stk.dedupe(fg.fg_type.name for fg in mol5.func_groups)
    assert sum(1 for _ in fg_types) == 1
    assert len(mol5.atoms) == 13
    assert len(mol5.bonds) == 12
    atom_count = {
        (stk.C, 0): 2,
        (stk.C, -1): 1,
        (stk.N, 0): 2,
        (stk.H, 0): 8,
    }
    assert atom_count == Counter(
        (a.__class__, a.charge) for a in mol5.atoms
    )
    expected_bonds = {
        frozenset({stk.N, stk.C}): 2,
        frozenset({stk.C}): 2,
        frozenset({stk.H, stk.N}): 4,
        frozenset({stk.H, stk.C}): 4,
    }
    assert expected_bonds == Counter(
        frozenset({b.atom1.__class__, b.atom2.__class__})
        for b in mol5.bonds
    )

    mol6 = stk.BuildingBlock(
        smiles='[N-]CCCN',
        functional_groups=['amine'],
        use_cache=True
    )
    assert mol6 is not mol5 and mol6 is not mol0
    # Test that all values are initialized correctly.
    assert len(mol6.func_groups) == 1
    fg_types = stk.dedupe(fg.fg_type.name for fg in mol6.func_groups)
    assert sum(1 for _ in fg_types) == 1
    assert len(mol6.atoms) == 13
    assert len(mol6.bonds) == 12
    atom_count = {
        (stk.C, 0): 3,
        (stk.N, 0): 1,
        (stk.N, -1): 1,
        (stk.H, 0): 8,
    }
    assert atom_count == Counter(
        (a.__class__, a.charge) for a in mol6.atoms
    )
    expected_bonds = {
        frozenset({stk.N, stk.C}): 2,
        frozenset({stk.C}): 2,
        frozenset({stk.H, stk.N}): 2,
        frozenset({stk.H, stk.C}): 6
    }
    assert expected_bonds == Counter(
        frozenset({b.atom1.__class__, b.atom2.__class__})
        for b in mol6.bonds
    )
def test_get_bonder_ids(aldehyde3):
    """
    Check ``get_bonder_ids`` with default, explicit and subset fg ids.

    ``aldehyde3`` is presumably a pytest fixture providing a
    3-functional-group building block — TODO confirm against conftest.
    """
    # Make sure that by default all bonder ids are yielded.
    all_ids = []
    for func_group in aldehyde3.func_groups:
        all_ids.extend(func_group.get_bonder_ids())
    all_ids.sort()
    default_ids = sorted(aldehyde3.get_bonder_ids())
    assert default_ids == all_ids
    # Make sure that providing all fg ids explicitly is the same
    # as default behaviour.
    fg_ids = range(len(aldehyde3.func_groups))
    explicit_ids = sorted(aldehyde3.get_bonder_ids(fg_ids=fg_ids))
    assert default_ids == explicit_ids
    # Make sure when providing a subset of fg ids, only those are
    # returned.
    subset_ids = []
    fgs = [aldehyde3.func_groups[0], aldehyde3.func_groups[2]]
    for func_group in fgs:
        subset_ids.extend(func_group.get_bonder_ids())
    subset_ids.sort()
    returned_subset_ids = sorted(
        aldehyde3.get_bonder_ids(fg_ids=[0, 2])
    )
    assert returned_subset_ids == subset_ids
def test_get_bonder_centroids(tmp_aldehyde3):
    """
    Check ``get_bonder_centroids`` after manually placing bonder atoms.

    Bonder atoms are moved to known coordinates, so each functional
    group's centroid is known exactly.
    """
    # Set the position of all bonder atoms to (0, 0, 0).
    bonder_ids = list(tmp_aldehyde3.get_bonder_ids())
    coords = tmp_aldehyde3.get_position_matrix()
    coords[bonder_ids, :] = np.zeros((len(bonder_ids), 3))
    tmp_aldehyde3.set_position_matrix(coords)
    # Check that the bonder centroids are all at (0, 0, 0).
    for i, centroid in enumerate(tmp_aldehyde3.get_bonder_centroids()):
        assert np.allclose(centroid, [0, 0, 0], 1e-6)
    # i is the loop variable after the loop: 3 centroids were yielded.
    assert i == 2
    # Set the position of the bonder atoms in functional groups 1 and 2
    # to (1, 1, 1).
    fg_ids = [1, 2]
    bonder_ids = list(tmp_aldehyde3.get_bonder_ids(fg_ids=fg_ids))
    coords[bonder_ids, :] = np.ones((len(bonder_ids), 3))
    tmp_aldehyde3.set_position_matrix(coords)
    # Check that the bonder centroids of functional groups 1 and 2 are
    # at (1, 1, 1).
    centroids = tmp_aldehyde3.get_bonder_centroids(fg_ids=[1, 2])
    for i, centroid in enumerate(centroids):
        assert np.allclose(centroid, [1, 1, 1], 1e-6)
    assert i == 1
    # Check that the bonder centroid of functional group 0 is still at
    # (0, 0, 0).
    centroids = tmp_aldehyde3.get_bonder_centroids(fg_ids=[0])
    for i, centroid in enumerate(centroids):
        assert np.allclose(centroid, [0, 0, 0], 1e-6)
    assert i == 0
def test_get_bonder_plane(tmp_amine4):
    """
    Check ``get_bonder_plane`` returns (a, b, c, d) with a*x+b*y+c*z = d.

    With 3 fgs the plane passes exactly through the centroids; with 4
    it must be a best-fit plane.
    """
    # First check that when 3 fgs are used, the bonder centroids all
    # sit on the plane.
    for fg_ids in it.combinations(range(4), 3):
        a, b, c, d = tmp_amine4.get_bonder_plane(fg_ids=fg_ids)
        for x, y, z in tmp_amine4.get_bonder_centroids(fg_ids=fg_ids):
            product = a*x + b*y + c*z
            assert abs(product-d) < 1e-6
    # When 4 are used make sure that a best fit plane is produced.
    # Ensure that centroids are placed such that the plane of best fit
    # Goes through two of the centroids and is equidistant from the
    # other two.
    bonder_ids = list(tmp_amine4.get_bonder_ids())
    coords = tmp_amine4.get_position_matrix()
    coords[bonder_ids[0]] = [1, 1, 0]
    coords[bonder_ids[1]] = [0, 0, 0.5]
    coords[bonder_ids[2]] = [0, 0, -0.5]
    coords[bonder_ids[3]] = [1, -1, 0]
    tmp_amine4.set_position_matrix(coords)
    a, b, c, d = tmp_amine4.get_bonder_plane()
    # Centroids 0 and 3 lie on the best-fit plane exactly.
    for x, y, z in tmp_amine4.get_bonder_centroids(fg_ids=[0, 3]):
        product = a*x + b*y + c*z
        assert abs(product-d) < 1e-6
    # Centroids 1 and 2 are each 0.5 away from it.
    for x, y, z in tmp_amine4.get_bonder_centroids(fg_ids=[1, 2]):
        product = a*x + b*y + c*z
        assert abs(0.5 - abs(product-d)) < 1e-6
def test_get_bonder_plane_normal(tmp_amine4):
    """The normal of the bonder plane points away from the centroid."""
    bonder_ids = list(tmp_amine4.get_bonder_ids())
    bonder_set = set(bonder_ids)
    non_bonder_ids = [
        atom_id for atom_id in range(len(tmp_amine4.atoms))
        if atom_id not in bonder_set
    ]
    coords = tmp_amine4.get_position_matrix()
    positions = ([1, 1, 0], [0, 0, 0.5], [0, 0, -0.5], [1, -1, 0])
    for atom_id, position in zip(bonder_ids, positions):
        coords[atom_id] = position
    # Place the rest of the molecule above the bonder plane so the
    # plane normal is forced into the positive z direction.
    coords[non_bonder_ids, 2] = 10
    tmp_amine4.set_position_matrix(coords)
    normal = tmp_amine4.get_bonder_plane_normal()
    assert np.allclose(a=normal, b=[0, 0, 1], atol=1e-6)
def test_get_bonder_distances(tmp_amine4):
    """Distances between bonder atoms match their placed coordinates."""
    # Place all bonders on a line.
    coords = tmp_amine4.get_position_matrix()
    for bonder_id in tmp_amine4.get_bonder_ids():
        coords[bonder_id] = [bonder_id, 0, 0]
    tmp_amine4.set_position_matrix(coords)
    # Test default behaviour.
    distances = tmp_amine4.get_bonder_distances()
    for i, (fg1, fg2, distance) in enumerate(distances):
        # Each bonder's x coordinate is its atom id, so the distance
        # equals the absolute difference of the ids.
        coord1 = tmp_amine4.func_groups[fg1].bonders[0].id
        coord2 = tmp_amine4.func_groups[fg2].bonders[0].id
        assert abs(distance - abs(coord1 - coord2)) < 1e-6
    # 4 fgs give C(4, 2) = 6 pairs, so the last index is 5.
    assert i == 5
    # Test explicitly setting fg_ids.
    distances = tmp_amine4.get_bonder_distances(fg_ids=[0, 2, 3])
    for i, (fg1, fg2, distance) in enumerate(distances):
        coord1 = tmp_amine4.func_groups[fg1].bonders[0].id
        coord2 = tmp_amine4.func_groups[fg2].bonders[0].id
        assert abs(distance - abs(coord1 - coord2)) < 1e-6
    # 3 fgs give C(3, 2) = 3 pairs, so the last index is 2.
    assert i == 2
def test_get_bonder_direction_vectors(tmp_amine4):
    """Direction vectors between bonder centroids match their fg ids."""
    pos_mat = tmp_amine4.get_position_matrix()
    # Set the coordinate of each bonder to the id of the fg.
    for fg_id, fg in enumerate(tmp_amine4.func_groups):
        for bonder in fg.get_bonder_ids():
            pos_mat[bonder] = [fg_id, fg_id, fg_id]
    tmp_amine4.set_position_matrix(pos_mat)
    dir_vectors = tmp_amine4.get_bonder_direction_vectors()
    for i, (id1, id2, v) in enumerate(dir_vectors):
        # Calculate the expected direction vector based on ids.
        d = stk.normalize_vector(np.array([id2]*3) - np.array([id1]*3))
        assert np.allclose(d, stk.normalize_vector(v), atol=1e-8)
    # 4 fgs give C(4, 2) = 6 pairs, so the last index is 5.
    assert i == 5
    # Test explicitly setting fg_ids.
    dir_vectors = tmp_amine4.get_bonder_direction_vectors(
        fg_ids=[0, 3]
    )
    for i, (id1, id2, v) in enumerate(dir_vectors):
        # Calculate the expected direction vector based on ids.
        d = stk.normalize_vector(np.array([id2]*3) - np.array([id1]*3))
        assert np.allclose(d, stk.normalize_vector(v), atol=1e-8)
    # Two fgs give exactly one pair.
    assert i == 0
def test_get_centroid_centroid_direction_vector(tmp_amine4):
    """Vector from bonder centroid to molecular centroid is correct."""
    bonder_ids = list(tmp_amine4.get_bonder_ids())
    bonder_set = set(bonder_ids)
    non_bonder_ids = [
        atom_id for atom_id in range(len(tmp_amine4.atoms))
        if atom_id not in bonder_set
    ]
    coords = tmp_amine4.get_position_matrix()
    # Bonders sit at x=10 and everything else at the origin, so the
    # centroid-to-centroid vector points along -x.
    for atom_id in bonder_ids:
        coords[atom_id] = [10, 0, 0]
    coords[non_bonder_ids] = np.zeros((len(non_bonder_ids), 3))
    tmp_amine4.set_position_matrix(coords)
    direction = tmp_amine4.get_centroid_centroid_direction_vector()
    assert np.allclose(
        a=stk.normalize_vector(direction),
        b=[-1, 0, 0],
        atol=1e-8
    )
    # Test explicitly setting the fg_ids.
    selected = [0, 2]
    for atom_id in tmp_amine4.get_bonder_ids(fg_ids=selected):
        coords[atom_id] = [-100, 0, 0]
    tmp_amine4.set_position_matrix(coords)
    direction = tmp_amine4.get_centroid_centroid_direction_vector(
        fg_ids=selected
    )
    assert np.allclose(
        a=stk.normalize_vector(direction),
        b=[1, 0, 0],
        atol=1e-8
    )
def test_get_identity_key(amine2, amine2_conf1, amine2_alt1):
    """Identity keys ignore conformers but distinguish molecules."""
    key = amine2.get_identity_key()
    assert key == amine2_conf1.get_identity_key()
    assert key != amine2_alt1.get_identity_key()
def test_dump_and_load(tmp_amine2):
    """Dumping to a file and loading back preserves the molecule."""
    path = os.path.join('building_block_tests_output', 'mol.dump')
    tmp_amine2.test_attr1 = 'something'
    tmp_amine2.test_attr2 = 12
    tmp_amine2.test_attr3 = ['12', 'something', 21]
    # test_attr4 is deliberately left out of include_attrs below.
    tmp_amine2.test_attr4 = 'skip'
    include_attrs = ['test_attr1', 'test_attr2', 'test_attr3']
    # Add some custom atom properties.
    tmp_amine2.atoms[0].some_prop = 'custom atom prop'
    tmp_amine2.dump(path, include_attrs)
    mol2 = stk.Molecule.load(path)
    assert tmp_amine2 is not mol2
    # Functional groups must match pairwise in class and atom ids.
    fgs = it.zip_longest(mol2.func_groups, tmp_amine2.func_groups)
    for fg1, fg2 in fgs:
        atoms = it.zip_longest(fg1.atoms, fg2.atoms)
        bonders = it.zip_longest(fg1.bonders, fg2.bonders)
        deleters = it.zip_longest(fg1.deleters, fg2.deleters)
        for a1, a2 in it.chain(atoms, bonders, deleters):
            assert a1.__class__ is a2.__class__
            assert a1.id == a2.id
    assert tmp_amine2.test_attr1 == mol2.test_attr1
    assert tmp_amine2.test_attr2 == mol2.test_attr2
    assert tmp_amine2.test_attr3 == mol2.test_attr3
    # The attribute not listed in include_attrs must not survive.
    assert not hasattr(mol2, 'test_attr4')
    for a1, a2 in zip(tmp_amine2.atoms, mol2.atoms):
        assert vars(a1) == vars(a2)
    # Loading with use_cache=True caches the loaded molecule.
    mol3 = stk.Molecule.load(path, use_cache=True)
    assert mol3 is not mol2
    mol4 = stk.Molecule.load(path, use_cache=True)
    assert mol3 is mol4
| 30.910814 | 71 | 0.619468 | 4,046 | 27,727 | 4.029659 | 0.060801 | 0.015947 | 0.027907 | 0.025761 | 0.819431 | 0.80238 | 0.781955 | 0.775331 | 0.746749 | 0.729882 | 0 | 0.040552 | 0.258268 | 27,727 | 896 | 72 | 30.945313 | 0.752212 | 0.088434 | 0 | 0.655267 | 0 | 0 | 0.027995 | 0.00571 | 0 | 0 | 0 | 0 | 0.212038 | 1 | 0.019152 | false | 0 | 0.009576 | 0 | 0.028728 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
95aec8a9202d1522cc752698f7450d7c1ae2e976 | 1,786 | py | Python | grvx/viz/smooth.py | UMCU-RIBS/grvx | 0343ffa3a211f28bbffb18d1fb4b2cadc4fda8a8 | [
"MIT"
] | 1 | 2021-11-25T08:12:48.000Z | 2021-11-25T08:12:48.000Z | grvx/viz/smooth.py | UMCU-RIBS/grvx | 0343ffa3a211f28bbffb18d1fb4b2cadc4fda8a8 | [
"MIT"
] | null | null | null | grvx/viz/smooth.py | UMCU-RIBS/grvx | 0343ffa3a211f28bbffb18d1fb4b2cadc4fda8a8 | [
"MIT"
] | null | null | null | from numpy import gradient
import plotly.graph_objs as go
from bidso.utils import read_tsv
from .paths import get_path
def plot_smooth(parameters, frequency_band, subject):
    """Plot R-squared against smoothing-kernel size for one subject/band.

    Returns a plotly Figure, or None when no correlation file exists
    for this subject / frequency band.
    """
    corr_file = get_path(parameters, 'corr_tsv', frequency_band=frequency_band, subject=subject)
    if corr_file is None:
        return None
    results = read_tsv(corr_file)
    trace = dict(
        x=results['Kernel'],
        y=results['Rsquared'],
        marker=dict(color='black'),
    )
    x_axis = dict(
        dtick=4,
        range=(0, parameters['fmri']['at_elec']['kernel_end']),
    )
    y_axis = dict(
        dtick=0.02,
        rangemode='tozero',
    )
    return go.Figure(
        data=[trace],
        layout=go.Layout(xaxis=x_axis, yaxis=y_axis),
    )
def plot_gradient(parameters, frequency_band, subject):
    """Plot the double gradient of R-squared against kernel size.

    Returns a plotly Figure, or None when no correlation file exists
    for this subject / frequency band.
    """
    corr_file = get_path(parameters, 'corr_tsv', frequency_band=frequency_band, subject=subject)
    if corr_file is None:
        return
    results = read_tsv(corr_file)
    traces = [
        dict(
            x=results['Kernel'],
            # gradient applied twice approximates the second derivative
            # of the R-squared curve.
            y=gradient(gradient(results['Rsquared'])),
            marker=dict(
                color='black',
            ),
        ),
    ]
    layout = go.Layout(
        xaxis=dict(
            dtick=4,
            range=(
                0,
                parameters['fmri']['at_elec']['kernel_end']
            ),
        ),
        yaxis=dict(
            dtick=0.002,
            range=(-0.006, 0.006),
        ),
    )
    fig = go.Figure(
        data=traces,
        layout=layout,
    )
    return fig
| 20.767442 | 96 | 0.478723 | 174 | 1,786 | 4.758621 | 0.333333 | 0.094203 | 0.096618 | 0.072464 | 0.789855 | 0.789855 | 0.789855 | 0.789855 | 0.789855 | 0.688406 | 0 | 0.01813 | 0.413214 | 1,786 | 85 | 97 | 21.011765 | 0.771947 | 0 | 0 | 0.735294 | 0 | 0 | 0.057111 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.058824 | 0 | 0.147059 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
252f4c93d478714b6e99fa5ebbcfdc4ee210c976 | 43 | py | Python | c2p2/handlers/__init__.py | nanvel/mdpages | 3bcee2cd14d74ce4668b9009a39a924e73ceae4b | [
"MIT"
] | null | null | null | c2p2/handlers/__init__.py | nanvel/mdpages | 3bcee2cd14d74ce4668b9009a39a924e73ceae4b | [
"MIT"
] | null | null | null | c2p2/handlers/__init__.py | nanvel/mdpages | 3bcee2cd14d74ce4668b9009a39a924e73ceae4b | [
"MIT"
] | null | null | null | from .github import *
from .pages import *
| 14.333333 | 21 | 0.72093 | 6 | 43 | 5.166667 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.186047 | 43 | 2 | 22 | 21.5 | 0.885714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
c25c2a67c0c918cceda8ebf24a8192b628203499 | 40 | py | Python | rescue/02-housetest.py | zachpanz88/gset-robotics | cc318aad88b58adb3fec2045904f36623edbd3fe | [
"MIT"
] | null | null | null | rescue/02-housetest.py | zachpanz88/gset-robotics | cc318aad88b58adb3fec2045904f36623edbd3fe | [
"MIT"
] | null | null | null | rescue/02-housetest.py | zachpanz88/gset-robotics | cc318aad88b58adb3fec2045904f36623edbd3fe | [
"MIT"
] | null | null | null | from rescue import house
house.house()
| 10 | 24 | 0.775 | 6 | 40 | 5.166667 | 0.666667 | 0.645161 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.15 | 40 | 3 | 25 | 13.333333 | 0.911765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 1 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
c25d567972232c61b3a252d5679288e58ab1a6f0 | 43 | py | Python | tfxc/__init__.py | sfujiwara/tfxc | 5469862e7c6bdac89edb0bd7cbc1808b8c7e7665 | [
"MIT"
] | null | null | null | tfxc/__init__.py | sfujiwara/tfxc | 5469862e7c6bdac89edb0bd7cbc1808b8c7e7665 | [
"MIT"
] | null | null | null | tfxc/__init__.py | sfujiwara/tfxc | 5469862e7c6bdac89edb0bd7cbc1808b8c7e7665 | [
"MIT"
] | null | null | null | from tfxc.bigquery import BigQueryTableGen
| 21.5 | 42 | 0.883721 | 5 | 43 | 7.6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.093023 | 43 | 1 | 43 | 43 | 0.974359 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
c2b156aa03ec1f9d2bafa5359c44950657e3acca | 133 | py | Python | testint_core/__init__.py | JohnOmernik/jupyter_testint | 4b5ea1c66f743b6b247448abf8cadbf8707d2f4c | [
"Apache-2.0"
] | null | null | null | testint_core/__init__.py | JohnOmernik/jupyter_testint | 4b5ea1c66f743b6b247448abf8cadbf8707d2f4c | [
"Apache-2.0"
] | null | null | null | testint_core/__init__.py | JohnOmernik/jupyter_testint | 4b5ea1c66f743b6b247448abf8cadbf8707d2f4c | [
"Apache-2.0"
] | null | null | null | from integration_core import Integration
from testint_core.testint_base import Testint
from testint_core._version import __version__
| 33.25 | 45 | 0.894737 | 18 | 133 | 6.111111 | 0.388889 | 0.2 | 0.272727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090226 | 133 | 3 | 46 | 44.333333 | 0.909091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
c2c076a0559f62a5579abaa45cadcbcde694ebd6 | 81 | py | Python | toolchain/riscv/MSYS/python/Lib/test/dis_module.py | zhiqiang-hu/bl_iot_sdk | 154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d | [
"Apache-2.0"
] | 207 | 2018-10-01T08:53:01.000Z | 2022-03-14T12:15:54.000Z | toolchain/riscv/MSYS/python/Lib/test/dis_module.py | zhiqiang-hu/bl_iot_sdk | 154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d | [
"Apache-2.0"
] | 8 | 2019-06-29T14:18:51.000Z | 2022-02-19T07:30:27.000Z | toolchain/riscv/MSYS/python/Lib/test/dis_module.py | zhiqiang-hu/bl_iot_sdk | 154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d | [
"Apache-2.0"
] | 76 | 2020-03-16T01:47:46.000Z | 2022-03-21T16:37:07.000Z |
# A simple module for testing the dis module.
def f(): pass
def g(): pass
| 13.5 | 46 | 0.62963 | 14 | 81 | 3.642857 | 0.785714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.271605 | 81 | 5 | 47 | 16.2 | 0.864407 | 0.530864 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | true | 1 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 6 |
c2ca8aa47cb3ec4390fdb776d42ef735838002c2 | 37,305 | py | Python | angr_platforms/tricore/rc_instr.py | shahinsba/angr-platforms | 86f9ea90c396fb5561d0196a2d1a873e573b0294 | [
"BSD-2-Clause"
] | null | null | null | angr_platforms/tricore/rc_instr.py | shahinsba/angr-platforms | 86f9ea90c396fb5561d0196a2d1a873e573b0294 | [
"BSD-2-Clause"
] | null | null | null | angr_platforms/tricore/rc_instr.py | shahinsba/angr-platforms | 86f9ea90c396fb5561d0196a2d1a873e573b0294 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
""" rc_instr.py
Implementation of RC format instructions.
"""
import sys
from pyvex.lifting.util import Type, Instruction
import bitstring
from .rtl import * # pylint: disable=[wildcard-import, unused-wildcard-import]
from .logger import log_this, log_val
class RC_Instructions_8B(Instruction):
    """ A class for instructions with OP=8B """
    name = 'RC_Instructions_8B ...'
    # First byte of the instruction word: opcode 0x8B.
    op = "{0}{1}".format(bin(8)[2:].zfill(4), bin(0xb)[2:].zfill(4))
    # The remaining 24 bits are captured as six 4-bit fields a..f.
    bin_format = op + 'a'*4 + 'b'*4 + 'c'*4 + 'd'*4 + 'e'*4 + 'f'*4
def parse(self, bitstrm):
    """Decode an RC-format word with OP1=0x8B and set ``self.name``.

    Returns a dict with the D[a] source register number, the raw
    9-bit const9 operand, the 7-bit op2 sub-opcode and the D[c]
    destination register number.
    """
    data = Instruction.parse(self, bitstrm)
    # Reassemble the 24 operand bits in descending bit order.
    tmp = bitstring.BitArray(bin="{0}{1}{2}{3}{4}{5}".format(data['e'],
                                                             data['f'],
                                                             data['c'],
                                                             data['d'],
                                                             data['a'],
                                                             data['b']))
    a = tmp[20:24]
    # Pad const9 to 12 bits so that .hex below yields whole hex digits.
    # (The original code assigned const9 twice; the unpadded first
    # assignment was dead and has been removed.)
    const9 = bitstring.BitArray(bin="{0}".format(tmp[11:20].bin.zfill(12)))
    op2 = int(tmp[4:11].bin, 2)
    c = tmp[:4]
    # Map the op2 sub-opcode to its mnemonic; undecoded values fall
    # back to "UNKNOWN".
    op2_names = {
        0x0: "RC_ADD", 0x2: "RC_ADDS", 0x3: "RC_ADDS.U",
        0x4: "RC_ADDX", 0x5: "RC_ADDC", 0x8: "RC_RSUB",
        0xa: "RC_RSUBS", 0xb: "RC_RSUBS.U", 0xe: "RC_ABSDIF",
        0xf: "RC_ABSDIFS", 0x10: "RC_EQ", 0x11: "RC_NE",
        0x12: "RC_LT", 0x13: "RC_LT.U", 0x14: "RC_GE",
        0x15: "RC_GE_U", 0x18: "RC_MIN", 0x19: "RC_MIN.U",
        0x1a: "RC_MAX", 0x1b: "RC_MAX.U",
        0x20: "RC_AND.EQ", 0x21: "RC_AND.NE", 0x22: "RC_AND.LT",
        0x23: "RC_AND.LT.U", 0x24: "RC_AND.GE", 0x25: "RC_AND.GE.U",
        0x27: "RC_OR.EQ", 0x28: "RC_OR.NE", 0x29: "RC_OR.LT",
        0x2a: "RC_OR.LT.U", 0x2b: "RC_OR.GE", 0x2c: "RC_OR.GE.U",
        0x2f: "RC_XOR.EQ", 0x30: "RC_XOR.NE", 0x31: "RC_XOR.LT",
        0x32: "RC_XOR.LT.U", 0x33: "RC_XOR.GE", 0x34: "RC_XOR.GE.U",
        0x37: "RC_SH.EQ", 0x38: "RC_SH.NE", 0x39: "RC_SH.LT",
        0x3a: "RC_SH.LT.U", 0x3b: "RC_SH.GE", 0x3c: "RC_SH.GE.U",
        0x56: "RC_EQANY.B", 0x76: "RC_EQANY.H",
    }
    self.name = op2_names.get(op2, "UNKNOWN")

    data = {"a": int(a.hex, 16),
            "const9": int(const9.hex, 16),
            "op2": op2,
            "c": int(c.hex, 16)}

    log_this(self.name, data, hex(self.addr))

    return data
@property
def max_pos(self):
    # Largest signed 32-bit value, used as saturation bound.
    return self.constant(INT32_MAX_POS, Type.int_32)

@property
def max_neg(self):
    # Most negative signed 32-bit value, used as saturation bound.
    return self.constant(INT32_MAX_NEG, Type.int_32)
def get_dst_reg(self):
    """Name of the destination data register D[c]."""
    return "d{0}".format(self.data['c'])

def get_psw(self):
    """Current value of the Program Status Word register."""
    return self.get("psw", Type.int_32)

def get_const9(self):
    """const9 zero-extended to 32 bits."""
    return self.constant(self.data['const9'], Type.int_9).cast_to(Type.int_32)

def get_const9_sign_extended(self):
    """const9 sign-extended to 32 bits."""
    return self.constant(self.data['const9'], Type.int_9).cast_to(Type.int_32, signed=True)

def get_d_a(self):
    """Value of the source data register D[a]."""
    return self.get("d{0}".format(self.data['a']), Type.int_32)

def fetch_operands(self):
    """Operands: D[a], zero-extended const9, sign-extended const9."""
    return self.get_d_a(), self.get_const9(), self.get_const9_sign_extended()
def compute_result(self, *args):
    """Dispatch on op2 and compute the instruction's result value.

    ``args`` is (D[a], zero-extended const9, sign-extended const9) as
    produced by fetch_operands.  Arithmetic branches additionally
    update the PSW status bits (C, V, SV, AV, SAV); comparison and
    logic branches only produce the value later written by
    commit_result.
    """
    d_a = args[0]
    const9 = args[1]
    const9_sign_extended = args[2]
    result = ""
    if self.data['op2'] == 0x0:  # ADD
        result = d_a + const9_sign_extended
        # set flags
        c = 0
        v = overflow(result)
        av = advanced_overflow(result)
        psw = self.get_psw()
        cond_sv = (v == 0)
        cond_sav = (av == 0)
        # Sticky flags: keep the old PSW bit when no overflow occurred,
        # otherwise set it to 1.
        sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
        sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
        psw = set_usb(psw, c, v, sv, av, sav)
        self.put(psw, "psw")
    elif self.data['op2'] == 0x2:  # ADDS
        result = ssov(d_a + const9_sign_extended, 32)
        # set flags
        c = 0
        v = overflow(result)
        av = advanced_overflow(result)
        psw = self.get_psw()
        cond_sv = (v == 0)
        cond_sav = (av == 0)
        sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
        sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
        psw = set_usb(psw, c, v, sv, av, sav)
        self.put(psw, "psw")
    elif self.data['op2'] == 0x3:  # ADDS.U
        result = suov(d_a + const9_sign_extended, 32)
        # set flags
        c = 0
        v = overflow(result)
        av = advanced_overflow(result)
        psw = self.get_psw()
        cond_sv = (v == 0)
        cond_sav = (av == 0)
        sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
        sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
        psw = set_usb(psw, c, v, sv, av, sav)
        self.put(psw, "psw")
    elif self.data['op2'] == 0x4:  # ADDX
        result = d_a + const9_sign_extended
        # compute flags
        c = carry(d_a, const9_sign_extended, 0)
        v = overflow(result)
        av = advanced_overflow(result)
        psw = self.get_psw()
        cond_sv = (v == 0)
        cond_sav = (av == 0)
        sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
        sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
        psw = set_usb(psw, c, v, sv, av, sav)
        self.put(psw, "psw")
    elif self.data['op2'] == 0x5:  # ADDC
        # psw[31] is the carry bit of the previous ADDX/ADDC.
        psw = self.get_psw()
        result = d_a + const9_sign_extended + psw[31]
        # set flags
        c = carry(d_a, const9_sign_extended, psw[31])
        v = overflow(result)
        av = advanced_overflow(result)
        psw = self.get_psw()
        cond_sv = (v == 0)
        cond_sav = (av == 0)
        sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
        sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
        psw = set_usb(psw, c, v, sv, av, sav)
        self.put(psw, "psw")
    elif self.data['op2'] == 0x8:  # RSUB
        result = const9_sign_extended - d_a
        # set flags
        c = 0
        v = overflow(result)
        av = advanced_overflow(result)
        psw = self.get_psw()
        cond_sv = (v == 0)
        cond_sav = (av == 0)
        sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
        sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
        psw = set_usb(psw, c, v, sv, av, sav)
        self.put(psw, "psw")
    elif self.data['op2'] == 0xa:  # RSUBS
        result = ssov32(const9_sign_extended - d_a, self.max_pos, self.max_neg)
        # set flags
        c = 0
        v = overflow(result)
        av = advanced_overflow(result)
        psw = self.get_psw()
        cond_sv = (v == 0)
        cond_sav = (av == 0)
        sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
        sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
        psw = set_usb(psw, c, v, sv, av, sav)
        self.put(psw, "psw")
    elif self.data['op2'] == 0xb:  # RSUBS.U
        result = suov32_sub(const9_sign_extended - d_a)  # Unsigned
        # set flags
        c = 0
        v = overflow(result)
        av = advanced_overflow(result)
        psw = self.get_psw()
        cond_sv = (v == 0)
        cond_sav = (av == 0)
        sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
        sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
        psw = set_usb(psw, c, v, sv, av, sav)
        self.put(psw, "psw")
    elif self.data['op2'] == 0xe:  # ABSDIF
        # Branch-free |D[a] - const9|: the all-ones/all-zeros mask
        # selects one of the two subtraction orders.
        condition = extend_to_32_bits(d_a > const9_sign_extended)
        result = ((d_a - const9_sign_extended) & condition) | ((const9_sign_extended - d_a) & ~condition)
        # set flags
        c = 0
        v = overflow(result)
        av = advanced_overflow(result)
        psw = self.get_psw()
        cond_sv = (v == 0)
        cond_sav = (av == 0)
        sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
        sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
        psw = set_usb(psw, c, v, sv, av, sav)
        self.put(psw, "psw")
    elif self.data['op2'] == 0xf:  # ABSDIFS
        # NOTE(review): identical to the ABSDIF branch — the saturating
        # variant does not apply ssov to the result here; verify
        # against the ISA manual.
        condition = extend_to_32_bits(d_a > const9_sign_extended)
        result = ((d_a - const9_sign_extended) & condition) | ((const9_sign_extended - d_a) & ~condition)
        # set flags
        c = 0
        v = overflow(result)
        av = advanced_overflow(result)
        psw = self.get_psw()
        cond_sv = (v == 0)
        cond_sav = (av == 0)
        sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
        sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
        psw = set_usb(psw, c, v, sv, av, sav)
        self.put(psw, "psw")
    # The AND/OR/XOR.cond family combines bit 0 of the old D[c] with
    # the comparison outcome and keeps D[c][31:1] unchanged.
    elif self.data['op2'] == 0x20:  # RC_AND.EQ
        d_c = self.get("d{0}".format(self.data['c']), Type.int_32)
        bit = d_c[0] & (d_a == const9_sign_extended)
        result = ((d_c >> 1) << 1) | bit
    elif self.data['op2'] == 0x24:  # RC_AND.GE
        d_c = self.get("d{0}".format(self.data['c']), Type.int_32)
        bit = d_c[0] & (d_a >= const9_sign_extended)
        result = ((d_c >> 1) << 1) | bit
    elif self.data['op2'] == 0x25:  # RC_AND.GE.U
        d_c = self.get("d{0}".format(self.data['c']), Type.int_32)
        bit = d_c[0] & (d_a >= const9)  # Unsigned
        result = ((d_c >> 1) << 1) | bit
    elif self.data['op2'] == 0x22:  # RC_AND.LT
        d_c = self.get("d{0}".format(self.data['c']), Type.int_32)
        bit = d_c[0] & (d_a < const9_sign_extended)
        result = ((d_c >> 1) << 1) | bit
    elif self.data['op2'] == 0x23:  # RC_AND.LT.U
        d_c = self.get("d{0}".format(self.data['c']), Type.int_32)
        bit = d_c[0] & (d_a < const9)  # Unsigned
        result = ((d_c >> 1) << 1) | bit
    elif self.data['op2'] == 0x21:  # RC_AND.NE
        d_c = self.get("d{0}".format(self.data['c']), Type.int_32)
        bit = d_c[0] & (d_a != const9_sign_extended)
        result = ((d_c >> 1) << 1) | bit
    elif self.data['op2'] == 0x10:  # RC_EQ
        result = (d_a == const9_sign_extended)
    elif self.data['op2'] == 0x11:  # RC_NE
        result = (d_a != const9_sign_extended)
    elif self.data['op2'] == 0x12:  # RC_LT
        result = (d_a < const9_sign_extended)
    elif self.data['op2'] == 0x13:  # RC_LT.U
        # NOTE(review): compares against const9_sign_extended although
        # the mnemonic is unsigned — the other .U branches use const9;
        # verify against the ISA.
        result = (d_a < const9_sign_extended)  # Unsigned
    elif self.data['op2'] == 0x14:  # RC_GE
        result = (d_a >= const9_sign_extended)
    elif self.data['op2'] == 0x15:  # RC_GE_U
        result = (d_a >= const9)  # Unsigned
    elif self.data['op2'] == 0x18:  # RC_MIN
        condition = extend_to_32_bits(d_a < const9_sign_extended)
        result = (d_a & condition) | (const9_sign_extended & ~condition)
    elif self.data['op2'] == 0x19:  # RC_MIN.U
        condition = extend_to_32_bits(d_a < const9)  # Unsigned
        result = (d_a & condition) | (const9 & ~condition)
    elif self.data['op2'] == 0x1a:  # RC_MAX
        condition = extend_to_32_bits(d_a > const9_sign_extended)
        result = (d_a & condition) | (const9_sign_extended & ~condition)
    elif self.data['op2'] == 0x1b:  # RC_MAX.U
        condition = extend_to_32_bits(d_a > const9)  # Unsigned
        result = (d_a & condition) | (const9 & ~condition)
    elif self.data['op2'] == 0x27:  # RC_OR.EQ
        d_c = self.get("d{0}".format(self.data['c']), Type.int_32)
        bit = d_c[0] | (d_a == const9_sign_extended)
        result = ((d_c >> 1) << 1) | bit
    elif self.data['op2'] == 0x2b:  # RC_OR.GE
        d_c = self.get("d{0}".format(self.data['c']), Type.int_32)
        bit = d_c[0] | (d_a >= const9_sign_extended)
        result = ((d_c >> 1) << 1) | bit
    elif self.data['op2'] == 0x2c:  # RC_OR.GE.U
        d_c = self.get("d{0}".format(self.data['c']), Type.int_32)
        bit = d_c[0] | (d_a >= const9)  # Unsigned
        result = ((d_c >> 1) << 1) | bit
    elif self.data['op2'] == 0x29:  # RC_OR.LT
        d_c = self.get("d{0}".format(self.data['c']), Type.int_32)
        bit = d_c[0] | (d_a < const9_sign_extended)
        result = ((d_c >> 1) << 1) | bit
    elif self.data['op2'] == 0x2a:  # RC_OR.LT.U
        d_c = self.get("d{0}".format(self.data['c']), Type.int_32)
        bit = d_c[0] | (d_a < const9)  # Unsigned
        result = ((d_c >> 1) << 1) | bit
    elif self.data['op2'] == 0x28:  # RC_OR.NE
        d_c = self.get("d{0}".format(self.data['c']), Type.int_32)
        bit = d_c[0] | (d_a != const9_sign_extended)
        result = ((d_c >> 1) << 1) | bit
    elif self.data['op2'] == 0x2f:  # RC_XOR.EQ
        d_c = self.get("d{0}".format(self.data['c']), Type.int_32)
        bit = d_c[0] ^ (d_a == const9_sign_extended)
        result = ((d_c >> 1) << 1) | bit
    elif self.data['op2'] == 0x33:  # RC_XOR.GE
        d_c = self.get("d{0}".format(self.data['c']), Type.int_32)
        bit = d_c[0] ^ (d_a >= const9_sign_extended)
        result = ((d_c >> 1) << 1) | bit
    elif self.data['op2'] == 0x34:  # RC_XOR.GE.U
        d_c = self.get("d{0}".format(self.data['c']), Type.int_32)
        bit = d_c[0] ^ (d_a >= const9)  # Unsigned
        result = ((d_c >> 1) << 1) | bit
    elif self.data['op2'] == 0x31:  # RC_XOR.LT
        d_c = self.get("d{0}".format(self.data['c']), Type.int_32)
        bit = d_c[0] ^ (d_a < const9_sign_extended)
        result = ((d_c >> 1) << 1) | bit
    elif self.data['op2'] == 0x32:  # RC_XOR.LT.U
        d_c = self.get("d{0}".format(self.data['c']), Type.int_32)
        bit = d_c[0] ^ (d_a < const9)  # Unsigned
        result = ((d_c >> 1) << 1) | bit
    elif self.data['op2'] == 0x30:  # RC_XOR.NE
        d_c = self.get("d{0}".format(self.data['c']), Type.int_32)
        bit = d_c[0] ^ (d_a != const9_sign_extended)
        result = ((d_c >> 1) << 1) | bit
    # The SH.cond family shifts D[c] left by one and puts the
    # comparison outcome into bit 0.
    elif self.data['op2'] == 0x37:  # RC_SH.EQ
        d_c = self.get("d{0}".format(self.data['c']), Type.int_32)
        result = (d_c << 1) | (d_a == const9_sign_extended)
    elif self.data['op2'] == 0x3b:  # RC_SH.GE
        d_c = self.get("d{0}".format(self.data['c']), Type.int_32)
        result = (d_c << 1) | (d_a >= const9_sign_extended)
    elif self.data['op2'] == 0x3c:  # RC_SH.GE.U
        d_c = self.get("d{0}".format(self.data['c']), Type.int_32)
        result = (d_c << 1) | (d_a >= const9)  # Unsigned
    elif self.data['op2'] == 0x39:  # RC_SH.LT
        d_c = self.get("d{0}".format(self.data['c']), Type.int_32)
        result = (d_c << 1) | (d_a < const9_sign_extended)
    elif self.data['op2'] == 0x3a:  # RC_SH.LT.U
        d_c = self.get("d{0}".format(self.data['c']), Type.int_32)
        result = (d_c << 1) | (d_a < const9)  # Unsigned
    elif self.data['op2'] == 0x38:  # RC_SH.NE
        d_c = self.get("d{0}".format(self.data['c']), Type.int_32)
        result = (d_c << 1) | (d_a != const9_sign_extended)
    elif self.data['op2'] == 0x56:  # EQANY.B
        # Byte-wise equality of D[a] against the sign-extended const9.
        cond_1 = ((d_a & 0xff) == (const9_sign_extended & 0xff))
        cond_2 = ((d_a & (0xff << 8)) == (const9_sign_extended & (0xff << 8)))
        cond_3 = ((d_a & (0xff << 16)) == (const9_sign_extended & (0xff << 16)))
        cond_4 = ((d_a & (0xff << 24)) == (const9_sign_extended & (0xff << 24)))
        # NOTE(review): Python 'or' short-circuits on the truthiness of
        # these expression objects; a bitwise '|' may be intended here —
        # verify how the lifter evaluates this.
        result = cond_4 or cond_3 or cond_2 or cond_1
    elif self.data['op2'] == 0x76:  # EQANY.H
        # Half-word-wise equality, same caveat as EQANY.B above.
        cond_1 = ((d_a & 0xffff) == (const9_sign_extended & 0xffff))
        cond_2 = ((d_a & (0xffff << 16)) == (const9_sign_extended & (0xffff << 16)))
        result = cond_2 or cond_1
    return result
def commit_result(self, res):
    """Write the computed value back to the destination register."""
    destination = self.get_dst_reg()
    self.put(res, destination)
class RC_Instructions_8F(Instruction):
    """ A class for instructions with OP=8F """
    name = 'RC_Instructions_8F ...'
    # First byte of the instruction word: opcode 0x8F.
    op = "{0}{1}".format(bin(8)[2:].zfill(4), bin(0xf)[2:].zfill(4))
    # The remaining 24 bits are captured as six 4-bit fields a..f.
    bin_format = op + 'a'*4 + 'b'*4 + 'c'*4 + 'd'*4 + 'e'*4 + 'f'*4
def parse(self, bitstrm):
    """Decode an RC-format word with OP1=0x8F and set ``self.name``."""
    data = Instruction.parse(self, bitstrm)
    # Reassemble the 24 operand bits in descending bit order.
    fields = "{0}{1}{2}{3}{4}{5}".format(data['e'], data['f'], data['c'],
                                         data['d'], data['a'], data['b'])
    tmp = bitstring.BitArray(bin=fields)
    a = tmp[20:24]
    # const9 is padded to 12 bits so .hex yields whole hex digits.
    const9 = bitstring.BitArray(bin="{0}".format(tmp[11:20].bin.zfill(12)))
    op2 = bitstring.BitArray(bin="{0}".format(tmp[4:11]))
    op2 = int(op2.bin, 2)
    c = tmp[:4]
    # Sub-opcode to mnemonic; anything undecoded becomes "UNKNOWN".
    op2_names = {
        0x0: "RC_SH", 0x1: "RC_SHA", 0x8: "RC_AND", 0x9: "RC_NAND",
        0xa: "RC_OR", 0xb: "RC_NOR", 0xc: "RC_XOR", 0xe: "RC_ANDN",
        0xf: "RC_ORN", 0x40: "RC_SH.H", 0x41: "RC_SHA.H",
    }
    self.name = op2_names.get(op2, "UNKNOWN")

    data = {"a": int(a.hex, 16),
            "const9": int(const9.hex, 16),
            "op2": op2,
            "c": int(c.hex, 16)}

    log_this(self.name, data, hex(self.addr))

    return data
def get_dst_reg(self):
    """Name of the destination data register D[c]."""
    return "d{0}".format(self.data['c'])

def get_psw(self):
    """Current value of the Program Status Word register."""
    return self.get("psw", Type.int_32)

def get_const9(self):
    """const9 zero-extended to 32 bits."""
    return self.constant(self.data['const9'], Type.int_9).cast_to(Type.int_32)

def get_d_a(self):
    """Value of the source data register D[a]."""
    return self.get("d{0}".format(self.data['a']), Type.int_32)

def fetch_operands(self):
    """Operands: D[a] and the zero-extended const9."""
    return self.get_d_a(), self.get_const9()
def compute_result(self, *args):
    """Dispatch on op2 and compute shift/logic results.

    ``args`` is (D[a], zero-extended const9) as produced by
    fetch_operands.  Shift amounts are taken from the raw const9 in
    ``self.data``; negative amounts (two's complement) shift right with
    sign replication.  SHA/SHAS additionally update the PSW.
    """
    d_a = args[0]
    const9 = args[1]
    result = ""
    if self.data['op2'] == 0x0:  # Shift
        sha = self.data['const9'] & 0x3f  # const9[5:0]
        cond_sha_pos = (sha & 0x20 == 0)  # SHA is positive
        # Left-shift path, masked to zero when sha is negative.
        result_1 = (d_a << sha) & extend_to_32_bits(cond_sha_pos)
        result_2 = 0
        if not sha == 0:  # sha=0
            cond_sha_neg = extend_to_6_bits(cond_sha_pos) ^ 0x3f
            shift_count = twos_comp(sha, 6)  # if sha<0
            if shift_count < 0:
                shift_count = shift_count * (-1)
            cond_mask_2 = extend_bits((d_a & 0x80000000 != 0), shift_count)  # D[a][31] is set
            mask_2 = (((1 << shift_count) - 1) << (32 - shift_count)) & cond_mask_2
            result_2 = (mask_2 | (d_a >> shift_count)) & extend_to_32_bits(cond_sha_neg)
        # final result & flags
        result = result_1 | result_2
    elif self.data['op2'] == 0x1:  # SHA
        sha = self.data['const9'] & 0x3f  # const9[5:0]
        cond_sha_pos = (sha & 0x20 == 0)  # SHA is positive
        result_1 = (d_a << sha) & extend_to_32_bits(cond_sha_pos)
        # compute carry out
        lower_limit = (32 - sha) & extend_to_6_bits(cond_sha_pos)
        if lower_limit == 32:  # sha=0
            carry_out_1_mask = 0
        else:
            carry_out_1_mask = (((1 << 32) - 1) >> (31 - lower_limit)) << (31 - lower_limit)
        cond_carry_out_1 = ((sha & 0x3f) == 0x3f) & cond_sha_pos  # if const9[5:0]
        carry_out_1 = ((d_a & carry_out_1_mask) != 0) & extend_to_32_bits(cond_carry_out_1)
        result_2 = 0
        carry_out_2 = 0
        if not sha == 0:  # sha=0
            cond_sha_neg = extend_to_6_bits(cond_sha_pos) ^ 0x3f
            shift_count = twos_comp(sha, 6)  # if sha<0
            if shift_count < 0:
                shift_count = shift_count * (-1)
            cond_mask_2 = extend_bits((d_a & 0x80000000 != 0), shift_count)  # D[a][31] is set
            mask_2 = (((1 << shift_count) - 1) << (32 - shift_count)) & cond_mask_2
            result_2 = (mask_2 | (d_a >> shift_count)) & extend_to_32_bits(cond_sha_neg)
            # compute carry out
            carry_out_2_mask = (1 << (shift_count-1)) - 1
            carry_out_2 = ((d_a & carry_out_2_mask) != 0) & (cond_sha_pos ^ 1)
        # final result & flags
        result = result_1 | result_2
        c = carry_out_1 | carry_out_2
        v = overflow(result)
        av = advanced_overflow(result)
        psw = self.get_psw()
        cond_sv = (v == 0)
        cond_sav = (av == 0)
        sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
        sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
        psw = set_usb(psw, c, v, sv, av, sav)
        self.put(psw, "psw")
    elif self.data['op2'] == 0x2:  # SHAS
        # NOTE(review): op2=0x2 is handled here but parse() never maps
        # it to a name (it stays "UNKNOWN") — verify decode coverage.
        sha = self.data['const9'] & 0x3f  # const9[5:0]
        cond_sha_pos = (sha & 0x20 == 0)  # SHA is positive
        result_1 = (d_a << sha) & extend_to_32_bits(cond_sha_pos)
        # compute carry out
        lower_limit = (32 - sha) & extend_to_6_bits(cond_sha_pos)
        if lower_limit == 32:  # sha=0
            carry_out_1_mask = 0
        else:
            carry_out_1_mask = (((1 << 32) - 1) >> (31 - lower_limit)) << (31 - lower_limit)
        cond_carry_out_1 = ((sha & 0x3f) == 0x3f) & cond_sha_pos  # if const9[5:0]
        carry_out_1 = ((d_a & carry_out_1_mask) != 0) & extend_to_32_bits(cond_carry_out_1)
        result_2 = 0
        carry_out_2 = 0
        if not sha == 0:  # sha=0
            cond_sha_neg = extend_to_6_bits(cond_sha_pos) ^ 0x3f
            shift_count = twos_comp(sha, 6)  # if sha<0
            if shift_count < 0:
                shift_count = shift_count * (-1)
            cond_mask_2 = extend_bits((d_a & 0x80000000 != 0), shift_count)  # D[a][31] is set
            mask_2 = (((1 << shift_count) - 1) << (32 - shift_count)) & cond_mask_2
            result_2 = (mask_2 | (d_a >> shift_count)) & extend_to_32_bits(cond_sha_neg)
            # compute carry out
            carry_out_2_mask = (1 << (shift_count-1)) - 1
            carry_out_2 = ((d_a & carry_out_2_mask) != 0) & (cond_sha_pos ^ 1)
        # final result & flags
        result = ssov(result_1 | result_2, 32)
        c = carry_out_1 | carry_out_2
        v = overflow(result)
        av = advanced_overflow(result)
        psw = self.get_psw()
        cond_sv = (v == 0)
        cond_sav = (av == 0)
        sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
        sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
        psw = set_usb(psw, c, v, sv, av, sav)
        self.put(psw, "psw")
    elif self.data['op2'] == 0x8:  # AND
        result = d_a & const9
    elif self.data['op2'] == 0x9:  # NAND
        result = ~(d_a & const9)
    elif self.data['op2'] == 0xb:  # NOR
        result = ~(d_a | const9)
    elif self.data['op2'] == 0xa:  # OR
        result = d_a | const9
    elif self.data['op2'] == 0xf:  # ORN
        result = d_a | (~const9)
    elif self.data['op2'] == 0xc:  # XOR
        result = d_a ^ const9
    elif self.data['op2'] == 0xe:  # ANDN
        result = d_a & ~const9
    elif self.data['op2'] == 0x40:  # SH.H
        # Shift each 16-bit half-word of D[a] independently.
        sha = self.data['const9'] & 0x1f  # const9[4:0]
        cond_sha_pos = extend_to_16_bits(sha & 0x10 == 0)  # SHA is positive
        d_a_hw_1 = d_a >> 16  # 16 MSB bits [31:16]
        d_a_hw_2 = d_a & 0xffff  # 16 LSB bits [15:0]
        result_hw_1_pos = (d_a_hw_1 << sha) & cond_sha_pos
        result_hw_2_pos = (d_a_hw_2 << sha) & cond_sha_pos
        result_hw_1_neg = 0
        result_hw_2_neg = 0
        if not sha == 0:  # sha=0
            cond_sha_neg = cond_sha_pos ^ 0xffff
            shift_count = twos_comp(sha, 5)  # if sha<0
            if shift_count < 0:
                shift_count = shift_count * (-1)  # TODO: get abs value
            cond_mask_hw_1_neg = extend_bits((d_a_hw_1 & 0x8000 != 0), shift_count)  # D[a][31] is set
            mask_2 = (((1 << shift_count) - 1) << (16 - shift_count)) & cond_mask_hw_1_neg
            result_hw_1_neg = (mask_2 | (d_a_hw_1 >> shift_count)) & cond_sha_neg
            cond_mask_hw_2_neg = extend_bits((d_a_hw_2 & 0x8000 != 0), shift_count)  # D[a][15] is set
            mask_2 = (((1 << shift_count) - 1) << (16 - shift_count)) & cond_mask_hw_2_neg
            result_hw_2_neg = (mask_2 | (d_a_hw_2 >> shift_count)) & cond_sha_neg
        # final result & flags
        result_hw_1 = result_hw_1_pos | result_hw_1_neg
        result_hw_2 = result_hw_2_pos | result_hw_2_neg
        result = (result_hw_1 << 16) | result_hw_2
    elif self.data['op2'] == 0x41:  # SHA.H
        # Arithmetic shift of each 16-bit half-word of D[a].
        sha = self.data['const9'] & 0x1f  # const9[4:0]
        cond_sha_pos = (sha & 0x10 == 0)  # SHA is positive
        result_hw_0_pos = ((d_a & 0xffff) << sha) & extend_to_16_bits(cond_sha_pos)
        result_hw_1_pos = ((d_a >> 16) << sha) & extend_to_16_bits(cond_sha_pos)
        result_hw_0_neg = 0
        result_hw_1_neg = 0
        if not sha == 0:  # sha=0
            cond_sha_neg = extend_to_16_bits(cond_sha_pos) ^ 0xffff
            shift_count = twos_comp(sha, 5)  # if sha<0
            if shift_count < 0:
                shift_count = shift_count * (-1)
            cond_mask_2 = extend_bits((d_a & 0x80000000 != 0), shift_count)  # D[a][31] is set
            mask_2 = (((1 << shift_count) - 1) << (16 - shift_count)) & cond_mask_2
            result_hw_0_neg = (mask_2 | ((d_a & 0xffff) >> shift_count)) & extend_to_16_bits(cond_sha_neg)
            result_hw_1_neg = (mask_2 | ((d_a >> 16) >> shift_count)) & extend_to_16_bits(cond_sha_neg)
        # final result
        result_1 = (result_hw_1_pos << 16) | result_hw_0_pos
        result_2 = (result_hw_1_neg << 16) | result_hw_0_neg
        result = result_1 | result_2
    return result
def commit_result(self, res):
self.put(res, self.get_dst_reg())
class RC_Instructions_53(Instruction):
""" A class for instructions with OP=53 """
name = 'RC_MUL_Instructions_53 ...'
op = "{0}{1}".format(bin(5)[2:].zfill(4), bin(3)[2:].zfill(4))
bin_format = op + 'a'*4 + 'b'*4 + 'c'*4 + 'd'*4 + 'e'*4 + 'f'*4
def parse(self, bitstrm):
data = Instruction.parse(self, bitstrm)
tmp = bitstring.BitArray(bin="{0}{1}{2}{3}{4}{5}".format(data['e'],
data['f'],
data['c'],
data['d'],
data['a'],
data['b']))
a = tmp[20:24]
const9 = bitstring.BitArray(bin="{0}".format(tmp[11:20].bin.zfill(12)))
op2 = bitstring.BitArray(bin="{0}".format(tmp[4:11]))
op2 = int(op2.bin, 2)
c = tmp[:4]
if op2 == 0x1:
self.name = "RC_MUL (32-bit)"
elif op2 == 0x2:
self.name = "RC_MUL.U"
elif op2 == 0x3:
self.name = "RC_MUL (64-bit)"
elif op2 == 0x4:
self.name = "RC_MULS.U"
elif op2 == 0x5:
self.name = "RC_MULS"
else:
self.name = "UNKNOWN"
data = {"a": int(a.hex, 16),
"const9": int(const9.hex, 16),
"op2": op2,
"c": int(c.hex, 16)}
log_this(self.name, data, hex(self.addr))
return data
@property
def max_pos(self):
return self.constant(INT32_MAX_POS, Type.int_32).cast_to(Type.int_64, signed=True)
@property
def max_neg(self):
return self.constant(INT32_MAX_NEG, Type.int_32).cast_to(Type.int_64, signed=True)
def get_dst_reg(self):
return "d{0}".format(self.data['c'])
def get_psw(self):
return self.get("psw", Type.int_32)
def get_const9(self):
return self.constant(self.data['const9'], Type.int_32)
def get_const9_sign_extended(self):
return self.constant(self.data['const9'], Type.int_9).cast_to(Type.int_32, signed=True)
def get_d_a(self):
return self.get("d{0}".format(self.data['a']), Type.int_32)
def fetch_operands(self):
return self.get_d_a(), self.get_const9(), self.get_const9_sign_extended()
def compute_result(self, *args):
d_a = args[0]
const9 = args[1]
const9_sign_extended = args[2]
result = ""
if self.data['op2'] == 0x1: # MUL (32-bit)
result = d_a * const9_sign_extended
self.put(result, self.get_dst_reg())
# flags
c = 0
v = overflow(result)
av = advanced_overflow(result)
psw = self.get_psw()
cond_sv = (v == 0)
cond_sav = (av == 0)
sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
psw = set_usb(psw, c, v, sv, av, sav)
self.put(psw, "psw")
elif self.data['op2'] == 0x2: # MUL.U
result = d_a.cast_to(Type.int_64) * const9.cast_to(Type.int_64) # Unsigned
self.put(result & 0xffffffff, "d{0}".format(self.data['c']))
self.put(result >> 32, "d{0}".format(self.data['c']+1))
# flags
c = 0
v = overflow_64(result).cast_to(Type.int_32)
av = advanced_overflow_64(result).cast_to(Type.int_32)
psw = self.get_psw()
cond_sv = (v == 0)
cond_sav = (av == 0)
sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
psw = set_usb(psw, c, v, sv, av, sav)
self.put(psw, "psw")
elif self.data['op2'] == 0x3: # MUL (64-bit)
result = (d_a * const9_sign_extended).cast_to(Type.int_64, signed=True)
self.put(result & 0xffffffff, "d{0}".format(self.data['c']))
self.put(result >> 32, "d{0}".format(self.data['c']+1))
# flags
c = 0
v = overflow_64(result).cast_to(Type.int_32)
av = advanced_overflow_64(result).cast_to(Type.int_32)
psw = self.get_psw()
cond_sv = (v == 0)
cond_sav = (av == 0)
sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
psw = set_usb(psw, c, v, sv, av, sav)
self.put(psw, "psw")
elif self.data['op2'] == 0x5: # MULS
result = (d_a * const9_sign_extended).cast_to(Type.int_64)
result = ssov32(result, self.max_pos, self.max_neg)
self.put(result, self.get_dst_reg())
# flags
c = 0
v = overflow(result).cast_to(Type.int_32)
av = advanced_overflow(result).cast_to(Type.int_32)
psw = self.get_psw()
cond_sv = (v == 0)
cond_sav = (av == 0)
sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
psw = set_usb(psw, c, v, sv, av, sav)
self.put(psw, "psw")
elif self.data['op2'] == 0x4: # MULS.U
result = d_a * const9 # Unsigned
result = suov32_pos(result)
self.put(result, self.get_dst_reg())
# set flags
c = 0
v = overflow(result).cast_to(Type.int_32)
av = advanced_overflow(result).cast_to(Type.int_32)
psw = self.get_psw()
cond_sv = (v == 0)
cond_sav = (av == 0)
sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
psw = set_usb(psw, c, v, sv, av, sav)
self.put(psw, "psw")
else:
print("Error: Unknown OP2={0}!".format(self.data['op2']))
print("RC instruction OP=53, OP2=Unknown")
sys.exit(1)
class RC_Instructions_AD(Instruction):
""" A class for instructions with OP=AD """
name = 'RC Instructions (OP=0xAD) ...'
op = "{0}{1}".format(bin(0xa)[2:].zfill(4), bin(0xd)[2:].zfill(4))
bin_format = op + 'a'*4 + 'b'*4 + 'c'*4 + 'd'*4 + 'e'*4 + 'f'*4
def parse(self, bitstrm):
data = Instruction.parse(self, bitstrm)
tmp = bitstring.BitArray(bin="{0}{1}{2}{3}{4}{5}".format(data['e'],
data['f'],
data['c'],
data['d'],
data['a'],
data['b']))
const9 = bitstring.BitArray(bin="{0}".format(tmp[11:20]))
op2 = bitstring.BitArray(bin="{0}".format(tmp[4:11]))
op2 = int(op2.bin, 2)
if op2 == 0xad:
self.name = "RC_SYSCALL"
else:
self.name = "UNKNOWN"
data = {"const9": int(const9.hex, 16),
"op2": op2}
log_this(self.name, data, hex(self.addr))
return data
def get_const9(self):
return self.constant(self.data['const9'], Type.int_8) # const9[7:0]
def fetch_operands(self):
return [self.get_const9()]
def compute_result(self, *args):
const9 = args[0]
if self.data['op2'] == 0x4: # SYSCALL
# trap(SYS, const9[7:0]) TODO
log_val("RC_Instructions_AD: trap(SYS, const9) - const9={0}".format(const9))
else:
print("Error: Unknown OP2={0}!".format(self.data['op2']))
print("RC instruction OP=AD, OP2=Unknown")
sys.exit(1)
| 39.728435 | 110 | 0.481705 | 5,128 | 37,305 | 3.273986 | 0.046412 | 0.013699 | 0.043243 | 0.053607 | 0.851391 | 0.808208 | 0.764429 | 0.743999 | 0.721186 | 0.712312 | 0 | 0.066975 | 0.36802 | 37,305 | 938 | 111 | 39.770789 | 0.645148 | 0.044873 | 0 | 0.646597 | 0 | 0 | 0.042611 | 0.00062 | 0 | 0 | 0.019741 | 0.001066 | 0 | 1 | 0.043194 | false | 0 | 0.006545 | 0.030105 | 0.108639 | 0.005236 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
6c07e9305c295d7f7881758d1ea1913ac83f87a3 | 120 | py | Python | scootplayer/queue/__init__.py | sbaildon/scootplayer | 9e433bba6291982ac3dddb393637985751b646e1 | [
"Apache-2.0"
] | 9 | 2015-05-15T12:16:13.000Z | 2022-01-21T18:33:20.000Z | scootplayer/queue/__init__.py | sbaildon/scootplayer | 9e433bba6291982ac3dddb393637985751b646e1 | [
"Apache-2.0"
] | 5 | 2015-01-12T13:43:08.000Z | 2020-03-31T06:05:18.000Z | scootplayer/queue/__init__.py | sbaildon/scootplayer | 9e433bba6291982ac3dddb393637985751b646e1 | [
"Apache-2.0"
] | 5 | 2015-01-19T11:14:58.000Z | 2020-06-05T04:44:26.000Z | """Import the named modules in this directory."""
from . import download
from . import playback
from . import playlist
| 20 | 49 | 0.75 | 16 | 120 | 5.625 | 0.6875 | 0.333333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 120 | 5 | 50 | 24 | 0.9 | 0.358333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
6c24dac0a8ba38be32d8de9255e352fcebc00fda | 215 | py | Python | AI_Web/init.py | xwy27/ArtificialIntelligenceProjects | e2b0154f07d749084e2d670260fa82f8f5ea23ed | [
"MIT"
] | 4 | 2018-12-19T14:10:56.000Z | 2021-07-12T06:05:17.000Z | AI_Web/init.py | xwy27/ArtificialIntelligenceProjects | e2b0154f07d749084e2d670260fa82f8f5ea23ed | [
"MIT"
] | 1 | 2019-08-06T01:57:41.000Z | 2019-08-06T01:57:41.000Z | AI_Web/init.py | xwy27/ArtificialIntelligenceProjects | e2b0154f07d749084e2d670260fa82f8f5ea23ed | [
"MIT"
] | null | null | null | from SA.tools.pre_load_data import pre_load_data as sa_pre
from GA.tools.pre_load_data import pre_load_data as ga_pre
from os.path import join
sa_pre(join("SA", "tools", "Data"))
ga_pre(join("GA", "tools", "Data")) | 35.833333 | 58 | 0.762791 | 43 | 215 | 3.534884 | 0.27907 | 0.184211 | 0.289474 | 0.210526 | 0.460526 | 0.460526 | 0.460526 | 0.460526 | 0.460526 | 0 | 0 | 0 | 0.102326 | 215 | 6 | 59 | 35.833333 | 0.787565 | 0 | 0 | 0 | 0 | 0 | 0.101852 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.6 | 0 | 0.6 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
6c70b365d20270b550662621f1b07ac8ce20134c | 258 | py | Python | Lib/feaLab/writers/markFeatureNoWriter.py | moyogo/fealab | 2ccc8e3b2ceb7e6a4d6c803db0a3b539e7b65e55 | [
"Apache-2.0"
] | null | null | null | Lib/feaLab/writers/markFeatureNoWriter.py | moyogo/fealab | 2ccc8e3b2ceb7e6a4d6c803db0a3b539e7b65e55 | [
"Apache-2.0"
] | null | null | null | Lib/feaLab/writers/markFeatureNoWriter.py | moyogo/fealab | 2ccc8e3b2ceb7e6a4d6c803db0a3b539e7b65e55 | [
"Apache-2.0"
] | null | null | null | class MarkFeatureWriter(object):
"""Skips generating the mark and mkmk features.
"""
def __init__(self, font, anchorList=(), mkmkAnchorList=(), ligaAnchorList=()):
pass
def write(self, doMark=False, doMkmk=False):
return ""
| 25.8 | 82 | 0.643411 | 26 | 258 | 6.230769 | 0.884615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.22093 | 258 | 9 | 83 | 28.666667 | 0.80597 | 0.170543 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.4 | false | 0.2 | 0 | 0.2 | 0.8 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 6 |
6699bfd899e70af1f012c77f528864cd21834631 | 35,923 | py | Python | teospy/icevap4.py | jarethholt/teospy | 3bb23e67bbb765c0842aa8d4a73c1d55ea395d2f | [
"MIT"
] | null | null | null | teospy/icevap4.py | jarethholt/teospy | 3bb23e67bbb765c0842aa8d4a73c1d55ea395d2f | [
"MIT"
] | null | null | null | teospy/icevap4.py | jarethholt/teospy | 3bb23e67bbb765c0842aa8d4a73c1d55ea395d2f | [
"MIT"
] | null | null | null | """Ice-water vapour equilibrium functions.
This module provides thermodynamic properties of ice and water vapour in
equilibrium, e.g. the enthalpy of sublimation.
:Examples:
>>> temperature(pres=100.)
252.817910215
>>> densityvap(pres=100.)
8.57185487853e-4
>>> volumesubl(pres=100.)
1166.60755699
>>> entropysubl(pres=100.)
11225.8717816
>>> enthalpysubl(pres=100.)
2838101.44416
>>> pressure(temp=270.)
470.059067981
>>> densityvap(temp=270.)
3.77406140772e-3
>>> volumesubl(temp=270.)
264.965451558
>>> entropysubl(temp=270.)
10500.6135349
>>> enthalpysubl(temp=270.)
2835165.65442
:Functions:
* :func:`eq_tp`: Calculate ice-water vapour equilibrium properties at
either temperature or pressure.
* :func:`temperature`: Temperature at ice-water vapour equilibrium.
* :func:`pressure`: Pressure at ice-water vapour equilibrium.
* :func:`densityvap`: Water vapour density at ice-water vapour
equilibrium.
* :func:`chempot`: Chemical potential at ice-water vapour equilibrium.
* :func:`densityice`: Ice density at ice-water vapour equilibrium.
* :func:`enthalpyice`: Ice enthalpy at ice-water vapour equilibrium.
* :func:`enthalpyvap`: Water vapour enthalpy at ice-water vapour
equilibrium.
* :func:`entropyice`: Ice entropy at ice-water vapour equilibrium.
* :func:`entropyvap`: Water vapour entropy at ice-water vapour
equilibrium.
* :func:`volumesubl`: Specific volume of sublimation.
* :func:`entropysubl`: Specific entropy of sublimation.
* :func:`enthalpysubl`: Specific enthalpy of sublimation.
"""
__all__ = ['eq_tp','temperature','pressure','densityvap','chempot','densityice',
'enthalpyice','enthalpyvap','entropyice','entropyvap','volumesubl',
'entropysubl','enthalpysubl']
import warnings
import numpy
from teospy import constants0
from teospy import ice1
from teospy import flu2
from teospy import ice2
from teospy import maths3
from teospy import maths4
_CHKTOL = constants0.CHKTOL
_RWAT = constants0.RWAT
_TTP = constants0.TTP
_PTPE = constants0.PTPE
_LLVTP = constants0.LLVTP
_LILTP = constants0.LILTP
_CICE = constants0.CICE
_CVAP = constants0.CVAP
_chkflubnds = constants0.chkflubnds
_chkicebnds = constants0.chkicebnds
_ice_g = ice1.ice_g
_eq_chempot = flu2.eq_chempot
_eq_pressure = flu2.eq_pressure
_newton = maths3.newton
_AVI = (_LLVTP+_LILTP)/(_RWAT*_TTP)
_BVI = (_CICE-_CVAP)/_RWAT
_RAB = _AVI/_BVI
## Equilibrium functions
def _approx_t(temp):
    """Approximate PDv at T.
    
    Approximate the pressure and water vapour density of ice and water
    vapour in equilibrium at the given temperature. The estimate comes
    from integrating Clausius-Clapeyron with constant heat capacities.
    
    :arg float temp: Temperature in K.
    :returns: Pressure in Pa and water vapour density in kg/m3.
    """
    tratio = _TTP/temp
    # Exponent of the constant-heat-capacity sublimation pressure formula
    exparg = _AVI * (1 - tratio)
    exparg += _BVI * (1 - tratio - numpy.log(temp/_TTP))
    psubl = _PTPE * numpy.exp(exparg)
    # Ideal-gas estimate for the vapour density
    dvsubl = psubl / (_RWAT * temp)
    return psubl, dvsubl
def _approx_p(pres):
    """Approximate TDv at P.
    
    Approximate the temperature and water vapour density of ice and
    water vapour in equilibrium at the given pressure. The estimate
    inverts the constant-heat-capacity sublimation formula using the
    modified Lambert function.
    
    :arg float pres: Pressure in Pa.
    :returns: Temperature in K and water vapour density in kg/m3.
    """
    w = numpy.log(pres/_PTPE)/_BVI
    # lamb2 solves the transcendental equation for the ratio _TTP/temp
    tratio = maths4.lamb2(w,_RAB)
    tsubl = _TTP/tratio
    # Ideal-gas estimate for the vapour density
    dvsubl = pres / (_RWAT * tsubl)
    return tsubl, dvsubl
def _diff_t(p,dv,temp):
    """Calculate ice-vapour disequilibrium at T.
    
    Calculate both sides of the equations
    
        given pressure = pressure of water vapour
        chemical potential of ice = potential of water vapour
    
    and their Jacobians with respect to pressure and water vapour
    density. Solving these equations gives the pressure and water vapour
    density at the given temperature.
    
    :arg float p: Pressure in Pa.
    :arg float dv: Water vapour density in kg/m3.
    :arg float temp: Temperature in K.
    :returns: Left-hand side of the equation, right-hand side,
        Jacobian of LHS, and Jacobian of RHS.
    :rtype: tuple(array(float))
    """
    # Residuals of the two equilibrium conditions
    gice = _ice_g(0,0,temp,p)
    pvap = _eq_pressure(0,0,temp,dv)
    gvap = _eq_chempot(0,0,temp,dv)
    lhs = numpy.array([p, gice])
    rhs = numpy.array([pvap, gvap])
    # Derivatives with respect to the unknowns (p, dv)
    gice_p = _ice_g(0,1,temp,p)
    pvap_d = _eq_pressure(0,1,temp,dv)
    gvap_d = _eq_chempot(0,1,temp,dv)
    dlhs = numpy.array([[1.,0.], [gice_p,0.]])
    drhs = numpy.array([[0.,pvap_d], [0.,gvap_d]])
    return lhs, rhs, dlhs, drhs
def _diff_p(t,dv,pres):
    """Calculate ice-vapour disequilibrium at P.
    
    Calculate both sides of the equations
    
        given pressure = pressure of water vapour
        chemical potential of ice = potential of water vapour
    
    and their Jacobians with respect to temperature and water vapour
    density. Solving these equations gives the temperature and water
    vapour density at the given pressure.
    
    :arg float t: Temperature in K.
    :arg float dv: Water vapour density in kg/m3.
    :arg float pres: Pressure in Pa.
    :returns: Left-hand side of the equation, right-hand side,
        Jacobian of LHS, and Jacobian of RHS.
    :rtype: tuple(array(float))
    """
    # Residuals of the two equilibrium conditions
    gice = _ice_g(0,0,t,pres)
    pvap = _eq_pressure(0,0,t,dv)
    gvap = _eq_chempot(0,0,t,dv)
    lhs = numpy.array([pres, gice])
    rhs = numpy.array([pvap, gvap])
    # Derivatives with respect to the unknowns (t, dv)
    gice_t = _ice_g(1,0,t,pres)
    pvap_t = _eq_pressure(1,0,t,dv)
    pvap_d = _eq_pressure(0,1,t,dv)
    gvap_t = _eq_chempot(1,0,t,dv)
    gvap_d = _eq_chempot(0,1,t,dv)
    dlhs = numpy.array([[0.,0.], [gice_t,0.]])
    drhs = numpy.array([[pvap_t,pvap_d], [gvap_t,gvap_d]])
    return lhs, rhs, dlhs, drhs
def eq_tp(temp=None,pres=None,dvap=None,chkvals=False,chktol=_CHKTOL,
    temp0=None,pres0=None,dvap0=None,chkbnd=False,mathargs=None):
    """Get primary ice-vapour variables at T or P.
    
    Get the values of all primary variables for ice and water vapour in
    equilibrium at either of a given temperature or pressure.
    
    Previously-computed results can be passed in to skip the repeat
    calculation; when enough values are supplied and chkvals is True,
    they are checked for consistency instead.
    
    :arg temp: Temperature in K.
    :type temp: float or None
    :arg pres: Pressure in Pa.
    :type pres: float or None
    :arg dvap: Water vapour density in kg/m3, or None (default) to have
        it calculated.
    :type dvap: float or None
    :arg bool chkvals: If True (default False) and all values are given,
        calculate the disequilibrium and raise a warning if the results
        are not within the tolerance chktol.
    :arg float chktol: Tolerance to use when checking values (default
        _CHKTOL).
    :arg temp0: Initial guess for the temperature in K; None (default)
        uses `_approx_p`.
    :type temp0: float or None
    :arg pres0: Initial guess for the pressure in Pa; None (default)
        uses `_approx_t`.
    :type pres0: float or None
    :arg dvap0: Initial guess for the water vapour density in kg/m3;
        None (default) uses `_approx_t` or `_approx_p`.
    :type dvap0: float or None
    :arg bool chkbnd: If True then warnings are raised when the given
        values are valid but outside the recommended bounds (default
        False).
    :arg mathargs: Keyword arguments to the root-finder
        :func:`_newton <maths3.newton>` (e.g. maxiter, rtol); None
        (default) for the default parameters.
    :returns: Temperature, pressure, and water vapour density (all in SI
        units).
    :raises ValueError: If neither of temp or pres is provided.
    :raises RuntimeWarning: If the relative disequilibrium is more than
        chktol, if chkvals is True and all values are given.
    """
    if temp is None and pres is None:
        errmsg = 'One of temp or pres must be provided'
        raise ValueError(errmsg)
    if temp is not None:
        # Solve for (pres, dvap) at the given temperature if needed
        if pres is None or dvap is None:
            if mathargs is None:
                mathargs = dict()
            pres, dvap = _newton(_diff_t,(pres0,dvap0),_approx_t,
                fargs=(temp,),**mathargs)
    else:
        # Solve for (temp, dvap) at the given pressure
        if mathargs is None:
            mathargs = dict()
        temp, dvap = _newton(_diff_p,(temp0,dvap0),_approx_p,
            fargs=(pres,),**mathargs)
    _chkflubnds(temp,dvap,chkbnd=chkbnd)
    _chkicebnds(temp,pres,chkbnd=chkbnd)
    if not chkvals:
        return temp, pres, dvap
    # Consistency check: relative error where the RHS is large enough,
    # absolute error otherwise
    lhs, rhs, __, __ = _diff_p(temp,dvap,pres)
    errs = [abs(l/r-1) if abs(r) >= chktol else abs(l-r)
        for (l,r) in zip(lhs,rhs)]
    if max(errs) > chktol:
        warnmsg = ('Given values {0} and solutions {1} disagree to more than '
            'the tolerance {2}').format(lhs,rhs,chktol)
        warnings.warn(warnmsg,RuntimeWarning)
    return temp, pres, dvap
### Thermodynamic properties
def temperature(temp=None,pres=None,dvap=None,chkvals=False,
    chktol=_CHKTOL,temp0=None,pres0=None,dvap0=None,chkbnd=False,
    mathargs=None):
    """Calculate ice-vapour temperature.
    
    Calculate the temperature at which ice and water vapour are in
    equilibrium.
    
    :arg temp: Temperature in K.
    :type temp: float or None
    :arg pres: Pressure in Pa.
    :type pres: float or None
    :arg dvap: Water vapour density in kg/m3, or None (default) to have
        it calculated.
    :type dvap: float or None
    :arg bool chkvals: If True (default False) and all values are given,
        calculate the disequilibrium and raise a warning if the results
        are not within the tolerance chktol.
    :arg float chktol: Tolerance to use when checking values (default
        _CHKTOL).
    :arg temp0: Initial guess for the temperature in K; None (default)
        uses `_approx_p`.
    :type temp0: float or None
    :arg pres0: Initial guess for the pressure in Pa; None (default)
        uses `_approx_t`.
    :type pres0: float or None
    :arg dvap0: Initial guess for the water vapour density in kg/m3;
        None (default) uses `_approx_t` or `_approx_p`.
    :type dvap0: float or None
    :arg bool chkbnd: If True then warnings are raised when the given
        values are valid but outside the recommended bounds (default
        False).
    :arg mathargs: Keyword arguments to the root-finder
        :func:`_newton <maths3.newton>` (e.g. maxiter, rtol); None
        (default) for the default parameters.
    :returns: Temperature in K.
    :raises ValueError: If neither of temp or pres is provided.
    :raises RuntimeWarning: If the relative disequilibrium is more than
        chktol, if chkvals is True and all values are given.
    
    :Examples:
    
    >>> temperature(pres=100.)
    252.817910215
    """
    res = eq_tp(temp=temp,pres=pres,dvap=dvap,chkvals=chkvals,
        chktol=chktol,temp0=temp0,pres0=pres0,dvap0=dvap0,chkbnd=chkbnd,
        mathargs=mathargs)
    return res[0]
def pressure(temp=None,pres=None,dvap=None,chkvals=False,chktol=_CHKTOL,
    temp0=None,pres0=None,dvap0=None,chkbnd=False,mathargs=None):
    """Calculate ice-vapour pressure.
    
    Calculate the pressure at which ice and water vapour are in
    equilibrium.
    
    :arg temp: Temperature in K.
    :type temp: float or None
    :arg pres: Pressure in Pa.
    :type pres: float or None
    :arg dvap: Water vapour density in kg/m3, or None (default) to have
        it calculated.
    :type dvap: float or None
    :arg bool chkvals: If True (default False) and all values are given,
        calculate the disequilibrium and raise a warning if the results
        are not within the tolerance chktol.
    :arg float chktol: Tolerance to use when checking values (default
        _CHKTOL).
    :arg temp0: Initial guess for the temperature in K; None (default)
        uses `_approx_p`.
    :type temp0: float or None
    :arg pres0: Initial guess for the pressure in Pa; None (default)
        uses `_approx_t`.
    :type pres0: float or None
    :arg dvap0: Initial guess for the water vapour density in kg/m3;
        None (default) uses `_approx_t` or `_approx_p`.
    :type dvap0: float or None
    :arg bool chkbnd: If True then warnings are raised when the given
        values are valid but outside the recommended bounds (default
        False).
    :arg mathargs: Keyword arguments to the root-finder
        :func:`_newton <maths3.newton>` (e.g. maxiter, rtol); None
        (default) for the default parameters.
    :returns: Pressure in Pa.
    :raises ValueError: If neither of temp or pres is provided.
    :raises RuntimeWarning: If the relative disequilibrium is more than
        chktol, if chkvals is True and all values are given.
    
    :Examples:
    
    >>> pressure(temp=270.)
    470.059067981
    """
    res = eq_tp(temp=temp,pres=pres,dvap=dvap,chkvals=chkvals,
        chktol=chktol,temp0=temp0,pres0=pres0,dvap0=dvap0,chkbnd=chkbnd,
        mathargs=mathargs)
    return res[1]
def densityvap(temp=None,pres=None,dvap=None,chkvals=False,
    chktol=_CHKTOL,temp0=None,pres0=None,dvap0=None,chkbnd=False,
    mathargs=None):
    """Calculate ice-vapour vapour density.
    
    Calculate the density of the water vapour phase when ice and water
    vapour are in equilibrium.
    
    :arg temp: Temperature in K.
    :type temp: float or None
    :arg pres: Pressure in Pa.
    :type pres: float or None
    :arg dvap: Water vapour density in kg/m3, or None (default) to have
        it calculated.
    :type dvap: float or None
    :arg bool chkvals: If True (default False) and all values are given,
        calculate the disequilibrium and raise a warning if the results
        are not within the tolerance chktol.
    :arg float chktol: Tolerance to use when checking values (default
        _CHKTOL).
    :arg temp0: Initial guess for the temperature in K; None (default)
        uses `_approx_p`.
    :type temp0: float or None
    :arg pres0: Initial guess for the pressure in Pa; None (default)
        uses `_approx_t`.
    :type pres0: float or None
    :arg dvap0: Initial guess for the water vapour density in kg/m3;
        None (default) uses `_approx_t` or `_approx_p`.
    :type dvap0: float or None
    :arg bool chkbnd: If True then warnings are raised when the given
        values are valid but outside the recommended bounds (default
        False).
    :arg mathargs: Keyword arguments to the root-finder
        :func:`_newton <maths3.newton>` (e.g. maxiter, rtol); None
        (default) for the default parameters.
    :returns: Water vapour density in kg/m3.
    :raises ValueError: If neither of temp or pres is provided.
    :raises RuntimeWarning: If the relative disequilibrium is more than
        chktol, if chkvals is True and all values are given.
    
    :Examples:
    
    >>> densityvap(temp=270.)
    3.77406140772e-3
    >>> densityvap(pres=100.)
    8.57185487853e-4
    """
    res = eq_tp(temp=temp,pres=pres,dvap=dvap,chkvals=chkvals,
        chktol=chktol,temp0=temp0,pres0=pres0,dvap0=dvap0,chkbnd=chkbnd,
        mathargs=mathargs)
    return res[2]
def chempot(temp=None,pres=None,dvap=None,chkvals=False,chktol=_CHKTOL,
    temp0=None,pres0=None,dvap0=None,chkbnd=False,mathargs=None):
    """Calculate ice-vapour chemical potential.
    
    Calculate the (common) chemical potential of ice and water vapour
    in equilibrium, evaluated from the ice Gibbs energy.
    
    :arg temp: Temperature in K.
    :type temp: float or None
    :arg pres: Pressure in Pa.
    :type pres: float or None
    :arg dvap: Water vapour density in kg/m3, or None (default) to have
        it calculated.
    :type dvap: float or None
    :arg bool chkvals: If True (default False) and all values are given,
        calculate the disequilibrium and raise a warning if the results
        are not within the tolerance chktol.
    :arg float chktol: Tolerance to use when checking values (default
        _CHKTOL).
    :arg temp0: Initial guess for the temperature in K; None (default)
        uses `_approx_p`.
    :type temp0: float or None
    :arg pres0: Initial guess for the pressure in Pa; None (default)
        uses `_approx_t`.
    :type pres0: float or None
    :arg dvap0: Initial guess for the water vapour density in kg/m3;
        None (default) uses `_approx_t` or `_approx_p`.
    :type dvap0: float or None
    :arg bool chkbnd: If True then warnings are raised when the given
        values are valid but outside the recommended bounds (default
        False).
    :arg mathargs: Keyword arguments to the root-finder
        :func:`_newton <maths3.newton>` (e.g. maxiter, rtol); None
        (default) for the default parameters.
    :returns: Chemical potential in J/kg.
    :raises ValueError: If neither of temp or pres is provided.
    :raises RuntimeWarning: If the relative disequilibrium is more than
        chktol, if chkvals is True and all values are given.
    
    :Examples:
    
    >>> chempot(temp=270.)
    -3895.26747392
    >>> chempot(pres=100.)
    -26421.2820403
    """
    res = eq_tp(temp=temp,pres=pres,dvap=dvap,chkvals=chkvals,
        chktol=chktol,temp0=temp0,pres0=pres0,dvap0=dvap0,chkbnd=chkbnd,
        mathargs=mathargs)
    # Gibbs energy of ice equals the vapour chemical potential here
    return _ice_g(0,0,res[0],res[1])
def densityice(temp=None,pres=None,dvap=None,chkvals=False,
    chktol=_CHKTOL,temp0=None,pres0=None,dvap0=None,chkbnd=False,
    mathargs=None):
    """Calculate ice-vapour ice density.
    
    Calculate the density of the ice phase when ice and water vapour
    are in equilibrium.
    
    :arg temp: Temperature in K.
    :type temp: float or None
    :arg pres: Pressure in Pa.
    :type pres: float or None
    :arg dvap: Water vapour density in kg/m3, or None (default) to have
        it calculated.
    :type dvap: float or None
    :arg bool chkvals: If True (default False) and all values are given,
        calculate the disequilibrium and raise a warning if the results
        are not within the tolerance chktol.
    :arg float chktol: Tolerance to use when checking values (default
        _CHKTOL).
    :arg temp0: Initial guess for the temperature in K; None (default)
        uses `_approx_p`.
    :type temp0: float or None
    :arg pres0: Initial guess for the pressure in Pa; None (default)
        uses `_approx_t`.
    :type pres0: float or None
    :arg dvap0: Initial guess for the water vapour density in kg/m3;
        None (default) uses `_approx_t` or `_approx_p`.
    :type dvap0: float or None
    :arg bool chkbnd: If True then warnings are raised when the given
        values are valid but outside the recommended bounds (default
        False).
    :arg mathargs: Keyword arguments to the root-finder
        :func:`_newton <maths3.newton>` (e.g. maxiter, rtol); None
        (default) for the default parameters.
    :returns: Ice density in kg/m3.
    :raises ValueError: If neither of temp or pres is provided.
    :raises RuntimeWarning: If the relative disequilibrium is more than
        chktol, if chkvals is True and all values are given.
    
    :Examples:
    
    >>> densityice(temp=270.)
    917.170465733
    >>> densityice(pres=100.)
    919.600269745
    """
    res = eq_tp(temp=temp,pres=pres,dvap=dvap,chkvals=chkvals,
        chktol=chktol,temp0=temp0,pres0=pres0,dvap0=dvap0,chkbnd=chkbnd,
        mathargs=mathargs)
    return ice2.density(res[0],res[1])
def enthalpyice(temp=None,pres=None,dvap=None,chkvals=False,
    chktol=_CHKTOL,temp0=None,pres0=None,dvap0=None,chkbnd=False,
    mathargs=None):
    """Calculate ice-vapour ice enthalpy.
    
    Calculate the specific enthalpy of the ice phase when ice and water
    vapour are in equilibrium.
    
    :arg temp: Temperature in K.
    :type temp: float or None
    :arg pres: Pressure in Pa.
    :type pres: float or None
    :arg dvap: Water vapour density in kg/m3, or None (default) to have
        it calculated.
    :type dvap: float or None
    :arg bool chkvals: If True (default False) and all values are given,
        calculate the disequilibrium and raise a warning if the results
        are not within the tolerance chktol.
    :arg float chktol: Tolerance to use when checking values (default
        _CHKTOL).
    :arg temp0: Initial guess for the temperature in K; None (default)
        uses `_approx_p`.
    :type temp0: float or None
    :arg pres0: Initial guess for the pressure in Pa; None (default)
        uses `_approx_t`.
    :type pres0: float or None
    :arg dvap0: Initial guess for the water vapour density in kg/m3;
        None (default) uses `_approx_t` or `_approx_p`.
    :type dvap0: float or None
    :arg bool chkbnd: If True then warnings are raised when the given
        values are valid but outside the recommended bounds (default
        False).
    :arg mathargs: Keyword arguments to the root-finder
        :func:`_newton <maths3.newton>` (e.g. maxiter, rtol); None
        (default) for the default parameters.
    :returns: Enthalpy in J/kg.
    :raises ValueError: If neither of temp or pres is provided.
    :raises RuntimeWarning: If the relative disequilibrium is more than
        chktol, if chkvals is True and all values are given.
    
    :Examples:
    
    >>> enthalpyice(temp=270.)
    -340033.434649
    >>> enthalpyice(pres=100.)
    -374576.247867
    """
    res = eq_tp(temp=temp,pres=pres,dvap=dvap,chkvals=chkvals,
        chktol=chktol,temp0=temp0,pres0=pres0,dvap0=dvap0,chkbnd=chkbnd,
        mathargs=mathargs)
    return ice2.enthalpy(res[0],res[1])
def enthalpyvap(temp=None,pres=None,dvap=None,chkvals=False,
    chktol=_CHKTOL,temp0=None,pres0=None,dvap0=None,chkbnd=False,
    mathargs=None):
    """Calculate ice-vapour vapour enthalpy.
    
    Calculate the specific enthalpy of the water vapour phase when ice
    and water vapour are in equilibrium.
    
    :arg temp: Temperature in K.
    :type temp: float or None
    :arg pres: Pressure in Pa.
    :type pres: float or None
    :arg dvap: Water vapour density in kg/m3, or None (default) to have
        it calculated.
    :type dvap: float or None
    :arg bool chkvals: If True (default False) and all values are given,
        calculate the disequilibrium and raise a warning if the results
        are not within the tolerance chktol.
    :arg float chktol: Tolerance to use when checking values (default
        _CHKTOL).
    :arg temp0: Initial guess for the temperature in K; None (default)
        uses `_approx_p`.
    :type temp0: float or None
    :arg pres0: Initial guess for the pressure in Pa; None (default)
        uses `_approx_t`.
    :type pres0: float or None
    :arg dvap0: Initial guess for the water vapour density in kg/m3;
        None (default) uses `_approx_t` or `_approx_p`.
    :type dvap0: float or None
    :arg bool chkbnd: If True then warnings are raised when the given
        values are valid but outside the recommended bounds (default
        False).
    :arg mathargs: Keyword arguments to the root-finder
        :func:`_newton <maths3.newton>` (e.g. maxiter, rtol); None
        (default) for the default parameters.
    :returns: Enthalpy in J/kg.
    :raises ValueError: If neither of temp or pres is provided.
    :raises RuntimeWarning: If the relative disequilibrium is more than
        chktol, if chkvals is True and all values are given.
    
    :Examples:
    
    >>> enthalpyvap(temp=270.)
    2495132.21977
    >>> enthalpyvap(pres=100.)
    2463525.19629
    """
    res = eq_tp(temp=temp,pres=pres,dvap=dvap,chkvals=chkvals,
        chktol=chktol,temp0=temp0,pres0=pres0,dvap0=dvap0,chkbnd=chkbnd,
        mathargs=mathargs)
    # Vapour enthalpy is a function of temperature and vapour density
    return flu2.enthalpy(res[0],res[2])
def entropyice(temp=None,pres=None,dvap=None,chkvals=False,
    chktol=_CHKTOL,temp0=None,pres0=None,dvap0=None,chkbnd=False,
    mathargs=None):
    """Specific entropy of the ice phase at ice-vapour equilibrium.

    Either the temperature or the pressure must be supplied; any primary
    variable left as None is found by solving the equilibrium
    conditions.

    :arg temp: Temperature in K, or None (default) to solve for it.
    :type temp: float or None
    :arg pres: Pressure in Pa, or None (default) to solve for it.
    :type pres: float or None
    :arg dvap: Water vapour density in kg/m3, or None (default) to
        solve for it.
    :type dvap: float or None
    :arg bool chkvals: When True (default False) and every value was
        supplied, check the disequilibrium and warn if it exceeds the
        tolerance.
    :arg float chktol: Relative tolerance for the disequilibrium check
        (default _CHKTOL).
    :arg temp0: Starting temperature in K for the iteration; None
        (default) uses `_approx_p`.
    :type temp0: float or None
    :arg pres0: Starting pressure in Pa for the iteration; None
        (default) uses `_approx_t`.
    :type pres0: float or None
    :arg dvap0: Starting vapour density in kg/m3 for the iteration;
        None (default) uses `_approx_t` or `_approx_p`.
    :type dvap0: float or None
    :arg bool chkbnd: When True, warn if the given values are valid but
        outside the recommended bounds (default False).
    :arg mathargs: Keyword arguments for the root-finder
        :func:`_newton <maths3.newton>` (e.g. maxiter, rtol); None
        (default) uses the solver defaults.
    :returns: Entropy in J/kg/K.
    :raises ValueError: If neither of temp or pres is provided.
    :raises RuntimeWarning: If the relative disequilibrium is more than
        chktol, if chkvals is True and all values are given.

    :Examples:

    >>> entropyice(temp=270.)
    -1244.95617472
    >>> entropyice(pres=100.)
    -1377.09771247
    """
    # Complete the primary variables from the equilibrium solver, then
    # evaluate the ice entropy at that (temp, pres) state.
    temp, pres, dvap = eq_tp(temp=temp, pres=pres, dvap=dvap,
        chkvals=chkvals, chktol=chktol, temp0=temp0, pres0=pres0,
        dvap0=dvap0, chkbnd=chkbnd, mathargs=mathargs)
    return ice2.entropy(temp, pres)
def entropyvap(temp=None,pres=None,dvap=None,chkvals=False,
    chktol=_CHKTOL,temp0=None,pres0=None,dvap0=None,chkbnd=False,
    mathargs=None):
    """Specific entropy of the vapour phase at ice-vapour equilibrium.

    Either the temperature or the pressure must be supplied; any primary
    variable left as None is found by solving the equilibrium
    conditions.

    :arg temp: Temperature in K, or None (default) to solve for it.
    :type temp: float or None
    :arg pres: Pressure in Pa, or None (default) to solve for it.
    :type pres: float or None
    :arg dvap: Water vapour density in kg/m3, or None (default) to
        solve for it.
    :type dvap: float or None
    :arg bool chkvals: When True (default False) and every value was
        supplied, check the disequilibrium and warn if it exceeds the
        tolerance.
    :arg float chktol: Relative tolerance for the disequilibrium check
        (default _CHKTOL).
    :arg temp0: Starting temperature in K for the iteration; None
        (default) uses `_approx_p`.
    :type temp0: float or None
    :arg pres0: Starting pressure in Pa for the iteration; None
        (default) uses `_approx_t`.
    :type pres0: float or None
    :arg dvap0: Starting vapour density in kg/m3 for the iteration;
        None (default) uses `_approx_t` or `_approx_p`.
    :type dvap0: float or None
    :arg bool chkbnd: When True, warn if the given values are valid but
        outside the recommended bounds (default False).
    :arg mathargs: Keyword arguments for the root-finder
        :func:`_newton <maths3.newton>` (e.g. maxiter, rtol); None
        (default) uses the solver defaults.
    :returns: Entropy in J/kg/K.
    :raises ValueError: If neither of temp or pres is provided.
    :raises RuntimeWarning: If the relative disequilibrium is more than
        chktol, if chkvals is True and all values are given.

    :Examples:

    >>> entropyvap(temp=270.)
    9255.65736018
    >>> entropyvap(pres=100.)
    9848.77406912
    """
    # Complete the primary variables from the equilibrium solver, then
    # evaluate the fluid-water entropy at the vapour state.
    temp, pres, dvap = eq_tp(temp=temp, pres=pres, dvap=dvap,
        chkvals=chkvals, chktol=chktol, temp0=temp0, pres0=pres0,
        dvap0=dvap0, chkbnd=chkbnd, mathargs=mathargs)
    return flu2.entropy(temp, dvap)
def volumesubl(temp=None,pres=None,dvap=None,chkvals=False,
    chktol=_CHKTOL,temp0=None,pres0=None,dvap0=None,chkbnd=False,
    mathargs=None):
    """Specific volume of sublimation (vapour volume minus ice volume).

    Either the temperature or the pressure must be supplied; any primary
    variable left as None is found by solving the equilibrium
    conditions.

    :arg temp: Temperature in K, or None (default) to solve for it.
    :type temp: float or None
    :arg pres: Pressure in Pa, or None (default) to solve for it.
    :type pres: float or None
    :arg dvap: Water vapour density in kg/m3, or None (default) to
        solve for it.
    :type dvap: float or None
    :arg bool chkvals: When True (default False) and every value was
        supplied, check the disequilibrium and warn if it exceeds the
        tolerance.
    :arg float chktol: Relative tolerance for the disequilibrium check
        (default _CHKTOL).
    :arg temp0: Starting temperature in K for the iteration; None
        (default) uses `_approx_p`.
    :type temp0: float or None
    :arg pres0: Starting pressure in Pa for the iteration; None
        (default) uses `_approx_t`.
    :type pres0: float or None
    :arg dvap0: Starting vapour density in kg/m3 for the iteration;
        None (default) uses `_approx_t` or `_approx_p`.
    :type dvap0: float or None
    :arg bool chkbnd: When True, warn if the given values are valid but
        outside the recommended bounds (default False).
    :arg mathargs: Keyword arguments for the root-finder
        :func:`_newton <maths3.newton>` (e.g. maxiter, rtol); None
        (default) uses the solver defaults.
    :returns: Specific volume in m3/kg.
    :raises ValueError: If neither of temp or pres is provided.
    :raises RuntimeWarning: If the relative disequilibrium is more than
        chktol, if chkvals is True and all values are given.

    :Examples:

    >>> volumesubl(temp=270.)
    264.965451558
    >>> volumesubl(pres=100.)
    1166.60755699
    """
    temp, pres, dvap = eq_tp(temp=temp, pres=pres, dvap=dvap,
        chkvals=chkvals, chktol=chktol, temp0=temp0, pres0=pres0,
        dvap0=dvap0, chkbnd=chkbnd, mathargs=mathargs)
    # Vapour specific volume is the reciprocal density; the ice volume
    # is the pressure derivative of the ice Gibbs function g_p.
    vapvol = dvap**(-1)
    icevol = _ice_g(0, 1, temp, pres)
    return vapvol - icevol
def entropysubl(temp=None,pres=None,dvap=None,chkvals=False,
    chktol=_CHKTOL,temp0=None,pres0=None,dvap0=None,chkbnd=False,
    mathargs=None):
    """Specific entropy of sublimation (vapour entropy minus ice entropy).

    Either the temperature or the pressure must be supplied; any primary
    variable left as None is found by solving the equilibrium
    conditions.

    :arg temp: Temperature in K, or None (default) to solve for it.
    :type temp: float or None
    :arg pres: Pressure in Pa, or None (default) to solve for it.
    :type pres: float or None
    :arg dvap: Water vapour density in kg/m3, or None (default) to
        solve for it.
    :type dvap: float or None
    :arg bool chkvals: When True (default False) and every value was
        supplied, check the disequilibrium and warn if it exceeds the
        tolerance.
    :arg float chktol: Relative tolerance for the disequilibrium check
        (default _CHKTOL).
    :arg temp0: Starting temperature in K for the iteration; None
        (default) uses `_approx_p`.
    :type temp0: float or None
    :arg pres0: Starting pressure in Pa for the iteration; None
        (default) uses `_approx_t`.
    :type pres0: float or None
    :arg dvap0: Starting vapour density in kg/m3 for the iteration;
        None (default) uses `_approx_t` or `_approx_p`.
    :type dvap0: float or None
    :arg bool chkbnd: When True, warn if the given values are valid but
        outside the recommended bounds (default False).
    :arg mathargs: Keyword arguments for the root-finder
        :func:`_newton <maths3.newton>` (e.g. maxiter, rtol); None
        (default) uses the solver defaults.
    :returns: Entropy in J/kg/K.
    :raises ValueError: If neither of temp or pres is provided.
    :raises RuntimeWarning: If the relative disequilibrium is more than
        chktol, if chkvals is True and all values are given.

    :Examples:

    >>> entropysubl(temp=270.)
    10500.6135349
    >>> entropysubl(pres=100.)
    11225.8717816
    """
    temp, pres, dvap = eq_tp(temp=temp, pres=pres, dvap=dvap,
        chkvals=chkvals, chktol=chktol, temp0=temp0, pres0=pres0,
        dvap0=dvap0, chkbnd=chkbnd, mathargs=mathargs)
    # Entropy of the phase change: vapour entropy minus ice entropy.
    svap = flu2.entropy(temp, dvap)
    sice = ice2.entropy(temp, pres)
    return svap - sice
def enthalpysubl(temp=None,pres=None,dvap=None,chkvals=False,
    chktol=_CHKTOL,temp0=None,pres0=None,dvap0=None,chkbnd=False,
    mathargs=None):
    """Specific enthalpy of sublimation (vapour minus ice enthalpy).

    Either the temperature or the pressure must be supplied; any primary
    variable left as None is found by solving the equilibrium
    conditions.

    :arg temp: Temperature in K, or None (default) to solve for it.
    :type temp: float or None
    :arg pres: Pressure in Pa, or None (default) to solve for it.
    :type pres: float or None
    :arg dvap: Water vapour density in kg/m3, or None (default) to
        solve for it.
    :type dvap: float or None
    :arg bool chkvals: When True (default False) and every value was
        supplied, check the disequilibrium and warn if it exceeds the
        tolerance.
    :arg float chktol: Relative tolerance for the disequilibrium check
        (default _CHKTOL).
    :arg temp0: Starting temperature in K for the iteration; None
        (default) uses `_approx_p`.
    :type temp0: float or None
    :arg pres0: Starting pressure in Pa for the iteration; None
        (default) uses `_approx_t`.
    :type pres0: float or None
    :arg dvap0: Starting vapour density in kg/m3 for the iteration;
        None (default) uses `_approx_t` or `_approx_p`.
    :type dvap0: float or None
    :arg bool chkbnd: When True, warn if the given values are valid but
        outside the recommended bounds (default False).
    :arg mathargs: Keyword arguments for the root-finder
        :func:`_newton <maths3.newton>` (e.g. maxiter, rtol); None
        (default) uses the solver defaults.
    :returns: Enthalpy in J/kg.
    :raises ValueError: If neither of temp or pres is provided.
    :raises RuntimeWarning: If the relative disequilibrium is more than
        chktol, if chkvals is True and all values are given.

    :Examples:

    >>> enthalpysubl(temp=270.)
    2835165.65442
    >>> enthalpysubl(pres=100.)
    2838101.44416
    """
    temp, pres, dvap = eq_tp(temp=temp, pres=pres, dvap=dvap,
        chkvals=chkvals, chktol=chktol, temp0=temp0, pres0=pres0,
        dvap0=dvap0, chkbnd=chkbnd, mathargs=mathargs)
    # Latent heat of sublimation: vapour enthalpy minus ice enthalpy.
    hvap = flu2.enthalpy(temp, dvap)
    hice = ice2.enthalpy(temp, pres)
    return hvap - hice
| 39.65011 | 80 | 0.677644 | 5,203 | 35,923 | 4.624832 | 0.056698 | 0.02269 | 0.035656 | 0.045381 | 0.860159 | 0.823173 | 0.79882 | 0.776503 | 0.774342 | 0.766862 | 0 | 0.02955 | 0.237898 | 35,923 | 905 | 81 | 39.693923 | 0.849399 | 0.705036 | 0 | 0.407035 | 0 | 0 | 0.029364 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085427 | false | 0 | 0.040201 | 0 | 0.21608 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
669a6b178fab9d1865fbf4f03011df0c95166a77 | 151 | py | Python | web/code/mmg/jobtrak/help/admin.py | 559Labs/JobTrak | 5b118248e9b6e62f479a335b5a23b7062b6f2368 | [
"Apache-2.0"
] | 1 | 2015-01-27T00:41:31.000Z | 2015-01-27T00:41:31.000Z | web/code/mmg/jobtrak/help/admin.py | andrewmarconi/JobTrak | 5b118248e9b6e62f479a335b5a23b7062b6f2368 | [
"Apache-2.0"
] | 118 | 2015-01-26T14:02:52.000Z | 2015-01-29T18:35:07.000Z | web/code/mmg/jobtrak/help/admin.py | MarconiMediaGroup/JobTrak | 5b118248e9b6e62f479a335b5a23b7062b6f2368 | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from JobTrak.admin import JobTrakAdmin
#from mmg.jobtrak.links.models import *
#from mmg.jobtrak.core.models import *
| 30.2 | 39 | 0.81457 | 22 | 151 | 5.590909 | 0.5 | 0.113821 | 0.227642 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.10596 | 151 | 4 | 40 | 37.75 | 0.911111 | 0.496689 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
66a9348b1156523bb13f680fce27128647b64405 | 25 | py | Python | __init__.py | yubarajshrestha/flask-boilerplate | 0dae36186a776506a9542e4df7fc34fbeccdc2a1 | [
"MIT"
] | null | null | null | __init__.py | yubarajshrestha/flask-boilerplate | 0dae36186a776506a9542e4df7fc34fbeccdc2a1 | [
"MIT"
] | null | null | null | __init__.py | yubarajshrestha/flask-boilerplate | 0dae36186a776506a9542e4df7fc34fbeccdc2a1 | [
"MIT"
] | null | null | null | import app
import config
| 8.333333 | 13 | 0.84 | 4 | 25 | 5.25 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.16 | 25 | 2 | 14 | 12.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
06cef2368e7261befaf85e30007dc277f89ff4c8 | 42 | py | Python | helpers/__init__.py | CacoNyan/fa2py | d56302df0869f2ad0232adfa3531298f12c5035c | [
"WTFPL"
] | null | null | null | helpers/__init__.py | CacoNyan/fa2py | d56302df0869f2ad0232adfa3531298f12c5035c | [
"WTFPL"
] | null | null | null | helpers/__init__.py | CacoNyan/fa2py | d56302df0869f2ad0232adfa3531298f12c5035c | [
"WTFPL"
] | null | null | null | from ..helpers import ini, text, lighting
| 21 | 41 | 0.761905 | 6 | 42 | 5.333333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 42 | 1 | 42 | 42 | 0.888889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
66129dcc493b2a64f74efab13a9510bfd35e46f8 | 41 | py | Python | facemask_augmenter/dlib_face_landmarks/__init__.py | agikarasugi/Face-Mask-Invariant-End-to-End-Face-Recognition | eb274ff98246c1bb8748bd8c8351d3494a87dfce | [
"MIT"
] | 1 | 2021-05-21T07:56:26.000Z | 2021-05-21T07:56:26.000Z | facemask_augmenter/dlib_face_landmarks/__init__.py | agikarasugi/Face-Mask-Invariant-End-to-End-Face-Recognition | eb274ff98246c1bb8748bd8c8351d3494a87dfce | [
"MIT"
] | null | null | null | facemask_augmenter/dlib_face_landmarks/__init__.py | agikarasugi/Face-Mask-Invariant-End-to-End-Face-Recognition | eb274ff98246c1bb8748bd8c8351d3494a87dfce | [
"MIT"
] | 1 | 2021-08-10T05:34:53.000Z | 2021-08-10T05:34:53.000Z | from .dlib_api import get_face_landmarks
| 20.5 | 40 | 0.878049 | 7 | 41 | 4.714286 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.097561 | 41 | 1 | 41 | 41 | 0.891892 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
b0e850fc66a65fb29fac2e33b29fefec20195cda | 13,606 | py | Python | tests/test_sparse_vector.py | wbknez/word-categorization | a6cfa9961fdac2ae781d19b496a2e849e1bd7e2a | [
"Apache-2.0"
] | null | null | null | tests/test_sparse_vector.py | wbknez/word-categorization | a6cfa9961fdac2ae781d19b496a2e849e1bd7e2a | [
"Apache-2.0"
] | null | null | null | tests/test_sparse_vector.py | wbknez/word-categorization | a6cfa9961fdac2ae781d19b496a2e849e1bd7e2a | [
"Apache-2.0"
] | null | null | null | """
Contains unit tests to verify that sparse vector operations work as intended.
"""
from copy import copy
import numpy as np
from unittest import TestCase
from wordcat.sparse import SparseVector
class SparseVectorTest(TestCase):
"""
Test suite for SparseVector.
"""
def test_abs_with_random(self):
array = np.random.randint(-20, 20, 20)
vec = SparseVector.from_list(array)
expected = SparseVector.from_list(array)
expected.data = np.abs(expected.data)
result = vec.abs()
self.assertEqual(result, expected)
def test_abs_with_zero(self):
array = np.random.randint(0, 1, 20)
vec = SparseVector.from_list(array)
expected = SparseVector.from_list(array)
result = vec.abs()
self.assertEqual(result, expected)
def test_add_with_random_vector(self):
array_a = np.array([1, 0, 2, 0, 3, 0, 4, 5])
array_b = np.array([2, 1, 0, 3, 4, 12, 0, 7])
a = SparseVector.from_list(array_a)
b = SparseVector.from_list(array_b)
expected = SparseVector.from_list([3, 0, 0, 0, 7, 0, 0, 12])
result = a + b
self.assertEqual(expected, result)
def test_add_with_random_scalar(self):
array = np.array([1, 0, 2, 0, 3, 0, 4, 5])
vec = SparseVector.from_list(array)
scalar = np.random.randint(1, 100)
expected = SparseVector.from_list(array)
expected.data = np.add(expected.data,
np.full(expected.data.size, scalar))
expected.compact()
result = vec + scalar
self.assertEqual(result, expected)
def test_add_with_zero_vector(self):
a = SparseVector.from_list([1, 0, 2, 0, 3, 0, 4, 5])
b = SparseVector.zero(8)
expected = SparseVector.zero(8)
result = a + b
self.assertEqual(expected, result)
def test_add_with_zero_scalar(self):
vec = SparseVector.from_list([1, 0, 2, 0, 3, 0, 4, 5])
scalar = 0
expected = SparseVector.from_list([1, 0, 2, 0, 3, 0, 4, 5])
result = vec + scalar
self.assertEqual(result, expected)
def test_compact_with_random(self):
data = np.random.randint(0, 10, 30, dtype=np.uint16)
indices = np.arange(30, dtype=np.uint32)
vec = SparseVector(data, indices, 30)
vec.compact()
zi = np.where(data == 0)
expected = SparseVector(np.delete(data, zi), np.delete(indices, zi), 30)
result = copy(vec)
self.assertEqual(result, expected)
def test_compact_with_zero(self):
vec = SparseVector(
np.zeros(10, dtype=np.uint16), np.arange(10, dtype=np.uint32), 10
)
vec.compact()
expected = SparseVector.zero(10)
result = copy(vec)
self.assertEqual(result, expected)
def test_divide_with_random_vector(self):
array_a = np.random.randint(0, 100, 30)
array_b = np.random.randint(0, 100, array_a.size)
a = SparseVector.from_list(array_a)
b = SparseVector.from_list(array_b)
a_idx = np.in1d(a.indices, b.indices)
b_idx = np.in1d(b.indices, a.indices)
expected = SparseVector.from_list(array_a)
expected.data = np.divide(a.data[a_idx], b.data[b_idx])
expected.indices = a.indices[a_idx]
expected.size = a.size
result = a / b
self.assertEqual(result, expected)
def test_divide_with_random_scalar(self):
array = np.array([1, 0, 2, 0, 3, 0, 4, 5])
vec = SparseVector.from_list(array)
scalar = np.random.randint(1, 100)
expected = SparseVector.from_list(array)
expected.data = np.divide(expected.data, scalar)
result = vec / scalar
self.assertEqual(result, expected)
def test_divide_with_zero_vector(self):
a = SparseVector.from_list([1, 0, 2, 0, 3, 0, 4, 5])
b = SparseVector.zero(8)
expected = SparseVector.zero(8)
result = a / b
self.assertEqual(expected, result)
def test_divide_with_zero_scalar_throws(self):
with self.assertRaises(ZeroDivisionError):
_ = SparseVector.from_list([1, 0, 2, 0, 3, 0, 4, 5]) / 0
def test_exp_with_random(self):
array = np.random.randint(0, 20, 20)
vec = SparseVector.from_list(array)
expected = SparseVector.from_list(array)
expected.data = np.exp(expected.data)
result = vec.exp()
self.assertEqual(result, expected)
def test_exp_with_zero(self):
array = np.random.randint(0, 1, 20)
vec = SparseVector.from_list(array)
expected = SparseVector.from_list(array)
expected.data = np.exp(expected.data)
result = vec.exp()
self.assertEqual(result, expected)
def test_log2_with_random(self):
array = np.random.randint(0, 20, 20)
vec = SparseVector.from_list(array)
expected = SparseVector.from_list(array)
expected.data = np.log2(expected.data)
result = vec.log2()
self.assertEqual(result, expected)
def test_log2_with_zero(self):
array = np.random.randint(0, 1, 20)
vec = SparseVector.from_list(array)
expected = SparseVector.from_list(array)
expected.data = np.log2(expected.data)
result = vec.log2()
self.assertEqual(result, expected)
def test_multiply_with_random_vector(self):
array_a = np.array([1, 0, 2, 0, 3, 0, 4, 5])
array_b = np.random.randint(0, 100, array_a.size)
a = SparseVector.from_list(array_a)
b = SparseVector.from_list(array_b)
expected = SparseVector.from_list(np.multiply(array_a, array_b))
result = a * b
self.assertEqual(expected, result)
def test_multiply_with_random_scalar(self):
array = np.array([1, 0, 2, 0, 3, 0, 4, 5])
vec = SparseVector.from_list(array)
scalar = np.random.randint(1, 100)
expected = SparseVector.from_list(np.multiply(array, scalar))
result = vec * scalar
self.assertEqual(result, expected)
def test_multiply_with_zero_vector(self):
a = SparseVector.from_list([1, 0, 2, 0, 3, 0, 4, 5])
b = SparseVector.zero(8)
expected = SparseVector.zero(8)
result = a * b
self.assertEqual(expected, result)
def test_multiply_with_zero_scalar(self):
vec = SparseVector.from_list([1, 0, 2, 0, 3, 0, 4, 5])
scalar = 0
expected = SparseVector.zero(8)
result = vec * scalar
self.assertEqual(result, expected)
def test_negate_with_random(self):
array = np.random.randint(0, 100, 20)
vec = SparseVector.from_list(array)
expected = SparseVector.from_list(array)
expected.data = np.negative(expected.data)
result = -vec
self.assertEqual(result, expected)
def test_negate_with_zero(self):
vec = SparseVector.zero(4)
expected = SparseVector.zero(4)
result = -vec
self.assertEqual(result, expected)
def test_power_with_random(self):
array = np.random.randint(0, 20, 20)
vec = SparseVector.from_list(array)
a = np.random.randint(2, 10)
expected = SparseVector.from_list(array)
expected.data = np.power(expected.data, a)
result = vec.power(a)
self.assertEqual(result, expected)
def test_power_with_zero(self):
array = np.random.randint(0, 1, 20)
vec = SparseVector.from_list(array)
a = np.random.randint(2, 10)
expected = SparseVector.from_list(array)
expected.data = np.power(expected.data, a)
result = vec.power(a)
self.assertEqual(result, expected)
def test_subtract_with_random_vector(self):
array_a = np.array([1, 0, 2, 0, 3, 0, 4, 5])
array_b = np.array([2, 1, 0, 3, 4, 12, 0, 7])
a = SparseVector.from_list(array_a)
b = SparseVector.from_list(array_b)
expected = SparseVector.from_list([-1, 0, 0, 0, -1, 0, 0, -2])
result = a - b
self.assertEqual(expected, result)
def test_subtract_with_random_scalar(self):
array = np.array([1, 0, 2, 0, 3, 0, 4, 5])
vec = SparseVector.from_list(array)
scalar = np.random.randint(1, 100)
expected = SparseVector.from_list(array)
expected.data = np.subtract(expected.data,
np.full(expected.data.size, scalar))
expected.compact()
result = vec - scalar
self.assertEqual(result, expected)
def test_subtract_with_zero_vector(self):
a = SparseVector.from_list([1, 0, 2, 0, 3, 0, 4, 5])
b = SparseVector.zero(8)
expected = SparseVector.zero(8)
result = a - b
self.assertEqual(expected, result)
def test_subtract_with_zero_scalar(self):
vec = SparseVector.from_list([1, 0, 2, 0, 3, 0, 4, 5])
scalar = 0
expected = SparseVector.from_list([1, 0, 2, 0, 3, 0, 4, 5])
result = vec - scalar
self.assertEqual(result, expected)
def test_sum_with_random(self):
array = np.random.randint(0, 100, 20)
vec = SparseVector.from_list(array)
expected = np.sum(array)
result = vec.sum()
self.assertEqual(result, expected)
def test_sum_with_zero(self):
vec = SparseVector.zero(7)
expected = 0
result = vec.sum()
self.assertEqual(result, expected)
def test_to_dense_with_random(self):
array = np.random.randint(0, 5, 30, dtype=np.uint16)
vec = SparseVector.from_list(array)
expected = np.copy(array)
result = vec.to_dense()
self.assertTrue(np.array_equal(result, expected))
def test_to_dense_with_zero(self):
vec = SparseVector.zero(7)
expected = np.zeros(7, dtype=np.uint16)
result = vec.to_dense()
self.assertTrue(np.array_equal(result, expected))
def test_value_at_when_value_is_not_zero(self):
array = np.random.randint(1, 10, 20)
vec = SparseVector.from_list(array)
expected = array.tolist()
result = [vec.value_at(i) for i in range(vec.size)]
self.assertEqual(result, expected)
def test_value_at_when_value_is_zero(self):
vec = SparseVector.zero(7)
expected = [0] * 7
result = [vec.value_at(i) for i in range(vec.size)]
self.assertEqual(result, expected)
def test_venn_with_random(self):
array_a = np.random.randint(0, 100, 30)
array_b = np.random.randint(0, 100, 30)
a = SparseVector.from_list(array_a)
b = SparseVector.from_list(array_b)
a_i = np.in1d(a.indices, b.indices)
a_d = np.in1d(a.indices,
np.setdiff1d(a.indices, a.indices[a_i]))
expected0 = SparseVector(a.data[a_i], a.indices[a_i], a.size)
expected1 = SparseVector(a.data[a_d], a.indices[a_d], a.size)
result0, result1 = a.venn(b)
self.assertEqual(result0, expected0)
self.assertEqual(result1, expected1)
self.assertEqual(result0.data.size + result1.data.size, a.data.size)
self.assertEqual(result0.indices.size + result1.indices.size,
a.indices.size)
def test_venn_with_zero(self):
a = SparseVector.from_list([1, 0, 2, 0, 3, 4, 0, 5])
b = SparseVector.zero(8)
expected0 = SparseVector.zero(8)
expected1 = SparseVector.from_list([1, 0, 2, 0, 3, 4, 0, 5])
result0, result1 = a.venn(b)
self.assertEqual(result0, expected0)
self.assertEqual(result1, expected1)
def test_from_list_with_no_unique_elements(self):
vec = SparseVector.from_list([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
self.assertEqual(vec.size, 10)
self.assertTrue(np.array_equal(vec.data, np.array([], dtype=np.uint16)))
self.assertTrue(np.array_equal(vec.indices,
np.array([], dtype=np.uint32)))
def test_from_list_with_several_unique_elements(self):
vec = SparseVector.from_list([0, 1, 2, 0, 3, 4, 0, 5, 6, 0, 7, 8])
self.assertEqual(vec.size, 12)
self.assertTrue(np.array_equal(np.array([1, 2, 3, 4, 5, 6, 7, 8],
dtype=np.uint16),
vec.data))
self.assertTrue(np.array_equal(np.array([1, 2, 4, 5, 7, 8, 10, 11],
dtype=np.uint32),
vec.indices))
def test_from_lists_with_no_unique_elements(self):
vec = SparseVector.from_lists([], [], 5)
self.assertEqual(vec.size, 5)
self.assertTrue(np.array_equal(vec.data, np.array([], dtype=np.uint16)))
self.assertTrue(np.array_equal(vec.indices,
np.array([], dtype=np.uint32)))
def test_from_lists_with_several_unique_elements(self):
vec = SparseVector.from_lists([1, 2, 3, 4, 5, 6, 7, 8],
[1, 2, 4, 5, 7, 8, 10, 11], 12)
self.assertEqual(vec.size, 12)
self.assertTrue(np.array_equal(np.array([1, 2, 3, 4, 5, 6, 7, 8],
dtype=np.uint16),
vec.data))
self.assertTrue(np.array_equal(np.array([1, 2, 4, 5, 7, 8, 10, 11],
dtype=np.uint32),
vec.indices))
| 31.715618 | 80 | 0.593341 | 1,811 | 13,606 | 4.309221 | 0.060188 | 0.120964 | 0.146079 | 0.124936 | 0.842901 | 0.824705 | 0.810738 | 0.788698 | 0.717196 | 0.660174 | 0 | 0.049629 | 0.286197 | 13,606 | 428 | 81 | 31.78972 | 0.753913 | 0.007791 | 0 | 0.655405 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.175676 | 1 | 0.135135 | false | 0 | 0.013514 | 0 | 0.152027 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
b0fc5a3e2564556242d2131d36622ad97df1e2a7 | 111 | py | Python | tests/__init__.py | kaixinguo360/genpac | 2979ff95192c111dd7fdb612dc1c59552f5907c2 | [
"MIT"
] | 2,331 | 2015-01-22T02:59:22.000Z | 2022-03-25T14:31:52.000Z | tests/__init__.py | surichard/genpac | 2f466d28f403a9a5624e02edcd538475fe475fc8 | [
"MIT"
] | 38 | 2015-04-25T10:06:23.000Z | 2022-03-07T08:22:41.000Z | tests/__init__.py | surichard/genpac | 2f466d28f403a9a5624e02edcd538475fe475fc8 | [
"MIT"
] | 398 | 2015-02-06T03:35:49.000Z | 2022-02-19T18:20:33.000Z | def setup_module(module):
print('setup_module')
def teardown_module(module):
print('teardown_module')
| 18.5 | 28 | 0.738739 | 14 | 111 | 5.571429 | 0.357143 | 0.282051 | 0.435897 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.135135 | 111 | 5 | 29 | 22.2 | 0.8125 | 0 | 0 | 0 | 0 | 0 | 0.243243 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0 | 0.5 | 0.5 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
c6619b5b13793dbab72991cba8ed9253d0d6a180 | 39 | py | Python | kikimr/public/sdk/python/client/auth_helpers.py | yandex-cloud/ydb-python-sdk | 0df2dce2d77fc41ad3020072740f51dd91630177 | [
"Apache-2.0"
] | 19 | 2019-07-01T08:25:29.000Z | 2022-01-26T14:46:51.000Z | kikimr/public/sdk/python/client/auth_helpers.py | yandex-cloud/ydb-python-sdk | 0df2dce2d77fc41ad3020072740f51dd91630177 | [
"Apache-2.0"
] | 5 | 2019-07-02T13:36:42.000Z | 2021-09-14T06:46:48.000Z | kikimr/public/sdk/python/client/auth_helpers.py | yandex-cloud/ydb-python-sdk | 0df2dce2d77fc41ad3020072740f51dd91630177 | [
"Apache-2.0"
] | 10 | 2019-06-07T10:36:19.000Z | 2021-10-15T08:58:11.000Z | from ydb.auth_helpers import * # noqa
| 19.5 | 38 | 0.74359 | 6 | 39 | 4.666667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.179487 | 39 | 1 | 39 | 39 | 0.875 | 0.102564 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
c66ff360e95f534364e470bf11ca270b076f25bc | 36 | py | Python | emoji_puncher/level/__init__.py | GIider/EmojiPuncher | 87f93df7b647d1ddb53d7fe6cd579b7c2cd57071 | [
"MIT"
] | null | null | null | emoji_puncher/level/__init__.py | GIider/EmojiPuncher | 87f93df7b647d1ddb53d7fe6cd579b7c2cd57071 | [
"MIT"
] | null | null | null | emoji_puncher/level/__init__.py | GIider/EmojiPuncher | 87f93df7b647d1ddb53d7fe6cd579b7c2cd57071 | [
"MIT"
] | null | null | null | # coding=utf-8
from .level import *
| 12 | 20 | 0.694444 | 6 | 36 | 4.166667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033333 | 0.166667 | 36 | 2 | 21 | 18 | 0.8 | 0.333333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
c6caebc27ef093bd8a398faa09ed6e01acf057c5 | 24 | py | Python | app/admin/examples/eg005_audit_users/__init__.py | davidgacc/docusign | e63167101656d0066d481844576ce687ea80eb91 | [
"MIT"
] | 21 | 2020-05-13T21:08:44.000Z | 2022-02-18T01:32:16.000Z | app/admin/examples/eg005_audit_users/__init__.py | davidgacc/docusign | e63167101656d0066d481844576ce687ea80eb91 | [
"MIT"
] | 8 | 2020-11-23T09:28:04.000Z | 2022-02-02T12:04:08.000Z | app/admin/examples/eg005_audit_users/__init__.py | davidgacc/docusign | e63167101656d0066d481844576ce687ea80eb91 | [
"MIT"
] | 26 | 2020-05-12T22:20:01.000Z | 2022-03-09T10:57:27.000Z | from .views import eg005 | 24 | 24 | 0.833333 | 4 | 24 | 5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 0.125 | 24 | 1 | 24 | 24 | 0.809524 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
c6fd2539a2cb9340d5159739b3f7d3c59ecbd346 | 163 | py | Python | helx/__init__.py | epignatelli/helx | 3dbbf228a63b79169e80f890b97db3d7473e956e | [
"Apache-2.0"
] | 1 | 2021-07-27T00:03:47.000Z | 2021-07-27T00:03:47.000Z | helx/__init__.py | epignatelli/helx | 3dbbf228a63b79169e80f890b97db3d7473e956e | [
"Apache-2.0"
] | 10 | 2021-06-16T08:42:25.000Z | 2021-07-05T08:41:51.000Z | helx/__init__.py | epignatelli/helx | 3dbbf228a63b79169e80f890b97db3d7473e956e | [
"Apache-2.0"
] | null | null | null | from . import typing
from . import jax
from . import random
from . import image
from . import distributed
from . import nn
from . import rl
from . import optimise
| 18.111111 | 25 | 0.754601 | 24 | 163 | 5.125 | 0.416667 | 0.650407 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.196319 | 163 | 8 | 26 | 20.375 | 0.938931 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
05ac084a30dfe6b7e2b64a4c03027135bade50d6 | 29 | py | Python | regym/rl_algorithms/algorithms/DQN/__init__.py | KnwSondess/Regym | 825c7dacf955a3e2f6c658c0ecb879a0ca036c1a | [
"MIT"
] | 2 | 2020-09-13T15:53:20.000Z | 2020-12-08T15:57:05.000Z | regym/rl_algorithms/algorithms/DQN/__init__.py | KnwSondess/Regym | 825c7dacf955a3e2f6c658c0ecb879a0ca036c1a | [
"MIT"
] | null | null | null | regym/rl_algorithms/algorithms/DQN/__init__.py | KnwSondess/Regym | 825c7dacf955a3e2f6c658c0ecb879a0ca036c1a | [
"MIT"
] | 1 | 2021-09-20T13:48:30.000Z | 2021-09-20T13:48:30.000Z | from .dqn import DQNAlgorithm | 29 | 29 | 0.862069 | 4 | 29 | 6.25 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.103448 | 29 | 1 | 29 | 29 | 0.961538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
05b573079ec0b84365dc0f84803ababcebc81b64 | 168 | py | Python | annoyed-alligators/socl_media/apps/users/apps.py | Vthechamp22/summer-code-jam-2021 | 0a8bf1f22f6c73300891fd779da36efd8e1304c1 | [
"MIT"
] | 40 | 2020-08-02T07:38:22.000Z | 2021-07-26T01:46:50.000Z | annoyed-alligators/socl_media/apps/users/apps.py | Vthechamp22/summer-code-jam-2021 | 0a8bf1f22f6c73300891fd779da36efd8e1304c1 | [
"MIT"
] | 134 | 2020-07-31T12:15:45.000Z | 2020-12-13T04:42:19.000Z | annoyed-alligators/socl_media/apps/users/apps.py | Vthechamp22/summer-code-jam-2021 | 0a8bf1f22f6c73300891fd779da36efd8e1304c1 | [
"MIT"
] | 101 | 2020-07-31T12:00:47.000Z | 2021-11-01T09:06:58.000Z | from django.apps import AppConfig
class UsersConfig(AppConfig):
name = 'socl_media.apps.users'
def ready(self):
import socl_media.apps.users.signals
| 18.666667 | 44 | 0.720238 | 22 | 168 | 5.409091 | 0.681818 | 0.151261 | 0.218487 | 0.302521 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.190476 | 168 | 8 | 45 | 21 | 0.875 | 0 | 0 | 0 | 0 | 0 | 0.125 | 0.125 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.4 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
af016b95aedaae25d027088a97986533c0c7e944 | 133 | py | Python | dataset/__init__.py | NotMorven/cavaface.pytorch | 822651f0e6d4d08df5441922acead39dc5375103 | [
"MIT"
] | 329 | 2020-04-17T03:03:52.000Z | 2021-08-04T07:57:07.000Z | dataset/__init__.py | NotMorven/cavaface.pytorch | 822651f0e6d4d08df5441922acead39dc5375103 | [
"MIT"
] | 76 | 2020-05-22T05:21:33.000Z | 2021-07-30T03:39:06.000Z | dataset/__init__.py | NotMorven/cavaface.pytorch | 822651f0e6d4d08df5441922acead39dc5375103 | [
"MIT"
] | 65 | 2020-05-07T08:57:16.000Z | 2021-07-21T20:10:44.000Z | from dataset.datasets import MXFaceDataset, SyntheticDataset
from dataset.randaugment import RandAugment
from dataset.utils import *
| 33.25 | 60 | 0.864662 | 15 | 133 | 7.666667 | 0.533333 | 0.286957 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.097744 | 133 | 3 | 61 | 44.333333 | 0.958333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
af425686177fd396a489fef82223fcfc7f38fe64 | 3,998 | py | Python | Online_shop/Online_shop/main_app/migrations/0001_initial.py | petel3/Softuni_education | 4fd80f8c6ce6c3d6a838edecdb091dda2ed1084c | [
"MIT"
] | 2 | 2022-03-05T13:17:12.000Z | 2022-03-05T13:17:16.000Z | Online_shop/Online_shop/main_app/migrations/0001_initial.py | petel3/Softuni_education | 4fd80f8c6ce6c3d6a838edecdb091dda2ed1084c | [
"MIT"
] | null | null | null | Online_shop/Online_shop/main_app/migrations/0001_initial.py | petel3/Softuni_education | 4fd80f8c6ce6c3d6a838edecdb091dda2ed1084c | [
"MIT"
] | null | null | null | # Generated by Django 4.0.3 on 2022-03-26 09:05
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Souvenir',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('quantity', models.IntegerField(validators=[django.core.validators.MinValueValidator(1)])),
('type', models.CharField(choices=[('Luxary', 'Luxary'), ('Normal', 'Normal')], max_length=6)),
('description', models.TextField(blank=True, null=True)),
('price', models.FloatField(validators=[django.core.validators.MinValueValidator(0.0)])),
('image', models.ImageField(upload_to='mediafiles/')),
('user_key', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Plant',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('quantity', models.IntegerField(validators=[django.core.validators.MinValueValidator(1)])),
('type', models.CharField(choices=[('Winter plant', 'Winter plant'), ('Summer plant', 'Summer plant'), ('Spring plant', 'Spring plant'), ('Autumn plant', 'Autumn plant')], max_length=12)),
('description', models.TextField(blank=True, null=True)),
('price', models.FloatField(validators=[django.core.validators.MinValueValidator(0.0)])),
('image', models.ImageField(upload_to='mediafiles/')),
('user_key', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Jewelry',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('quantity', models.IntegerField(validators=[django.core.validators.MinValueValidator(1)])),
('description', models.TextField(blank=True, null=True)),
('materials', models.CharField(choices=[('Gold', 'Gold'), ('Steel', 'Steel'), ('Silver', 'Silver')], max_length=6)),
('price', models.FloatField(validators=[django.core.validators.MinValueValidator(0.0)])),
('image', models.ImageField(upload_to='mediafiles/')),
('user_key', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Flower',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('quantity', models.IntegerField(validators=[django.core.validators.MinValueValidator(1)])),
('type', models.CharField(choices=[('Bouquet', 'Bouquet'), ('Basket', 'Basket')], max_length=7)),
('description', models.TextField(blank=True, null=True)),
('price', models.FloatField(validators=[django.core.validators.MinValueValidator(0.0)])),
('image', models.ImageField(upload_to='mediafiles/')),
('user_key', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 56.309859 | 204 | 0.609555 | 397 | 3,998 | 6.030227 | 0.224181 | 0.037594 | 0.075188 | 0.100251 | 0.767335 | 0.767335 | 0.767335 | 0.749373 | 0.749373 | 0.749373 | 0 | 0.013008 | 0.230865 | 3,998 | 70 | 205 | 57.114286 | 0.765528 | 0.011256 | 0 | 0.698413 | 1 | 0 | 0.113136 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.063492 | 0 | 0.126984 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
af783646c4f1e41853e9e8663f57c09422fdb2ae | 191 | py | Python | omtool/core/creation/__init__.py | Kraysent/OMTool | abb293ee359720d622ed0c4ecdf90967171007c8 | [
"Apache-2.0"
] | null | null | null | omtool/core/creation/__init__.py | Kraysent/OMTool | abb293ee359720d622ed0c4ecdf90967171007c8 | [
"Apache-2.0"
] | 51 | 2021-12-05T13:31:51.000Z | 2022-03-27T16:05:04.000Z | omtool/core/creation/__init__.py | Kraysent/OMTool | abb293ee359720d622ed0c4ecdf90967171007c8 | [
"Apache-2.0"
] | null | null | null | '''
Source files for creation module of the OMTool.
'''
from omtool.core.creation.snapshot_builder import SnapshotBuilder
from omtool.core.creation.config import CreationConfig, Type, Object
| 31.833333 | 68 | 0.816754 | 25 | 191 | 6.2 | 0.72 | 0.129032 | 0.180645 | 0.283871 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.104712 | 191 | 5 | 69 | 38.2 | 0.906433 | 0.246073 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
afb56dfc593411b9d9facb59b17dee30759894e4 | 157 | py | Python | test/suite.py | johnstonskj/guernsey | 47df75eb486c2a99bc44c3a4a2668fe4c8da2a87 | [
"BSD-3-Clause"
] | null | null | null | test/suite.py | johnstonskj/guernsey | 47df75eb486c2a99bc44c3a4a2668fe4c8da2a87 | [
"BSD-3-Clause"
] | null | null | null | test/suite.py | johnstonskj/guernsey | 47df75eb486c2a99bc44c3a4a2668fe4c8da2a87 | [
"BSD-3-Clause"
] | null | null | null | from paths import *
from chaining import *
from filters import *
from entities import *
if __name__ == '__main__':
import unittest
unittest.main()
| 15.7 | 26 | 0.713376 | 19 | 157 | 5.473684 | 0.526316 | 0.288462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.210191 | 157 | 9 | 27 | 17.444444 | 0.83871 | 0 | 0 | 0 | 0 | 0 | 0.051282 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.714286 | 0 | 0.714286 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
afbdac8b270a130c22e27e06d7c3bed547287d24 | 33 | py | Python | visualizer/__init__.py | liuruiqiang/InstrumentSegmentation | 32bc58e84fbb4fb1d1aa5932c67d9a97a4a8db77 | [
"MIT"
] | null | null | null | visualizer/__init__.py | liuruiqiang/InstrumentSegmentation | 32bc58e84fbb4fb1d1aa5932c67d9a97a4a8db77 | [
"MIT"
] | null | null | null | visualizer/__init__.py | liuruiqiang/InstrumentSegmentation | 32bc58e84fbb4fb1d1aa5932c67d9a97a4a8db77 | [
"MIT"
] | null | null | null | from .visualizer import get_local | 33 | 33 | 0.878788 | 5 | 33 | 5.6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 33 | 1 | 33 | 33 | 0.933333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
afd025a1657cc6821c79ba6c2fedf33e8cc60d8c | 34 | py | Python | AIs/Mehul Jain/__init__.py | YSabarad/monopyly | 0460f2452c83846b6b9e3b234be411e12a86d69c | [
"MIT"
] | 4 | 2015-11-04T21:18:40.000Z | 2020-12-26T21:15:23.000Z | AIs/Mehul Jain/__init__.py | YSabarad/monopyly | 0460f2452c83846b6b9e3b234be411e12a86d69c | [
"MIT"
] | 2 | 2021-08-09T18:19:58.000Z | 2021-08-10T14:44:54.000Z | AIs/Mehul Jain/__init__.py | YSabarad/monopyly | 0460f2452c83846b6b9e3b234be411e12a86d69c | [
"MIT"
] | 6 | 2015-08-01T17:54:17.000Z | 2022-02-28T00:00:21.000Z | from .mumbaikar import MumbaikarAI | 34 | 34 | 0.882353 | 4 | 34 | 7.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.088235 | 34 | 1 | 34 | 34 | 0.967742 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
bb771be3767e6a23d44d2f334cf7bd9b3b70d8bf | 2,712 | py | Python | sktime/performance_metrics/forecasting/__init__.py | tombh/sktime | 53df0b9ed9d1fd800539165c414cc5611bcc56b3 | [
"BSD-3-Clause"
] | null | null | null | sktime/performance_metrics/forecasting/__init__.py | tombh/sktime | 53df0b9ed9d1fd800539165c414cc5611bcc56b3 | [
"BSD-3-Clause"
] | null | null | null | sktime/performance_metrics/forecasting/__init__.py | tombh/sktime | 53df0b9ed9d1fd800539165c414cc5611bcc56b3 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
__author__ = ["Markus Löning", "Tomasz Chodakowski", "Martin Walter"]
__all__ = [
"make_forecasting_scorer",
"MeanAbsoluteScaledError",
"MedianAbsoluteScaledError",
"MeanSquaredScaledError",
"MedianSquaredScaledError",
"MeanAbsoluteError",
"MeanSquaredError",
"MedianAbsoluteError",
"MedianSquaredError",
"MeanAbsolutePercentageError",
"MedianAbsolutePercentageError",
"MeanSquaredPercentageError",
"MedianSquaredPercentageError",
"MeanRelativeAbsoluteError",
"MedianRelativeAbsoluteError",
"GeometricMeanRelativeAbsoluteError",
"GeometricMeanRelativeSquaredError",
"MeanAsymmetricError",
"RelativeLoss",
"mean_absolute_scaled_error",
"median_absolute_scaled_error",
"mean_squared_scaled_error",
"median_squared_scaled_error",
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"median_squared_error",
"mean_absolute_percentage_error",
"median_absolute_percentage_error",
"mean_squared_percentage_error",
"median_squared_percentage_error",
"mean_relative_absolute_error",
"median_relative_absolute_error",
"geometric_mean_relative_absolute_error",
"geometric_mean_relative_squared_error",
"mean_asymmetric_error",
"relative_loss",
"evaluate",
]
from sktime.performance_metrics.forecasting._classes import (
make_forecasting_scorer,
MeanAbsoluteScaledError,
MedianAbsoluteScaledError,
MeanSquaredScaledError,
MedianSquaredScaledError,
MeanAbsoluteError,
MeanSquaredError,
MedianAbsoluteError,
MedianSquaredError,
MeanAbsolutePercentageError,
MedianAbsolutePercentageError,
MeanSquaredPercentageError,
MedianSquaredPercentageError,
MeanRelativeAbsoluteError,
MedianRelativeAbsoluteError,
GeometricMeanRelativeAbsoluteError,
GeometricMeanRelativeSquaredError,
MeanAsymmetricError,
RelativeLoss,
)
from sktime.performance_metrics.forecasting._functions import (
mean_absolute_scaled_error,
median_absolute_scaled_error,
mean_squared_scaled_error,
median_squared_scaled_error,
mean_absolute_error,
mean_squared_error,
median_absolute_error,
median_squared_error,
mean_absolute_percentage_error,
median_absolute_percentage_error,
mean_squared_percentage_error,
median_squared_percentage_error,
mean_relative_absolute_error,
median_relative_absolute_error,
geometric_mean_relative_absolute_error,
geometric_mean_relative_squared_error,
mean_asymmetric_error,
relative_loss,
)
| 30.818182 | 71 | 0.773968 | 217 | 2,712 | 9.147465 | 0.294931 | 0.077582 | 0.057431 | 0.050378 | 0.914861 | 0.875567 | 0.875567 | 0.875567 | 0.875567 | 0.875567 | 0 | 0.001307 | 0.153392 | 2,712 | 87 | 72 | 31.172414 | 0.86324 | 0.042773 | 0 | 0 | 0 | 0 | 0.374855 | 0.288855 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.02439 | 0 | 0.02439 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
bb8a924d8424ff52412c104aa101525a78ddd48b | 4,921 | py | Python | project-structure/src/test/python/com/johnowl/hello/service/validator_service_test.py | johnowl/python-studies | 7641dfe781532d74f162c701f28ccba9b90b5075 | [
"Apache-2.0"
] | null | null | null | project-structure/src/test/python/com/johnowl/hello/service/validator_service_test.py | johnowl/python-studies | 7641dfe781532d74f162c701f28ccba9b90b5075 | [
"Apache-2.0"
] | null | null | null | project-structure/src/test/python/com/johnowl/hello/service/validator_service_test.py | johnowl/python-studies | 7641dfe781532d74f162c701f28ccba9b90b5075 | [
"Apache-2.0"
] | null | null | null | from src.main.python.com.johnowl.hello.service.validator_service import ValidatorService
from src.main.python.com.johnowl.hello.service.validator_service import ValidationError
import unittest
class HelloServiceTest(unittest.TestCase):
def test_when_validate_without_body_and_with_valid_data_should_return_true(self):
service = ValidatorService()
headers = {
"Application-Id": "273ab84d-0c7d-434a-a9f7-3004eabf54a4", # random uuid v4
"Digest": "sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" # empty string hash
}
result = service.is_valid(headers)
self.assertEqual(result, True)
def test_when_validate_without_body_and_with_invalid_application_id_should_return_error(self):
service = ValidatorService()
headers = {
"Application-Id": "273ab84d-0c7d-434a-a9f7-3004eabf54", # invalid uuid v4
"Digest": "sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" # empty string hash
}
result = service.is_valid(headers)
self.assertIsInstance(result, ValidationError)
self.assertEqual(result.kind, "application_id_invalid")
self.assertEqual(result.message, "Application-Id inválido.")
def test_when_validate_without_body_and_with_invalid_digest_should_return_error(self):
service = ValidatorService()
headers = {
"Application-Id": "273ab84d-0c7d-434a-a9f7-3004eabf54a4", # random uuid v4
"Digest": "sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b856" # invalid hash
}
result = service.is_valid(headers)
self.assertIsInstance(result, ValidationError)
self.assertEqual(result.kind, "digest_invalid")
self.assertEqual(result.message, "Digest inválido.")
def test_when_validate_without_body_and_with_invalid_format_digest_should_return_error(self):
service = ValidatorService()
headers = {
"Application-Id": "273ab84d-0c7d-434a-a9f7-3004eabf54a4", # random uuid v4
"Digest": "sha256=e3b0c44298fc1c149afbf4c8996fb924?7ae41e4649b934ca495991b7852b856" # invalid format hash
}
result = service.is_valid(headers)
self.assertIsInstance(result, ValidationError)
self.assertEqual(result.kind, "digest_invalid")
self.assertEqual(result.message, "Digest inválido.")
def test_when_validate_with_valid_data_should_return_true(self):
service = ValidatorService()
headers = {
"Application-Id": "273ab84d-0c7d-434a-a9f7-3004eabf54a4", # random uuid v4
"Digest": "sha256=5e2bf57d3f40c4b6df69daf1936cb766f832374b4fc0259a7cbff06e2f70f269" # valid hash
}
result = service.is_valid(headers, body="lorem ipsum")
self.assertTrue(result)
def test_when_validate_without_body_and_with_empty_application_id_should_return_error(self):
service = ValidatorService()
headers = {
"Application-Id": "",
"Digest": "sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" # empty string hash
}
result = service.is_valid(headers)
self.assertIsInstance(result, ValidationError)
self.assertEqual(result.kind, "application_id_not_found")
self.assertEqual(result.message, "Application-Id não encontrado.")
def test_when_validate_without_body_and_with_empty_digest_should_return_error(self):
service = ValidatorService()
headers = {
"Application-Id": "273ab84d-0c7d-434a-a9f7-3004eabf54", # invalid uuid v4
"Digest": "" # empty string hash
}
result = service.is_valid(headers)
self.assertIsInstance(result, ValidationError)
self.assertEqual(result.kind, "digest_not_found")
self.assertEqual(result.message, "Digest não encontrado.")
def test_when_validate_without_body_and_without_digest_should_return_error(self):
service = ValidatorService()
headers = {
"Application-Id": "273ab84d-0c7d-434a-a9f7-3004eabf54" # invalid uuid v4
}
result = service.is_valid(headers)
self.assertIsInstance(result, ValidationError)
self.assertEqual(result.kind, "digest_not_found")
self.assertEqual(result.message, "Digest não encontrado.")
def test_when_validate_without_body_and_without_application_id_should_return_error(self):
service = ValidatorService()
headers = {
"Digest": "sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" # empty string hash
}
result = service.is_valid(headers)
self.assertIsInstance(result, ValidationError)
self.assertEqual(result.kind, "application_id_not_found")
self.assertEqual(result.message, "Application-Id não encontrado.") | 49.707071 | 118 | 0.711238 | 480 | 4,921 | 7.035417 | 0.14375 | 0.065443 | 0.093278 | 0.050637 | 0.893693 | 0.89162 | 0.870299 | 0.870299 | 0.860527 | 0.803672 | 0 | 0.115326 | 0.201788 | 4,921 | 99 | 119 | 49.707071 | 0.744399 | 0.048974 | 0 | 0.625 | 0 | 0 | 0.258092 | 0.174277 | 0 | 0 | 0 | 0 | 0.261364 | 1 | 0.102273 | false | 0 | 0.034091 | 0 | 0.147727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
bba8915db9931f84c1c7228b7fc7409617493794 | 45 | py | Python | tests/test_pyutils.py | james-gloudemans/cs101 | 295e279dbb258e93d15f20499f592d2180a3f37f | [
"MIT"
] | null | null | null | tests/test_pyutils.py | james-gloudemans/cs101 | 295e279dbb258e93d15f20499f592d2180a3f37f | [
"MIT"
] | null | null | null | tests/test_pyutils.py | james-gloudemans/cs101 | 295e279dbb258e93d15f20499f592d2180a3f37f | [
"MIT"
] | null | null | null | """test_pyutils.py: Tests for pyutils.py."""
| 22.5 | 44 | 0.688889 | 7 | 45 | 4.285714 | 0.714286 | 0.6 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.088889 | 45 | 1 | 45 | 45 | 0.731707 | 0.844444 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 1 | null | true | 0 | 0 | null | null | null | 1 | 1 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
bbba843e1470cf418c1efc4ae5bc9ce3c1de71a9 | 124 | py | Python | jobs/core/helper/__init__.py | minimal-job-system/job-runners | ff485b4d934071f5155421c332724712c544c548 | [
"MIT"
] | null | null | null | jobs/core/helper/__init__.py | minimal-job-system/job-runners | ff485b4d934071f5155421c332724712c544c548 | [
"MIT"
] | 7 | 2018-12-20T10:18:36.000Z | 2021-02-19T22:34:21.000Z | jobs/core/helper/__init__.py | minimal-job-system/job-runners | ff485b4d934071f5155421c332724712c544c548 | [
"MIT"
] | 1 | 2020-01-16T11:42:38.000Z | 2020-01-16T11:42:38.000Z | from core.helper.parallel_helper import ParallelHelperClass
from core.helper.collection_helper import CollectionHelperClass
| 41.333333 | 63 | 0.903226 | 14 | 124 | 7.857143 | 0.571429 | 0.145455 | 0.254545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.064516 | 124 | 2 | 64 | 62 | 0.948276 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
bbc9da079caef5a83fd53beb7fa6223f16b3a411 | 11,453 | py | Python | com/code/lxb/example/MysqlUtil.py | albert-bing/quantitativeTrading | c3a96d895aad3e1c728692200a68384682632f64 | [
"MIT"
] | null | null | null | com/code/lxb/example/MysqlUtil.py | albert-bing/quantitativeTrading | c3a96d895aad3e1c728692200a68384682632f64 | [
"MIT"
] | null | null | null | com/code/lxb/example/MysqlUtil.py | albert-bing/quantitativeTrading | c3a96d895aad3e1c728692200a68384682632f64 | [
"MIT"
] | null | null | null | # @Team:Big Data Group
# @Time:2020/7/6 16:10
# @Author:albert·bing
# @File:MysqlUtil.py
# @Software:PyCharm
# start your code
import pymysql
# 测试
host = '81.70.166.101'
# 生产
# host='172.21.0.49'
password = 'r1kJzB'
port = 3306
# 黄历数据入库
def insert_data_yellow_calendar(data):
db = pymysql.connect(host=host, user='root', password=password, port=port, db='traffic')
cursor = db.cursor()
# sql = "select * from car_param_info limit 10;"
sql = 'insert into date_yellow_calendar(`y_day`,`gregorian_calendar`,`lunar_calendar`,`dao`,`start`,`yi`,`ji`,`chong`,\
`suici`,`tai`,`wuxing`,`cai`,`xi`,`fu`,`constellation`,`chinese_zodiac`,`xiongshen`,`jishen`) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'
cursor.executemany(sql, data)
cursor.close()
db.commit()
db.close()
print("mysql-插入成功!\n")
# 查询日期
def select_data_date(start_date,end_date):
db = pymysql.connect(host=host, user='root', password=password, port=port, db='traffic')
cursor = db.cursor()
# 省名称、境外输入、日期、确诊(累计)人数、治愈人数、死亡人数、新增人数
# sql = "SELECT year_id,format_date from date_calendar_full_scale where format_date <= '"+end_date+"' and format_date >= '"+start_date+"' ORDER BY format_date"
sql = "SELECT y_date from date_calendar where y_date <= '"+end_date+"' and y_date >= '"+start_date+"' ORDER BY y_date";
cursor.execute(sql)
result = cursor.fetchall()
cursor.close()
db.commit()
db.close()
return result
# 星座日数据入库
def insert_data_cons_day(data):
db = pymysql.connect(host=host, user='root', password=password, port=port, db='traffic')
cursor = db.cursor()
sql = 'insert into date_constellation_info_day(`constellation`,`con_date`,`com_fortune_index`,`love_fortune_index`,' \
'`career_index`,`wealth_index`,`health_index`,`negotiation_index`,`lucky_color`,`lucky_number`,' \
'`speed_dating_constellation`,`short_comment`,`com_fortune`,`love_fortune`,`career_fortune`,`wealth_fortune`,' \
'`health_fortune`) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);'
cursor.execute(sql, data)
cursor.close()
db.commit()
db.close()
print("mysql-插入成功!\n")
# 星座周数据入库
def insert_data_cons_week(data):
db = pymysql.connect(host=host, user='root', password=password, port=port, db='traffic')
cursor = db.cursor()
sql = 'insert into date_constellation_info_wmy(`constellation`,`con_date`,`com_fortune_index`,`love_fortune_index`,' \
'`career_index`,`wealth_index`,`health_index`,`lucky_color`,`lucky_constellation`,' \
'`beware_constellation`,`short_comment`,`com_fortune`,`love_fortune`,`career_fortune`,`wealth_fortune`,' \
'`health_fortune`,`date_level`) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);'
cursor.execute(sql, data)
cursor.close()
db.commit()
db.close()
print("mysql-插入成功!\n")
# 星座月数据入库
def insert_data_cons_month(data):
db = pymysql.connect(host=host, user='root', password=password, port=port, db='traffic')
cursor = db.cursor()
sql = 'insert into date_constellation_info_wmy(`constellation`,`con_date`,`com_fortune_index`,`love_fortune_index`,' \
'`career_index`,`wealth_index`,`health_index`,`short_comment`,`com_fortune`,`love_fortune`,`career_fortune`,' \
'`wealth_fortune`,`health_fortune`,`reduced_pressure`,`get_luck_way`,`date_level`) values (%s,%s,%s,%s,%s,' \
'%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);'
cursor.execute(sql, data)
cursor.close()
db.commit()
db.close()
print("mysql-插入成功!\n")
# 星座年数据入库
def insert_data_cons_year(data):
db = pymysql.connect(host=host, user='root', password=password, port=port, db='traffic')
cursor = db.cursor()
sql = 'insert into date_constellation_info_wmy(`constellation`,`con_date`,`com_fortune_index`,`love_fortune_index`,' \
'`career_index`,`wealth_index`,`health_index`,`short_comment`,`com_fortune`,`love_fortune`,`career_fortune`,' \
'`wealth_fortune`,`health_fortune`,`get_luck_way`,`date_level`) values (%s,%s,%s,%s,%s,' \
'%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);'
cursor.execute(sql, data)
cursor.close()
db.commit()
db.close()
print("mysql-插入成功!\n")
# 星座详情码表入库
def insert_data_constellation_detail_info(data):
db = pymysql.connect(host=host, user='root', password=password, port=port,db='traffic')
cursor = db.cursor()
sql = 'insert into date_constellation_detail_info(`constellation`,`date_range`,`cons_features`,`four_image_attributes`,' \
'`palace`,`yin_yang_attributes`,`biggest_features`,`supervisor_plant`,`lucky_color`,`auspicious_items`,`lucky_number`,' \
'`lucky_metal`,`performance`,`advantage`,`disadvantage`,`basic_traits`,`specific_traits`,`acting_style`,`blind_spot`,' \
'`summary`) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);'
cursor.execute(sql, data)
cursor.close()
db.commit()
db.close()
print("mysql-插入成功!\n")
# 插入日历
def insert_data_calendar(data):
db = pymysql.connect(host=host, user='root', password=password, port=port, db='traffic')
cursor = db.cursor()
sql = 'insert into date_calendar(`y_date`,`lunar`,`week`,`solar_terms`,`gregorian_calendar`) values (%s,%s,%s,%s,%s);'
cursor.executemany(sql, data)
cursor.close()
db.commit()
db.close()
print("mysql-插入成功!\n")
# 插入当日的疫情状况 --- 国内
def insert_current_epidemic_internal(data):
db = pymysql.connect(host=host, user='root', password=password, port=port, db='epidemic')
cursor = db.cursor()
sql = 'REPLACE INTO epi_current_detail(`date_today`,`curr_time`,`existing_diagnosis`,`ed_compare_yesterday`,`asymptomatic`,' \
'`at_compare_yesterday`,`suspected`,`se_compare_yesterday`,`existing_critical_illness`, `eci_compare_yesterday`,' \
'`cumulative_diagnosis`,`cdi_compare_yesterday`,`import_abroadz`,`ia_compare_yesterday`,`cumulative_cure`,`cc_compare_yesterday`,' \
'`cumulative_deaths`,`cde_compare_yesterday`,`foreign_or_internal`,`create_time`,`update_time`)' \
'VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);'
cursor.execute(sql, data)
cursor.close()
db.commit()
db.close()
print("mysql-插入成功!\n")
# 插入当日的疫情状况 --- 国外
def insert_current_epidemic_foreign(data):
db = pymysql.connect(host=host, user='root', password=password, port=port, db='epidemic')
cursor = db.cursor()
sql = 'REPLACE INTO epi_current_detail(`date_today`,`curr_time`,`existing_diagnosis`,`ed_compare_yesterday`,' \
'`cumulative_diagnosis`,`cdi_compare_yesterday`,`cumulative_cure`,`cc_compare_yesterday`,' \
'`cumulative_deaths`,`cde_compare_yesterday`,`foreign_or_internal`,`create_time`,`update_time`)' \
'VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);'
cursor.execute(sql, data)
cursor.close()
db.commit()
db.close()
print("mysql-插入成功!\n")
# 插入国内疫情的历史数据
def insert_internal_province_data(data):
db = pymysql.connect(host=host, user='root', password=password, port=port, db='epidemic')
cursor = db.cursor()
# 省名称、市名称(省的话,就还是使用省名称)、日期、确诊(累计)人数、治愈人数、死亡人数、新增人数
sql = 'REPLACE INTO epi_internal(`date_today`,`province_name`,`city_name`,`cumulative_diagnosis`,' \
'`cumulative_cure`,`cumulative_deaths`,`new_add`,`existing_diagnosis`,`create_time`,`update_time`) ' \
'VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);'
cursor.executemany(sql, data)
cursor.close()
db.commit()
db.close()
print("mysql-插入成功!\n")
# 插入国外疫情的历史数据
def insert_foreign_data(data):
db = pymysql.connect(host=host, user='root', password=password, port=port, db='epidemic')
cursor = db.cursor()
# 国家名称、日期、确诊(累计)人数、治愈人数、死亡人数、新增人数
sql = 'REPLACE INTO epi_foreign(`date_today`,`country_name`,`cumulative_diagnosis`,' \
'`cumulative_cure`,`cumulative_deaths`,`new_add`,`existing_diagnosis`,`create_time`,`update_time`) ' \
'VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s);'
cursor.executemany(sql, data)
cursor.close()
db.commit()
db.close()
print("mysql-插入成功!\n")
# 插入国内省市的当日数据疫情的数据
def insert_internal_cur_day_data(data):
db = pymysql.connect(host=host, user='root', password=password, port=port,db='epidemic')
cursor = db.cursor()
# 省名称、市名称(省的话,就还是使用省名称)、日期、确诊(累计)人数、治愈人数、死亡人数、新增人数
# 省名称、市名称(省的话,就还是使用省名称)、日期、确诊(累计)人数、治愈人数、死亡人数、新增人数
sql = 'REPLACE INTO epi_internal(`date_today`,`province_name`,`city_name`,`new_add`,' \
'`existing_diagnosis`,`cumulative_diagnosis`,`cumulative_cure`,`cumulative_deaths`,`create_time`,`update_time`) ' \
'VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);'
cursor.executemany(sql, data)
cursor.close()
db.commit()
db.close()
print("mysql-插入成功--国内省数据!\n")
# 将每日数据前面添加一个area_id
def insert_internal_cur_day_data_add_areaId():
db = pymysql.connect(host=host, user='root', password=password, port=port, db='epidemic')
cursor = db.cursor()
# 省名称、市名称(省的话,就还是使用省名称)、日期、确诊(累计)人数、治愈人数、死亡人数、新增人数
# 省名称、市名称(省的话,就还是使用省名称)、日期、确诊(累计)人数、治愈人数、死亡人数、新增人数
sql = 'REPLACE INTO epi_internal_dim ( `area_id`, `date_today`, `province_name`, `city_name`, `new_add`, ' \
'`existing_diagnosis`, `cumulative_diagnosis`, `cumulative_cure`, `cumulative_deaths`, `create_time`,' \
' `update_time` ) SELECT dim.area_id, epi.date_today, epi.province_name, epi.city_name, epi.new_add,' \
' epi.existing_diagnosis, epi.cumulative_diagnosis, epi.cumulative_cure, epi.cumulative_deaths, epi.create_time,' \
' epi.update_time' \
' FROM epi_internal epi LEFT JOIN pro_city_area_dim dim ' \
'ON epi.province_name = dim.province AND epi.city_name = dim.area'
cursor.execute(sql)
cursor.close()
db.commit()
db.close()
print("mysql-插入成功!\n")
# 插入疫情小区数据
def insert_community_data(data):
db = pymysql.connect(host=host, user='root', password=password, port=port, db='epidemic')
cursor = db.cursor()
sql = 'REPLACE INTO epi_community(`date_today`,`province`,`city`,`district`,`street`,`middle_address`,`community`,' \
'`show_address`,`full_address`,`lng`,`lat`,`cnt_sum_certain`,`release_date`,`create_time`,`update_time`,`location`) ' \
'VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,ST_GEOMFROMTEXT (%s));'
cursor.executemany(sql, data)
cursor.close()
db.commit()
db.close()
print("mysql-插入成功!\n")
# 插入境外输入的数据
def insert_import_abroad(data):
db = pymysql.connect(host=host, user='root', password=password, port=port, db='epidemic')
cursor = db.cursor()
# 省名称、境外输入、日期、确诊(累计)人数、治愈人数、死亡人数、新增人数
sql = 'REPLACE INTO epi_import_abroad(`date_today`,`province_name`,`class_name`,`new_add`,' \
'`existing_diagnosis`,`cumulative_diagnosis`,`cumulative_cure`,`cumulative_deaths`,`create_time`,`update_time`) ' \
'VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);'
cursor.executemany(sql, data)
cursor.close()
db.commit()
db.close()
print("mysql-插入成功--境外输入数据!\n")
# 获取县区的信息
def select_area():
db = pymysql.connect(host=host, user='root', password=password, port=port, db='epidemic')
cursor = db.cursor()
# 省名称、境外输入、日期、确诊(累计)人数、治愈人数、死亡人数、新增人数
sql = 'SELECT area from epidemic.pro_city_area_dim;'
cursor.execute(sql)
result = cursor.fetchall()
cursor.close()
db.commit()
db.close()
return result
| 42.106618 | 163 | 0.660962 | 1,594 | 11,453 | 4.547051 | 0.156838 | 0.050221 | 0.069536 | 0.084989 | 0.725028 | 0.719509 | 0.711783 | 0.701297 | 0.701297 | 0.701297 | 0 | 0.003581 | 0.146686 | 11,453 | 271 | 164 | 42.261993 | 0.737952 | 0.079542 | 0 | 0.671717 | 0 | 0.080808 | 0.488005 | 0.370526 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085859 | false | 0.090909 | 0.020202 | 0 | 0.116162 | 0.075758 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 6 |
bbdae996f3007726383873c02bb57a68c0da2d21 | 15,634 | py | Python | src/unittest/python/test_progressbar.py | soda480/progress1bar | 51afa33d14c17a3d1df674363573aa7daa774ab3 | [
"Apache-2.0"
] | null | null | null | src/unittest/python/test_progressbar.py | soda480/progress1bar | 51afa33d14c17a3d1df674363573aa7daa774ab3 | [
"Apache-2.0"
] | 2 | 2021-09-10T18:56:33.000Z | 2021-10-03T03:53:18.000Z | src/unittest/python/test_progressbar.py | soda480/progress1bar | 51afa33d14c17a3d1df674363573aa7daa774ab3 | [
"Apache-2.0"
] | null | null | null | import string
import unittest
from mock import patch
from mock import call
from mock import Mock
from mock import MagicMock
from progress1bar import ProgressBar
from progress1bar.progressbar import FILL
from progress1bar.progressbar import ALIAS_WIDTH
import sys
import logging
logger = logging.getLogger(__name__)
class TestProgressBar(unittest.TestCase):
def remove_non_printable(self, item):
""" remove non printable characters from item and return
"""
return ''.join(char for char in item if char not in string.printable)
    def setUp(self):
        """No per-test fixtures are required; kept as an explicit no-op."""
        pass
    def tearDown(self):
        """No per-test cleanup is required; kept as an explicit no-op."""
        pass
@patch('progress1bar.progressbar.colorama_init')
@patch('progress1bar.progressbar.ProgressBar._get_fill')
def test__init_Should_SetDefaults_When_Called(self, get_fill_patch, *patches):
pbar = ProgressBar(aware=False, index=0)
self.assertEqual(pbar.index, 0)
self.assertEqual(pbar.regex, {})
self.assertIsNone(pbar.completed_message)
self.assertEqual(pbar._complete, False)
self.assertEqual(pbar._completed, 0)
self.assertEqual(pbar.show_completed, False)
self.assertIsNone(pbar.duration)
self.assertEqual(pbar.alias, '')
self.assertIsNone(pbar.total)
self.assertEqual(pbar._modulus_count, 0)
self.assertEqual(pbar._reset, 0)
self.assertEqual(pbar.fill, get_fill_patch.return_value)
@patch('progress1bar.progressbar.colorama_init')
@patch('progress1bar.progressbar.ProgressBar._get_fill')
def test__init_Should_SetDefaults_When_AttributesPassed(self, get_fill_patch, *patches):
pbar = ProgressBar(aware=False, index=0, total=100, regex={'key', 'value'})
self.assertEqual(pbar.index, 0)
self.assertEqual(pbar.regex, {'key', 'value'})
self.assertIsNone(pbar.completed_message)
self.assertEqual(pbar._complete, False)
self.assertEqual(pbar._completed, 0)
self.assertEqual(pbar.show_completed, False)
self.assertIsNone(pbar.duration)
self.assertEqual(pbar.alias, '')
self.assertEqual(pbar.total, 100)
self.assertEqual(pbar._modulus_count, 0)
self.assertEqual(pbar._reset, 0)
self.assertEqual(pbar.fill, get_fill_patch.return_value)
@patch('progress1bar.progressbar.colorama_init')
@patch('progress1bar.progressbar.ProgressBar._get_progress')
def test__str_Should_ReturnExpected_When_Index(self, get_progress_patch, *patches):
get_progress_patch.return_value = 'progress'
pbar = ProgressBar(aware=False, index=0)
result = str(pbar)
self.assertEqual(result, '\x1b[1m\x1b[33m\x1b[40m00\x1b[0m: progress \x1b[1m\x1b[33m\x1b[40m\x1b[0m')
@patch('progress1bar.progressbar.colorama_init')
@patch('progress1bar.progressbar.ProgressBar._get_progress')
def test__str_Should_ReturnExpected_When_NoIndex(self, get_progress_patch, *patches):
get_progress_patch.return_value = 'progress'
pbar = ProgressBar(aware=False, )
result = str(pbar)
self.assertEqual(result, 'progress \x1b[1m\x1b[33m\x1b[40m\x1b[0m')
    @patch('progress1bar.progressbar.colorama_init')
    @patch('progress1bar.progressbar.ProgressBar._get_progress')
    def test__str_Should_ReturnExpected_When_ShowCompleted(self, get_progress_patch, *patches):
        # NOTE(review): this test asserts nothing — it only verifies that
        # str() does not raise. The commented-out assertEqual below contains a
        # truncated unittest diff ("[62 chars]") pasted from a failure, not a
        # usable expected value; reconstruct the full expected string and
        # restore the assertion.
        get_progress_patch.return_value = 'Processing complete'
        pbar = ProgressBar(aware=False, index=0)
        pbar._completed = 12
        pbar.show_completed = True
        str(pbar)
        # self.assertEqual(result, '\x1b[1m\x1b[33m\x1b[40m00\x1b[0m: Processing c[62 chars]b[0m')
@patch('progress1bar.progressbar.colorama_init')
def test__setattr_Should_SetExpected_When_CountAndTotal(self, *patches):
pbar = ProgressBar(aware=False, index=0)
pbar.total = 100
pbar.count = 10
self.assertEqual(pbar._modulus_count, 5)
@patch('progress1bar.progressbar.colorama_init')
def test__setattr_Should_SetExpected_When_TotalIsNone(self, *patches):
pbar = ProgressBar(aware=False, index=0)
pbar.count = 10
self.assertEqual(pbar._modulus_count, 0)
@patch('progress1bar.progressbar.colorama_init')
def test__setattr_Should_SetExpected_When_TotalIsZero(self, *patches):
pbar = ProgressBar(aware=False, index=0)
pbar.total = 0
self.assertEqual(pbar._complete, True)
@patch('progress1bar.progressbar.colorama_init')
@patch('progress1bar.progressbar.ProgressBar._match_count', return_value=True)
@patch('progress1bar.progressbar.ProgressBar._match_alias', return_value=False)
@patch('progress1bar.progressbar.ProgressBar._match_total', return_value=False)
def test__match_Should_CallExpected_When_Called(self, match_total_patch, match_alias_patch, match_count_patch, *patches):
pbar = ProgressBar(aware=False, index=0)
text = '--some-text--'
pbar.match(text)
match_total_patch.assert_called_once_with(text)
match_alias_patch.assert_called_once_with(text)
match_count_patch.assert_called_once_with(text)
@patch('progress1bar.progressbar.colorama_init')
@patch('progress1bar.progressbar.ProgressBar._match_count', return_value=False)
@patch('progress1bar.progressbar.ProgressBar._match_alias', return_value=False)
@patch('progress1bar.progressbar.ProgressBar._match_total', return_value=False)
def test__match_Should_CallExpected_When_CalledNoMatch(self, match_total_patch, match_alias_patch, match_count_patch, *patches):
pbar = ProgressBar(aware=False, index=0)
text = '--some-text--'
pbar.match(text)
match_total_patch.assert_called_once_with(text)
match_alias_patch.assert_called_once_with(text)
match_count_patch.assert_called_once_with(text)
@patch('progress1bar.progressbar.colorama_init')
def test__match_total_Should_ReturnMatchAndSetExpected_When_TotalIsNoneAndMatch(self, *patches):
pbar = ProgressBar(aware=False, index=0, regex={'total': r'^total is: (?P<value>\d+)$'})
text = 'total is: 100'
result = pbar._match_total(text)
self.assertEqual(pbar.total, 100)
self.assertIsNotNone(result)
@patch('progress1bar.progressbar.colorama_init')
def test__match_total_Should_ReturnNone_When_TotalIsSet(self, *patches):
pbar = ProgressBar(aware=False, index=0, regex={'total': r'^total is: (?P<value>\d+)$'})
text = 'total is: 100'
pbar.total = 50
result = pbar._match_total(text)
self.assertIsNone(result)
@patch('progress1bar.progressbar.colorama_init')
def test__match_total_Should_ReturnNone_When_TotalIsNoneAndNoRegex(self, *patches):
pbar = ProgressBar(aware=False, index=0)
text = 'total is: 100'
result = pbar._match_total(text)
self.assertIsNone(result)
@patch('progress1bar.progressbar.colorama_init')
def test__match_total_Should_ReturnNone_When_TotalIsNoneAndNoMatch(self, *patches):
pbar = ProgressBar(aware=False, index=0, regex={'total': r'^total is: (?P<value>\d+)$'})
text = 'count is: 100'
result = pbar._match_total(text)
self.assertIsNone(result)
@patch('progress1bar.progressbar.colorama_init')
def test__match_alias_Should_ReturnMatchAndSetExpected_When_RegexMatchGreaterThanWidth(self, *patches):
pbar = ProgressBar(aware=False, index=0, regex={'alias': r'^id is: (?P<value>.*)$'})
long_id = 'a' * (ALIAS_WIDTH + 10)
text = f'id is: {long_id}'
result = pbar._match_alias(text)
self.assertEqual(pbar.alias, f'{long_id[0:ALIAS_WIDTH - 3]}...')
self.assertIsNotNone(result)
@patch('progress1bar.progressbar.colorama_init')
def test__match_alias_Should_ReturnMatchAndSetExpected_When_RegexMatch(self, *patches):
pbar = ProgressBar(aware=False, index=0, regex={'alias': r'^id is: (?P<value>.*)$'})
text = 'id is: abc123'
result = pbar._match_alias(text)
self.assertEqual(pbar.alias, 'abc123')
self.assertIsNotNone(result)
@patch('progress1bar.progressbar.colorama_init')
def test__match_alias_Should_ReturnNone_When_NoRegex(self, *patches):
pbar = ProgressBar(aware=False, index=0)
text = 'id is: abc'
result = pbar._match_alias(text)
self.assertIsNone(result)
@patch('progress1bar.progressbar.colorama_init')
def test__match_alias_Should_ReturnNone_When_NoRegexMatch(self, *patches):
pbar = ProgressBar(aware=False, index=0, regex={'alias': r'^id is: (?P<value>.*)$'})
text = 'total is: 100'
result = pbar._match_alias(text)
self.assertIsNone(result)
@patch('progress1bar.progressbar.colorama_init')
def test__match_count_ShouldReturnMatchAndSetExpected_When_RegexMatch(self, *patches):
pbar = ProgressBar(aware=False, index=0, regex={'count': r'processed item'})
pbar.total = 100
text = 'processed item'
result = pbar._match_count(text)
self.assertEqual(pbar.count, 1)
self.assertIsNotNone(result)
@patch('progress1bar.progressbar.colorama_init')
def test__match_count_ShouldReturnNone_When_NoRegex(self, *patches):
pbar = ProgressBar(aware=False, index=0)
pbar.total = 100
text = 'processed item'
result = pbar._match_count(text)
self.assertEqual(pbar.count, 0)
self.assertIsNone(result)
@patch('progress1bar.progressbar.colorama_init')
def test__match_count_ShouldReturnNone_When_NoRegexMatch(self, *patches):
pbar = ProgressBar(aware=False, index=0, regex={'count': r'processed widget'})
pbar.total = 100
pbar.count = 10
text = 'processed item'
result = pbar._match_count(text)
self.assertEqual(pbar.count, 10)
self.assertIsNone(result)
@patch('progress1bar.progressbar.colorama_init')
def test__get_complete_Should_ReturnExpected_When_MessageAndDuration(self, *patches):
pbar = ProgressBar(aware=False, index=0)
pbar.completed_message = 'All done'
pbar.duration = '01:23:45'
result = pbar._get_complete()
expected_result = 'All done - 01:23:45'
self.assertEqual(result, expected_result)
@patch('progress1bar.progressbar.colorama_init')
def test__get_complete_Should_ReturnExpected_When_NoMessageAndDuration(self, *patches):
pbar = ProgressBar(aware=False, index=0)
pbar.duration = '01:23:45'
result = pbar._get_complete()
expected_result = 'Processing complete - 01:23:45'
self.assertEqual(result, expected_result)
@patch('progress1bar.progressbar.colorama_init')
def test__get_complete_Should_ReturnExpected_When_NoMessageAndNoDuration(self, *patches):
pbar = ProgressBar(aware=False, index=0)
result = pbar._get_complete()
expected_result = 'Processing complete'
self.assertEqual(result, expected_result)
@patch('progress1bar.progressbar.colorama_init')
@patch('progress1bar.progressbar.ProgressBar._get_complete')
def test__get_progress_Should_ReturnExpected_When_Complete(self, get_complete_patch, *patches):
pbar = ProgressBar(aware=False, index=0)
pbar._complete = True
result = pbar._get_progress()
self.assertEqual(result, get_complete_patch.return_value)
@patch('progress1bar.progressbar.colorama_init')
def test__get_progress_Should_ReturnExpected_When_NotCompleteNoTotal(self, *patches):
pbar = ProgressBar(aware=False, index=0)
result = pbar._get_progress()
self.assertTrue('##/##' in result)
@patch('progress1bar.progressbar.colorama_init')
def test__get_progress_Should_ReturnExpected_When_NotCompleteAndTotal(self, *patches):
pbar = ProgressBar(aware=False, index=0)
pbar.total = 100
pbar.count = 50
result = pbar._get_progress()
self.assertTrue('50%' in result)
@patch('progress1bar.progressbar.colorama_init')
def test__get_progress_Should_ReturnExpected_When_NotCompleteAndCountIsTotal(self, *patches):
pbar = ProgressBar(aware=False, index=0)
pbar.total = 100
pbar.count = 100
result = pbar._get_progress()
self.assertTrue('100%' in result)
@patch('progress1bar.progressbar.colorama_init')
def test__reset_Should_SetExpected_When_Called(self, *patches):
pbar = ProgressBar(aware=False, index=0)
pbar.reset()
pbar.reset()
self.assertEqual(pbar._reset, 2)
def test__get_fill_Should_ReturnExpected_When_NoData(self, *patches):
result = ProgressBar._get_fill(None)
expected_result = {'total': FILL, 'index': FILL, 'completed': FILL}
self.assertEqual(result, expected_result)
def test__get_fill_Should_ReturnExpected_When_Data(self, *patches):
result = ProgressBar._get_fill({'max_index': 203, 'max_total': 10000, 'max_completed': 12})
expected_result = {'total': 5, 'index': 3, 'completed': 2}
self.assertEqual(result, expected_result)
@patch('progress1bar.progressbar.cursor')
@patch('progress1bar.progressbar.sys.stderr')
def test__enter_exit_Should_HideAndShowCursor_When_AwareAndTty(self, stderr_patch, cursor_patch, *patches):
stderr_patch.isatty.return_value = True
with ProgressBar() as pb:
cursor_patch.hide.assert_called_once_with()
self.assertTrue(pb.aware)
cursor_patch.show.assert_called_once_with()
@patch('progress1bar.progressbar.cursor')
@patch('progress1bar.progressbar.sys.stderr')
def test__enter_exit_Should_NotHideOrShowCursor_When_NotTty(self, stderr_patch, cursor_patch, *patches):
stderr_patch.isatty.return_value = False
with ProgressBar() as pb:
cursor_patch.hide.assert_not_called()
self.assertTrue(pb.aware)
cursor_patch.show.assert_not_called()
@patch('progress1bar.ProgressBar._print')
@patch('progress1bar.progressbar.cursor')
@patch('progress1bar.progressbar.sys.stderr')
def test__enter_exit_Should_ClearAlias_When_ClearAlias(self, stderr_patch, cursor_patch, *patches):
stderr_patch.isatty.return_value = True
with ProgressBar(clear_alias=True) as pb:
pb.alias = 'something'
self.assertEqual(pb.alias, '')
@patch('progress1bar.progressbar.sys.stderr')
def test__print_Should_Return_When_NoTty(self, stderr_patch, *patches):
stderr_patch.isatty.return_value = False
pb = ProgressBar(aware=False)
pb._print('total')
stderr_patch.flush.assert_not_called()
    @patch('builtins.print')
    @patch('progress1bar.progressbar.sys.stderr')
    def test__print_Should_CallExpected_When_TtyNoClear(self, stderr_patch, print_patch, *patches):
        # When attached to a tty and not clearing, _print should flush stderr
        # once and emit exactly one print call.
        stderr_patch.isatty.return_value = True
        pb = ProgressBar(aware=False)
        pb.aware = True
        # NOTE(review): this assigns 0 over the bound `reset()` method (which
        # other tests call as pb.reset()) — confirm _print really reads an
        # attribute named `reset`, or this line shadows the method by accident.
        pb.reset = 0
        pb._print(False)
        stderr_patch.flush.assert_called_once_with()
        self.assertEqual(len(print_patch.mock_calls), 1)
@patch('builtins.print')
@patch('progress1bar.progressbar.sys.stderr')
def test__print_Should_CallExpected_When_TtyAndClear(self, stderr_patch, print_patch, *patches):
stderr_patch.isatty.return_value = True
pb = ProgressBar(aware=False)
pb.aware = True
pb._print(True)
stderr_patch.flush.assert_called_once_with()
self.assertEqual(len(print_patch.mock_calls), 2)
| 44.541311 | 132 | 0.706153 | 1,823 | 15,634 | 5.771256 | 0.095447 | 0.115864 | 0.135729 | 0.09923 | 0.836042 | 0.826632 | 0.80116 | 0.787853 | 0.74185 | 0.691474 | 0 | 0.020151 | 0.184214 | 15,634 | 350 | 133 | 44.668571 | 0.804767 | 0.01081 | 0 | 0.615385 | 0 | 0.003344 | 0.182831 | 0.138353 | 0 | 0 | 0 | 0 | 0.247492 | 1 | 0.133779 | false | 0.010033 | 0.036789 | 0 | 0.177258 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
a562d2ccc5495d7c1b6dad75fe9283414fbdc955 | 91 | py | Python | sagas/modules/biz/agent_party.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
] | 3 | 2020-01-11T13:55:38.000Z | 2020-08-25T22:34:15.000Z | sagas/modules/biz/agent_party.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
] | null | null | null | sagas/modules/biz/agent_party.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
] | 1 | 2021-01-01T05:21:44.000Z | 2021-01-01T05:21:44.000Z | from sagas.nlu.warehouse_bucket import AnalBucket
class PartyAgent(AnalBucket):
    """Warehouse analysis bucket for party/agent entities.

    Adds no behavior of its own; everything is inherited from AnalBucket.
    """
    pass
| 15.166667 | 49 | 0.802198 | 11 | 91 | 6.545455 | 0.909091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 91 | 5 | 50 | 18.2 | 0.923077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
a56f84c6467b121d2eee632e6f74de4625800447 | 13,224 | py | Python | test/test_plants/test_plant_costs/test_modern_plants_estimations/test_predictPlantParameters.py | alexanderkell/elecsim | 35e400809759a8e9a9baa3776344e383b13d8c54 | [
"MIT"
] | 18 | 2019-01-18T21:41:49.000Z | 2022-02-14T15:49:40.000Z | test/test_plants/test_plant_costs/test_modern_plants_estimations/test_predictPlantParameters.py | alexanderkell/elecsim | 35e400809759a8e9a9baa3776344e383b13d8c54 | [
"MIT"
] | 40 | 2020-01-28T22:37:53.000Z | 2022-03-12T01:00:07.000Z | test/test_plants/test_plant_costs/test_modern_plants_estimations/test_predictPlantParameters.py | alexanderkell/elecsim | 35e400809759a8e9a9baa3776344e383b13d8c54 | [
"MIT"
] | 3 | 2020-08-03T16:45:54.000Z | 2021-08-04T07:45:16.000Z | '''
File name: test_predictPlantStatistics
Date created: 27/11/2018
Feature: #Enter feature description here
'''
from unittest import TestCase
import pytest
from elecsim.plants.plant_costs.estimate_costs.estimate_modern_power_plant_costs.predict_modern_plant_costs import \
PredictModernPlantParameters
__author__ = "Alexander Kell"
__copyright__ = "Copyright 2018, Alexander Kell"
__license__ = "MIT"
__email__ = "alexander@kell.es"
class TestPredictPlantParameters(TestCase):
def test_parameter_estimation_for_ccgt_1200(self):
estimated_plant_parameters = PredictModernPlantParameters("CCGT", 1200, 2018).parameter_estimation()
assert estimated_plant_parameters['connection_cost_per_mw'] == 3300
assert estimated_plant_parameters['construction_cost_per_mw'] == 500000
assert estimated_plant_parameters['fixed_o_and_m_per_mw'] == 12200
assert estimated_plant_parameters['infrastructure'] == 15100
assert estimated_plant_parameters['insurance_cost_per_mw'] == 2100
assert estimated_plant_parameters['pre_dev_cost_per_mw'] == 10000
assert estimated_plant_parameters['variable_o_and_m_per_mwh'] == 3.00
assert estimated_plant_parameters['pre_dev_period'] == 3
assert estimated_plant_parameters['operating_period'] == 25
assert estimated_plant_parameters['construction_period'] == 3
assert estimated_plant_parameters['efficiency'] == 0.54
assert estimated_plant_parameters['average_load_factor'] == 0.93
assert estimated_plant_parameters['construction_spend_years'] == [0.4, 0.4, 0.2]
assert estimated_plant_parameters['pre_dev_spend_years'] == [0.44, 0.44, 0.12]
def test_parameter_estimation_for_ccgt_1335_5(self):
estimated_plant_parameters = PredictModernPlantParameters("CCGT", 1335.5, 2018).parameter_estimation()
assert estimated_plant_parameters['connection_cost_per_mw'] == 3300
assert estimated_plant_parameters['construction_cost_per_mw'] == 500000
assert estimated_plant_parameters['fixed_o_and_m_per_mw'] == 11800
assert estimated_plant_parameters['infrastructure'] == 15100
assert estimated_plant_parameters['insurance_cost_per_mw'] == 2000
assert estimated_plant_parameters['pre_dev_cost_per_mw'] == 10000
assert estimated_plant_parameters['variable_o_and_m_per_mwh'] == 3.00
assert estimated_plant_parameters['pre_dev_period'] == 3
assert estimated_plant_parameters['operating_period'] == 25
assert estimated_plant_parameters['construction_period'] == 3
assert estimated_plant_parameters['efficiency'] == 0.54
assert estimated_plant_parameters['average_load_factor'] == 0.93
assert estimated_plant_parameters['construction_spend_years'] == [0.4, 0.4, 0.2]
assert estimated_plant_parameters['pre_dev_spend_years'] == [0.44, 0.44, 0.12]
    def setup_method(self, module):
        """Create the stub cost-parameter column-name prefixes shared by the
        ``_create_parameter_names`` tests; a year suffix is appended to each.

        NOTE(review): ``setup_method`` is the pytest-style hook, but this class
        derives from unittest.TestCase, whose runner calls ``setUp()`` only —
        confirm the test runner actually invokes this so the attribute exists.
        """
        self.initial_stub_cost_parameters = ['Connect_system_cost-Medium _', 'Constr_cost-Medium _',
                                             'Fixed_cost-Medium _',
                                             'Infra_cost-Medium _', 'Insurance_cost-Medium _', 'Pre_dev_cost-Medium _',
                                             'Var_cost-Medium _']
def test_creation_of_parameter_names_2018(self):
predict_plant = PredictModernPlantParameters("CCGT", 1200, 2018)
cost_parameter_variables = predict_plant._create_parameter_names(self.initial_stub_cost_parameters)
assert cost_parameter_variables == ['Connect_system_cost-Medium _2018', 'Constr_cost-Medium _2018',
'Fixed_cost-Medium _2018',
'Infra_cost-Medium _2018', 'Insurance_cost-Medium _2018',
'Pre_dev_cost-Medium _2018',
'Var_cost-Medium _2018']
def test_creation_of_parameter_names_2019(self):
predict_plant = PredictModernPlantParameters("CCGT", 1200, 2019)
cost_parameter_variables = predict_plant._create_parameter_names(self.initial_stub_cost_parameters)
assert cost_parameter_variables == ['Connect_system_cost-Medium _2018', 'Constr_cost-Medium _2018',
'Fixed_cost-Medium _2018',
'Infra_cost-Medium _2018', 'Insurance_cost-Medium _2018',
'Pre_dev_cost-Medium _2018',
'Var_cost-Medium _2018']
def test_creation_of_parameter_names_2020(self):
PredictPlant = PredictModernPlantParameters("CCGT", 1200, 2020)
cost_parameter_variables = PredictPlant._create_parameter_names(self.initial_stub_cost_parameters)
assert cost_parameter_variables == ['Connect_system_cost-Medium _2020', 'Constr_cost-Medium _2020',
'Fixed_cost-Medium _2020',
'Infra_cost-Medium _2020', 'Insurance_cost-Medium _2020',
'Pre_dev_cost-Medium _2020',
'Var_cost-Medium _2020']
def test_creation_of_parameter_names_2021(self):
PredictPlant = PredictModernPlantParameters("CCGT", 1200, 2021)
cost_parameter_variables = PredictPlant._create_parameter_names(self.initial_stub_cost_parameters)
assert cost_parameter_variables == ['Connect_system_cost-Medium _2020', 'Constr_cost-Medium _2020',
'Fixed_cost-Medium _2020',
'Infra_cost-Medium _2020', 'Insurance_cost-Medium _2020',
'Pre_dev_cost-Medium _2020',
'Var_cost-Medium _2020']
def test_creation_of_parameter_names_2022(self):
PredictPlant = PredictModernPlantParameters("CCGT", 1200, 2022)
cost_parameter_variables = PredictPlant._create_parameter_names(self.initial_stub_cost_parameters)
assert cost_parameter_variables == ['Connect_system_cost-Medium _2020', 'Constr_cost-Medium _2020',
'Fixed_cost-Medium _2020',
'Infra_cost-Medium _2020', 'Insurance_cost-Medium _2020',
'Pre_dev_cost-Medium _2020',
'Var_cost-Medium _2020']
def test_creation_of_parameter_names_2023(self):
PredictPlant = PredictModernPlantParameters("CCGT", 1200, 2023)
cost_parameter_variables = PredictPlant._create_parameter_names(self.initial_stub_cost_parameters)
assert cost_parameter_variables == ['Connect_system_cost-Medium _2020', 'Constr_cost-Medium _2020',
'Fixed_cost-Medium _2020',
'Infra_cost-Medium _2020', 'Insurance_cost-Medium _2020',
'Pre_dev_cost-Medium _2020',
'Var_cost-Medium _2020']
def test_creation_of_parameter_names_2024(self):
PredictPlant = PredictModernPlantParameters("CCGT", 1200, 2024)
cost_parameter_variables = PredictPlant._create_parameter_names(self.initial_stub_cost_parameters)
assert cost_parameter_variables == ['Connect_system_cost-Medium _2020', 'Constr_cost-Medium _2020',
'Fixed_cost-Medium _2020',
'Infra_cost-Medium _2020', 'Insurance_cost-Medium _2020',
'Pre_dev_cost-Medium _2020',
'Var_cost-Medium _2020']
def test_creation_of_parameter_names_2025(self):
PredictPlant = PredictModernPlantParameters("CCGT", 1200, 2025)
cost_parameter_variables = PredictPlant._create_parameter_names(self.initial_stub_cost_parameters)
assert cost_parameter_variables == ['Connect_system_cost-Medium _2025', 'Constr_cost-Medium _2025',
'Fixed_cost-Medium _2025',
'Infra_cost-Medium _2025', 'Insurance_cost-Medium _2025',
'Pre_dev_cost-Medium _2025',
'Var_cost-Medium _2025']
def test_creation_of_parameter_names_high_year(self):
PredictPlant = PredictModernPlantParameters("CCGT", 1200, 200000)
cost_parameter_variables = PredictPlant._create_parameter_names(self.initial_stub_cost_parameters)
assert cost_parameter_variables == ['Connect_system_cost-Medium _2025', 'Constr_cost-Medium _2025',
'Fixed_cost-Medium _2025',
'Infra_cost-Medium _2025', 'Insurance_cost-Medium _2025',
'Pre_dev_cost-Medium _2025',
'Var_cost-Medium _2025']
def test_creation_of_parameter_names_low_year(self):
PredictPlant = PredictModernPlantParameters("CCGT", 1200, 0)
cost_parameter_variables = PredictPlant._create_parameter_names(self.initial_stub_cost_parameters)
assert cost_parameter_variables == ['Connect_system_cost-Medium _2018', 'Constr_cost-Medium _2018',
'Fixed_cost-Medium _2018',
'Infra_cost-Medium _2018', 'Insurance_cost-Medium _2018',
'Pre_dev_cost-Medium _2018',
'Var_cost-Medium _2018']
def test_check_plant_exists_fails_with_no_data(self):
with pytest.raises(ValueError) as excinfo:
PredictModernPlantParameters("Fake_Plant", 1200, 2018).check_plant_exists(
{'connection_cost_per_mw': 0, 'construction_cost_per_mw': 0, 'fixed_o_and_m_per_mw': 0,
'infrastructure': 0, 'insurance_cost_per_mw': 0, 'pre_dev_cost_per_mw': 0,
'variable_o_and_m_per_mwh': 0, 'pre_dev_period': 0, 'operating_period': 0, 'construction_period': 0,
'efficiency': 0, 'average_load_factor': 0, 'construction_spend_years': 0, 'pre_dev_spend_years': 0})
assert "No cost data for power plant of type: Fake_Plant" in str(excinfo.value)
def test_check_plant_exists_with_data(self):
PredictModernPlantParameters("Fake_Plant", 1200, 2018).check_plant_exists(
{'connection_cost_per_mw': 100, 'construction_cost_per_mw': 100, 'fixed_o_and_m_per_mw': 100,
'infrastructure': 100, 'insurance_cost_per_mw': 100, 'pre_dev_cost_per_mw': 100,
'variable_o_and_m_per_mwh': 100, 'pre_dev_period': 100, 'operating_period': 100, 'construction_period': 100,
'efficiency': 100, 'average_load_factor': 100, 'construction_spend_years': 100, 'pre_dev_spend_years': 100})
def test_estimate_non_interpolatable_parameters_for_ccgt_1200(self):
predict_modern_parameters = PredictModernPlantParameters("CCGT", 1200, 2018)
assert predict_modern_parameters._estimate_non_interpolatable_parameters("Pre_Dur") == 3
assert predict_modern_parameters._estimate_non_interpolatable_parameters("Operating_Period") ==25
assert predict_modern_parameters._estimate_non_interpolatable_parameters("Constr_Dur") == 3
assert predict_modern_parameters._estimate_non_interpolatable_parameters("Efficiency") == 0.54
assert predict_modern_parameters._estimate_non_interpolatable_parameters("Average_Load_Factor") == 0.93
def test_estimate_non_interpolatable_parameters_for_ccgt_1450(self):
predict_modern_parameters = PredictModernPlantParameters("CCGT", 1450, 2018)
assert predict_modern_parameters._estimate_non_interpolatable_parameters("Pre_Dur") == 3
assert predict_modern_parameters._estimate_non_interpolatable_parameters("Operating_Period") ==25
assert predict_modern_parameters._estimate_non_interpolatable_parameters("Constr_Dur") == 3
assert predict_modern_parameters._estimate_non_interpolatable_parameters("Efficiency") == 0.53
assert predict_modern_parameters._estimate_non_interpolatable_parameters("Average_Load_Factor") == 0.93
def test_payment_spread_estimator_for_ccgt_1200(self):
predict_modern_parameters = PredictModernPlantParameters("CCGT", 1200, 2018)
assert predict_modern_parameters._payment_spread_estimator("Constr") == [0.4, 0.4, 0.2]
assert predict_modern_parameters._payment_spread_estimator("Pre") == [0.44, 0.44, 0.12]
def test_payment_spread_estimator_for_ccgt_160(self):
predict_modern_parameters = PredictModernPlantParameters("CCGT", 160, 2018)
assert predict_modern_parameters._payment_spread_estimator("Constr") == [0.4, 0.4, 0.2]
assert predict_modern_parameters._payment_spread_estimator("Pre") == [0.435, 0.435, 0.13]
| 63.884058 | 125 | 0.657365 | 1,397 | 13,224 | 5.711525 | 0.103794 | 0.096503 | 0.061411 | 0.105276 | 0.857125 | 0.802858 | 0.726156 | 0.719764 | 0.706856 | 0.706856 | 0 | 0.072971 | 0.261116 | 13,224 | 206 | 126 | 64.194175 | 0.743629 | 0.007864 | 0 | 0.604938 | 0 | 0 | 0.251316 | 0.074518 | 0 | 0 | 0 | 0 | 0.32716 | 1 | 0.117284 | false | 0 | 0.018519 | 0 | 0.141975 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
a5a8613f8018f051078f5ebeb02f0bd015da5082 | 8,084 | py | Python | devilry/devilry_cradmin/tests/test_devilry_listbuilder/test_period.py | devilry/devilry-django | 9ae28e462dfa4cfee966ebacbca04ade9627e715 | [
"BSD-3-Clause"
] | 29 | 2015-01-18T22:56:23.000Z | 2020-11-10T21:28:27.000Z | devilry/devilry_cradmin/tests/test_devilry_listbuilder/test_period.py | devilry/devilry-django | 9ae28e462dfa4cfee966ebacbca04ade9627e715 | [
"BSD-3-Clause"
] | 786 | 2015-01-06T16:10:18.000Z | 2022-03-16T11:10:50.000Z | devilry/devilry_cradmin/tests/test_devilry_listbuilder/test_period.py | devilry/devilry-django | 9ae28e462dfa4cfee966ebacbca04ade9627e715 | [
"BSD-3-Clause"
] | 15 | 2015-04-06T06:18:43.000Z | 2021-02-24T12:28:30.000Z | # -*- coding: utf-8 -*-
import htmls
from django import test
from django.conf import settings
from cradmin_legacy import datetimeutils
from model_bakery import baker
from devilry.apps.core.models import Period
from devilry.devilry_cradmin import devilry_listbuilder
from devilry.devilry_qualifiesforexam.models import Status
class TestAdminItemValue(test.TestCase):
    """Tests for the period listbuilder item value as rendered for admins."""
    def test_custom_cssclass(self):
        # The admin renderer must tag the item with its role-specific css class.
        testperiod = baker.make('core.Period')
        selector = htmls.S(devilry_listbuilder.period.AdminItemValue(value=testperiod).render())
        self.assertTrue(selector.exists('.devilry-cradmin-perioditemvalue-admin'))
    def test_title(self):
        # The rendered title is the period's long_name.
        testperiod = baker.make('core.Period', long_name='Test Period')
        selector = htmls.S(devilry_listbuilder.period.AdminItemValue(value=testperiod).render())
        self.assertEqual(
            'Test Period',
            selector.one('.cradmin-legacy-listbuilder-itemvalue-titledescription-title').alltext_normalized)
    def test_description(self):
        # The description is the start/end time range, em-dash separated.
        testperiod = baker.make('core.Period',
                                start_time=datetimeutils.default_timezone_datetime(2015, 1, 15),
                                end_time=datetimeutils.default_timezone_datetime(2015, 12, 24))
        selector = htmls.S(devilry_listbuilder.period.AdminItemValue(value=testperiod).render())
        self.assertEqual(
            'Thursday January 15, 2015, 00:00 \u2014 Thursday December 24, 2015, 00:00',
            selector.one('.cradmin-legacy-listbuilder-itemvalue-titledescription-description').alltext_normalized)
class TestStudentItemValue(test.TestCase):
def test_custom_cssclass(self):
testperiod = baker.make('core.Period')
selector = htmls.S(devilry_listbuilder.period.StudentItemValue(value=testperiod).render())
self.assertTrue(selector.exists('.devilry-cradmin-perioditemvalue-student'))
def test_title(self):
testperiod = baker.make('core.Period',
parentnode__long_name='Test Subject',
long_name='Test Period')
selector = htmls.S(devilry_listbuilder.period.StudentItemValue(value=testperiod).render())
self.assertEqual(
'Test Subject - Test Period',
selector.one('.cradmin-legacy-listbuilder-itemvalue-titledescription-title').alltext_normalized)
def test_description_no_assignments(self):
testuser = baker.make(settings.AUTH_USER_MODEL)
testperiod = baker.make('core.Period')
testperiod_annotated = Period.objects\
.extra_annotate_with_assignmentcount_for_studentuser(user=testuser)\
.get(id=testperiod.id)
selector = htmls.S(devilry_listbuilder.period.StudentItemValue(value=testperiod_annotated).render())
self.assertEqual(
'0 assignments',
selector.one('.cradmin-legacy-listbuilder-itemvalue-titledescription-description').alltext_normalized)
def test_description_single_assignment(self):
testuser = baker.make(settings.AUTH_USER_MODEL)
testperiod = baker.make('core.Period')
relatedstudent = baker.make('core.RelatedStudent', user=testuser, period=testperiod)
testassignment = baker.make('core.Assignment', parentnode=testperiod)
baker.make('core.Candidate',
assignment_group__parentnode=testassignment,
relatedstudent=relatedstudent)
testperiod_annotated = Period.objects\
.extra_annotate_with_assignmentcount_for_studentuser(user=testuser)\
.get(id=testperiod.id)
selector = htmls.S(devilry_listbuilder.period.StudentItemValue(value=testperiod_annotated).render())
self.assertEqual(
'1 assignment',
selector.one('.cradmin-legacy-listbuilder-itemvalue-titledescription-description').alltext_normalized)
def test_description_multiple_assignments_assignment(self):
testuser = baker.make(settings.AUTH_USER_MODEL)
testperiod = baker.make('core.Period')
relatedstudent = baker.make('core.RelatedStudent', user=testuser, period=testperiod)
testassignment1 = baker.make('core.Assignment', parentnode=testperiod)
baker.make('core.Candidate',
assignment_group__parentnode=testassignment1,
relatedstudent=relatedstudent)
testassignment2 = baker.make('core.Assignment', parentnode=testperiod)
baker.make('core.Candidate',
assignment_group__parentnode=testassignment2,
relatedstudent=relatedstudent)
testperiod_annotated = Period.objects\
.extra_annotate_with_assignmentcount_for_studentuser(user=testuser)\
.get(id=testperiod.id)
selector = htmls.S(devilry_listbuilder.period.StudentItemValue(value=testperiod_annotated).render())
self.assertEqual(
'2 assignments',
selector.one('.cradmin-legacy-listbuilder-itemvalue-titledescription-description').alltext_normalized)
def test_no_qualified_for_final_exam_status(self):
    """Without any qualification status the status block is not rendered."""
    requestuser = baker.make(settings.AUTH_USER_MODEL)
    period = baker.make('core.Period')
    annotated_period = Period.objects \
        .extra_annotate_with_user_qualifies_for_final_exam(user=requestuser) \
        .get(id=period.id)
    rendered = devilry_listbuilder.period.StudentItemValue(value=annotated_period).render()
    selector = htmls.S(rendered)
    self.assertFalse(
        selector.exists('.devilry-cradmin-perioditemvalue-student-qualifedforexam'))
def test_qualified_for_final_exam(self):
    """A READY status with qualifies=True renders the 'yes' variant."""
    requestuser = baker.make(settings.AUTH_USER_MODEL)
    period = baker.make('core.Period')
    student = baker.make('core.RelatedStudent', period=period, user=requestuser)
    ready_status = baker.make('devilry_qualifiesforexam.Status', period=period,
                              status=Status.READY)
    baker.make('devilry_qualifiesforexam.QualifiesForFinalExam',
               relatedstudent=student,
               status=ready_status,
               qualifies=True)
    annotated_period = Period.objects \
        .extra_annotate_with_user_qualifies_for_final_exam(user=requestuser) \
        .get(id=period.id)
    rendered = devilry_listbuilder.period.StudentItemValue(value=annotated_period).render()
    selector = htmls.S(rendered)
    self.assertTrue(
        selector.exists('.devilry-cradmin-perioditemvalue-student-qualifedforexam-yes'))
    self.assertFalse(
        selector.exists('.devilry-cradmin-perioditemvalue-student-qualifedforexam-no'))
    self.assertEqual(
        'Qualified for final exam',
        selector.one('.devilry-cradmin-perioditemvalue-student-qualifedforexam').alltext_normalized)
def test_not_qualified_for_final_exam(self):
    """A READY status with qualifies=False renders the 'no' variant."""
    requestuser = baker.make(settings.AUTH_USER_MODEL)
    period = baker.make('core.Period')
    student = baker.make('core.RelatedStudent', period=period, user=requestuser)
    ready_status = baker.make('devilry_qualifiesforexam.Status', period=period,
                              status=Status.READY)
    baker.make('devilry_qualifiesforexam.QualifiesForFinalExam',
               relatedstudent=student,
               status=ready_status,
               qualifies=False)
    annotated_period = Period.objects \
        .extra_annotate_with_user_qualifies_for_final_exam(user=requestuser) \
        .get(id=period.id)
    rendered = devilry_listbuilder.period.StudentItemValue(value=annotated_period).render()
    selector = htmls.S(rendered)
    self.assertFalse(
        selector.exists('.devilry-cradmin-perioditemvalue-student-qualifedforexam-yes'))
    self.assertTrue(
        selector.exists('.devilry-cradmin-perioditemvalue-student-qualifedforexam-no'))
    self.assertEqual(
        'NOT qualified for final exam',
        selector.one('.devilry-cradmin-perioditemvalue-student-qualifedforexam').alltext_normalized)
| 54.621622 | 118 | 0.694458 | 788 | 8,084 | 6.946701 | 0.140863 | 0.050968 | 0.049872 | 0.058824 | 0.881622 | 0.880526 | 0.855681 | 0.855681 | 0.855681 | 0.817866 | 0 | 0.007337 | 0.207571 | 8,084 | 147 | 119 | 54.993197 | 0.847175 | 0.002598 | 0 | 0.653846 | 0 | 0 | 0.191043 | 0.126783 | 0 | 0 | 0 | 0 | 0.115385 | 1 | 0.084615 | false | 0 | 0.061538 | 0 | 0.161538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
a5b995283d3bfefd61ac52a2e50005b0bb4c1c40 | 43 | py | Python | app/spider/__init__.py | kenmingwang/ASoulCnki | b46e738d1fe4627b42879306ff824bbec322915c | [
"Apache-2.0"
] | 384 | 2021-07-15T06:31:12.000Z | 2022-03-25T14:03:00.000Z | app/spider/__init__.py | kenmingwang/ASoulCnki | b46e738d1fe4627b42879306ff824bbec322915c | [
"Apache-2.0"
] | 14 | 2021-07-18T15:10:47.000Z | 2022-02-23T03:49:48.000Z | app/spider/__init__.py | kenmingwang/ASoulCnki | b46e738d1fe4627b42879306ff824bbec322915c | [
"Apache-2.0"
] | 37 | 2021-07-16T13:06:28.000Z | 2022-03-17T10:55:46.000Z | from . import dynamic
from . import reply
| 10.75 | 21 | 0.744186 | 6 | 43 | 5.333333 | 0.666667 | 0.625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.209302 | 43 | 3 | 22 | 14.333333 | 0.941176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
3c4ee134bcb7899d3e5993e12e247217f6d69919 | 7,697 | py | Python | LinkedListTestBase.py | lightmanca/InterviewPrep | d77d6b8a1d9dd4f4d5b2d5ef38a5d1c9b2e50f07 | [
"Apache-2.0"
] | null | null | null | LinkedListTestBase.py | lightmanca/InterviewPrep | d77d6b8a1d9dd4f4d5b2d5ef38a5d1c9b2e50f07 | [
"Apache-2.0"
] | null | null | null | LinkedListTestBase.py | lightmanca/InterviewPrep | d77d6b8a1d9dd4f4d5b2d5ef38a5d1c9b2e50f07 | [
"Apache-2.0"
] | null | null | null | class LinkedListBase:
makeLinkedList = None
def setup_class(self):
self.makeLinkedList = lambda self, initial_data=None: []
def test_make_list(self):
print("Make list test")
list = self.makeLinkedList([1, 2, 3, 4, 5])
list.print_list()
assert list.make_array_from_list() == [1, 2, 3, 4, 5]
list.verify_list_integrity()
def test_add_item_below(self):
print("Add Item below")
list = self.makeLinkedList([1, 2, 3, 4, 5])
list.add_item_bottom(10)
list.print_list()
assert list.make_array_from_list() == [1, 2, 3, 4, 5, 10]
list.verify_list_integrity()
def test_add_item_below_empty_list(self):
print("Add Item below empty list")
list = self.makeLinkedList()
list.add_item_bottom(10)
list.print_list()
assert list.make_array_from_list() == [10]
list.verify_list_integrity()
def test_add_item_top(self):
print("Add Item top")
list = self.makeLinkedList([1, 2, 3, 4, 5])
list.add_item_top(10)
list.print_list()
assert list.make_array_from_list() == [10, 1, 2, 3, 4, 5]
list.verify_list_integrity()
def test_add_item_top_empty_list(self):
print("Add Item top empty list")
list = self.makeLinkedList()
list.add_item_top(10)
list.print_list()
assert list.make_array_from_list() == [10]
list.verify_list_integrity()
def test_add_item_at_index(self):
print("Add Item At index")
list = self.makeLinkedList([1, 2, 3, 4, 5])
list.add_item_at_index(3, 10)
list.print_list()
assert list.make_array_from_list() == [1, 2, 3, 10, 4, 5]
list.verify_list_integrity()
def test_add_item_at_index_at_top_of_list(self):
print("Add Item At index")
list = self.makeLinkedList([1, 2, 3, 4, 5])
list.add_item_at_index(0, 10)
list.print_list()
assert list.make_array_from_list() == [10, 1, 2, 3, 4, 5]
list.verify_list_integrity()
def test_add_item_at_index_item_at_end_of_list(self):
print("Add Item At index item is at end of list")
list = self.makeLinkedList([1, 2, 3, 4, 5])
list.add_item_at_index(5, 10)
list.print_list()
assert list.make_array_from_list() == [1, 2, 3, 4, 5, 10]
list.verify_list_integrity()
def test_add_item_at_index_item_past_end_of_list(self):
print("Add Item At index item is past end of list")
list = self.makeLinkedList([1, 2, 3, 4, 5])
list.add_item_at_index(10, 10)
list.print_list()
assert list.make_array_from_list() == [1, 2, 3, 4, 5, 10]
list.verify_list_integrity()
def test_remove_item_matching(self):
print("Remove item matching")
list = self.makeLinkedList([1, 2, 3, 4, 5])
list.remove_item_matching(3)
list.print_list()
assert list.make_array_from_list() == [1, 2, 4, 5]
list.verify_list_integrity()
def test_remove_item_matching_no_match(self):
print("Remove item not matching")
list = self.makeLinkedList([1, 2, 3, 4, 5])
list.remove_item_matching(10)
list.print_list()
assert list.make_array_from_list() == [1, 2, 3, 4, 5]
list.verify_list_integrity()
def test_remove_item_matching_single_item_in_list(self):
print("Remove item matching single item in list")
list = self.makeLinkedList([5])
list.remove_item_matching(5)
list.print_list()
assert list.make_array_from_list() == []
list.verify_list_integrity()
def test_remove_item_matching_empty_list(self):
print("Remove item not matching")
list = self.makeLinkedList()
list.remove_item_matching(10)
list.print_list()
assert list.make_array_from_list() == []
list.verify_list_integrity()
# -------
def test_remove_item_at_index(self):
print("Remove item at Index")
list = self.makeLinkedList([1, 2, 8, 4, 5])
list.remove_item_at_index(2)
list.print_list()
assert list.make_array_from_list() == [1, 2, 4, 5]
list.verify_list_integrity()
def test_remove_item_at_index_0(self):
print("Remove item at Index")
list = self.makeLinkedList([1, 2, 8, 4, 5])
list.remove_item_at_index(0)
list.print_list()
assert list.make_array_from_list() == [2, 8, 4, 5]
list.verify_list_integrity()
def test_remove_item_at_index_too_high(self):
print("Remove item not matching")
list = self.makeLinkedList([1, 2, 3, 4, 5])
list.remove_item_at_index(10)
list.print_list()
assert list.make_array_from_list() == [1, 2, 3, 4, 5]
list.verify_list_integrity()
def test_remove_item_at_index_single_item(self):
print("Remove item matching single item in list")
list = self.makeLinkedList([5])
list.remove_item_at_index(0)
list.print_list()
assert list.make_array_from_list() == []
list.verify_list_integrity()
def test_remove_item_at_index_empty_list(self):
print("Remove item not matching")
list = self.makeLinkedList()
list.remove_item_matching(0)
list.print_list()
assert list.make_array_from_list() == []
list.verify_list_integrity()
def test_reverse_linked_list(self):
list = self.makeLinkedList([1, 2, 3, 4, 5])
list.print_list()
list.rev_num_items_in_list(4)
list.print_list()
assert list.make_array_from_list() == [4, 3, 2, 1, 5]
list.verify_list_integrity()
# I wasn't sure if this should reverse the list or not. simply changing my statement
# if self.count ==0 or self.count <= num_items:
# to:
# if self.count ==0 or self.count < num_items:
# will allow this code to reverse the numbers as well.
def test_reverse_linked_list_equal_length(self):
list = self.makeLinkedList([1, 2, 3, 4, 5])
list.print_list()
list.rev_num_items_in_list(5)
list.print_list()
assert list.make_array_from_list() == [1, 2, 3, 4, 5]
def test_reverse_linked_list_greater_length(self):
list = self.makeLinkedList([1, 2, 3, 4, 5])
list.print_list()
list.rev_num_items_in_list(6)
list.print_list()
assert list.make_array_from_list() == [1, 2, 3, 4, 5]
def test_reverse_linked_empty_list(self):
list = self.makeLinkedList([])
list.print_list()
list.rev_num_items_in_list(0)
list.print_list()
assert list.make_array_from_list() == []
def test_reverse_linked_list_different_values(self):
list = self.makeLinkedList([8, 10, -1, 20, 15, 8, 5])
list.print_list()
list.rev_num_items_in_list(6)
list.print_list()
assert list.make_array_from_list() == [8, 15, 20, -1, 10, 8, 5]
def test_reverse_linked_list_large_array(self):
input_list_array = []
reversed_list_array = []
# create our input list
for x in range(1, 2000, 2):
input_list_array.append(x)
# create our reversed list to compare to the input list.
for x in range(999, 0, -2):
reversed_list_array.append(x)
for x in range(1001, 2000, 2):
reversed_list_array.append(x)
list = self.makeLinkedList(input_list_array)
list.print_list()
list.rev_num_items_in_list(500)
list.print_list()
print(reversed_list_array)
assert list.make_array_from_list() == reversed_list_array
| 36.827751 | 89 | 0.627907 | 1,118 | 7,697 | 4.011628 | 0.085868 | 0.06243 | 0.086957 | 0.101672 | 0.865329 | 0.821405 | 0.777703 | 0.772575 | 0.757414 | 0.710145 | 0 | 0.043592 | 0.257893 | 7,697 | 208 | 90 | 37.004808 | 0.741597 | 0.041055 | 0 | 0.634286 | 0 | 0 | 0.059685 | 0 | 0 | 0 | 0 | 0 | 0.137143 | 1 | 0.142857 | false | 0 | 0 | 0 | 0.154286 | 0.28 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
b1e3fbe64b4d2ad9918dafcac4d6f1436682f014 | 94 | py | Python | kb_learning/controller/__init__.py | gregorgebhardt/kb_learning | 41e18c1238e0ea891d48aff63588366dae64e4c8 | [
"BSD-3-Clause"
] | null | null | null | kb_learning/controller/__init__.py | gregorgebhardt/kb_learning | 41e18c1238e0ea891d48aff63588366dae64e4c8 | [
"BSD-3-Clause"
] | null | null | null | kb_learning/controller/__init__.py | gregorgebhardt/kb_learning | 41e18c1238e0ea891d48aff63588366dae64e4c8 | [
"BSD-3-Clause"
] | null | null | null | from .kilobot_controller import KilobotController
from .pose_controller import PoseController
| 31.333333 | 49 | 0.893617 | 10 | 94 | 8.2 | 0.7 | 0.390244 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.085106 | 94 | 2 | 50 | 47 | 0.953488 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
b1e49c9c8432286899d23ae29a19f849a501114e | 139 | py | Python | src/error/__init__.py | Felixs/cards | af1d54ccc97fd91fe6fc0ba38365a6ee59b2dfd7 | [
"MIT"
] | null | null | null | src/error/__init__.py | Felixs/cards | af1d54ccc97fd91fe6fc0ba38365a6ee59b2dfd7 | [
"MIT"
] | null | null | null | src/error/__init__.py | Felixs/cards | af1d54ccc97fd91fe6fc0ba38365a6ee59b2dfd7 | [
"MIT"
] | null | null | null | from .out_of_cards_error import OutOfCardsError
from .not_in_hand_error import NotInHandError
from .no_players_error import NoPlayersError
| 34.75 | 47 | 0.892086 | 20 | 139 | 5.8 | 0.7 | 0.284483 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.086331 | 139 | 3 | 48 | 46.333333 | 0.913386 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
b1ebdeda7a7c75eb18b8d32e5865848ad9b824d5 | 594 | py | Python | deepncli/genecount/main.py | emptyewer/deepncli | 9f252dc76d5ed829d0d54014a682c4ed6ff78a2e | [
"MIT"
] | null | null | null | deepncli/genecount/main.py | emptyewer/deepncli | 9f252dc76d5ed829d0d54014a682c4ed6ff78a2e | [
"MIT"
] | null | null | null | deepncli/genecount/main.py | emptyewer/deepncli | 9f252dc76d5ed829d0d54014a682c4ed6ff78a2e | [
"MIT"
] | null | null | null | import joblib.parallel as parallel
from ..utils.io import get_sam_filelist
# def lets_count(directory, input_data_folder, sam_file, summary_folder, exon_file)
# def count_genes(directory, input_data_folder, summary_folder, exon_file):
# sam_files_list = get_sam_filelist(directory, input_data_folder)
# num_cores = parallel.cpu_count()
# if len(sam_files_list) > 0:
# parallel(n_jobs=num_cores - 1)(
# parallel.delayed(lets_count)(directory, input_data_folder, sam_file, summary_folder, exon_file) for sam_file
# in
# sam_files_list)
| 37.125 | 122 | 0.725589 | 84 | 594 | 4.72619 | 0.416667 | 0.141058 | 0.18136 | 0.241814 | 0.307305 | 0.307305 | 0.307305 | 0.307305 | 0.307305 | 0.307305 | 0 | 0.004141 | 0.186869 | 594 | 15 | 123 | 39.6 | 0.817805 | 0.835017 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
3cd9df2cb8268f946c3a3749c3be22a5fc07838a | 172 | py | Python | Src/Clova/vendor/http/cookies.py | NishiYusuke/Line-boot-award | d77f26b9109f3cba45be5906bcb6c9314974cd92 | [
"MIT"
] | 2 | 2020-08-17T07:52:48.000Z | 2020-12-18T16:39:32.000Z | Src/Clova/vendor/http/cookies.py | NishiYusuke/Line-boot-award | d77f26b9109f3cba45be5906bcb6c9314974cd92 | [
"MIT"
] | 5 | 2020-12-15T23:40:14.000Z | 2022-02-23T15:43:18.000Z | Src/Clova/vendor/http/cookies.py | NishiYusuke/Line-boot-award | d77f26b9109f3cba45be5906bcb6c9314974cd92 | [
"MIT"
] | 4 | 2019-05-16T09:57:33.000Z | 2021-07-14T12:31:21.000Z | from __future__ import absolute_import
import sys
assert sys.version_info[0] < 3
from Cookie import *
from Cookie import Morsel # left out of __all__ on Py2.7!
| 21.5 | 61 | 0.738372 | 27 | 172 | 4.333333 | 0.703704 | 0.17094 | 0.273504 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02963 | 0.215116 | 172 | 7 | 62 | 24.571429 | 0.837037 | 0.168605 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0 | true | 0 | 0.8 | 0 | 0.8 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
a7057f2766c4ede87dc4abb6fc93705ae11241be | 2,896 | py | Python | all_models.py | RonaldsonBellande/ML_pointcloud_classification | ad938aea425249ca273e7cfd86b4e2a860885312 | [
"Apache-2.0"
] | null | null | null | all_models.py | RonaldsonBellande/ML_pointcloud_classification | ad938aea425249ca273e7cfd86b4e2a860885312 | [
"Apache-2.0"
] | null | null | null | all_models.py | RonaldsonBellande/ML_pointcloud_classification | ad938aea425249ca273e7cfd86b4e2a860885312 | [
"Apache-2.0"
] | null | null | null | from header_imports import *
class models(object):
def create_models_1(self):
model = Sequential()
model.add(Conv2D(filters=64,kernel_size=(7,7), strides = (1,1), padding="same", input_shape = self.input_shape, activation = "relu"))
model.add(Dropout(0.25))
model.add(Conv2D(filters=32,kernel_size=(7,7), strides = (1,1), padding="same", activation = "relu"))
model.add(Dropout(0.25))
model.add(Conv2D(filters=16,kernel_size=(7,7), strides = (1,1), padding="same", activation = "relu"))
model.add(MaxPooling2D(pool_size = (1,1)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(units = self.number_classes, activation = "softmax", input_dim=2))
model.compile(loss = "binary_crossentropy", optimizer="adam", metrics=["accuracy"])
return model
def create_models_2(self):
model = Sequential()
model.add(Conv2D(filters=64, kernel_size=(3,3), strides=(1,1), padding="same", activation="relu", input_shape = self.input_shape))
model.add(Conv2D(filters=32, kernel_size=(3,3), strides=(1,1), padding="same",activation="relu"))
model.add(Dropout(rate=0.25))
model.add(Conv2D(filters=16, kernel_size=(3,3), strides=(1,1), padding="same",activation="relu"))
model.add(MaxPooling2D(pool_size = (1,1)))
model.add(Conv2D(filters=8, kernel_size=(3,3), strides=(1,1), padding="same",activation="relu"))
model.add(Dropout(rate=0.25))
model.add(Flatten())
model.add(Dense(512, activation="relu"))
model.add(Dropout(rate=0.5))
model.add(Dense(units = self.number_classes, activation="softmax"))
model.compile(loss = 'binary_crossentropy', optimizer ='adam', metrics= ['accuracy'])
return model
def create_model_3(self):
self.initial_model = Sequential()
self.MyConv(first = True)
self.MyConv()
self.MyConv()
self.MyConv()
self.initial_model.add(Flatten())
self.initial_model.add(Dense(units = self.number_classes, activation = "softmax", input_dim=2))
self.initial_model.compile(loss = "binary_crossentropy", optimizer ="adam", metrics= ["accuracy"])
return self.initial_model
def MyConv(self, first = False):
if first == False:
self.initial_model.add(Conv2D(64, (4, 4),strides = (1,1), padding="same", input_shape = self.input_shape))
else:
self.initial_model.add(Conv2D(64, (4, 4),strides = (1,1), padding="same", input_shape = self.input_shape))
self.initial_model.add(Activation("relu"))
self.initial_model.add(Dropout(0.5))
self.initial_model.add(Conv2D(32, (4, 4),strides = (1,1),padding="same"))
self.initial_model.add(Activation("relu"))
self.initial_model.add(Dropout(0.25))
| 43.223881 | 141 | 0.630525 | 382 | 2,896 | 4.664921 | 0.167539 | 0.130191 | 0.107744 | 0.089787 | 0.866442 | 0.818743 | 0.818743 | 0.76936 | 0.748036 | 0.708193 | 0 | 0.045494 | 0.203039 | 2,896 | 66 | 142 | 43.878788 | 0.726603 | 0 | 0 | 0.4 | 0 | 0 | 0.067035 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.02 | 0 | 0.18 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
596c2eda68374d4871adcd897f414ddb2c2e4ba7 | 94 | py | Python | src/aspyre/source/picker/apple/__init__.py | ComputationalCryoEM/ASPIRE | 6e6699eae532874de44b98adb7ddb2ad96c43d9d | [
"MIT"
] | null | null | null | src/aspyre/source/picker/apple/__init__.py | ComputationalCryoEM/ASPIRE | 6e6699eae532874de44b98adb7ddb2ad96c43d9d | [
"MIT"
] | 5 | 2019-06-07T13:25:29.000Z | 2019-06-18T20:34:37.000Z | src/aspyre/source/picker/apple/__init__.py | computationalcryoem/aspyre | 6e6699eae532874de44b98adb7ddb2ad96c43d9d | [
"MIT"
] | 1 | 2019-06-18T17:41:52.000Z | 2019-06-18T17:41:52.000Z | from aspyre.source.picker import ParticlePicker
class ApplePicker(ParticlePicker):
    """APPLE particle picker; currently a stub that inherits all behavior
    unchanged from ParticlePicker."""
    pass
| 15.666667 | 47 | 0.808511 | 10 | 94 | 7.6 | 0.9 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.138298 | 94 | 5 | 48 | 18.8 | 0.938272 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
59703fa4c291dc5b4aea1b0bb9b1af9bbe444b47 | 96 | py | Python | venv/lib/python3.8/site-packages/cachecontrol/caches/__init__.py | Retraces/UkraineBot | 3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71 | [
"MIT"
] | 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/cachecontrol/caches/__init__.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/cachecontrol/caches/__init__.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | null | null | null | /home/runner/.cache/pip/pool/80/61/4e/b481fc40346f90fe1801f18887ebbd618706655c702ccd7ee9ed663cd2 | 96 | 96 | 0.895833 | 9 | 96 | 9.555556 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.4375 | 0 | 96 | 1 | 96 | 96 | 0.458333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
5978638edda5b769060fb5beafb035f01e0a6bca | 16,832 | py | Python | tests/test_loop_math.py | novalegra/PyLoopKit | c275ef9490b0c528841525bf1b501e9c8805b20a | [
"BSD-2-Clause"
] | 6 | 2020-04-08T15:17:58.000Z | 2021-06-04T06:47:15.000Z | tests/test_loop_math.py | novalegra/PyLoopKit | c275ef9490b0c528841525bf1b501e9c8805b20a | [
"BSD-2-Clause"
] | 8 | 2019-08-29T01:38:41.000Z | 2021-03-11T22:58:07.000Z | tests/test_loop_math.py | novalegra/PyLoopKit | c275ef9490b0c528841525bf1b501e9c8805b20a | [
"BSD-2-Clause"
] | 5 | 2019-09-03T21:51:14.000Z | 2021-01-20T04:15:37.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 5 19:26:16 2019
@author: annaquinlan
Github URL: https://github.com/tidepool-org/LoopKit/blob/
57a9f2ba65ae3765ef7baafe66b883e654e08391/LoopKitTests/LoopMathTests.swift
"""
# pylint: disable=C0111, C0200, R0201, W0105
import unittest
from datetime import datetime
#from . import path_grabber # pylint: disable=unused-import
from .loop_kit_tests import load_fixture
from pyloopkit.loop_math import predict_glucose, decay_effect, subtracting, combined_sums
from pyloopkit.date import time_interval_since
class TestLoopMathFunctions(unittest.TestCase):
""" unittest class to run LoopMath tests. """
def load_glucose_effect_fixture_iso_time(self, name):
    """Load glucose effects whose "date" fields are ISO-8601 strings.

    Returns a (dates, glucose_values) pair of parallel lists.
    """
    entries = load_fixture(name, ".json")

    dates = []
    glucose_values = []
    for entry in entries:
        dates.append(datetime.fromisoformat(entry.get("date")))
        glucose_values.append(entry.get("amount"))

    assert len(dates) == len(glucose_values),\
        "expected output shape to match"

    return (dates, glucose_values)
def load_counteraction_input_fixture(self, name):
    """Load insulin counteraction effects from a JSON fixture.

    Arguments:
    name -- name of file without the extension

    Returns (start_dates, end_dates, insulin_counteraction_values).
    """
    def parse_timestamp(timestamp):
        # Fixtures mix ISO-8601 stamps (containing "T") with
        # space-separated "%Y-%m-%d %H:%M:%S %z" stamps.
        if "T" in timestamp:
            return datetime.fromisoformat(timestamp)
        return datetime.strptime(timestamp, "%Y-%m-%d %H:%M:%S %z")

    entries = load_fixture(name, ".json")
    start_dates = [parse_timestamp(entry.get("startDate")) for entry in entries]
    end_dates = [parse_timestamp(entry.get("endDate")) for entry in entries]
    ice_values = [entry.get("value") for entry in entries]

    assert len(start_dates) == len(end_dates) == len(ice_values),\
        "expected output shape to match"

    return (start_dates, end_dates, ice_values)
def load_glucose_effect_fixture_normal_time(self, name):
    """Load glucose effects whose dates use "%Y-%m-%d %H:%M:%S %z".

    Returns a (dates, glucose_values) pair of parallel lists.
    """
    entries = load_fixture(name, ".json")

    dates = []
    glucose_values = []
    for entry in entries:
        dates.append(
            datetime.strptime(entry.get("date"), "%Y-%m-%d %H:%M:%S %z")
        )
        glucose_values.append(entry.get("value"))

    assert len(dates) == len(glucose_values),\
        "expected output shape to match"

    return (dates, glucose_values)
def load_sample_value_fixture(self, name):
    """Load sample values keyed by "startDate" ("%Y-%m-%dT%H:%M:%S%z").

    Returns a (dates, glucose_values) pair of parallel lists.
    """
    entries = load_fixture(name, ".json")

    dates = []
    glucose_values = []
    for entry in entries:
        dates.append(
            datetime.strptime(entry.get("startDate"), "%Y-%m-%dT%H:%M:%S%z")
        )
        glucose_values.append(entry.get("value"))

    assert len(dates) == len(glucose_values),\
        "expected output shape to match"

    return (dates, glucose_values)
def load_glucose_history_fixture(self, name):
    """Load glucose history ("display_time"/"glucose" keys, ISO dates).

    Returns a (dates, glucose_values) pair of parallel lists.
    """
    entries = load_fixture(name, ".json")

    dates = []
    glucose_values = []
    for entry in entries:
        dates.append(datetime.fromisoformat(entry.get("display_time")))
        glucose_values.append(entry.get("glucose"))

    assert len(dates) == len(glucose_values),\
        "expected output shape to match"

    return (dates, glucose_values)
def load_glucose_value_fixture(self, name):
    """Load glucose values ("date"/"amount" keys, ISO dates).

    Returns a (dates, glucose_values) pair of parallel lists.
    """
    entries = load_fixture(name, ".json")

    dates = []
    glucose_values = []
    for entry in entries:
        dates.append(datetime.fromisoformat(entry.get("date")))
        glucose_values.append(entry.get("amount"))

    assert len(dates) == len(glucose_values),\
        "expected output shape to match"

    return (dates, glucose_values)
def carb_effect(self):
    """Shared carb-effect fixture used by the predict_glucose tests."""
    fixture_name = "glucose_from_effects_carb_effect_input"
    return self.load_glucose_effect_fixture_iso_time(fixture_name)
def insulin_effect(self):
    """Shared insulin-effect fixture used by the predict_glucose tests."""
    fixture_name = "glucose_from_effects_insulin_effect_input"
    return self.load_glucose_effect_fixture_iso_time(fixture_name)
""" Predict_glucose tests """
def test_predict_glucose_no_momentum(self):
    """Prediction from carb + insulin effects only (empty momentum)."""
    glucose = self.load_glucose_history_fixture(
        "glucose_from_effects_glucose_input"
    )
    (expected_dates, expected_values) = self.load_glucose_value_fixture(
        "glucose_from_effects_no_momentum_output"
    )
    (predicted_dates, predicted_values) = predict_glucose(
        glucose[0][0], glucose[1][0],
        [], [],
        *self.carb_effect(),
        *self.insulin_effect()
    )
    self.assertEqual(len(expected_dates), len(predicted_dates))
    for expected_date, predicted_date, expected_value, predicted_value in zip(
            expected_dates, predicted_dates, expected_values, predicted_values):
        self.assertEqual(expected_date, predicted_date)
        self.assertAlmostEqual(expected_value, predicted_value, 3)
def test_predict_glucose_flat_momentum(self):
    """Prediction with flat (zero-slope) momentum effects."""
    glucose = self.load_glucose_history_fixture(
        "glucose_from_effects_momentum_flat_glucose_input"
    )
    momentum = self.load_glucose_effect_fixture_iso_time(
        "glucose_from_effects_momentum_flat_input"
    )
    (expected_dates, expected_values) = self.load_glucose_value_fixture(
        "glucose_from_effects_momentum_flat_output"
    )
    (predicted_dates, predicted_values) = predict_glucose(
        glucose[0][0], glucose[1][0],
        *momentum,
        *self.carb_effect(),
        *self.insulin_effect()
    )
    self.assertEqual(len(expected_dates), len(predicted_dates))
    for expected_date, predicted_date, expected_value, predicted_value in zip(
            expected_dates, predicted_dates, expected_values, predicted_values):
        self.assertEqual(expected_date, predicted_date)
        self.assertAlmostEqual(expected_value, predicted_value, 3)
def test_predict_glucose_up_momentum(self):
    """Prediction with rising momentum effects."""
    glucose = self.load_glucose_history_fixture(
        "glucose_from_effects_glucose_input"
    )
    momentum = self.load_glucose_effect_fixture_iso_time(
        "glucose_from_effects_momentum_up_input"
    )
    (expected_dates, expected_values) = self.load_glucose_value_fixture(
        "glucose_from_effects_momentum_up_output"
    )
    (predicted_dates, predicted_values) = predict_glucose(
        glucose[0][0], glucose[1][0],
        *momentum,
        *self.carb_effect(),
        *self.insulin_effect()
    )
    self.assertEqual(len(expected_dates), len(predicted_dates))
    for expected_date, predicted_date, expected_value, predicted_value in zip(
            expected_dates, predicted_dates, expected_values, predicted_values):
        self.assertEqual(expected_date, predicted_date)
        self.assertAlmostEqual(expected_value, predicted_value, 3)
def test_predict_glucose_down_momentum(self):
    """Prediction with falling momentum effects."""
    glucose = self.load_glucose_history_fixture(
        "glucose_from_effects_glucose_input"
    )
    momentum = self.load_glucose_effect_fixture_iso_time(
        "glucose_from_effects_momentum_down_input"
    )
    (expected_dates, expected_values) = self.load_glucose_value_fixture(
        "glucose_from_effects_momentum_down_output"
    )
    (predicted_dates, predicted_values) = predict_glucose(
        glucose[0][0], glucose[1][0],
        *momentum,
        *self.carb_effect(),
        *self.insulin_effect()
    )
    self.assertEqual(len(expected_dates), len(predicted_dates))
    for expected_date, predicted_date, expected_value, predicted_value in zip(
            expected_dates, predicted_dates, expected_values, predicted_values):
        self.assertEqual(expected_date, predicted_date)
        self.assertAlmostEqual(expected_value, predicted_value, 3)
def test_predict_glucose_blend_momentum(self):
    """Prediction blending momentum with a dedicated insulin effect input."""
    glucose = self.load_glucose_history_fixture(
        "glucose_from_effects_momentum_blend_glucose_input"
    )
    momentum = self.load_glucose_effect_fixture_iso_time(
        "glucose_from_effects_momentum_blend_momentum_input"
    )
    # This scenario uses its own insulin effect input instead of the
    # shared insulin_effect() fixture.
    insulin_effect = self.load_glucose_effect_fixture_iso_time(
        "glucose_from_effects_momentum_blend_insulin_effect_input"
    )
    (expected_dates, expected_values) = self.load_glucose_value_fixture(
        "glucose_from_effects_momentum_blend_output"
    )
    (predicted_dates, predicted_values) = predict_glucose(
        glucose[0][0], glucose[1][0],
        *momentum,
        *self.carb_effect(),
        *insulin_effect
    )
    self.assertEqual(len(expected_dates), len(predicted_dates))
    for expected_date, predicted_date, expected_value, predicted_value in zip(
            expected_dates, predicted_dates, expected_values, predicted_values):
        self.assertEqual(expected_date, predicted_date)
        self.assertAlmostEqual(expected_value, predicted_value, 3)
def test_predict_glucose_starting_effects_non_zero(self):
    """Prediction when the carb/insulin effects start at non-zero values."""
    glucose = self.load_sample_value_fixture(
        "glucose_from_effects_non_zero_glucose_input"
    )
    insulin_effect = self.load_sample_value_fixture(
        "glucose_from_effects_non_zero_insulin_input"
    )
    carb_effect = self.load_sample_value_fixture(
        "glucose_from_effects_non_zero_carb_input"
    )
    (expected_dates, expected_values) = self.load_sample_value_fixture(
        "glucose_from_effects_non_zero_output"
    )
    (predicted_dates, predicted_values) = predict_glucose(
        glucose[0][0], glucose[1][0],
        [], [],
        *carb_effect,
        *insulin_effect
    )
    self.assertEqual(len(expected_dates), len(predicted_dates))
    for expected_date, predicted_date, expected_value, predicted_value in zip(
            expected_dates, predicted_dates, expected_values, predicted_values):
        self.assertEqual(expected_date, predicted_date)
        self.assertAlmostEqual(expected_value, predicted_value, 3)
""" Decay_effects tests """
def test_decay_effect(self):
    """Decay a +2 (and then -0.5) effect over 30 minutes from an uneven
    start time."""
    anchor_date = datetime(2016, 2, 1, 10, 13, 20)
    anchor_value = 100

    (dates, values) = decay_effect(anchor_date, anchor_value, 2, 30)
    self.assertEqual(
        [100, 110, 118, 124, 128, 130, 130],
        values
    )

    # Outputs should be spaced five minutes apart across the duration.
    start_date = dates[0]
    time_deltas = [
        time_interval_since(date, start_date) / 60 for date in dates
    ]
    self.assertEqual(
        [0, 5, 10, 15, 20, 25, 30],
        time_deltas
    )

    (dates, values) = decay_effect(anchor_date, anchor_value, -0.5, 30)
    self.assertEqual(
        [100, 97.5, 95.5, 94, 93, 92.5, 92.5],
        values
    )
def test_decay_effect_with_even_glucose(self):
    """Decay a +2 (and then -0.5) effect when the start time falls on an
    even five-minute boundary."""
    anchor_date = datetime(2016, 2, 1, 10, 15, 0)
    anchor_value = 100

    (dates, values) = decay_effect(anchor_date, anchor_value, 2, 30)
    self.assertEqual(
        [100, 110, 118, 124, 128, 130],
        values
    )

    # Outputs should be spaced five minutes apart across the duration.
    start_date = dates[0]
    time_deltas = [
        time_interval_since(date, start_date) / 60 for date in dates
    ]
    self.assertEqual(
        [0, 5, 10, 15, 20, 25],
        time_deltas
    )

    (dates, values) = decay_effect(anchor_date, anchor_value, -0.5, 30)
    self.assertEqual(
        [100, 97.5, 95.5, 94, 93, 92.5],
        values
    )
""" Subtracting effects tests """
def test_subtracting_carb_effect_from_ice_with_gaps(self):
    """subtracting() removes the carb effect from insulin counteraction
    effects when the input data contains gaps."""
    insulin_counteraction_effects = self.load_counteraction_input_fixture(
        "subtracting_carb_effect_counteration_input"
    )
    carb_effect_starts, carb_effect_values = self.load_glucose_value_fixture(
        "subtracting_carb_effect_carb_input"
    )
    expected_starts, expected_values = (
        self.load_glucose_effect_fixture_normal_time(
            "ice_minus_carb_effect_with_gaps_output"
        )
    )

    # Empty list is the carb-effect end dates; 5 is the interval in minutes.
    starts, values = subtracting(
        *insulin_counteraction_effects,
        carb_effect_starts, [], carb_effect_values,
        5
    )

    self.assertEqual(len(expected_starts), len(starts))
    for expected_value, value in zip(expected_values, values):
        self.assertAlmostEqual(expected_value, value, 2)
def test_subtracting_flat_carb_effect_from_ice(self):
    """subtracting() against a single, constant ("flat") carb-effect sample."""
    insulin_counteraction_effects = self.load_counteraction_input_fixture(
        "subtracting_flat_carb_from_ice_counteraction_input"
    )
    # The flat carb effect is constructed inline instead of loaded from a
    # fixture: one timestamp with one constant value.
    (carb_effect_starts,
     carb_effect_values
     ) = (
        [datetime.strptime(
            "2018-08-26 00:45:00+0000",
            "%Y-%m-%d %H:%M:%S%z"
        )],
        [385.8235294117647]
    )
    (expected_starts,
     expected_values
     ) = self.load_glucose_effect_fixture_normal_time(
        "ice_minus_flat_carb_effect_output"
    )
    # Empty list is the carb-effect end dates; 5 is the interval in minutes.
    (starts,
     values
     ) = subtracting(
        *insulin_counteraction_effects,
        carb_effect_starts, [], carb_effect_values,
        5
    )
    self.assertEqual(
        len(expected_starts),
        len(starts)
    )
    for i in range(0, len(expected_starts)):
        self.assertAlmostEqual(
            expected_values[i], values[i], 2
        )
""" Tests for combined_sums """
def test_combined_sums_with_gaps(self):
    """combined_sums over input containing gaps matches the recorded fixture."""
    input_starts, input_values = self.load_glucose_effect_fixture_normal_time(
        "ice_minus_carb_effect_with_gaps_output"
    )
    (expected_starts,
     expected_ends,
     expected_values) = self.load_counteraction_input_fixture(
        "combined_sums_with_gaps_output"
    )

    # Empty list is the input end dates; 30 is the summation window (minutes).
    starts, ends, values = combined_sums(
        input_starts, [], input_values,
        30
    )

    self.assertEqual(len(expected_starts), len(starts))
    for i, (start, end, value) in enumerate(zip(starts, ends, values)):
        self.assertEqual(expected_starts[i], start)
        self.assertEqual(expected_ends[i], end)
        self.assertAlmostEqual(expected_values[i], value, 2)
# Run the full test suite when this file is executed as a script.
if __name__ == '__main__':
    unittest.main()
| 28.384486 | 89 | 0.549846 | 1,699 | 16,832 | 5.104768 | 0.104179 | 0.035513 | 0.03632 | 0.040355 | 0.805258 | 0.781506 | 0.76963 | 0.742188 | 0.725816 | 0.725816 | 0 | 0.026545 | 0.364365 | 16,832 | 592 | 90 | 28.432432 | 0.784092 | 0.063154 | 0 | 0.59436 | 0 | 0 | 0.102976 | 0.073342 | 0 | 0 | 0 | 0 | 0.08243 | 1 | 0.041215 | false | 0 | 0.010846 | 0.004338 | 0.071584 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
597bcfebfb92ea1a1bce30bc28d9017a80a81ab0 | 156 | py | Python | spanet/__init__.py | Alexanders101/SPANet | 20731bb271b23f0746243e79203ff6b77556c852 | [
"BSD-3-Clause"
] | 13 | 2021-05-20T15:13:01.000Z | 2021-11-24T22:12:45.000Z | spanet/__init__.py | Alexanders101/SPANet | 20731bb271b23f0746243e79203ff6b77556c852 | [
"BSD-3-Clause"
] | null | null | null | spanet/__init__.py | Alexanders101/SPANet | 20731bb271b23f0746243e79203ff6b77556c852 | [
"BSD-3-Clause"
] | 7 | 2021-06-28T12:18:17.000Z | 2022-01-27T20:05:06.000Z | from spanet.network.jet_reconstruction import JetReconstructionModel
from spanet.dataset import JetReconstructionDataset
from spanet.options import Options
| 39 | 68 | 0.897436 | 17 | 156 | 8.176471 | 0.588235 | 0.215827 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.076923 | 156 | 3 | 69 | 52 | 0.965278 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
598efe9b3f48ab44856256d246a5ad1a26e641b6 | 96 | py | Python | wkExercises/wk1/triangle.py | compagnb/SU20-IntermediatePython | 235d67320b753f9270f9a67d862f399943bbbbae | [
"RSA-MD",
"BSD-Source-Code"
] | null | null | null | wkExercises/wk1/triangle.py | compagnb/SU20-IntermediatePython | 235d67320b753f9270f9a67d862f399943bbbbae | [
"RSA-MD",
"BSD-Source-Code"
] | null | null | null | wkExercises/wk1/triangle.py | compagnb/SU20-IntermediatePython | 235d67320b753f9270f9a67d862f399943bbbbae | [
"RSA-MD",
"BSD-Source-Code"
] | null | null | null | import turtle
t = turtle.Pen()
t.forward(100)
t.left(120)
t.forward(100)
t.left(120)
t.forward(1 | 13.714286 | 16 | 0.71875 | 20 | 96 | 3.45 | 0.45 | 0.347826 | 0.318841 | 0.347826 | 0.666667 | 0.666667 | 0.666667 | 0.666667 | 0 | 0 | 0 | 0.149425 | 0.09375 | 96 | 7 | 17 | 13.714286 | 0.643678 | 0 | 0 | 0.571429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.142857 | null | null | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
59dd93990ea1cc1d6699136831cd11ccea44de82 | 26 | py | Python | build/__init__.py | FNNDSC/tslide | 9ac7eda07ab3605a7ed98b4b29789fa1c7ccde89 | [
"MIT"
] | 1 | 2020-09-23T15:16:26.000Z | 2020-09-23T15:16:26.000Z | build/__init__.py | FNNDSC/tslide | 9ac7eda07ab3605a7ed98b4b29789fa1c7ccde89 | [
"MIT"
] | 2 | 2020-04-28T17:24:52.000Z | 2020-04-28T17:25:11.000Z | build/__init__.py | FNNDSC/tslide | 9ac7eda07ab3605a7ed98b4b29789fa1c7ccde89 | [
"MIT"
] | 1 | 2020-07-04T21:30:17.000Z | 2020-07-04T21:30:17.000Z | from .build import Build
| 13 | 25 | 0.769231 | 4 | 26 | 5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.192308 | 26 | 1 | 26 | 26 | 0.952381 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
59eb110e9a919e1c87e492c5f42ce5597df1c7db | 20 | py | Python | gapml/__init__.py | virtualdvid/CV | f01897bf2ff2f915413b803052f9e42894f413fa | [
"Apache-2.0"
] | 1 | 2019-06-06T21:06:30.000Z | 2019-06-06T21:06:30.000Z | gapml/__init__.py | virtualdvid/CV | f01897bf2ff2f915413b803052f9e42894f413fa | [
"Apache-2.0"
] | null | null | null | gapml/__init__.py | virtualdvid/CV | f01897bf2ff2f915413b803052f9e42894f413fa | [
"Apache-2.0"
] | null | null | null | from . import vision | 20 | 20 | 0.8 | 3 | 20 | 5.333333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.15 | 20 | 1 | 20 | 20 | 0.941176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
abba0b83bf0b26910800deafa5d4f9681869b570 | 187 | py | Python | Alignment/OfflineValidation/python/TkAlAllInOneTool/TkAlExceptions.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | Alignment/OfflineValidation/python/TkAlAllInOneTool/TkAlExceptions.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | Alignment/OfflineValidation/python/TkAlAllInOneTool/TkAlExceptions.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | class AllInOneError(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
self._msg = msg
return
def __str__(self):
return self._msg
| 20.777778 | 37 | 0.609626 | 21 | 187 | 4.761905 | 0.428571 | 0.28 | 0.22 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.299465 | 187 | 8 | 38 | 23.375 | 0.763359 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.285714 | false | 0 | 0 | 0.142857 | 0.714286 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 6 |
abd3ece68034dd8340dfd27598c82b41b87c9ab2 | 40 | py | Python | rslgym/algorithm/utils/__init__.py | mcx/RSLGym | 9211c8c23042c7a56802751f8d7cfd4e7248d7a2 | [
"MIT"
] | 13 | 2021-04-16T07:14:48.000Z | 2022-03-14T04:20:03.000Z | rslgym/algorithm/utils/__init__.py | mcx/RSLGym | 9211c8c23042c7a56802751f8d7cfd4e7248d7a2 | [
"MIT"
] | null | null | null | rslgym/algorithm/utils/__init__.py | mcx/RSLGym | 9211c8c23042c7a56802751f8d7cfd4e7248d7a2 | [
"MIT"
] | 2 | 2021-11-02T06:22:27.000Z | 2021-12-21T06:16:17.000Z | from .helpers import ConfigurationSaver
| 20 | 39 | 0.875 | 4 | 40 | 8.75 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.1 | 40 | 1 | 40 | 40 | 0.972222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
abf32c3ca989b427ec513a381435c51b1a443a8d | 157 | py | Python | main/admin.py | MexsonFernandes/CustomYoloV3 | 0acde7613d3b202859b8bab21b9c3ee5432a61bf | [
"MIT"
] | null | null | null | main/admin.py | MexsonFernandes/CustomYoloV3 | 0acde7613d3b202859b8bab21b9c3ee5432a61bf | [
"MIT"
] | null | null | null | main/admin.py | MexsonFernandes/CustomYoloV3 | 0acde7613d3b202859b8bab21b9c3ee5432a61bf | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import ObjectClassModel
@admin.register(ObjectClassModel)
class ObjectClassAdmin(admin.ModelAdmin):
    """Stock admin for ObjectClassModel.

    No customization: registering with the default ModelAdmin is enough to
    expose the model's list/add/change views in the Django admin site.
    """
    pass
| 19.625 | 41 | 0.821656 | 17 | 157 | 7.588235 | 0.705882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.11465 | 157 | 7 | 42 | 22.428571 | 0.928058 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.2 | 0.4 | 0 | 0.6 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
2800a0adb55b8b769c631c9c4839860e05d022fe | 138 | py | Python | src/__init__.py | Felixs/cards | af1d54ccc97fd91fe6fc0ba38365a6ee59b2dfd7 | [
"MIT"
] | null | null | null | src/__init__.py | Felixs/cards | af1d54ccc97fd91fe6fc0ba38365a6ee59b2dfd7 | [
"MIT"
] | null | null | null | src/__init__.py | Felixs/cards | af1d54ccc97fd91fe6fc0ba38365a6ee59b2dfd7 | [
"MIT"
] | null | null | null | from .card_deck import CardDeck
from .card_player import CardPlayer
from .discard_pile import DiscardPile
from .card_game import CardGame
| 27.6 | 37 | 0.855072 | 20 | 138 | 5.7 | 0.6 | 0.210526 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.115942 | 138 | 4 | 38 | 34.5 | 0.934426 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
2801253f52c9f772c11b188e4be14a0dbc0ab4af | 4,778 | py | Python | tests/test_volumes.py | geraxe/dolib | 2728db044a65b0bba15e7bfbc633d24a21b955d0 | [
"MIT"
] | 5 | 2020-05-30T05:20:06.000Z | 2021-05-21T21:42:34.000Z | tests/test_volumes.py | geraxe/dolib | 2728db044a65b0bba15e7bfbc633d24a21b955d0 | [
"MIT"
] | 17 | 2020-05-30T08:17:10.000Z | 2021-06-20T13:26:37.000Z | tests/test_volumes.py | geraxe/dolib | 2728db044a65b0bba15e7bfbc633d24a21b955d0 | [
"MIT"
] | 3 | 2020-05-30T05:28:08.000Z | 2021-04-10T17:07:02.000Z | import pytest
from dolib.client import AsyncClient, Client
from dolib.models import Action, Snapshot, Volume
@pytest.mark.vcr
@pytest.mark.block_network()
def test_crud_volumes(client: Client) -> None:
    """End-to-end CRUD scenario for DigitalOcean volumes (sync client).

    Runs against recorded VCR cassettes with live network access blocked,
    so the sequence of API calls below must stay in this exact order to
    match the cassette.
    """
    volume = Volume(
        name="dolib-test-volume",
        region="fra1",
        size_gigabytes=1,
    )
    # create volume
    created_volume = client.volumes.create(volume)
    assert isinstance(created_volume, Volume)
    assert created_volume.id is not None
    # list volumes
    volumes = client.volumes.all()
    assert len(volumes) > 0
    # read volume
    read_volume = client.volumes.get(str(created_volume.id))
    assert read_volume.id == created_volume.id
    assert isinstance(read_volume, Volume)
    # resize volume
    resize_action = client.volumes.resize(str(created_volume.id), size_gigabytes=2)
    assert isinstance(resize_action, Action)
    assert resize_action.status == "done"
    # Pick an existing droplet to attach/detach against.
    droplet = client.droplets.all()[-1]
    # attach
    attach_action = client.volumes.attach(read_volume, droplet_id=droplet.id)
    assert isinstance(attach_action, Action)
    assert attach_action.status == "in-progress"
    # detach
    detach_action = client.volumes.detach(read_volume, droplet_id=droplet.id)
    assert isinstance(detach_action, Action)
    assert detach_action.status == "in-progress"
    # attach by name
    attach_action = client.volumes.attach(volume, droplet_id=droplet.id)
    assert isinstance(attach_action, Action)
    assert attach_action.status == "in-progress"
    # detach by name
    detach_action = client.volumes.detach(volume, droplet_id=droplet.id)
    assert isinstance(detach_action, Action)
    assert detach_action.status == "in-progress"
    # try attach broken region
    # NOTE(review): no assertion here -- presumably exercises the code path
    # where region is missing and the attach falls back to another lookup;
    # confirm the intended behavior and assert on it.
    volume.region = None
    client.volumes.attach(volume, droplet_id=droplet.id)
    # create snapshot
    snapshot = client.volumes.create_snapshot(
        str(created_volume.id), Snapshot(name="test-volume-snapshot", tags=["test"])
    )
    assert snapshot.id is not None
    # list snapshots
    snapshots = client.volumes.snapshots(str(created_volume.id))
    assert len(snapshots) > 0
    # list actions
    actions = client.volumes.actions(str(created_volume.id))
    assert len(actions) > 0
    # delete volume
    client.volumes.delete(volume=created_volume)
@pytest.mark.vcr
@pytest.mark.block_network()
@pytest.mark.asyncio
async def test_async_crud_volumes(async_client: AsyncClient) -> None:
    """Async mirror of test_crud_volumes using AsyncClient.

    Replays the same recorded VCR cassette flow with the network blocked;
    keep the request order unchanged so cassette matching still works.
    """
    volume = Volume(
        name="dolib-test-volume",
        region="fra1",
        size_gigabytes=1,
    )
    # create volume
    created_volume = await async_client.volumes.create(volume)
    assert isinstance(created_volume, Volume)
    assert created_volume.id is not None
    # list volumes
    volumes = await async_client.volumes.all()
    assert len(volumes) > 0
    # read volume
    read_volume = await async_client.volumes.get(str(created_volume.id))
    assert read_volume.id == created_volume.id
    assert isinstance(read_volume, Volume)
    # resize volume
    resize_action = await async_client.volumes.resize(
        str(created_volume.id), size_gigabytes=2
    )
    assert isinstance(resize_action, Action)
    assert resize_action.status == "done"
    # Pick an existing droplet to attach/detach against.
    droplet = (await async_client.droplets.all())[-1]
    # attach
    attach_action = await async_client.volumes.attach(
        read_volume, droplet_id=droplet.id
    )
    assert isinstance(attach_action, Action)
    assert attach_action.status == "in-progress"
    # detach
    detach_action = await async_client.volumes.detach(
        read_volume, droplet_id=droplet.id
    )
    assert isinstance(detach_action, Action)
    assert detach_action.status == "in-progress"
    # attach by name
    attach_action = await async_client.volumes.attach(volume, droplet_id=droplet.id)
    assert isinstance(attach_action, Action)
    assert attach_action.status == "in-progress"
    # detach by name
    detach_action = await async_client.volumes.detach(volume, droplet_id=droplet.id)
    assert isinstance(detach_action, Action)
    assert detach_action.status == "in-progress"
    # try attach broken region
    # NOTE(review): no assertion -- same unchecked code path as in the sync
    # test; confirm intent and assert on it.
    volume.region = None
    await async_client.volumes.attach(volume, droplet_id=droplet.id)
    # create snapshot
    snapshot = await async_client.volumes.create_snapshot(
        str(created_volume.id), Snapshot(name="test-volume-snapshot", tags=["test"])
    )
    assert snapshot.id is not None
    # list snapshots
    snapshots = await async_client.volumes.snapshots(str(created_volume.id))
    assert len(snapshots) > 0
    # list actions
    actions = await async_client.volumes.actions(str(created_volume.id))
    assert len(actions) > 0
    # delete volume
    await async_client.volumes.delete(volume=created_volume)
| 30.628205 | 84 | 0.713269 | 601 | 4,778 | 5.507488 | 0.09817 | 0.102115 | 0.063444 | 0.090332 | 0.932931 | 0.917825 | 0.896677 | 0.865861 | 0.841088 | 0.841088 | 0 | 0.003616 | 0.189619 | 4,778 | 155 | 85 | 30.825806 | 0.85124 | 0.075136 | 0 | 0.5625 | 0 | 0 | 0.04235 | 0 | 0 | 0 | 0 | 0 | 0.375 | 1 | 0.010417 | false | 0 | 0.03125 | 0 | 0.041667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
f9ff8cfdba99b8a5b7190e80570f788b7ecf8f50 | 30 | py | Python | symbolicManager.py | yonixw/PythonSafe | 5211acbb8055148c526c5d8ed16f094b0a2c84fb | [
"MIT"
] | 1 | 2019-03-17T16:59:06.000Z | 2019-03-17T16:59:06.000Z | symbolicManager.py | yonixw/PythonSafe | 5211acbb8055148c526c5d8ed16f094b0a2c84fb | [
"MIT"
] | null | null | null | symbolicManager.py | yonixw/PythonSafe | 5211acbb8055148c526c5d8ed16f094b0a2c84fb | [
"MIT"
] | null | null | null | # Todo add unlock\lock manager | 30 | 30 | 0.8 | 5 | 30 | 4.8 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.133333 | 30 | 1 | 30 | 30 | 0.923077 | 0.933333 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 1 | null | 1 | null | true | 0 | 0 | null | null | null | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
e618162ad6760b838c14f6ba78a734d7d251b2d5 | 248 | py | Python | recipes/Standard_Python/close_matches.py | VanAurum/python-recipes | 662591d21d228b4a80d46c0ba2c16c8707eddb86 | [
"MIT"
] | 1 | 2019-05-31T11:19:17.000Z | 2019-05-31T11:19:17.000Z | recipes/Standard_Python/close_matches.py | VanAurum/python-recipes | 662591d21d228b4a80d46c0ba2c16c8707eddb86 | [
"MIT"
] | null | null | null | recipes/Standard_Python/close_matches.py | VanAurum/python-recipes | 662591d21d228b4a80d46c0ba2c16c8707eddb86 | [
"MIT"
] | 1 | 2020-01-21T21:38:58.000Z | 2020-01-21T21:38:58.000Z | from difflib import get_close_matches
def close_matches(word, possibilities, n=3, cutoff=0.6):
'''
Returns a list of close matches to word from the list possibilities.
'''
return get_close_matches(word, possibilities, n, cutoff) | 35.428571 | 74 | 0.725806 | 36 | 248 | 4.861111 | 0.583333 | 0.274286 | 0.171429 | 0.331429 | 0.342857 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014925 | 0.189516 | 248 | 7 | 75 | 35.428571 | 0.855721 | 0.274194 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.333333 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
0549ced21fc705d904fbf2554ed3c53e24f3fe2a | 1,645 | py | Python | apps/models.py | LMDenys91/caresis | c657651fda26d080515ea9252d3bb26110913e91 | [
"MIT"
] | 2 | 2015-06-27T15:19:41.000Z | 2015-11-04T15:44:09.000Z | apps/models.py | LMDenys91/caresis | c657651fda26d080515ea9252d3bb26110913e91 | [
"MIT"
] | null | null | null | apps/models.py | LMDenys91/caresis | c657651fda26d080515ea9252d3bb26110913e91 | [
"MIT"
] | null | null | null | from django.db import models
from django import forms
class Address(models.Model):
    """Postal address with a US-style city/state/zip breakdown.

    Note: other models in this file store addresses as plain CharFields
    rather than referencing this model.
    """
    id = models.AutoField(primary_key=True)
    address1 = models.CharField(max_length=200)
    address2 = models.CharField(max_length=200)
    city = models.CharField(max_length=200)
    state = models.CharField(max_length=200)
    zip_code = models.CharField(max_length=200)
class Patient(models.Model):
    """Patient profile; address stored inline as text (not a FK to Address)."""
    id = models.AutoField(primary_key=True)
    firstName = models.CharField(max_length=200)
    lastName = models.CharField(max_length=200)
    address = models.CharField(max_length=200)
    # NOTE(review): forms.RegexField is a *form* field, not a model field --
    # Django will not create a database column for `phone`, and RegexField's
    # keyword is `error_messages` (a dict), not `error_message`. Presumably
    # this should be a models.CharField with a RegexValidator; confirm before
    # relying on `phone` being persisted.
    phone = forms.RegexField(regex=r'^\+?1?\d{9,15}$',
                             error_message = ("Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed."))
    uber_token = models.CharField(max_length=200)

    def __str__(self):
        # Human-readable label used in the admin and shell.
        return self.firstName
class Advocate(models.Model):
    """Advocate (caregiver) profile; mirrors the Patient fields."""
    id = models.AutoField(primary_key=True)
    firstName = models.CharField(max_length=200)
    lastName = models.CharField(max_length=200)
    # NOTE(review): same issue as Patient.phone -- forms.RegexField is a form
    # field used in a model, and `error_message` is not a valid RegexField
    # keyword (`error_messages` takes a dict). Confirm and replace with a
    # models.CharField plus RegexValidator.
    phone = forms.RegexField(regex=r'^\+?1?\d{9,15}$',
                             error_message = ("Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed."))
    uber_token = models.CharField(max_length=200)
    address = models.CharField(max_length=200)

    def __str__(self):
        # Human-readable label used in the admin and shell.
        return self.firstName
class Appointment(models.Model):
    """A scheduled appointment with a time, location and free-text details."""
    id = models.AutoField(primary_key=True)
    title = models.CharField(max_length=200)
    description = models.CharField(max_length=200)
    # verbose_name only; whether values are stored timezone-aware depends on
    # the project's USE_TZ setting -- not visible from here.
    date = models.DateTimeField('Time of appointment')
    address = models.CharField(max_length=200)

    def __str__(self):
        # Human-readable label used in the admin and shell.
        return self.title
| 32.9 | 135 | 0.724012 | 222 | 1,645 | 5.198198 | 0.283784 | 0.207972 | 0.249567 | 0.332756 | 0.843154 | 0.679376 | 0.679376 | 0.679376 | 0.606586 | 0.606586 | 0 | 0.057803 | 0.158663 | 1,645 | 49 | 136 | 33.571429 | 0.776012 | 0 | 0 | 0.594595 | 0 | 0 | 0.129799 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.054054 | null | null | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
05660fd53dbb11d14f57fe4f69e283da813f431e | 157 | py | Python | config.py | saponew/macro_monday | 9b4899dd6b8d939711005aa35390947d967fd852 | [
"MIT"
] | null | null | null | config.py | saponew/macro_monday | 9b4899dd6b8d939711005aa35390947d967fd852 | [
"MIT"
] | null | null | null | config.py | saponew/macro_monday | 9b4899dd6b8d939711005aa35390947d967fd852 | [
"MIT"
] | null | null | null | API_KEY = 'KnAteZwHc1GpT2dULt8clY302k6WnFUCu4qGxhm5RN3DNBn2aTTJMa15KTR8xa8P'
API_SECRET = 'jRBawoOrFtmAyGSlEYextRYEzj5rkOhMjSV2TDxeFoduX0wLGj3FceYHatbJGxkh' | 52.333333 | 79 | 0.923567 | 6 | 157 | 23.833333 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.125828 | 0.038217 | 157 | 3 | 79 | 52.333333 | 0.821192 | 0 | 0 | 0 | 0 | 0 | 0.810127 | 0.810127 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
059599e27aca593b984433e8009a41a6d3bf75fe | 205 | py | Python | brainrender/atlas_specific/__init__.py | crsanderford/brainrender | 6d2f1c2150ef381d08daaf1cff1ae287de0cc5e8 | [
"BSD-3-Clause"
] | 226 | 2020-10-19T13:41:36.000Z | 2022-03-29T11:22:43.000Z | brainrender/atlas_specific/__init__.py | RobertoDF/BrainRender | a92dc3b08f743721521ae233f15b1814207bf08c | [
"MIT"
] | 90 | 2020-10-14T09:52:48.000Z | 2022-03-25T15:51:09.000Z | brainrender/atlas_specific/__init__.py | RobertoDF/BrainRender | a92dc3b08f743721521ae233f15b1814207bf08c | [
"MIT"
] | 36 | 2020-10-14T13:04:14.000Z | 2022-03-25T15:31:29.000Z | from brainrender.atlas_specific.allen_brain_atlas.gene_expression import (
GeneExpressionAPI,
)
from brainrender.atlas_specific.allen_brain_atlas.streamlines import (
get_streamlines_for_region,
)
| 29.285714 | 74 | 0.843902 | 24 | 205 | 6.791667 | 0.583333 | 0.184049 | 0.245399 | 0.343558 | 0.527607 | 0.527607 | 0.527607 | 0 | 0 | 0 | 0 | 0 | 0.097561 | 205 | 6 | 75 | 34.166667 | 0.881081 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.333333 | 0 | 0.333333 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
55608a6538cae6c43f0b82702910372f0d7fd4ed | 167 | py | Python | frappe_telegram/frappe_telegram/doctype/telegram_message/test_telegram_message.py | rafatali686/frappe_telegram | 724ead04a531eddfe935acf35282684fef41cb67 | [
"MIT"
] | 16 | 2021-07-25T09:30:28.000Z | 2022-03-24T04:56:57.000Z | frappe_telegram/frappe_telegram/doctype/telegram_message/test_telegram_message.py | rafatali686/frappe_telegram | 724ead04a531eddfe935acf35282684fef41cb67 | [
"MIT"
] | 5 | 2021-08-24T18:07:13.000Z | 2022-02-03T04:26:08.000Z | frappe_telegram/frappe_telegram/doctype/telegram_message/test_telegram_message.py | rafatali686/frappe_telegram | 724ead04a531eddfe935acf35282684fef41cb67 | [
"MIT"
] | 10 | 2021-07-27T07:26:11.000Z | 2022-03-24T11:16:38.000Z | # Copyright (c) 2021, Leam Technology Systems and Contributors
# See license.txt
# import frappe
import unittest
class TestTelegramMessage(unittest.TestCase):
    """Placeholder test case scaffolded for the Telegram Message doctype;
    no tests implemented yet."""
    pass
| 18.555556 | 62 | 0.796407 | 20 | 167 | 6.65 | 0.9 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027778 | 0.137725 | 167 | 8 | 63 | 20.875 | 0.895833 | 0.538922 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
556764b23fb095bbaac7763b2bbb82e89554eed7 | 33,353 | py | Python | pyutils/revdiff-check.py | obs145628/ml-notebooks | 08a64962e106ec569039ab204a7ae4c900783b6b | [
"MIT"
] | 1 | 2020-10-29T11:26:00.000Z | 2020-10-29T11:26:00.000Z | pyutils/revdiff-check.py | obs145628/ml-notebooks | 08a64962e106ec569039ab204a7ae4c900783b6b | [
"MIT"
] | 5 | 2021-03-18T21:33:45.000Z | 2022-03-11T23:34:50.000Z | pyutils/revdiff-check.py | obs145628/ml-notebooks | 08a64962e106ec569039ab204a7ae4c900783b6b | [
"MIT"
] | 1 | 2019-12-23T21:50:02.000Z | 2019-12-23T21:50:02.000Z | import metrics
import numpy as np
import torch
import revdiff as rd
import unittest
import utils
def get_grad(out, x):
    # Build the revdiff graph node representing d(out)/d(x).
    return rd.build_node_grad(out, x)
def val(x):
    # Wrap a raw numpy array / scalar as a revdiff constant (leaf) node.
    return rd.build_val(x)
def get_arr_len(x):
    """Return the element count of *x*.

    numpy arrays and numpy scalars report their ``.size``; any other value
    (plain Python scalar, list, ...) counts as a single element.
    """
    is_numpy_value = isinstance(x, (np.ndarray, np.generic))
    return x.size if is_numpy_value else 1
def mse(y_pred, y_true):
    # Squared-error loss built from revdiff ops: sum of squared residuals
    # over axis 0, scaled by 1 / len(y_pred.shape).
    # NOTE(review): the scale uses len(y_pred.shape) -- the *rank* of the
    # node (1 for a vector) -- not y_pred.shape[0] (the sample count).
    # Confirm this is the intended normalization; downstream tests compare
    # it against torch.nn.MSELoss, which averages over elements.
    diff = (y_true - y_pred)
    s = rd.op_sum(diff * diff, axis=0)
    return (1 / len(y_pred.shape)) * s
class RDTestCase(unittest.TestCase):
    """Base test case providing an approximate-equality check for tensors."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Default per-element tolerance used by ck_fequals.
        self.ck_feps = 1e-6

    def ck_fequals(self, a, b, feps=None):
        """Assert that the mean per-element distance between a and b is
        strictly below feps (defaults to self.ck_feps)."""
        tolerance = self.ck_feps if feps is None else feps
        per_elem_dist = metrics.tdist(a, b) / get_arr_len(a)
        self.assertLess(per_elem_dist, tolerance)
class BasicOps(RDTestCase):
    """Checks forward evaluation and gradients of elementary revdiff ops.

    Convention: test_fwd_* compares .eval() against the numpy result;
    test_bwd_* checks the gradient node built by get_grad(), either against
    the analytic value or against PyTorch autograd on the same expression.
    Each backward test also checks that an unrelated leaf ("tlone") gets a
    zero gradient.
    """

    # (sic: "fdw" -- presumably a typo for "fwd"; kept to preserve the
    # test name.)
    def test_fdw_val(self):
        x = rd.build_val(2)
        y = rd.build_val(3)
        self.ck_fequals(x.eval(), 2)
        self.ck_fequals(y.eval(), 3)

    def test_bwd_val(self):
        x = rd.build_val(2)
        y = rd.build_val(3)
        # d(x)/d(x) = 1; gradient w.r.t. an independent leaf is 0.
        self.ck_fequals(get_grad(x, x).eval(), 1)
        self.ck_fequals(get_grad(x, y).eval(), 0)
        self.ck_fequals(get_grad(y, y).eval(), 1)
        self.ck_fequals(get_grad(y, x).eval(), 0)

    def test_fwd_vadd(self):
        # Scalar case first, then an elementwise 3-D tensor case.
        x = np.array(2)
        y = np.array(3)
        self.ck_fequals((val(x) + val(y)).eval(), x + y)
        x = np.random.randn(3, 12, 7)
        y = np.random.randn(3, 12, 7)
        self.ck_fequals((val(x) + val(y)).eval(), x + y)

    def test_bwd_vadd(self):
        x = np.array(2)
        y = np.array(3)
        tx = val(x)
        ty = val(y)
        tz = tx + ty
        tlone = val(10)
        # d(x+y)/dx = d(x+y)/dy = 1.
        self.ck_fequals(get_grad(tz, tx).eval(), 1)
        self.ck_fequals(get_grad(tz, ty).eval(), 1)
        self.ck_fequals(get_grad(tz, tlone).eval(), 0)

    def test_fwd_vsub(self):
        x = np.array(2)
        y = np.array(3)
        self.ck_fequals((val(x) - val(y)).eval(), x - y)
        x = np.random.randn(3, 12, 7)
        y = np.random.randn(3, 12, 7)
        self.ck_fequals((val(x) - val(y)).eval(), x - y)

    def test_bwd_vsub(self):
        x = np.array(2)
        y = np.array(3)
        tx = val(x)
        ty = val(y)
        tz = tx - ty
        tlone = val(10)
        # d(x-y)/dx = 1, d(x-y)/dy = -1.
        self.ck_fequals(get_grad(tz, tx).eval(), 1)
        self.ck_fequals(get_grad(tz, ty).eval(), -1)
        self.ck_fequals(get_grad(tz, tlone).eval(), 0)

    def test_fwd_vmul(self):
        x = np.array(2)
        y = np.array(3)
        self.ck_fequals((val(x) * val(y)).eval(), x * y)
        x = np.random.randn(3, 12, 7)
        y = np.random.randn(3, 12, 7)
        self.ck_fequals((val(x) * val(y)).eval(), x * y)

    def test_bwd_vmul(self):
        x = np.array(2)
        y = np.array(3)
        tx = val(x)
        ty = val(y)
        tz = tx * ty
        tlone = val(10)
        # d(x*y)/dx = y, d(x*y)/dy = x.
        self.ck_fequals(get_grad(tz, tx).eval(), y)
        self.ck_fequals(get_grad(tz, ty).eval(), x)
        self.ck_fequals(get_grad(tz, tlone).eval(), 0)

    def test_fwd_vdiv(self):
        x = np.array(2)
        y = np.array(3)
        self.ck_fequals((val(x) / val(y)).eval(), x / y)
        x = np.random.randn(3, 12, 7)
        y = np.random.randn(3, 12, 7)
        # Looser tolerance: division amplifies float error for small divisors.
        self.ck_fequals((val(x) / val(y)).eval(), x / y, feps=1e-4)

    def test_bwd_vdiv(self):
        x = np.array(2)
        y = np.array(3)
        tx = val(x)
        ty = val(y)
        tz = tx / ty
        tlone = val(10)
        # d(x/y)/dx = 1/y, d(x/y)/dy = -x/y^2.
        self.ck_fequals(get_grad(tz, tx).eval(), 1 / y)
        self.ck_fequals(get_grad(tz, ty).eval(), - x/(y**2))
        self.ck_fequals(get_grad(tz, tlone).eval(), 0)

    def test_fwd_dotvv(self):
        # Vector-vector dot product.
        x = np.random.randn(7)
        y = np.random.randn(7)
        tx = val(x)
        ty = val(y)
        tz = rd.build_dot_vv(tx, ty)
        self.ck_fequals(tz.eval(), x @ y)

    def test_bwd_dotvv(self):
        x = np.random.randn(7)
        y = np.random.randn(7)
        tx = val(x)
        ty = val(y)
        tlone = val(10)
        tz = rd.build_dot_vv(tx, ty)
        # d(x.y)/dx = y, d(x.y)/dy = x.
        self.ck_fequals(get_grad(tz, tx).eval(), y)
        self.ck_fequals(get_grad(tz, ty).eval(), x)
        self.ck_fequals(get_grad(tz, tlone).eval(), 0)

    def test_bwd_vsadd(self):
        # scalar + vector broadcast; gradients compared against torch
        # autograd through a dot-product reduction e = (x+y).(x+y).
        x = np.random.randn()
        y = np.random.randn(12).astype(np.float32)
        tx = val(x)
        ty = val(y)
        tz = rd.build_vsadd(tx, ty)
        te = rd.build_dot_vv(tz, tz)
        dx = torch.tensor(x, requires_grad=True)
        dy = torch.tensor(y, requires_grad=True)
        dz = dx + dy
        de = torch.dot(dz, dz)
        de.backward()
        self.ck_fequals(get_grad(te, tx).eval(), dx.grad.data.numpy(), feps=1e-5)
        self.ck_fequals(get_grad(te, ty).eval(), dy.grad.data.numpy())

    def test_bwd_vsmul(self):
        # scalar * vector broadcast, same torch-comparison scheme.
        x = np.random.randn()
        y = np.random.randn(12).astype(np.float32)
        tx = val(x)
        ty = val(y)
        tz = rd.build_vsmul(tx, ty)
        te = rd.build_dot_vv(tz, tz)
        dx = torch.tensor(x, requires_grad=True)
        dy = torch.tensor(y, requires_grad=True)
        dz = dx * dy
        de = torch.dot(dz, dz)
        de.backward()
        self.ck_fequals(get_grad(te, tx).eval(), dx.grad.data.numpy(), feps=1e-5)
        self.ck_fequals(get_grad(te, ty).eval(), dy.grad.data.numpy())

    def test_bwd_vsdiv(self):
        # scalar / vector broadcast; divisors shifted by +0.1 to stay away
        # from zero and keep the gradients numerically stable.
        x = np.random.randn()
        y = np.random.rand(12).astype(np.float32) + 0.1
        tx = val(x)
        ty = val(y)
        tz = rd.build_vsdiv(tx, ty)
        te = rd.build_dot_vv(tz, tz)
        dx = torch.tensor(x, requires_grad=True)
        dy = torch.tensor(y, requires_grad=True)
        dz = dx / dy
        de = torch.dot(dz, dz)
        de.backward()
        self.ck_fequals(get_grad(te, tx).eval(), dx.grad.data.numpy(), feps=1e-4)
        self.ck_fequals(get_grad(te, ty).eval(), dy.grad.data.numpy())

    def test_bwd_vlog(self):
        # Elementwise log; inputs in (0.1, 1.1) so log is defined.
        x = np.random.rand(12).astype(np.float32) + 0.1
        tx = val(x)
        tz = rd.build_vlog(tx)
        te = rd.build_dot_vv(tz, tz)
        dx = torch.tensor(x, requires_grad=True)
        dz = torch.log(dx)
        de = torch.dot(dz, dz)
        de.backward()
        self.ck_fequals(get_grad(te, tx).eval(), dx.grad.data.numpy())
class LinReg(RDTestCase):
def test_mse(self):
    """mse() value and gradients match torch.nn.MSELoss on random vectors."""
    y_pred = np.random.randn(46)
    y_true = np.random.randn(46)
    # revdiff side.
    dy_pred = val(y_pred)
    dy_true = val(y_true)
    dloss = mse(dy_pred, dy_true)
    # torch reference; backward() must run before reading .grad below.
    ty_pred = torch.tensor(y_pred, requires_grad=True)
    ty_true = torch.tensor(y_true, requires_grad=True)
    criterion = torch.nn.MSELoss()
    tloss = criterion(ty_pred, ty_true)
    tloss.backward()
    self.ck_fequals(dloss.eval(), tloss.data.numpy(), feps=1e-4)
    self.ck_fequals(get_grad(dloss, dy_pred).eval(), ty_pred.grad.data.numpy())
    self.ck_fequals(get_grad(dloss, dy_true).eval(), ty_true.grad.data.numpy())
def test_sgd_mse(self):
    """Linear regression loss (X @ w vs y) and all gradients match torch."""
    X = np.random.randn(46, 7)
    w = np.random.randn(7)
    y_true = np.random.randn(46)
    # revdiff graph: predictions are a matrix-vector product.
    dX = val(X)
    dw = val(w)
    dy_true = val(y_true)
    dy_pred = rd.build_dot_mv(dX, dw)
    dloss = mse(dy_pred, dy_true)
    # torch reference graph.
    tX = torch.tensor(X, requires_grad=True)
    tw = torch.tensor(w, requires_grad=True)
    ty_true = torch.tensor(y_true, requires_grad=True)
    ty_pred = torch.matmul(tX, tw)
    # utils.save_grad: presumably registers a hook so the gradient of this
    # intermediate tensor can be read back via utils.get_grad -- confirm.
    utils.save_grad(ty_pred)
    criterion = torch.nn.MSELoss()
    tloss = criterion(ty_pred, ty_true)
    tloss.backward()
    self.ck_fequals(dloss.eval(), tloss.data.numpy(), feps=1e-3)
    self.ck_fequals(get_grad(dloss, dy_pred).eval(),
                    utils.get_grad(ty_pred).data.numpy())
    self.ck_fequals(get_grad(dloss, dy_true).eval(), ty_true.grad.data.numpy())
    self.ck_fequals(get_grad(dloss, dw).eval(), tw.grad.data.numpy(), feps=1e-4)
    self.ck_fequals(get_grad(dloss, dX).eval(), tX.grad.data.numpy())
def test_sgd_logreg_2(self):
    """Binary logistic regression: revdiff BCE-with-logits matches torch."""
    X = np.random.randn(46, 7).astype(np.float32)
    w = np.random.randn(7).astype(np.float32)
    # Binary labels in {0, 1}, stored as floats for the BCE loss.
    y_true = np.random.randint(0, 2, (46)).astype(np.float32)
    # revdiff graph: loss is computed on the raw logits (dy_out), the
    # sigmoid output is checked separately for the forward pass.
    dX = val(X)
    dw = val(w)
    dy_true = val(y_true)
    dy_out = rd.build_dot_mv(dX, dw)
    dy_pred = rd.build_vsigmoid(dy_out)
    dloss = rd.build_bce_loss(dy_out, dy_true)
    # torch reference graph.
    tX = torch.tensor(X, requires_grad=True)
    tw = torch.tensor(w, requires_grad=True)
    ty_true = torch.tensor(y_true, requires_grad=False)
    ty_out = torch.matmul(tX, tw)
    # Hook the intermediate logits so their gradient can be read back.
    utils.save_grad(ty_out)
    ty_pred = torch.sigmoid(ty_out)
    # reduction='sum' matches the revdiff loss, which is not averaged.
    criterion = torch.nn.BCEWithLogitsLoss(reduction='sum')
    tloss = criterion(ty_out, ty_true)
    tloss.backward()
    self.ck_fequals(dloss.eval(), tloss.data.numpy(), feps=1e-3)
    self.ck_fequals(dy_pred.eval(), ty_pred.data.numpy())
    self.ck_fequals(get_grad(dloss, dy_out).eval(),
                    utils.get_grad(ty_out).data.numpy())
    self.ck_fequals(get_grad(dloss, dw).eval(), tw.grad.data.numpy())
    self.ck_fequals(get_grad(dloss, dX).eval(), tX.grad.data.numpy())
def test_sgd_logreg_2_prim(self):
    """Same binary logistic regression as test_sgd_logreg_2, but with the
    sigmoid and the BCE loss spelled out from primitive rd ops, so the
    generic gradient machinery (rather than a fused loss node) is
    exercised. Tolerances are looser because this formulation is less
    numerically stable than a fused BCE-with-logits."""
    X = np.random.randn(46, 7).astype(np.float32)
    w = np.random.randn(7).astype(np.float32)
    y_true = np.random.randint(0, 2, (46)).astype(np.float32)  # 0/1 labels
    dX = val(X)
    dw = val(w)
    dy_true = val(y_true)
    dy_out = rd.build_dot_mv(dX, dw)
    # sigmoid(z) = 1 / (1 + exp(-z)), built from primitives.
    dy_pred = rd.build_vsdiv(1, rd.build_vsadd(1, rd.build_vexp((-dy_out))))
    # BCE with sum reduction: -sum(y*log(p) + (1-y)*log(1-p)).
    dloss = - rd.op_sum(dy_true * rd.build_vlog(dy_pred)
                        + (rd.build_vsadd(1, -dy_true))
                        * rd.build_vlog(rd.build_vsadd(1, -dy_pred)), axis=0)
    # torch reference graph (fused BCE-with-logits).
    tX = torch.tensor(X, requires_grad=True)
    tw = torch.tensor(w, requires_grad=True)
    ty_true = torch.tensor(y_true, requires_grad=False)
    ty_out = torch.matmul(tX, tw)
    utils.save_grad(ty_out)  # hook to read back d(loss)/d(logits)
    ty_pred = torch.sigmoid(ty_out)
    criterion = torch.nn.BCEWithLogitsLoss(reduction='sum')
    tloss = criterion(ty_out, ty_true)
    tloss.backward()
    self.ck_fequals(dloss.eval(), tloss.data.numpy(), feps=1e-2)
    self.ck_fequals(dy_pred.eval(), ty_pred.data.numpy())
    self.ck_fequals(get_grad(dloss, dy_out).eval(),
                    utils.get_grad(ty_out).data.numpy(), feps=1e-4)
    self.ck_fequals(get_grad(dloss, dw).eval(), tw.grad.data.numpy(), feps=1e-3)
    self.ck_fequals(get_grad(dloss, dX).eval(), tX.grad.data.numpy(), feps=1e-4)
def test_sgd_logreg_k(self):
    """K-class logistic regression: rd's softmax and cross-entropy loss
    (built from the logits) against
    torch.nn.CrossEntropyLoss(reduction='sum')."""
    X = np.random.randn(46, 7).astype(np.float32)
    w = np.random.randn(7, 4).astype(np.float32)
    # Random one-hot targets: exactly one class set per row.
    y_true = np.zeros((46, 4)).astype(np.float32)
    for i in range(y_true.shape[0]):
        y_true[i][np.random.randint(0, y_true.shape[1])] = 1
    # rd graph: logits = X @ W; probabilities via softmax; loss from logits.
    dX = val(X)
    dw = val(w)
    dy_true = val(y_true)
    dy_out = rd.build_dot_mm(dX, dw)
    dy_pred = rd.build_softmax(dy_out)
    dloss = rd.build_cross_entropy_loss(dy_out, dy_true)
    # torch reference graph.
    tX = torch.tensor(X, requires_grad=True)
    tw = torch.tensor(w, requires_grad=True)
    ty_true = torch.tensor(y_true, requires_grad=False)
    # torch's CrossEntropyLoss takes class indices, not one-hot rows.
    ty_true = torch.argmax(ty_true, dim=1)
    ty_out = torch.matmul(tX, tw)
    ty_pred = torch.nn.functional.softmax(ty_out, dim=1)
    utils.save_grad(ty_out)  # hook to read back d(loss)/d(logits)
    criterion = torch.nn.CrossEntropyLoss(reduction='sum')
    tloss = criterion(ty_out, ty_true)
    tloss.backward()
    self.ck_fequals(dloss.eval(), tloss.data.numpy(), feps=1e-3)
    self.ck_fequals(dy_pred.eval(), ty_pred.data.numpy())
    self.ck_fequals(get_grad(dloss, dy_out).eval(),
                    utils.get_grad(ty_out).data.numpy())
    self.ck_fequals(get_grad(dloss, dw).eval(), tw.grad.data.numpy())
    self.ck_fequals(get_grad(dloss, dX).eval(), tX.grad.data.numpy())
def test_sgd_logreg_k_l1_l2(self):
    """K-class logistic regression with L1 and L2 weight penalties added
    on top of the cross-entropy loss, checked against the equivalent
    torch expression."""
    X = np.random.randn(46, 7).astype(np.float32)
    w = np.random.randn(7, 4).astype(np.float32)
    # Random one-hot targets: exactly one class set per row.
    y_true = np.zeros((46, 4)).astype(np.float32)
    for i in range(y_true.shape[0]):
        y_true[i][np.random.randint(0, y_true.shape[1])] = 1
    alpha_l1 = 0.53  # L1 penalty weight
    alpha_l2 = 0.82  # L2 penalty weight
    dX = val(X)
    dw = val(w)
    # Flatten W so the penalties can use vector ops (norm1 / dot_vv).
    dw_flat = rd.build_reshape(dw, (dw.shape[0] * dw.shape[1],))
    dy_true = val(y_true)
    dy_out = rd.build_dot_mm(dX, dw)
    dy_pred = rd.build_softmax(dy_out)
    dloss = rd.build_cross_entropy_loss(dy_out, dy_true)
    dloss = dloss + alpha_l1 * rd.build_norm1(dw_flat)           # + a1*||w||_1
    dloss = dloss + alpha_l2 * rd.build_dot_vv(dw_flat, dw_flat)  # + a2*||w||_2^2
    # torch reference graph with the same penalties.
    tX = torch.tensor(X, requires_grad=True)
    tw = torch.tensor(w, requires_grad=True)
    tw_flat = tw.view(-1)
    ty_true = torch.tensor(y_true, requires_grad=False)
    # torch's CrossEntropyLoss takes class indices, not one-hot rows.
    ty_true = torch.argmax(ty_true, dim=1)
    ty_out = torch.matmul(tX, tw)
    ty_pred = torch.nn.functional.softmax(ty_out, dim=1)
    utils.save_grad(ty_out)  # hook to read back d(loss)/d(logits)
    criterion = torch.nn.CrossEntropyLoss(reduction='sum')
    tloss = criterion(ty_out, ty_true)
    tloss = tloss + alpha_l1 * torch.norm(tw_flat, p=1) + alpha_l2 * torch.dot(tw_flat, tw_flat)
    tloss.backward()
    self.ck_fequals(dloss.eval(), tloss.data.numpy(), feps=1e-3)
    self.ck_fequals(dy_pred.eval(), ty_pred.data.numpy())
    self.ck_fequals(get_grad(dloss, dy_out).eval(),
                    utils.get_grad(ty_out).data.numpy())
    self.ck_fequals(get_grad(dloss, dw).eval(), tw.grad.data.numpy())
    self.ck_fequals(get_grad(dloss, dX).eval(), tX.grad.data.numpy())
class MLP(RDTestCase):
    """Dense layer, activation functions and elementwise losses of the rd
    library checked against their torch counterparts.

    Note: in this class the ``t*`` variables hold the rd graph and the
    ``d*`` variables the torch reference — the opposite of the SGD tests
    above. Kept as-is to avoid churning every line.

    Fixes relative to the previous revision:

    * ``test_act_softmax`` compared ``rd.build_softmax`` against
      ``torch.relu`` (a copy/paste slip that forced a 1e-1 tolerance and a
      disabled gradient check); it now uses
      ``torch.nn.functional.softmax`` and re-enables the gradient check.
    * The L1/MSE criteria use ``reduction='mean'``; the old
      ``'elementwise_mean'`` spelling is deprecated and rejected by modern
      torch releases.
    """

    def test_layer_lin1(self):
        """Single linear layer (X @ W + b) under an MSE loss."""
        X = np.random.randn(46, 7)
        y_true = np.random.randn(46, 3)
        W = np.random.randn(7, 3)
        b = np.random.randn(3)
        dX = val(X)
        dy_true = val(y_true)
        dW = val(W)
        db = val(b)
        dy_pred = rd.build_add_bias(rd.build_dot_mm(dX, dW), db)
        # The mse() helper works on vectors, so flatten prediction/target.
        dloss = mse(rd.build_reshape(dy_pred, (y_true.size,)),
                    rd.build_reshape(dy_true, (y_true.size,)))
        tX = torch.tensor(X, requires_grad=True)
        ty_true = torch.tensor(y_true, requires_grad=True)
        tW = torch.tensor(W, requires_grad=True)
        tb = torch.tensor(b, requires_grad=True)
        ty_pred = torch.matmul(tX, tW) + tb
        criterion = torch.nn.MSELoss()
        tloss = criterion(ty_pred, ty_true)
        tloss.backward()
        self.ck_fequals(dloss.eval(), tloss.data.numpy(), feps=1e-3)
        self.ck_fequals(get_grad(dloss, dy_true).eval(), ty_true.grad.data.numpy())
        self.ck_fequals(get_grad(dloss, dW).eval(), tW.grad.data.numpy(), feps=1e-5)
        self.ck_fequals(get_grad(dloss, db).eval(), tb.grad.data.numpy(), feps=1e-5)
        self.ck_fequals(get_grad(dloss, dX).eval(), tX.grad.data.numpy())

    def test_act_relu(self):
        """relu forward/backward with scalar objective e = <z, z>."""
        x = np.random.randn(43)
        tx = val(x)
        tz = rd.build_vrelu(tx)
        te = rd.build_dot_vv(tz, tz)
        dx = torch.tensor(x, requires_grad=True)
        dz = torch.relu(dx)
        de = torch.dot(dz, dz)
        de.backward()
        self.ck_fequals(tz.eval(), dz.data.numpy())
        self.ck_fequals(get_grad(te, tx).eval(), dx.grad.data.numpy())

    def test_act_softmax(self):
        """Row-wise softmax forward/backward against
        torch.nn.functional.softmax."""
        x = np.random.randn(11, 7)
        tx = val(x)
        tz = rd.build_softmax(tx)
        tz = rd.build_reshape(tz, (11 * 7,))
        te = rd.build_dot_vv(tz, tz)
        dx = torch.tensor(x, requires_grad=True)
        # Bug fix: the reference used torch.relu here instead of softmax.
        dz = torch.nn.functional.softmax(dx, dim=1).view(-1)
        de = torch.dot(dz, dz)
        de.backward()
        self.ck_fequals(tz.eval(), dz.data.numpy())
        self.ck_fequals(get_grad(te, tx).eval(), dx.grad.data.numpy())

    def test_act_softplus(self):
        """softplus (beta=0.7) forward/backward."""
        x = np.random.randn(23)
        tx = val(x)
        tz = rd.build_vsoftplus(tx, 0.7)
        te = rd.build_dot_vv(tz, tz)
        dx = torch.tensor(x, requires_grad=True)
        dz = torch.nn.functional.softplus(dx, 0.7)
        de = torch.dot(dz, dz)
        de.backward()
        self.ck_fequals(tz.eval(), dz.data.numpy())
        self.ck_fequals(get_grad(te, tx).eval(), dx.grad.data.numpy())

    def test_act_tanh(self):
        """tanh forward/backward."""
        x = np.random.randn(23)
        tx = val(x)
        tz = rd.build_vtanh(tx)
        te = rd.build_dot_vv(tz, tz)
        dx = torch.tensor(x, requires_grad=True)
        dz = torch.tanh(dx)
        de = torch.dot(dz, dz)
        de.backward()
        self.ck_fequals(tz.eval(), dz.data.numpy())
        self.ck_fequals(get_grad(te, tx).eval(), dx.grad.data.numpy())

    def test_act_sigmoid(self):
        """sigmoid forward/backward."""
        x = np.random.randn(23)
        tx = val(x)
        tz = rd.build_vsigmoid(tx)
        te = rd.build_dot_vv(tz, tz)
        dx = torch.tensor(x, requires_grad=True)
        dz = torch.sigmoid(dx)
        de = torch.dot(dz, dz)
        de.backward()
        self.ck_fequals(tz.eval(), dz.data.numpy())
        self.ck_fequals(get_grad(te, tx).eval(), dx.grad.data.numpy())

    def test_act_leaky_relu(self):
        """leaky relu (negative slope 0.05); input scaled so both the
        positive and negative branches are well exercised."""
        x = np.random.randn(43) * 4
        tx = val(x)
        tz = rd.build_vleaky_relu(tx, 0.05)
        te = rd.build_dot_vv(tz, tz)
        dx = torch.tensor(x, requires_grad=True)
        dz = torch.nn.functional.leaky_relu(dx, 0.05)
        de = torch.dot(dz, dz)
        de.backward()
        self.ck_fequals(tz.eval(), dz.data.numpy())
        self.ck_fequals(get_grad(te, tx).eval(), dx.grad.data.numpy())

    def test_act_elu(self):
        """elu (alpha 0.05) forward/backward."""
        x = np.random.randn(43) * 4
        tx = val(x)
        tz = rd.build_velu(tx, 0.05)
        te = rd.build_dot_vv(tz, tz)
        dx = torch.tensor(x, requires_grad=True)
        dz = torch.nn.functional.elu(dx, 0.05)
        de = torch.dot(dz, dz)
        de.backward()
        self.ck_fequals(tz.eval(), dz.data.numpy())
        self.ck_fequals(get_grad(te, tx).eval(), dx.grad.data.numpy())

    def test_mae_loss(self):
        """Mean absolute error (L1) loss and both input gradients."""
        y_true = np.random.randn(17)
        y_pred = np.random.randn(17)
        ty_true = val(y_true)
        ty_pred = val(y_pred)
        te = rd.build_mae_loss(ty_pred, ty_true)
        dy_true = torch.tensor(y_true, requires_grad=True)
        dy_pred = torch.tensor(y_pred, requires_grad=True)
        # 'mean' replaces the deprecated 'elementwise_mean' spelling.
        criterion = torch.nn.L1Loss(reduction='mean')
        de = criterion(dy_pred, dy_true)
        de.backward()
        self.ck_fequals(te.eval(), de.data.numpy(), feps=1e-5)
        self.ck_fequals(get_grad(te, ty_pred).eval(), dy_pred.grad.data.numpy())
        self.ck_fequals(get_grad(te, ty_true).eval(), dy_true.grad.data.numpy())

    def test_mse_loss(self):
        """Mean squared error loss and both input gradients."""
        y_true = np.random.randn(17)
        y_pred = np.random.randn(17)
        ty_true = val(y_true)
        ty_pred = val(y_pred)
        te = rd.build_mse_loss(ty_pred, ty_true)
        dy_true = torch.tensor(y_true, requires_grad=True)
        dy_pred = torch.tensor(y_pred, requires_grad=True)
        # 'mean' replaces the deprecated 'elementwise_mean' spelling.
        criterion = torch.nn.MSELoss(reduction='mean')
        de = criterion(dy_pred, dy_true)
        de.backward()
        self.ck_fequals(te.eval(), de.data.numpy(), feps=1e-5)
        self.ck_fequals(get_grad(te, ty_pred).eval(), dy_pred.grad.data.numpy())
        self.ck_fequals(get_grad(te, ty_true).eval(), dy_true.grad.data.numpy())
class ConvNet(RDTestCase):
    """Conv2d, transposed conv2d and max-pooling checked against torch.

    In this class ``t*`` holds the rd graph and ``d*`` the torch
    reference. Judging from the paired torch calls, the rd builders take
    positional arguments in the order (stride_h, stride_w, pad_h, pad_w)
    for convolutions and (kernel_h, kernel_w, stride_h, stride_w) for
    pooling.
    """

    def test_conv2d(self):
        """Forward conv2d over four configurations: unit stride with
        bias, strided, strided+padded, and strided with padding larger
        than the kernel in one dimension."""
        # Case 1: stride (1, 1), no padding, with per-channel bias.
        X = np.random.randn(2, 3, 17, 23).astype(np.float32)  # NCHW input
        K = np.random.randn(4, 3, 5, 8).astype(np.float32)    # OIHW kernel
        b = np.random.randn(4).astype(np.float32)             # one bias per out channel
        tX = val(X)
        tK = val(K)
        tb = val(b)
        tY = rd.build_conv2d(tX, tK, 1, 1, 0, 0)
        tY = rd.build_conv2d_bias_add(tY, tb)
        # Scalar objective e = <Y, Y> so gradients can be backpropagated.
        tYf = rd.build_reshape(tY, ((-1,)))
        te = rd.build_dot_vv(tYf, tYf)
        dX = torch.tensor(X, requires_grad=True)
        dK = torch.tensor(K, requires_grad=True)
        db = torch.tensor(b, requires_grad=True)
        dY = torch.nn.functional.conv2d(dX, dK, bias=db, stride=(1, 1))
        dYf = dY.view(-1)
        de = torch.dot(dYf, dYf)
        de.backward()
        self.ck_fequals(tY.eval(), dY.data.numpy())
        self.ck_fequals(get_grad(te, tK).eval(), dK.grad.data.numpy(), feps=1e-4)
        self.ck_fequals(get_grad(te, tX).eval(), dX.grad.data.numpy(), feps=1e-5)
        self.ck_fequals(get_grad(te, tb).eval(), db.grad.data.numpy(), feps=1e-3)
        # Case 2: stride (3, 4), no padding, no bias.
        X = np.random.randn(2, 3, 17, 24).astype(np.float32)
        K = np.random.randn(4, 3, 5, 8).astype(np.float32)
        tX = val(X)
        tK = val(K)
        tY = rd.build_conv2d(tX, tK, 3, 4, 0, 0)
        tYf = rd.build_reshape(tY, ((-1,)))
        te = rd.build_dot_vv(tYf, tYf)
        dX = torch.tensor(X, requires_grad=True)
        dK = torch.tensor(K, requires_grad=True)
        dY = torch.nn.functional.conv2d(dX, dK, stride=(3, 4))
        dYf = dY.view(-1)
        de = torch.dot(dYf, dYf)
        de.backward()
        self.ck_fequals(tY.eval(), dY.data.numpy())
        self.ck_fequals(get_grad(te, tK).eval(), dK.grad.data.numpy(), feps=1e-5)
        self.ck_fequals(get_grad(te, tX).eval(), dX.grad.data.numpy())
        # Case 3: stride (3, 4) with padding (6, 8).
        X = np.random.randn(2, 3, 17, 24).astype(np.float32)
        K = np.random.randn(4, 3, 5, 8).astype(np.float32)
        tX = val(X)
        tK = val(K)
        tY = rd.build_conv2d(tX, tK, 3, 4, 6, 8)
        tYf = rd.build_reshape(tY, ((-1,)))
        te = rd.build_dot_vv(tYf, tYf)
        dX = torch.tensor(X, requires_grad=True)
        dK = torch.tensor(K, requires_grad=True)
        dY = torch.nn.functional.conv2d(dX, dK, stride=(3, 4), padding=(6, 8))
        dYf = dY.view(-1)
        de = torch.dot(dYf, dYf)
        de.backward()
        self.ck_fequals(tY.eval(), dY.data.numpy())
        self.ck_fequals(get_grad(te, tK).eval(), dK.grad.data.numpy(), feps=1e-5)
        self.ck_fequals(get_grad(te, tX).eval(), dX.grad.data.numpy())
        # Case 4: different kernel size, padding (7, 11) wider than the
        # kernel in the vertical dimension.
        X = np.random.randn(2, 3, 16, 26).astype(np.float32)
        K = np.random.randn(4, 3, 6, 8).astype(np.float32)
        tX = val(X)
        tK = val(K)
        tY = rd.build_conv2d(tX, tK, 3, 4, 7, 11)
        tYf = rd.build_reshape(tY, ((-1,)))
        te = rd.build_dot_vv(tYf, tYf)
        dX = torch.tensor(X, requires_grad=True)
        dK = torch.tensor(K, requires_grad=True)
        dY = torch.nn.functional.conv2d(dX, dK, stride=(3, 4), padding=(7, 11))
        dYf = dY.view(-1)
        de = torch.dot(dYf, dYf)
        de.backward()
        self.ck_fequals(tY.eval(), dY.data.numpy())
        self.ck_fequals(get_grad(te, tK).eval(), dK.grad.data.numpy(), feps=1e-5)
        self.ck_fequals(get_grad(te, tX).eval(), dX.grad.data.numpy())

    def test_conv2d_transpose(self):
        """Transposed conv2d over the same four kinds of configurations.

        NOTE(review): this uses rd.op_conv2d_transpose while the forward
        test uses rd.build_conv2d — presumably two naming conventions for
        the same builder layer; worth unifying in the library.
        """
        # Case 1: stride (1, 1), no padding, with bias. For conv_transpose
        # the kernel layout is (in_channels, out_channels, H, W).
        X = np.random.randn(2, 4, 13, 16).astype(np.float32)
        K = np.random.randn(4, 3, 5, 8).astype(np.float32)
        b = np.random.randn(3).astype(np.float32)
        tX = val(X)
        tK = val(K)
        tb = val(b)
        tY = rd.op_conv2d_transpose(tX, tK, 1, 1, 0, 0)
        tY = rd.build_conv2d_bias_add(tY, tb)
        tYf = rd.build_reshape(tY, ((-1,)))
        te = rd.build_dot_vv(tYf, tYf)
        dX = torch.tensor(X, requires_grad=True)
        dK = torch.tensor(K, requires_grad=True)
        db = torch.tensor(b, requires_grad=True)
        dY = torch.nn.functional.conv_transpose2d(dX, dK, bias=db, stride=(1, 1))
        dYf = dY.view(-1)
        de = torch.dot(dYf, dYf)
        de.backward()
        self.ck_fequals(tY.eval(), dY.data.numpy())
        self.ck_fequals(get_grad(te, tK).eval(), dK.grad.data.numpy(), feps=1e-4)
        self.ck_fequals(get_grad(te, tX).eval(), dX.grad.data.numpy(), feps=1e-5)
        self.ck_fequals(get_grad(te, tb).eval(), db.grad.data.numpy(), feps=1e-3)
        # Case 2: stride (3, 4), no padding.
        X = np.random.randn(2, 4, 5, 5).astype(np.float32)
        K = np.random.randn(4, 3, 5, 8).astype(np.float32)
        tX = val(X)
        tK = val(K)
        tY = rd.op_conv2d_transpose(tX, tK, 3, 4, 0, 0)
        tYf = rd.build_reshape(tY, ((-1,)))
        te = rd.build_dot_vv(tYf, tYf)
        dX = torch.tensor(X, requires_grad=True)
        dK = torch.tensor(K, requires_grad=True)
        dY = torch.nn.functional.conv_transpose2d(dX, dK, stride=(3, 4))
        dYf = dY.view(-1)
        de = torch.dot(dYf, dYf)
        de.backward()
        self.ck_fequals(tY.eval(), dY.data.numpy())
        self.ck_fequals(get_grad(te, tK).eval(), dK.grad.data.numpy(), feps=1e-5)
        self.ck_fequals(get_grad(te, tX).eval(), dX.grad.data.numpy(), feps=1e-5)
        # Case 3: stride (3, 4) with padding (6, 8).
        X = np.random.randn(2, 4, 5, 5).astype(np.float32)
        K = np.random.randn(4, 3, 5, 8).astype(np.float32)
        tX = val(X)
        tK = val(K)
        tY = rd.op_conv2d_transpose(tX, tK, 3, 4, 6, 8)
        tYf = rd.build_reshape(tY, ((-1,)))
        te = rd.build_dot_vv(tYf, tYf)
        dX = torch.tensor(X, requires_grad=True)
        dK = torch.tensor(K, requires_grad=True)
        dY = torch.nn.functional.conv_transpose2d(dX, dK, stride=(3, 4), padding=(6, 8))
        dYf = dY.view(-1)
        de = torch.dot(dYf, dYf)
        de.backward()
        self.ck_fequals(tY.eval(), dY.data.numpy())
        self.ck_fequals(get_grad(te, tK).eval(), dK.grad.data.numpy(), feps=1e-5)
        self.ck_fequals(get_grad(te, tX).eval(), dX.grad.data.numpy(), feps=1e-5)
        # Case 4: different kernel size, padding (7, 11).
        X = np.random.randn(2, 4, 9, 11).astype(np.float32)
        K = np.random.randn(4, 3, 6, 8).astype(np.float32)
        tX = val(X)
        tK = val(K)
        tY = rd.op_conv2d_transpose(tX, tK, 3, 4, 7, 11)
        tYf = rd.build_reshape(tY, ((-1,)))
        te = rd.build_dot_vv(tYf, tYf)
        dX = torch.tensor(X, requires_grad=True)
        dK = torch.tensor(K, requires_grad=True)
        dY = torch.nn.functional.conv_transpose2d(dX, dK, stride=(3, 4), padding=(7, 11))
        dYf = dY.view(-1)
        de = torch.dot(dYf, dYf)
        de.backward()
        self.ck_fequals(tY.eval(), dY.data.numpy())
        self.ck_fequals(get_grad(te, tK).eval(), dK.grad.data.numpy(), feps=1e-5)
        self.ck_fequals(get_grad(te, tX).eval(), dX.grad.data.numpy(), feps=1e-5)

    def test_maxpooling(self):
        """Max pooling: non-overlapping, overlapping (stride < kernel)
        and rectangular window cases."""
        # Case 1: 2x2 window, stride 2 (non-overlapping).
        X = np.random.randn(1, 1, 6, 6)
        tX = val(X)
        tY = rd.build_max_pooling(tX, 2, 2, 2, 2)
        tYf = rd.build_reshape(tY, (-1,))
        te = rd.build_dot_vv(tYf, tYf)
        dX = torch.tensor(X, requires_grad=True)
        dY = torch.nn.functional.max_pool2d(dX, (2, 2), (2, 2))
        dYf = dY.view(-1)
        de = torch.dot(dYf, dYf)
        de.backward()
        self.ck_fequals(tY.eval(), dY.data.numpy())
        self.ck_fequals(get_grad(te, tX).eval(), dX.grad.data.numpy())
        # Case 2: 2x2 window, stride 1 (overlapping windows).
        X = np.random.randn(1, 1, 5, 5)
        tX = val(X)
        tY = rd.build_max_pooling(tX, 2, 2, 1, 1)
        tYf = rd.build_reshape(tY, (-1,))
        te = rd.build_dot_vv(tYf, tYf)
        dX = torch.tensor(X, requires_grad=True)
        dY = torch.nn.functional.max_pool2d(dX, (2, 2), (1, 1))
        dYf = dY.view(-1)
        de = torch.dot(dYf, dYf)
        de.backward()
        self.ck_fequals(tY.eval(), dY.data.numpy())
        self.ck_fequals(get_grad(te, tX).eval(), dX.grad.data.numpy())
        # Case 3: rectangular 3x2 window, stride (2, 1), batched input.
        X = np.random.randn(2, 3, 9, 7)
        tX = val(X)
        tY = rd.build_max_pooling(tX, 3, 2, 2, 1)
        tYf = rd.build_reshape(tY, (-1,))
        te = rd.build_dot_vv(tYf, tYf)
        dX = torch.tensor(X, requires_grad=True)
        dY = torch.nn.functional.max_pool2d(dX, (3, 2), (2, 1))
        dYf = dY.view(-1)
        de = torch.dot(dYf, dYf)
        de.backward()
        self.ck_fequals(tY.eval(), dY.data.numpy())
        self.ck_fequals(get_grad(te, tX).eval(), dX.grad.data.numpy())
class ModelsBack(RDTestCase):
    """End-to-end models: an rd network is initialised with the exact
    weights of an equivalent torch network, then forward pass, loss and
    per-parameter gradients are compared.

    Both checks rely on the parameter registration order of
    ``net.parameters()`` matching ``dnet.params_``, and on rd dense
    layers storing their weight matrices transposed relative to
    ``torch.nn.Linear`` (hence the ``.T`` when copying).
    """

    def test_dense1(self):
        """Three-layer fully connected classifier under a summed
        cross-entropy loss."""
        N = 17              # batch size
        IN_SIZE = 23
        HIDDEN1_SIZE = 16
        HIDDEN2_SIZE = 9
        OUT_SIZE = 4        # number of classes
        LR = 0.001          # NOTE(review): unused here — presumably leftover
        class TNet(torch.nn.Module):
            """torch reference: 3 linear layers with relu in between."""
            def __init__(self):
                super(TNet, self).__init__()
                self.l1 = torch.nn.Linear(IN_SIZE , HIDDEN1_SIZE)
                self.l2 = torch.nn.Linear(HIDDEN1_SIZE, HIDDEN2_SIZE)
                self.l3 = torch.nn.Linear(HIDDEN2_SIZE, OUT_SIZE)
            def forward(self, x):
                x = x.view(-1, IN_SIZE)
                x = torch.relu(self.l1(x))
                x = torch.relu(self.l2(x))
                y_logits = self.l3(x)
                return y_logits
        tnet = TNet()
        criterion = torch.nn.CrossEntropyLoss(reduction='sum')
        class DNet(rd.Network):
            """rd mirror of TNet, layer for layer."""
            def __init__(self):
                super().__init__()
                self.l1 = self.dense_layer(IN_SIZE , HIDDEN1_SIZE)
                self.l2 = self.dense_layer(HIDDEN1_SIZE, HIDDEN2_SIZE)
                self.l3 = self.dense_layer(HIDDEN2_SIZE, OUT_SIZE)
            def forward(self, x):
                x = rd.build_reshape(x, (-1, IN_SIZE))
                x = rd.build_vrelu(self.l1(x))
                x = rd.build_vrelu(self.l2(x))
                y_logits = self.l3(x)
                return y_logits
        dnet = DNet()
        X_sample = np.random.randn(N, IN_SIZE).astype(np.float32)
        y_sample = np.random.randint(0, OUT_SIZE, size=N)  # class indices
        # Copy torch's initial parameters into the rd net. Weight
        # matrices are transposed; for 1-D biases .T is a no-op.
        tparams = list(tnet.parameters())
        for i in range(len(tparams)):
            dnet.params_[i].update(tparams[i].data.numpy().T)
        # torch forward + backward.
        tX = torch.tensor(X_sample)
        ty = torch.tensor(y_sample)
        ty_logits = tnet(tX)
        tloss = criterion(ty_logits, ty)
        tnet.zero_grad()
        tloss.backward()
        # rd forward; targets as one-hot for rd's cross-entropy.
        dX = rd.build_val(X_sample)
        dy = rd.build_val(utils.vec2one_hot(y_sample, OUT_SIZE))
        dy_logits = dnet(dX)
        dloss = rd.build_cross_entropy_loss(dy_logits, dy)
        self.ck_fequals(ty_logits.data.numpy(), dy_logits.eval())
        self.ck_fequals(tloss.data.numpy(), dloss.eval(), feps=1e-4)
        # Compare every parameter gradient (transposed back).
        tparams = list(tnet.parameters())
        for i in range(len(tparams)):
            grad = rd.build_node_grad(dloss, dnet.params_[i]).eval()
            grad_sol = tparams[i].grad.data.numpy().T
            self.ck_fequals(grad, grad_sol)

    def test_conv1(self):
        """LeNet-style conv net (2 conv+pool stages, 1 dense layer) under
        a summed MSE loss."""
        F = torch.nn.functional
        class TNet(torch.nn.Module):
            """torch reference network."""
            def __init__(self):
                super(TNet, self).__init__()
                self.conv1 = torch.nn.Conv2d(1, 6, 5)
                self.conv2 = torch.nn.Conv2d(6, 16, 5)
                self.fc1 = torch.nn.Linear(16 * 5 * 5, 4)
            def forward(self, x):
                x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
                x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
                x = x.view(x.shape[0], -1)
                x = self.fc1(x)
                return x
        tnet = TNet()
        criterion = torch.nn.MSELoss(reduction='sum')
        class DNet(rd.Network):
            """rd mirror of TNet, layer for layer."""
            def __init__(self):
                super().__init__()
                self.conv1 = self.conv2d_layer(1, 6, 5, 5)
                self.conv2 = self.conv2d_layer(6, 16, 5, 5)
                self.fc = self.dense_layer(16 * 5 * 5, 4)
            def forward(self, x):
                x = rd.build_vrelu(self.conv1(x))
                x = rd.build_max_pooling(x, 2, 2, 2, 2)
                x = rd.build_vrelu(self.conv2(x))
                x = rd.build_max_pooling(x, 2, 2, 2, 2)
                x = rd.build_reshape(x, (x.shape[0], -1))
                x = self.fc(x)
                return x
        dnet = DNet()
        X = np.random.randn(3, 1, 32, 32).astype(np.float32)
        y = np.random.randn(3, 4).astype(np.float32)  # regression-style targets
        # Copy torch parameters into the rd net: only 2-D (dense weight)
        # tensors are transposed; conv kernels and biases are copied as-is.
        tparams = list(tnet.parameters())
        for i in range(len(tparams)):
            if len(tparams[i].shape) == 2:
                dnet.params_[i].update(tparams[i].data.numpy().T)
            else:
                dnet.params_[i].update(tparams[i].data.numpy())
        # torch forward + backward.
        tX = torch.tensor(X)
        ty = torch.tensor(y)
        ty_logits = tnet(tX)
        tloss = criterion(ty_logits, ty)
        tnet.zero_grad()
        tloss.backward()
        # rd forward + loss.
        dX = rd.build_val(X)
        dy = rd.build_val(y)
        dy_logits = dnet(dX)
        dloss = rd.op_mse_loss(dy_logits, dy)
        self.ck_fequals(ty_logits.data.numpy(), dy_logits.eval())
        self.ck_fequals(tloss.data.numpy(), dloss.eval(), feps=1e-5)
        # Compare every parameter gradient (dense weights transposed back).
        tparams = list(tnet.parameters())
        for i in range(len(tparams)):
            grad = rd.build_node_grad(dloss, dnet.params_[i]).eval()
            if len(tparams[i].shape) == 2:
                grad_sol = tparams[i].grad.data.numpy().T
            else:
                grad_sol = tparams[i].grad.data.numpy()
            self.ck_fequals(grad, grad_sol)
# Run the whole suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 32.795477 | 100 | 0.555272 | 5,111 | 33,353 | 3.457053 | 0.046175 | 0.044145 | 0.094176 | 0.072443 | 0.880355 | 0.859302 | 0.826419 | 0.808195 | 0.799932 | 0.782614 | 0 | 0.029978 | 0.289899 | 33,353 | 1,016 | 101 | 32.827756 | 0.716053 | 0.001859 | 0 | 0.675462 | 0 | 0 | 0.001742 | 0 | 0 | 0 | 0 | 0 | 0.001319 | 1 | 0.067282 | false | 0 | 0.007916 | 0.002639 | 0.100264 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
5591752c3fb0939be7165bd8fd652230903751d3 | 232 | py | Python | uchisquashsite/routes/base.py | Dant86/uchicagosqsuashwebsite | 2fad7fb326c09eb420ff08b56387c19ec6447c78 | [
"MIT"
] | null | null | null | uchisquashsite/routes/base.py | Dant86/uchicagosqsuashwebsite | 2fad7fb326c09eb420ff08b56387c19ec6447c78 | [
"MIT"
] | null | null | null | uchisquashsite/routes/base.py | Dant86/uchicagosqsuashwebsite | 2fad7fb326c09eb420ff08b56387c19ec6447c78 | [
"MIT"
] | null | null | null | from flask import Blueprint, render_template
# Blueprint grouping the site's basic pages; registered on the Flask app
# elsewhere in the project.
base = Blueprint('base', __name__)


@base.route('/')
def homepage():
    """Render the landing page."""
    return render_template('index.html')


@base.route('/about')
def about():
    """Render the about page."""
    return render_template('about.html')
55ba1a52f0fb73d4a345a106881142b5455ec1dd | 753 | py | Python | src/sparl/datasets/waveform.py | Hguimaraes/sparl | 4f0cc7ac4331c96784df4aca14eac2cec0de9894 | [
"MIT"
] | null | null | null | src/sparl/datasets/waveform.py | Hguimaraes/sparl | 4f0cc7ac4331c96784df4aca14eac2cec0de9894 | [
"MIT"
] | null | null | null | src/sparl/datasets/waveform.py | Hguimaraes/sparl | 4f0cc7ac4331c96784df4aca14eac2cec0de9894 | [
"MIT"
] | null | null | null | import numpy as np
from scipy import signal
def sine_wave(freq, sr, seconds, n_bits=8):
    """Generate a quantized sine wave.

    Args:
        freq: oscillation frequency in Hz.
        sr: sample rate in Hz.
        seconds: duration; the result has int(sr * seconds) samples.
        n_bits: amplitude is scaled by 2**n_bits - 1 before quantization.

    Returns:
        1-D np.int16 array with the quantized samples.
    """
    num_samples = int(sr * seconds)
    phase = 2 * np.pi * np.arange(num_samples) * freq / sr
    wave = np.sin(phase).astype(np.float32)
    # Scale to the integer amplitude range and quantize.
    scale = 2 ** n_bits - 1
    return (wave * scale).astype(np.int16)
def square_wave(freq, sr, seconds, n_bits=8):
    """Generate a quantized square wave.

    Args:
        freq: oscillation frequency in Hz.
        sr: sample rate in Hz.
        seconds: duration; the result has int(sr * seconds) samples.
        n_bits: amplitude is scaled by 2**n_bits - 1 before quantization.

    Returns:
        1-D np.int16 array with the quantized samples.
    """
    num_samples = int(sr * seconds)
    phase = 2 * np.pi * np.arange(num_samples) * freq / sr
    wave = signal.square(phase)
    # Scale to the integer amplitude range and quantize.
    scale = 2 ** n_bits - 1
    return (wave * scale).astype(np.int16)
def triangle_wave(freq, sr, seconds, n_bits=8):
    """Generate a quantized triangle wave.

    Args:
        freq: oscillation frequency in Hz.
        sr: sample rate in Hz.
        seconds: duration; the result has int(sr * seconds) samples.
        n_bits: amplitude is scaled by 2**n_bits - 1 before quantization.

    Returns:
        1-D np.int16 array with the quantized samples.
    """
    t = np.arange(int(sr * seconds))
    # Bug fix: width=0.5 makes scipy's sawtooth a symmetric triangle wave;
    # the previous default (width=1) produced a rising-ramp sawtooth,
    # contradicting this function's name.
    samples = signal.sawtooth(2 * np.pi * t * freq / sr, width=0.5)
    to_int = 2 ** n_bits - 1
    return (samples * to_int).astype(np.int16)
e98f0627685a0bd8f709681511bd8b217fb9a12a | 44 | py | Python | src/CovidClassifier/__init__.py | LukasHaas/cs329s-covid-prediction | bd73935e1141e72f005389013ba2fa772657b53f | [
"MIT"
] | 8 | 2021-06-09T04:56:53.000Z | 2022-03-10T17:13:17.000Z | src/CovidClassifier/__init__.py | LukasHaas/cs329s-covid-prediction | bd73935e1141e72f005389013ba2fa772657b53f | [
"MIT"
] | null | null | null | src/CovidClassifier/__init__.py | LukasHaas/cs329s-covid-prediction | bd73935e1141e72f005389013ba2fa772657b53f | [
"MIT"
] | 3 | 2021-05-28T01:10:07.000Z | 2021-11-20T13:47:44.000Z | from .CovidClassifier import CovidClassifier | 44 | 44 | 0.909091 | 4 | 44 | 10 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.068182 | 44 | 1 | 44 | 44 | 0.97561 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e9d81bd23bf1515d6b9079bc0671005ccb7c17c9 | 103 | py | Python | cpu_load_generator/__init__.py | texdade/CPULoadGenerator | 1495339e8d08ca6ec5f40400102fe6e607545150 | [
"MIT"
] | 4 | 2021-07-30T23:21:42.000Z | 2021-11-23T11:13:45.000Z | cpu_load_generator/__init__.py | texdade/CPULoadGenerator | 1495339e8d08ca6ec5f40400102fe6e607545150 | [
"MIT"
] | null | null | null | cpu_load_generator/__init__.py | texdade/CPULoadGenerator | 1495339e8d08ca6ec5f40400102fe6e607545150 | [
"MIT"
] | 1 | 2021-09-26T13:13:37.000Z | 2021-09-26T13:13:37.000Z | from cpu_load_generator._interface import load_single_core, load_all_cores, from_profile # noqa: F401
| 51.5 | 102 | 0.854369 | 16 | 103 | 5 | 0.8125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.032258 | 0.097087 | 103 | 1 | 103 | 103 | 0.827957 | 0.097087 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e9db8b1d1d0d91072f3331a129f2f6254c3f6411 | 19 | py | Python | python/testData/resolve/multiFile/relativeAndSameDirectoryImports/sameDirectoryImportsNotCached/main.py | Starmel/intellij-community | adb89951109732e585d04f33e3fabbc9f9d3b256 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/resolve/multiFile/relativeAndSameDirectoryImports/sameDirectoryImportsNotCached/main.py | Starmel/intellij-community | adb89951109732e585d04f33e3fabbc9f9d3b256 | [
"Apache-2.0"
] | null | null | null | python/testData/resolve/multiFile/relativeAndSameDirectoryImports/sameDirectoryImportsNotCached/main.py | Starmel/intellij-community | adb89951109732e585d04f33e3fabbc9f9d3b256 | [
"Apache-2.0"
] | null | null | null | import os
print(os) | 9.5 | 9 | 0.789474 | 4 | 19 | 3.75 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.105263 | 19 | 2 | 10 | 9.5 | 0.882353 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0.5 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 6 |
75630dfbb5a5bd4b1fb7196e1d2ebeaa03920050 | 215 | py | Python | webserver/views/cheats.py | RedSoloFox/BOTW-Live-Map | 248214fc1a7ecbd44b96ae051b1570e77dbd573b | [
"MIT"
] | 5 | 2017-06-19T04:27:52.000Z | 2021-09-29T08:57:21.000Z | webserver/views/cheats.py | RedSoloFox/BOTW-Live-Map | 248214fc1a7ecbd44b96ae051b1570e77dbd573b | [
"MIT"
] | null | null | null | webserver/views/cheats.py | RedSoloFox/BOTW-Live-Map | 248214fc1a7ecbd44b96ae051b1570e77dbd573b | [
"MIT"
] | 1 | 2021-09-29T08:57:23.000Z | 2021-09-29T08:57:23.000Z | from webserver import app
from webserver.utils.decorators import connect_required
from flask import render_template
@app.route('/cheats/')
#@connect_required
def cheats():
return render_template('cheats.html') | 23.888889 | 55 | 0.8 | 28 | 215 | 6 | 0.571429 | 0.154762 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.106977 | 215 | 9 | 56 | 23.888889 | 0.875 | 0.07907 | 0 | 0 | 0 | 0 | 0.09596 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | true | 0 | 0.5 | 0.166667 | 0.833333 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 6 |
758f7f8e7c0e6ef85ac90364390ac5a53832e61e | 179 | py | Python | python_path_root3/package1/subpackage1/submodule11.py | mjscosta/nuitka_testcases | f075259230d93ff6973e0b439ed794a0e0db5352 | [
"Apache-2.0"
] | null | null | null | python_path_root3/package1/subpackage1/submodule11.py | mjscosta/nuitka_testcases | f075259230d93ff6973e0b439ed794a0e0db5352 | [
"Apache-2.0"
] | null | null | null | python_path_root3/package1/subpackage1/submodule11.py | mjscosta/nuitka_testcases | f075259230d93ff6973e0b439ed794a0e0db5352 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
def submodule11_f1( var1 ):
print_str = "" + __name__ + " : function submodule11_f1: " + var1
print(print_str, end='')
print()
| 25.571429 | 70 | 0.670391 | 21 | 179 | 5.095238 | 0.571429 | 0.242991 | 0.317757 | 0.411215 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.056738 | 0.212291 | 179 | 6 | 71 | 29.833333 | 0.702128 | 0 | 0 | 0 | 0 | 0 | 0.156425 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.2 | 0 | 0.4 | 0.8 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
75cdc52c893c9a0559454d61a8b36a9d85013c18 | 5,927 | py | Python | appengine/app/insert-southamerica.py | Yatish-Mullaji/python-docs-samples | 05064827bd2533a4c74aedfef2f190062998c62d | [
"Apache-2.0"
] | 1 | 2020-05-07T02:21:17.000Z | 2020-05-07T02:21:17.000Z | appengine/standard_python37/PlanetOnFire-app/insert-southamerica.py | Yatish-Mullaji/python-docs-samples | 05064827bd2533a4c74aedfef2f190062998c62d | [
"Apache-2.0"
] | null | null | null | appengine/standard_python37/PlanetOnFire-app/insert-southamerica.py | Yatish-Mullaji/python-docs-samples | 05064827bd2533a4c74aedfef2f190062998c62d | [
"Apache-2.0"
] | null | null | null |
import json
import pymongo
import glob
import os
# Setup connection to mongodb
#conn = "mongodb+srv://Yatish:1234@cluster0-4l19n.mongodb.net/test?retryWrites=true&w=majority"
#client = pymongo.MongoClient(conn)
client = pymongo.MongoClient("mongodb+srv://Yatish:1234@cluster0-4l19n.mongodb.net/HistoricData?retryWrites=true&w=majority")
db = client.test
# Select database and collection to use
db = client.HistoricData
collection = db.southamerica
country_name = []
path = 'historical_data/South_America'
print(glob.glob(path))
for i, filename in enumerate(glob.glob(path+"/*")):
country_name.append(filename)
print(country_name)
collection.delete_many({})
for country in country_name:
with open(country) as f:
file_data = json.load(f)
collection.insert_many(file_data)
client.close()
print("upload done")
# with open('historical_data/aruba.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/bahamas.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/barbados.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/belize.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/bermuda.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/britishvirgin.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/canada.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/caymanislands.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/costarica.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/cuba.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/curacao.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/dominica.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/dominicanrepublic.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/elsalvador.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/greenland.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/grenada.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/guadeloupe.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/guatemala.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/haiti.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/honduras.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/jamaica.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/martinique.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/mexico.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/monserrat.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/nicaragua.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/panama.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/puertorico.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/saintkittsandnevis.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/saintmartin.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/sintmaarten.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/trinidadandtobago.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/turkandcaicos.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/usa.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/usaminor.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
# with open('historical_data/usavirgin.json') as f:
# file_data = json.load(f)
# collection.insert_many(file_data)
# client.close()
| 27.567442 | 125 | 0.685844 | 815 | 5,927 | 4.803681 | 0.112883 | 0.147126 | 0.064368 | 0.101149 | 0.791315 | 0.791315 | 0.791315 | 0.791315 | 0.769349 | 0.769349 | 0 | 0.003263 | 0.172769 | 5,927 | 214 | 126 | 27.696262 | 0.795227 | 0.835667 | 0 | 0 | 0 | 0.047619 | 0.163241 | 0.147521 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.190476 | 0 | 0.190476 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
75e6bc4162699ccda7a75697dfd5999a0de09edf | 68 | py | Python | scienv/lib/__init__.py | pyKrzysztof/sci-env | 7fbcde89e57f90a87ca0d34604e6451ead06c8c6 | [
"MIT"
] | null | null | null | scienv/lib/__init__.py | pyKrzysztof/sci-env | 7fbcde89e57f90a87ca0d34604e6451ead06c8c6 | [
"MIT"
] | null | null | null | scienv/lib/__init__.py | pyKrzysztof/sci-env | 7fbcde89e57f90a87ca0d34604e6451ead06c8c6 | [
"MIT"
] | null | null | null | from .buttons import *
from .scaled import *
from .scrolled import * | 22.666667 | 23 | 0.75 | 9 | 68 | 5.666667 | 0.555556 | 0.392157 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.161765 | 68 | 3 | 23 | 22.666667 | 0.894737 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
75ec21fa7e4b559dfb28d644a3c9d3ec71aca408 | 35 | py | Python | PEP8.py | lcarlin/guppe | a0ee7b85e8687e8fb8243fbb509119a94bc6460f | [
"Apache-2.0"
] | 1 | 2021-12-18T15:29:24.000Z | 2021-12-18T15:29:24.000Z | PEP8.py | lcarlin/guppe | a0ee7b85e8687e8fb8243fbb509119a94bc6460f | [
"Apache-2.0"
] | null | null | null | PEP8.py | lcarlin/guppe | a0ee7b85e8687e8fb8243fbb509119a94bc6460f | [
"Apache-2.0"
] | 3 | 2021-08-23T22:45:20.000Z | 2022-02-17T13:17:09.000Z | """
PEP8 do python
"""
import this | 7 | 14 | 0.628571 | 5 | 35 | 4.4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035714 | 0.2 | 35 | 5 | 15 | 7 | 0.75 | 0.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
f933fd71d2fd6c35fc5fd9c63bd0ff76f72f146a | 5,464 | py | Python | tests/integration/api/account/test_account_update.py | marcosricardoss/flask-restful-skeleton | 59792c25189d2b3ee7a09d5167ced7835ce1819b | [
"Unlicense",
"MIT"
] | 10 | 2019-06-26T02:14:44.000Z | 2022-03-29T12:55:21.000Z | tests/integration/api/account/test_account_update.py | marcosricardoss/flask-docker-boilerplate | 5f0fead79e2c38e7231d8570ce64389620940a20 | [
"MIT"
] | 3 | 2019-12-26T17:28:44.000Z | 2020-03-10T19:35:07.000Z | tests/integration/api/account/test_account_update.py | marcosricardoss/flask-docker-boilerplate | 5f0fead79e2c38e7231d8570ce64389620940a20 | [
"MIT"
] | 4 | 2020-04-23T06:30:18.000Z | 2021-12-07T18:07:39.000Z | """It contains tests for the account updating endpoint."""
from datetime import datetime
from flask import json
from tests.util import create_user, create_tokens, get_unique_username
def test_update_account_with_data_well_formatted_returning_200_status_code(client, session):
    """
    GIVEN a Flask application
    WHEN a well-formed PUT request is sent to '/account'
    THEN the response is a 200 containing the updated user's data
    """
    user = create_user(session)
    tokens = create_tokens(user.username)
    headers = {'Authorization': 'Bearer ' + tokens['access']['enconded']}
    payload = json.dumps({'password': "x123x"})

    response = client.put('/account',
                          data=payload,
                          content_type='application/json',
                          headers=headers)

    assert response.status_code == 200
    assert response.json['status'] == 'success'
    assert int(response.json['data']['id']) == user.id
    assert response.json['data']['username'] == user.username
def test_update_account_with_password_length_smaller_than_3_character_returning_400_status_code(client, session):
    """
    GIVEN a Flask application
    WHEN '/account' receives a PUT with a too-short password
    THEN the response is a 400 failure naming the password rule
    """
    user = create_user(session)
    tokens = create_tokens(user.username)
    headers = {'Authorization': 'Bearer ' + tokens['access']['enconded']}
    payload = json.dumps({'password': "xx"})

    response = client.put('/account',
                          data=payload,
                          content_type='application/json',
                          headers=headers)

    assert response.status_code == 400
    assert response.json['status'] == 'fail'
    assert {"password": "minimum length of 3 characters"} in response.json['data']
def test_update_account_with_an_user_already_excluded_returning_404_status_code(client, session):
    """
    GIVEN a Flask application
    WHEN '/account' receives a PUT whose token refers to a deleted user
    THEN the response is a 404 error
    """
    user = create_user(session)
    tokens = create_tokens(user.username)

    # Remove the user so the (still valid) token now points at nobody.
    session.delete(user)
    session.commit()

    response = client.put('/account',
                          content_type='application/json',
                          headers={'Authorization': 'Bearer ' + tokens['access']['enconded']})

    assert response.status_code == 404
    assert response.json['status'] == 'error'
    assert response.json['message'] == 'not Found'
def test_update_account_without_data_returning_400_status_code(client, session):
    """
    GIVEN a Flask application
    WHEN '/account' receives a PUT carrying no request body
    THEN the response is a 400 'bad request' failure
    """
    user = create_user(session)
    tokens = create_tokens(user.username)
    headers = {'Authorization': 'Bearer ' + tokens['access']['enconded']}

    response = client.put('/account',
                          content_type='application/json',
                          headers=headers)

    assert response.status_code == 400
    assert response.json['status'] == 'fail'
    assert response.json['message'] == 'bad request'
def test_update_account_without_request_content_type_returning_400_status_code(client, session):
    """
    GIVEN a Flask application
    WHEN '/account' receives a PUT with no content type set
    THEN the response is a 400 'bad request' failure
    """
    user = create_user(session)
    tokens = create_tokens(user.username)
    headers = {'Authorization': 'Bearer ' + tokens['access']['enconded']}

    response = client.put('/account', headers=headers)

    assert response.status_code == 400
    assert response.json['status'] == 'fail'
    assert response.json['message'] == 'bad request'
def test_update_account_with_empty_data_returning_400_status_code(client, session):
    """
    GIVEN a Flask application
    WHEN '/account' receives a PUT whose JSON body is empty
    THEN the response is a 400 failure requiring a password
    """
    user = create_user(session)
    tokens = create_tokens(user.username)
    headers = {'Authorization': 'Bearer ' + tokens['access']['enconded']}
    payload = json.dumps({})

    response = client.put('/account',
                          data=payload,
                          content_type='application/json',
                          headers=headers)

    assert response.status_code == 400
    assert response.json['status'] == 'fail'
    assert {'password': 'must be filled'} in response.json['data']
def test_update_account_without_password_returning_400_status_code(client, session):
"""
GIVEN a Flask application
WHEN the '/account' URL is requested (PUT) without password passed
THEN check the response HTTP 400 response
"""
user = create_user(session)
tokens = create_tokens(user.username)
endpoint = '/account'
data = {'username': 'user'}
response = client.put(endpoint,
data=json.dumps(data),
content_type='application/json',
headers={'Authorization': 'Bearer ' + tokens['access']['enconded']})
assert response.status_code == 400
assert response.json['status'] == 'fail'
assert not {'username': 'must be filled'} in response.json['data']
assert {'password': 'must be filled'} in response.json['data'] | 37.944444 | 113 | 0.647694 | 616 | 5,464 | 5.582792 | 0.152597 | 0.073277 | 0.057575 | 0.04071 | 0.797034 | 0.775807 | 0.757197 | 0.748473 | 0.731608 | 0.71009 | 0 | 0.015644 | 0.239568 | 5,464 | 144 | 114 | 37.944444 | 0.812034 | 0.18082 | 0 | 0.686747 | 0 | 0 | 0.159935 | 0 | 0 | 0 | 0 | 0 | 0.277108 | 1 | 0.084337 | false | 0.084337 | 0.036145 | 0 | 0.120482 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 6 |
f939e6e4e133e64c019924f72ee32505b97f6cf6 | 5,485 | py | Python | scripts/dataset_generator/image_writers.py | matteo-rizzo/MotionClassificationCNN | 850cdfa2e0f536494c91d31a8cfd6826f2de52d5 | [
"MIT"
] | 4 | 2019-10-22T12:40:39.000Z | 2020-05-05T01:55:46.000Z | scripts/dataset_generator/image_writers.py | matteo-rizzo/MotionClassificationCNN | 850cdfa2e0f536494c91d31a8cfd6826f2de52d5 | [
"MIT"
] | null | null | null | scripts/dataset_generator/image_writers.py | matteo-rizzo/MotionClassificationCNN | 850cdfa2e0f536494c91d31a8cfd6826f2de52d5 | [
"MIT"
] | null | null | null | import os
from PIL import Image, ImageDraw, ImageFilter
# from tqdm import tqdm
def blur_image(image, radius=2, fallback_mode='L'):
    """
    Return *image* blurred with a Gaussian filter of the given radius.

    Other possible blur filters are listed in the Pillow ImageFilter docs:
    https://pillow.readthedocs.io/en/3.1.x/reference/ImageFilter.html

    IMPORTANT: filters cannot be applied to mode '1' images, so a mode-'1'
    input is first converted to *fallback_mode* (default 'L'). Mode details:
    https://pillow.readthedocs.io/en/5.1.x/handbook/concepts.html#concept-modes

    :param image: PIL image object
    :param radius: radius of the Gaussian blur
    :param fallback_mode: mode to convert to when the input mode is '1'
    :return: blurred image, same size as the input
    """
    source = image.convert(fallback_mode) if image.mode == '1' else image
    return source.filter(ImageFilter.GaussianBlur(radius))
def _render_interval(trial, interval, mode, img_res, img_res_r, r, center, blur):
    """Render one interval of one trial as a dot image.

    ``trial`` is indexed as ``trial[axis, coordinate, interval]`` with axis 0
    holding x values and axis 1 holding y values (presumably a numpy array —
    inferred from the tuple indexing; confirm against the caller).
    Each dot is drawn as a filled black ellipse of radius ``r`` offset by
    ``center``, then the image is resized to (img_res_r, img_res_r) and
    optionally blurred (blurring a mode-'1' image converts it to 'L';
    see blur_image).
    """
    img = Image.new(mode=mode, size=img_res, color='white')
    draw = ImageDraw.Draw(img)
    for coordinate in range(len(trial[0])):
        x = trial[0, coordinate, interval] + center
        y = trial[1, coordinate, interval] + center
        draw.ellipse((x - r, y - r, x + r, y + r), fill=0)
    del draw
    img = img.resize((img_res_r, img_res_r))
    if blur:
        img = blur_image(img)
    return img


def _write_trials_3D(data, path, subdir, blur, img_res, img_res_r, r, center, img_name):
    """Save one RGBA image per trial under path/subdir as 't<idx><img_name>'.

    The trial's first four intervals become the R, G, B and A channels.
    Exactly four intervals are expected; fewer raises IndexError (matching
    the original per-category writers this replaces).
    """
    for idx_t, trial in enumerate(data):
        intervals = [
            _render_interval(trial, interval, 'L', img_res, img_res_r, r, center, blur)
            for interval in range(len(trial[0, 0]))
        ]
        merged = Image.merge('RGBA', [intervals[i].getchannel(0) for i in range(4)])
        merged.save(fp=os.path.join(path, subdir, 't{idx_t}{c}'.format(
                        idx_t=idx_t,
                        c=img_name)),
                    quality=100,
                    format='png')


def _write_trials_2D(data, path, subdir, blur, img_res, img_res_r, r, center, img_name):
    """Save one mode-'1' image per interval of every trial under path/subdir
    as 't<trial>-i<interval><img_name>'."""
    for idx_t, trial in enumerate(data):
        for idx_i in range(len(trial[0, 0])):
            img = _render_interval(trial, idx_i, '1', img_res, img_res_r, r, center, blur)
            img.save(os.path.join(path, subdir, 't{idx_t}-i{idx_i}{c}'.format(
                idx_t=idx_t,
                idx_i=idx_i,
                c=img_name)),
                quality=100)


def write_coherent_3D(data, path, blur, img_res, img_res_r, r, center, img_name):
    """Write one 4-channel PNG per coherent trial (intervals as RGBA channels)."""
    _write_trials_3D(data, path, 'coherent', blur, img_res, img_res_r, r, center, img_name)


def write_noise_3D(data, path, blur, img_res, img_res_r, r, center, img_name):
    """Write one 4-channel PNG per noise trial (intervals as RGBA channels)."""
    _write_trials_3D(data, path, 'noise', blur, img_res, img_res_r, r, center, img_name)


def write_coherent_2D(data, path, blur, img_res, img_res_r, r, center, img_name):
    """Write one binary (mode '1') image per interval of each coherent trial."""
    _write_trials_2D(data, path, 'coherent', blur, img_res, img_res_r, r, center, img_name)


def write_noise_2D(data, path, blur, img_res, img_res_r, r, center, img_name):
    """Write one binary (mode '1') image per interval of each noise trial."""
    _write_trials_2D(data, path, 'noise', blur, img_res, img_res_r, r, center, img_name)
| 38.356643 | 115 | 0.577758 | 770 | 5,485 | 3.955844 | 0.157143 | 0.039396 | 0.027577 | 0.010506 | 0.783322 | 0.778398 | 0.753775 | 0.753775 | 0.719632 | 0.719632 | 0 | 0.020461 | 0.29608 | 5,485 | 142 | 116 | 38.626761 | 0.768454 | 0.190337 | 0 | 0.793103 | 0 | 0 | 0.02919 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057471 | false | 0 | 0.022989 | 0 | 0.091954 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
f992c2e9d76193298f0d9115beee58c7a9dd19cb | 59 | py | Python | livro/ex4.1.py | KaioPlandel/Estudos-Python-3 | 21c3dfb73b9ef0420eac093434050e4aff8fd61e | [
"MIT"
] | null | null | null | livro/ex4.1.py | KaioPlandel/Estudos-Python-3 | 21c3dfb73b9ef0420eac093434050e4aff8fd61e | [
"MIT"
] | null | null | null | livro/ex4.1.py | KaioPlandel/Estudos-Python-3 | 21c3dfb73b9ef0420eac093434050e4aff8fd61e | [
"MIT"
] | null | null | null | s = '0123456789'
print(s[2:5])
print(s[7:9])
print(s[1:8]) | 11.8 | 16 | 0.59322 | 14 | 59 | 2.5 | 0.642857 | 0.514286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.301887 | 0.101695 | 59 | 5 | 17 | 11.8 | 0.358491 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.75 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
f9c4f08a4223f1658b18b68c0fddba34b09bbaed | 118 | py | Python | python/tests/sns.py | ray-pan-bci/convergdb | d12730d801200a0a084038dc214c23c2d8adf69b | [
"MIT"
] | 10 | 2018-04-19T16:09:11.000Z | 2020-04-15T03:43:28.000Z | python/tests/sns.py | ray-pan-bci/convergdb | d12730d801200a0a084038dc214c23c2d8adf69b | [
"MIT"
] | 13 | 2020-04-09T13:56:38.000Z | 2020-04-15T03:27:02.000Z | python/tests/sns.py | ray-pan-bci/convergdb | d12730d801200a0a084038dc214c23c2d8adf69b | [
"MIT"
] | 4 | 2018-08-31T09:15:27.000Z | 2020-04-28T01:24:17.000Z | from context import convergdb
from structure import *
import pytest
# need to refactor
def test_publish_sns():
pass | 16.857143 | 29 | 0.79661 | 17 | 118 | 5.411765 | 0.823529 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.161017 | 118 | 7 | 30 | 16.857143 | 0.929293 | 0.135593 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | true | 0.2 | 0.6 | 0 | 0.8 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
fb39a3986b1834e3e230354e2c87f69da6ba733f | 2,896 | py | Python | tests/test_mdanalysis.py | MQSchleich/dylightful | 6abbb690c8387c522c9bff21c72b5c66aab77ede | [
"MIT"
] | null | null | null | tests/test_mdanalysis.py | MQSchleich/dylightful | 6abbb690c8387c522c9bff21c72b5c66aab77ede | [
"MIT"
] | 5 | 2022-02-05T12:47:42.000Z | 2022-03-16T11:42:20.000Z | tests/test_mdanalysis.py | MQSchleich/dylightful | 6abbb690c8387c522c9bff21c72b5c66aab77ede | [
"MIT"
] | null | null | null | # test postprocessing of MSM for validation purposes and additional viz.
import os
import pytest
from dylightful.discretizer import smooth_projection_k_means, tae_discretizer
from dylightful.mdanalysis import write_dcd, write_state
from dylightful.utilities import get_dir, load_parsed_dyno
dirname = os.path.dirname(__file__)
@pytest.mark.parametrize(
"traj_path, dyn_path, discretizer, num_states",
[
(
"Trajectories/ZIKV/ZIKV-Pro-427-1_dynophore_time_series.json",
"Trajectories/ZIKV/ZIKV-Pro-427-1_dynophore.json",
tae_discretizer,
4,
),
],
)
def test_write_state(traj_path, dyn_path, discretizer, num_states):
"""Testing the writing function of the MDanalysis script
Args:
traj_path ([type]): [description]
dyn_path ([type]): [description]
discretizer ([type]): [description]
num_states ([type]): [description]
"""
topology = os.path.join(dirname, "Trajectories/ZIKV/startframe.pdb")
coordinates = os.path.join(dirname, "Trajectories/ZIKV/test.dcd")
base = os.path.join(dirname, "Trajectories/ZIKV/")
prefix = "ligand_view_"
traj_path = os.path.join(dirname, traj_path)
dyn_path = os.path.join(dirname, dyn_path)
time_ser, num_obs = load_parsed_dyno(traj_path=traj_path)
save_path = get_dir(traj_path)
proj = discretizer(time_ser=time_ser, num_states=num_states, save_path=save_path)
labels = smooth_projection_k_means(proj, num_states)
write_state(
labels=labels[:100], topology=topology, coordinates=coordinates, base=base
)
@pytest.mark.parametrize(
"traj_path, dyn_path, discretizer, num_states",
[
(
"Trajectories/ZIKV/ZIKV-Pro-427-1_dynophore_time_series.json",
"Trajectories/ZIKV/ZIKV-Pro-427-1_dynophore.json",
tae_discretizer,
4,
),
],
)
def test_write_dcd(traj_path, dyn_path, discretizer, num_states):
"""Testing the writing function of the MDanalysis script
Args:
traj_path ([type]): [description]
dyn_path ([type]): [description]
discretizer ([type]): [description]
num_states ([type]): [description]
"""
topology = os.path.join(dirname, "Trajectories/ZIKV/startframe.pdb")
coordinates = os.path.join(dirname, "Trajectories/ZIKV/test.dcd")
base = os.path.join(dirname, "Trajectories/ZIKV/")
prefix = "ligand_view_"
traj_path = os.path.join(dirname, traj_path)
dyn_path = os.path.join(dirname, dyn_path)
time_ser, num_obs = load_parsed_dyno(traj_path=traj_path)
save_path = get_dir(traj_path)
proj = discretizer(time_ser=time_ser, num_states=num_states, save_path=save_path)
labels = smooth_projection_k_means(proj, num_states)
write_dcd(
labels=labels[:100], topology=topology, coordinates=coordinates, base=base
)
| 34.891566 | 85 | 0.690953 | 366 | 2,896 | 5.202186 | 0.202186 | 0.067227 | 0.052521 | 0.089286 | 0.85084 | 0.85084 | 0.85084 | 0.85084 | 0.85084 | 0.786765 | 0 | 0.010309 | 0.196133 | 2,896 | 82 | 86 | 35.317073 | 0.80756 | 0.172997 | 0 | 0.678571 | 0 | 0 | 0.203942 | 0.140531 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.089286 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
fb3c3b67ddff625b4414d76118d96b7f26c021c2 | 456 | py | Python | day01/test_part1.py | Olson3R/aoc-2019-py | 3abf686ec2b2784edf618322576692846f5a00e7 | [
"MIT"
] | null | null | null | day01/test_part1.py | Olson3R/aoc-2019-py | 3abf686ec2b2784edf618322576692846f5a00e7 | [
"MIT"
] | null | null | null | day01/test_part1.py | Olson3R/aoc-2019-py | 3abf686ec2b2784edf618322576692846f5a00e7 | [
"MIT"
] | null | null | null | from part1 import calculate_fuel
def test_calculate_fuel():
    """Check calculate_fuel against the four known mass/fuel pairs."""
    cases = (
        (12, 2),
        (14, 2),
        (1969, 654),
        (100756, 33583),
    )
    for mass, expected in cases:
        assert calculate_fuel(mass) == expected, \
            "Invalid value for mass {}, should be {}".format(mass, expected)
    print("Passed!")


if __name__ == "__main__":
    test_calculate_fuel()
| 38 | 90 | 0.734649 | 69 | 456 | 4.608696 | 0.376812 | 0.286164 | 0.238994 | 0.238994 | 0.301887 | 0.176101 | 0 | 0 | 0 | 0 | 0 | 0.128272 | 0.162281 | 456 | 11 | 91 | 41.454545 | 0.704188 | 0 | 0 | 0 | 0 | 0 | 0.392544 | 0 | 0 | 0 | 0 | 0 | 0.444444 | 1 | 0.111111 | true | 0.111111 | 0.111111 | 0 | 0.222222 | 0.111111 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 6 |
34990927f0569c48c4c117b736802062ad011c77 | 35 | py | Python | index.py | yuanmouren1hao/python_games | 4dfe9187302839b97864b4037703b7cf4cb7940b | [
"Apache-2.0"
] | null | null | null | index.py | yuanmouren1hao/python_games | 4dfe9187302839b97864b4037703b7cf4cb7940b | [
"Apache-2.0"
] | null | null | null | index.py | yuanmouren1hao/python_games | 4dfe9187302839b97864b4037703b7cf4cb7940b | [
"Apache-2.0"
] | null | null | null | #snake game by goldenli
import sys | 17.5 | 24 | 0.8 | 6 | 35 | 4.666667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.171429 | 35 | 2 | 25 | 17.5 | 0.965517 | 0.628571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
34ffe5a6a7836db8e4618f60abd1f59969d71f3e | 3,311 | py | Python | vote/migrations/0001_initial.py | The-Politico/politico-civic-vote | 49348f6c8137c07e4602da184aa500cbcc09affc | [
"MIT"
] | null | null | null | vote/migrations/0001_initial.py | The-Politico/politico-civic-vote | 49348f6c8137c07e4602da184aa500cbcc09affc | [
"MIT"
] | 4 | 2018-06-27T16:56:29.000Z | 2021-06-10T20:41:56.000Z | vote/migrations/0001_initial.py | The-Politico/politico-civic-vote | 49348f6c8137c07e4602da184aa500cbcc09affc | [
"MIT"
] | null | null | null | # Generated by Django 2.0.2 on 2018-02-15 02:31
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    # Auto-generated by Django (see the header comment); the operations below
    # must stay in sync with the recorded migration history, so only comments
    # are added here.

    initial = True

    dependencies = [
        ('geography', '0001_initial'),
        ('election', '0005_auto_20180206_2238'),
    ]

    operations = [
        # Delegate tallies per geographic Division, optionally tied to a
        # CandidateElection.
        migrations.CreateModel(
            name='Delegates',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('count', models.PositiveIntegerField()),
                ('pct', models.DecimalField(blank=True, decimal_places=3, max_digits=5, null=True)),
                ('total', models.PositiveIntegerField(blank=True, null=True)),
                ('superdelegates', models.BooleanField(default=False)),
                ('candidate_election', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='delegates', to='election.CandidateElection')),
                ('division', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='+', to='geography.Division')),
            ],
            options={
                'verbose_name_plural': 'Delegates',
            },
        ),
        # Electoral-vote tallies per Division; `winning` flags the winner.
        migrations.CreateModel(
            name='ElectoralVotes',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('count', models.PositiveIntegerField()),
                ('pct', models.DecimalField(blank=True, decimal_places=3, max_digits=5, null=True)),
                ('total', models.PositiveIntegerField(blank=True, null=True)),
                ('winning', models.BooleanField(default=False)),
                ('candidate_election', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='electoral_votes', to='election.CandidateElection')),
                ('division', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='+', to='geography.Division')),
            ],
            options={
                'verbose_name_plural': 'Electoral Votes',
            },
        ),
        # Popular-vote tallies per Division; may attach to either a
        # CandidateElection or a BallotAnswer.
        migrations.CreateModel(
            name='Votes',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('count', models.PositiveIntegerField()),
                ('pct', models.DecimalField(blank=True, decimal_places=3, max_digits=5, null=True)),
                ('total', models.PositiveIntegerField(blank=True, null=True)),
                ('winning', models.BooleanField(default=False)),
                ('ballot_answer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='election.BallotAnswer')),
                ('candidate_election', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='votes', to='election.CandidateElection')),
                ('division', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='+', to='geography.Division')),
            ],
            options={
                'verbose_name_plural': 'Votes',
            },
        ),
    ]
| 50.938462 | 191 | 0.61039 | 322 | 3,311 | 6.161491 | 0.236025 | 0.045363 | 0.056452 | 0.08871 | 0.789819 | 0.789819 | 0.789819 | 0.789819 | 0.789819 | 0.789819 | 0 | 0.017649 | 0.247055 | 3,311 | 64 | 192 | 51.734375 | 0.778179 | 0.013591 | 0 | 0.561404 | 1 | 0 | 0.157782 | 0.037377 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.052632 | 0 | 0.122807 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
550f690545360a9c386f8f788f27cced5ec51473 | 2,547 | py | Python | dl/models/fots/modules/recog.py | jjjkkkjjj/pytorch.dl | d82aa1191c14f328c62de85e391ac6fa1b4c7ee3 | [
"MIT"
] | 2 | 2021-02-06T22:40:13.000Z | 2021-03-26T09:15:34.000Z | dl/models/fots/modules/recog.py | jjjkkkjjj/pytorch.dl | d82aa1191c14f328c62de85e391ac6fa1b4c7ee3 | [
"MIT"
] | 8 | 2020-07-11T07:10:51.000Z | 2022-03-12T00:39:03.000Z | dl/models/fots/modules/recog.py | jjjkkkjjj/pytorch.dl | d82aa1191c14f328c62de85e391ac6fa1b4c7ee3 | [
"MIT"
] | 2 | 2021-03-26T09:19:42.000Z | 2021-07-27T02:38:09.000Z | from ...layers import *
from ...crnn.base import CRNNBase
class CRNN(CRNNBase):
    """Text-recognition head: three conv/BN/ReLU/pool stages plus one BiLSTM.

    Every pooling stage uses kernel and stride (2, 1), so only the height is
    collapsed while the width (the sequence axis fed to the LSTM) is kept.
    NOTE(review): the (name, module) pairs are wrapped in nn.ModuleDict; how
    they are iterated/applied is defined by CRNNBase, outside this file.
    """
    def __init__(self, class_labels, input_shape, blankIndex):
        super().__init__(class_labels, input_shape, blankIndex)
        #assert self.input_height == 8, 'height must be 8'
    def build_conv(self):
        # Channels grow 1->64->128->256 across the three blocks; the Conv2d
        # helper presumably yields (name, module) pairs — confirm in ...layers.
        conv_layers = [
            *Conv2d.block_relumpool(1, 2, self.input_channel, 64, conv_k_size=(3, 3), conv_stride=(1, 1),
                                    conv_padding=(1, 1),
                                    batch_norm=True, relu_inplace=True, pool_k_size=(2, 1), pool_stride=(2, 1)),
            *Conv2d.block_relumpool(2, 2, 64, 128, conv_k_size=(3, 3), conv_stride=(1, 1), conv_padding=(1, 1),
                                    batch_norm=True, relu_inplace=True, pool_k_size=(2, 1), pool_stride=(2, 1)),
            *Conv2d.block_relumpool(3, 2, 128, 256, conv_k_size=(3, 3), conv_stride=(1, 1), conv_padding=(1, 1),
                                    batch_norm=True, relu_inplace=True, pool_k_size=(2, 1), pool_stride=(2, 1))
        ]
        return nn.ModuleDict(conv_layers)
    def build_rec(self):
        # Single bidirectional LSTM mapping 256 features to the class logits.
        rec_layers = [
            ('BiLSTM', BidirectionalLSTM(256, 256, self.class_nums)),
        ]
        return nn.ModuleDict(rec_layers)
class CRNNin64(CRNNBase):
def __init__(self, class_labels, input_shape, blankIndex):
super().__init__(class_labels, input_shape, blankIndex)
#assert self.input_height == 8, 'height must be 8'
def build_conv(self):
conv_layers = [
*Conv2d.block_relumpool(1, 2, self.input_channel, 128, conv_k_size=(3, 3), conv_stride=(1, 1),
conv_padding=(1, 1),
batch_norm=True, relu_inplace=True, pool_k_size=(2, 1), pool_stride=(2, 1)),
*Conv2d.block_relumpool(2, 2, 128, 256, conv_k_size=(3, 3), conv_stride=(1, 1), conv_padding=(1, 1),
batch_norm=True, relu_inplace=True, pool_k_size=(2, 1), pool_stride=(2, 1)),
*Conv2d.block_relumpool(3, 2, 256, 512, conv_k_size=(3, 3), conv_stride=(1, 1), conv_padding=(1, 1),
batch_norm=True, relu_inplace=True, pool_k_size=(2, 1), pool_stride=(2, 1))
]
return nn.ModuleDict(conv_layers)
def build_rec(self):
rec_layers = [
('BiLSTM1', BidirectionalLSTM(512, 256, 256)),
('BiLSTM2', BidirectionalLSTM(256, 256, self.class_nums)),
]
return nn.ModuleDict(rec_layers) | 44.684211 | 112 | 0.572438 | 339 | 2,547 | 4 | 0.156342 | 0.044248 | 0.088496 | 0.044248 | 0.911504 | 0.911504 | 0.911504 | 0.911504 | 0.911504 | 0.911504 | 0 | 0.074917 | 0.292501 | 2,547 | 57 | 113 | 44.684211 | 0.67758 | 0.038477 | 0 | 0.585366 | 0 | 0 | 0.00817 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.146341 | false | 0 | 0.04878 | 0 | 0.341463 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
9b90d873e91c0bf272f9c9d1f9c61dc45b69a744 | 70 | py | Python | Inspur/benchmarks/dlrm/implementations/implementation_closed/dlrm/nn/__init__.py | goswamig/training_results_v0.7 | 4278ce8a0f3d4db6b5e6054277724ca36278d7a3 | [
"Apache-2.0"
] | 48 | 2020-07-29T18:09:23.000Z | 2021-10-09T01:53:33.000Z | Inspur/benchmarks/dlrm/implementations/implementation_closed/dlrm/nn/__init__.py | goswamig/training_results_v0.7 | 4278ce8a0f3d4db6b5e6054277724ca36278d7a3 | [
"Apache-2.0"
] | 9 | 2021-04-02T02:28:07.000Z | 2022-03-26T18:23:59.000Z | Lablup/benchmarks/dlrm/implementations/pytorch/dlrm/nn/__init__.py | lablup/training_results_v0.7 | f5bb59aa0f8b18b602763abe47d1d24d0d54b197 | [
"Apache-2.0"
] | 42 | 2020-08-01T06:41:24.000Z | 2022-01-20T10:33:08.000Z | from .modules.buckle_embedding import *
from .modules.gather import *
| 23.333333 | 39 | 0.8 | 9 | 70 | 6.111111 | 0.666667 | 0.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.114286 | 70 | 2 | 40 | 35 | 0.887097 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
9bc0eb21aac39710978be0a8ad1f6a3e60df91c8 | 74 | py | Python | api/assets/variations/__init__.py | GetmeUK/h51 | 17d4003336857514765a42a0853995fbe3da6525 | [
"MIT"
] | null | null | null | api/assets/variations/__init__.py | GetmeUK/h51 | 17d4003336857514765a42a0853995fbe3da6525 | [
"MIT"
] | 4 | 2021-06-08T22:58:13.000Z | 2022-03-12T00:53:18.000Z | api/assets/variations/__init__.py | GetmeUK/h51 | 17d4003336857514765a42a0853995fbe3da6525 | [
"MIT"
] | null | null | null | from .collection import *
from .document import *
from .download import *
| 18.5 | 25 | 0.756757 | 9 | 74 | 6.222222 | 0.555556 | 0.357143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.162162 | 74 | 3 | 26 | 24.666667 | 0.903226 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
fd6433e46c972fd9b957336f0356f511b1528049 | 75,258 | py | Python | kive/metadata/tests_BasicConstraints.py | dmacmillan/Kive | 76bc8f289f66fb133f78cb6d5689568b7d015915 | [
"BSD-3-Clause"
] | 1 | 2021-12-22T06:10:01.000Z | 2021-12-22T06:10:01.000Z | kive/metadata/tests_BasicConstraints.py | dmacmillan/Kive | 76bc8f289f66fb133f78cb6d5689568b7d015915 | [
"BSD-3-Clause"
] | null | null | null | kive/metadata/tests_BasicConstraints.py | dmacmillan/Kive | 76bc8f289f66fb133f78cb6d5689568b7d015915 | [
"BSD-3-Clause"
] | null | null | null | """
Unit tests for Shipyard's BasicConstraint class and functionality relating to it.
"""
from django.test import TestCase, skipIfDBFeature
from django.core.exceptions import ValidationError
from metadata.models import *
from constants import datatypes
@skipIfDBFeature('is_mocked')
class BasicConstraintTestSetup(TestCase):
    fixtures = ["initial_data", "initial_groups", "initial_user"]

    def setUp(self):
        """
        Fetch the built-in Shipyard atomic Datatypes used throughout the
        BasicConstraint tests.
        """
        for attr_name, pk in (("STR", datatypes.STR_PK),
                              ("INT", datatypes.INT_PK),
                              ("FLOAT", datatypes.FLOAT_PK),
                              ("BOOL", datatypes.BOOL_PK)):
            setattr(self, attr_name, Datatype.objects.get(pk=pk))
class BasicConstraintCleanTests(BasicConstraintTestSetup):
    """Tests of BasicConstraint.clean() and its propagation through Datatype.clean()."""

    def __test_clean_numeric_constraint_good_h(self, builtin_type, BC_type, constr_val):
        """
        Helper for testing clean() on a well-defined (MIN|MAX)_(VAL|LENGTH) constraint.
        """
        constr_DT = Datatype(name="ConstrDT", description="Constrained Datatype", user=kive_user())
        constr_DT.full_clean()
        constr_DT.save()
        constr_DT.restricts.add(builtin_type)
        constr = constr_DT.basic_constraints.create(ruletype=BC_type, rule="{}".format(constr_val))
        # A well-formed constraint cleans without raising and returns None.
        # (assertEqual, not the deprecated assertEquals alias.)
        self.assertEqual(constr.clean(), None)
        # Propagation check: the parent Datatype must also clean.
        self.assertEqual(constr_DT.clean(), None)

    def test_clean_min_val_int_good(self):
        """
        Testing clean() on a well-defined MIN_VAL constraint on an integer.
        """
        self.__test_clean_numeric_constraint_good_h(self.INT, BasicConstraint.MIN_VAL, -7.5)

    def test_clean_max_val_int_good(self):
        """
        Testing clean() on a well-defined MAX_VAL constraint on an integer.
        """
        self.__test_clean_numeric_constraint_good_h(self.INT, BasicConstraint.MAX_VAL, -92)

    def test_clean_min_val_float_good(self):
        """
        Testing clean() on a well-defined MIN_VAL constraint on a float.
        """
        self.__test_clean_numeric_constraint_good_h(self.FLOAT, BasicConstraint.MIN_VAL, 987)

    def test_clean_max_val_float_good(self):
        """
        Testing clean() on a well-defined MAX_VAL constraint on a float.
        """
        self.__test_clean_numeric_constraint_good_h(self.FLOAT, BasicConstraint.MAX_VAL, -7.2)

    def test_clean_min_length_good(self):
        """
        Testing clean() on a well-defined MIN_LENGTH constraint on a string.
        """
        self.__test_clean_numeric_constraint_good_h(self.STR, BasicConstraint.MIN_LENGTH, 8)

    def test_clean_max_length_good(self):
        """
        Testing clean() on a well-defined MAX_LENGTH constraint on a string.
        """
        self.__test_clean_numeric_constraint_good_h(self.STR, BasicConstraint.MAX_LENGTH, 8)

    def test_clean_min_length_good_edge(self):
        """
        Testing clean() on a minimal (1) well-defined MIN_LENGTH constraint on a string.

        Note that MIN_LENGTH should not be 0, as that's the default constraint on any string.
        """
        self.__test_clean_numeric_constraint_good_h(self.STR, BasicConstraint.MIN_LENGTH, 1)

    def test_clean_max_length_good_edge(self):
        """
        Testing clean() on a minimal (1) well-defined MAX_LENGTH constraint on a string.
        """
        self.__test_clean_numeric_constraint_good_h(self.STR, BasicConstraint.MAX_LENGTH, 1)
########
def __create_bad_numeric_constraint_h(self, builtin_type, BC_type, constr_val):
"""
Helper for testing clean() on bad numeric constraints.
"""
constr_DT = Datatype(name="ConstrDT", description="Constrained Datatype", user=kive_user())
constr_DT.full_clean()
constr_DT.save()
constr_DT.restricts.add(builtin_type)
constr = constr_DT.basic_constraints.create(ruletype=BC_type, rule="{}".format(constr_val))
return constr, constr_DT
def test_clean_min_val_int_bad(self):
"""
Testing clean() on a badly-defined MIN_VAL constraint (integer).
"""
constr, constr_DT = self.__create_bad_numeric_constraint_h(
self.INT, BasicConstraint.MIN_VAL, "foo")
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound of "{}" on numeric value, '
'which is not a number'.format(constr, "foo")),
constr.clean)
# Propagation check.
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound of "{}" on numeric value, '
'which is not a number'.format(constr, "foo")),
constr_DT.clean)
def test_clean_max_val_int_bad(self):
"""
Testing clean() on a badly-defined MAX_VAL constraint (integer).
"""
constr, constr_DT = self.__create_bad_numeric_constraint_h(
self.INT, BasicConstraint.MAX_VAL, "foo")
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound of "{}" on numeric value, '
'which is not a number'.format(constr, "foo")),
constr.clean)
# Propagation check.
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound of "{}" on numeric value, '
'which is not a number'.format(constr, "foo")),
constr_DT.clean)
def test_clean_min_val_float_bad(self):
"""
Testing clean() on a badly-defined MIN_VAL constraint (float).
"""
constr, constr_DT = self.__create_bad_numeric_constraint_h(
self.FLOAT, BasicConstraint.MIN_VAL, "foo")
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound of "{}" on numeric value, '
'which is not a number'.format(constr, "foo")),
constr.clean)
# Propagation check.
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound of "{}" on numeric value, '
'which is not a number'.format(constr, "foo")),
constr_DT.clean)
def test_clean_max_val_float_bad(self):
"""
Testing clean() on a badly-defined MAX_VAL constraint (float).
"""
constr, constr_DT = self.__create_bad_numeric_constraint_h(
self.FLOAT, BasicConstraint.MAX_VAL, "foo")
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound of "{}" on numeric value, '
'which is not a number'.format(constr, "foo")),
constr.clean)
# Propagation check.
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound of "{}" on numeric value, '
'which is not a number'.format(constr, "foo")),
constr_DT.clean)
def test_clean_min_val_str_bad(self):
"""
Testing clean() on a badly-defined MIN_VAL constraint (string).
"""
constr, constr_DT = self.__create_bad_numeric_constraint_h(
self.STR, BasicConstraint.MIN_VAL, "300")
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound on numeric value, '
'but its parent Datatype "{}" has builtin type {}'
.format(constr, constr_DT, self.STR)),
constr.clean)
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound on numeric value, '
'but its parent Datatype "{}" has builtin type {}'
.format(constr, constr_DT, self.STR)),
constr_DT.clean)
def test_clean_max_val_str_bad(self):
"""
Testing clean() on a badly-defined MAX_VAL constraint (string).
"""
constr, constr_DT = self.__create_bad_numeric_constraint_h(
self.STR, BasicConstraint.MAX_VAL, "300")
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound on numeric value, '
'but its parent Datatype "{}" has builtin type {}'
.format(constr, constr_DT, self.STR)),
constr.clean)
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound on numeric value, '
'but its parent Datatype "{}" has builtin type {}'
.format(constr, constr_DT, self.STR)),
constr_DT.clean)
def test_clean_min_val_bool_bad(self):
"""
Testing clean() on a badly-defined MIN_VAL constraint (Boolean).
"""
constr, constr_DT = self.__create_bad_numeric_constraint_h(
self.BOOL, BasicConstraint.MIN_VAL, "300")
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound on numeric value, '
'but its parent Datatype "{}" has builtin type {}'
.format(constr, constr_DT, self.BOOL)),
constr.clean)
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound on numeric value, '
'but its parent Datatype "{}" has builtin type {}'
.format(constr, constr_DT, self.BOOL)),
constr_DT.clean)
def test_clean_max_val_bool_bad(self):
"""
Testing clean() on a badly-defined MAX_VAL constraint (Boolean).
"""
constr, constr_DT = self.__create_bad_numeric_constraint_h(
self.BOOL, BasicConstraint.MAX_VAL, "300")
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound on numeric value, '
'but its parent Datatype "{}" has builtin type {}'
.format(constr, constr_DT, self.BOOL)),
constr.clean)
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound on numeric value, '
'but its parent Datatype "{}" has builtin type {}'
.format(constr, constr_DT, self.BOOL)),
constr_DT.clean)
########
def __test_clean_length_constraint_non_string_h(self, builtin_type, BC_type, constr_val):
"""
Helper for defining tests on (MIN|MAX)_LENGTH constraints wrongly applied to non-string types.
"""
constr_DT = Datatype(name="NumericalWithLengthConstraint",
description="Incorrectly length-constrained Datatype", user=kive_user())
constr_DT.full_clean()
constr_DT.save()
constr_DT.restricts.add(builtin_type)
constr = constr_DT.basic_constraints.create(ruletype=BC_type, rule="{}".format(constr_val))
err_msg_key = "BC_length_constraint_on_non_string"
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound on string length, '
'but its parent Datatype "{}" has builtin type {}'
.format(constr, constr_DT, builtin_type)),
constr.clean)
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound on string length, '
'but its parent Datatype "{}" has builtin type {}'
.format(constr, constr_DT, builtin_type)),
constr_DT.clean)
def test_clean_min_length_int_bad(self):
"""
Testing clean() on a badly-defined MIN_LENGTH constraint (int).
"""
self.__test_clean_length_constraint_non_string_h(self.INT, BasicConstraint.MIN_LENGTH, 50)
def test_clean_min_length_float_bad(self):
"""
Testing clean() on a badly-defined MIN_LENGTH constraint (float).
"""
self.__test_clean_length_constraint_non_string_h(self.FLOAT, BasicConstraint.MIN_LENGTH, 5)
def test_clean_min_length_bool_bad(self):
"""
Testing clean() on a badly-defined MIN_LENGTH constraint (float).
"""
self.__test_clean_length_constraint_non_string_h(self.BOOL, BasicConstraint.MIN_LENGTH, 12)
def test_clean_max_length_int_bad(self):
"""
Testing clean() on a badly-defined MAX_LENGTH constraint (int).
"""
self.__test_clean_length_constraint_non_string_h(self.INT, BasicConstraint.MAX_LENGTH, 10000)
def test_clean_max_length_float_bad(self):
"""
Testing clean() on a badly-defined MAX_LENGTH constraint (float).
"""
self.__test_clean_length_constraint_non_string_h(self.FLOAT, BasicConstraint.MAX_LENGTH, 1)
def test_clean_max_length_bool_bad(self):
"""
Testing clean() on a badly-defined MAX_LENGTH constraint (bool).
"""
self.__test_clean_length_constraint_non_string_h(self.BOOL, BasicConstraint.MAX_LENGTH, 47)
########
def __test_clean_length_constraint_non_integer_h(self, BC_type, constr_val):
"""
Helper for defining tests on (MIN|MAX)_LENGTH constraints with non-integer values.
"""
constr_DT = Datatype(name="NonIntegerLengthConstraint",
description="String with poorly-formed length constraint",
user=kive_user())
constr_DT.full_clean()
constr_DT.save()
constr_DT.restricts.add(self.STR)
constr = constr_DT.basic_constraints.create(ruletype=BC_type, rule="{}".format(constr_val))
err_msg_key = "BC_length_constraint_non_integer"
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound of "{}" on string length, '
'which is not an integer'.format(constr, constr_val)),
constr.clean)
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound of "{}" on string length, '
'which is not an integer'.format(constr, constr_val)),
constr_DT.clean)
def test_clean_float_min_length_bad(self):
"""
Testing clean() on a badly-defined (float) MIN_LENGTH constraint.
"""
self.__test_clean_length_constraint_non_integer_h(BasicConstraint.MIN_LENGTH, 4.7)
def test_clean_str_min_length_bad(self):
"""
Testing clean() on a badly-defined (str) MIN_LENGTH constraint.
"""
self.__test_clean_length_constraint_non_integer_h(BasicConstraint.MIN_LENGTH, "foo")
def test_clean_float_max_length_bad(self):
"""
Testing clean() on a badly-defined (float) MAX_LENGTH constraint.
"""
self.__test_clean_length_constraint_non_integer_h(BasicConstraint.MAX_LENGTH, 66.25)
def test_clean_str_max_length_bad(self):
"""
Testing clean() on a badly-defined (str) MIN_LENGTH constraint.
"""
self.__test_clean_length_constraint_non_integer_h(BasicConstraint.MAX_LENGTH, "bar")
########
def __test_clean_length_constraint_too_small_h(self, BC_type, constr_val):
"""
Helper for defining tests on (MIN|MAX)_LENGTH constraints whose values are too small.
"""
constr_DT = Datatype(name="TooSmallLengthConstraint",
description="String with too-small length constraint",
user=kive_user())
constr_DT.full_clean()
constr_DT.save()
constr_DT.restricts.add(self.STR)
constr = constr_DT.basic_constraints.create(ruletype=BC_type, rule="{}".format(constr_val))
err_msg_key = "BC_length_constraint_non_positive"
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound of "{}" on string length, '
'which is not positive'.format(constr, constr_val)),
constr.clean)
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound of "{}" on string length, '
'which is not positive'.format(constr, constr_val)),
constr_DT.clean)
def test_clean_min_length_non_positive_edge(self):
"""
Testing clean() on an edge-case negative (0) MIN_LENGTH constraint.
"""
self.__test_clean_length_constraint_too_small_h(BasicConstraint.MIN_LENGTH, 0)
def test_clean_min_length_non_positive_regular(self):
"""
Testing clean() on a non-edge non-positive MIN_LENGTH constraint.
"""
self.__test_clean_length_constraint_too_small_h(BasicConstraint.MIN_LENGTH, -15)
def test_clean_max_length_non_positive_edge(self):
"""
Testing clean() on an edge-case non-positive (0) MAX_LENGTH constraint.
"""
self.__test_clean_length_constraint_too_small_h(BasicConstraint.MAX_LENGTH, 0)
def test_clean_max_length_non_positive_regular(self):
"""
Testing clean() on a non-edge non-positive MAX_LENGTH constraint.
"""
self.__test_clean_length_constraint_too_small_h(BasicConstraint.MAX_LENGTH, -20)
########
def __test_clean_regexp_good_h(self, builtin_type, pattern):
"""
Helper to create good REGEXP-constraint test cases.
"""
regexped_DT = Datatype(name="RegexpedDT",
description="Datatype with good REGEXP attached",
user=kive_user())
regexped_DT.full_clean()
regexped_DT.save()
regexped_DT.restricts.add(builtin_type)
regexp_constr = regexped_DT.basic_constraints.create(ruletype=BasicConstraint.REGEXP,
rule="{}".format(pattern))
self.assertEquals(regexp_constr.clean(), None)
# Propagation check.
self.assertEquals(regexped_DT.clean(), None)
def test_clean_regexp_str_good(self):
"""
Testing clean() on a string with a good REGEXP attached.
"""
self.__test_clean_regexp_good_h(self.STR, "foo")
def test_clean_regexp_float_good(self):
"""
Testing clean() on a float with a good REGEXP attached.
"""
self.__test_clean_regexp_good_h(self.STR, "1e.+")
def test_clean_regexp_int_good(self):
"""
Testing clean() on an int with a good REGEXP attached.
"""
# Note that this would be a pretty dumb regexp to put on an integer!
self.__test_clean_regexp_good_h(self.STR, "bar")
def test_clean_regexp_bool_good(self):
"""
Testing clean() on a Boolean with a good REGEXP attached.
"""
# Note that this would be a pretty dumb regexp to put on an integer!
self.__test_clean_regexp_good_h(self.STR, "T|F")
####
def __test_clean_regexp_bad_h(self, builtin_type, pattern):
"""
Helper to create bad REGEXP-constraint test cases.
"""
regexped_DT = Datatype(name="RegexpedDT", description="Datatype with bad REGEXP attached",
user=kive_user())
regexped_DT.full_clean()
regexped_DT.save()
regexped_DT.restricts.add(builtin_type)
regexp_constr = regexped_DT.basic_constraints.create(ruletype=BasicConstraint.REGEXP, rule="{}".format(pattern))
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies an invalid regular expression "{}"'
.format(regexp_constr, pattern)),
regexp_constr.clean)
# Propagation check.
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies an invalid regular expression "{}"'
.format(regexp_constr, pattern)),
regexped_DT.clean)
def test_clean_regexp_str_bad(self):
"""
Testing clean() on a string with a bad REGEXP attached.
"""
self.__test_clean_regexp_bad_h(self.STR, "(.+")
def test_clean_regexp_float_bad(self):
"""
Testing clean() on a float with a bad REGEXP attached.
"""
self.__test_clean_regexp_bad_h(self.FLOAT, "[a-z")
def test_clean_regexp_int_bad(self):
"""
Testing clean() on an int with a bad REGEXP attached.
"""
self.__test_clean_regexp_bad_h(self.INT, "1)")
def test_clean_regexp_bool_bad(self):
"""
Testing clean() on a Boolean with a bad REGEXP attached.
"""
self.__test_clean_regexp_bad_h(self.BOOL, "1919)")
####
def __test_clean_dtf_good_h(self, format_string):
"""
Helper for testing clean() on good DATETIMEFORMATs.
"""
dtf_DT = Datatype(name="GoodDTF", description="String with a DTF constraint attached",
user=kive_user())
dtf_DT.full_clean()
dtf_DT.save()
dtf_DT.restricts.add(self.STR)
dtf = dtf_DT.basic_constraints.create(ruletype=BasicConstraint.DATETIMEFORMAT,
rule=format_string)
self.assertEquals(dtf.clean(), None)
# Propagation check.
self.assertEquals(dtf_DT.clean(), None)
def test_clean_dtf_good(self):
"""
Testing clean() on a good DATETIMEFORMAT BasicConstraint.
"""
self.__test_clean_dtf_good_h("%Y %b %d")
def test_clean_dtf_good_2(self):
"""
Testing clean() on a second good DATETIMEFORMAT BasicConstraint.
"""
self.__test_clean_dtf_good_h("%A, %Y-%m-%d %H:%M:%S %z")
def test_clean_dtf_good_3(self):
"""
Testing clean() on a third good DATETIMEFORMAT BasicConstraint.
"""
self.__test_clean_dtf_good_h("FOOBAR")
def __test_clean_dtf_bad_h(self, builtin_type, format_string):
"""
Helper for testing clean() on DATETIMEFORMATs applied to non-strings.
"""
dtf_DT = Datatype(name="BadDTF", description="Non-string with a DTF constraint attached",
user=kive_user())
dtf_DT.full_clean()
dtf_DT.save()
dtf_DT.restricts.add(builtin_type)
dtf = dtf_DT.basic_constraints.create(ruletype=BasicConstraint.DATETIMEFORMAT,
rule=format_string)
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a date/time format, but its parent Datatype '
'"{}" has builtin type "{}"'.format(dtf, dtf_DT, builtin_type)),
dtf.clean)
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a date/time format, but its parent Datatype '
'"{}" has builtin type "{}"'.format(dtf, dtf_DT, builtin_type)),
dtf_DT.clean)
def test_clean_dtf_float_bad(self):
"""
Testing clean() on a DATETIMEFORMAT applied to a float.
"""
self.__test_clean_dtf_bad_h(self.FLOAT, "%Y %b %d")
def test_clean_dtf_int_bad(self):
"""
Testing clean() on a DATETIMEFORMAT applied to an int.
"""
self.__test_clean_dtf_bad_h(self.INT, "FOOBAR")
def test_clean_dtf_bool_bad(self):
"""
Testing clean() on a DATETIMEFORMAT applied to a Boolean.
"""
self.__test_clean_dtf_bad_h(self.FLOAT, "2014-%m-%d %H:%M:%S %z")
########
def __test_clean_incomplete_parent_bad_h(self, BC_type, constr_val):
"""
Helper for clean() on a BasicConstraint attached to an incomplete Datatype.
"""
incomplete_DT = Datatype(name="IncompleteDT", description="Datatype that does not restrict any builtin",
user=kive_user())
incomplete_DT.full_clean()
incomplete_DT.save()
constr = incomplete_DT.basic_constraints.create(ruletype=BC_type, rule="{}".format(constr_val))
err_msg_key = "BC_DT_not_complete"
self.assertRaisesRegexp(ValidationError,
re.escape('Parent Datatype "{}" of BasicConstraint "{}" is not complete'
.format(incomplete_DT, constr)),
constr.clean)
self.assertRaisesRegexp(ValidationError,
re.escape('Parent Datatype "{}" of BasicConstraint "{}" is not complete'
.format(incomplete_DT, constr)),
incomplete_DT.clean)
def test_clean_incomplete_parent_regexp_bad(self):
"""
Testing clean() on a REGEXP BasicConstraint attached to an incomplete Datatype.
"""
self.__test_clean_incomplete_parent_bad_h(BasicConstraint.REGEXP, ".*")
def test_clean_incomplete_parent_dtf_bad(self):
"""
Testing clean() on a DATETIMEFORMAT BasicConstraint attached to an incomplete Datatype.
"""
self.__test_clean_incomplete_parent_bad_h(BasicConstraint.DATETIMEFORMAT, "%Y %b %d")
def test_clean_incomplete_parent_min_val_bad(self):
"""
Testing clean() on a MIN_VAL BasicConstraint attached to an incomplete Datatype.
"""
self.__test_clean_incomplete_parent_bad_h(BasicConstraint.MIN_VAL, 16)
def test_clean_incomplete_parent_max_val_bad(self):
"""
Testing clean() on a MAX_VAL BasicConstraint attached to an incomplete Datatype.
"""
self.__test_clean_incomplete_parent_bad_h(BasicConstraint.MAX_VAL, 333333)
def test_clean_incomplete_parent_min_length_bad(self):
"""
Testing clean() on a MIN_LENGTH BasicConstraint attached to an incomplete Datatype.
"""
self.__test_clean_incomplete_parent_bad_h(BasicConstraint.MIN_LENGTH, 2)
def test_clean_incomplete_parent_max_length_bad(self):
"""
Testing clean() on a MAX_LENGTH BasicConstraint attached to an incomplete Datatype.
"""
self.__test_clean_incomplete_parent_bad_h(BasicConstraint.MAX_LENGTH, 27)
########
# Some "greatest hits" from the above testing cases where the
# parent Datatype does not directly inherit from a builtin.
def test_clean_second_gen_min_val_int_good(self):
"""
Testing clean() on a well-defined MIN_VAL constraint on a second-generation integer.
"""
parent_DT = Datatype(name="Middleman DT", description="Middleman DT", user=kive_user())
parent_DT.full_clean()
parent_DT.save()
parent_DT.restricts.add(self.INT)
constr_DT = Datatype(name="ConstrDT", description="Constrained Datatype", user=kive_user())
constr_DT.full_clean()
constr_DT.save()
constr_DT.restricts.add(parent_DT)
constr = constr_DT.basic_constraints.create(ruletype=BasicConstraint.MIN_VAL, rule="{}".format(-7.5))
self.assertEquals(constr.clean(), None)
# Propagation check
self.assertEquals(constr_DT.clean(), None)
def test_clean_second_gen_max_val_float_bad(self):
"""
Testing clean() on a badly-defined MAX_VAL constraint (second-gen float).
"""
parent_DT = Datatype(name="Middleman DT", description="Middleman DT", user=kive_user())
parent_DT.full_clean()
parent_DT.save()
parent_DT.restricts.add(self.FLOAT)
constr_DT = Datatype(name="ConstrDT", description="Constrained Datatype", user=kive_user())
constr_DT.full_clean()
constr_DT.save()
constr_DT.restricts.add(parent_DT)
constr = constr_DT.basic_constraints.create(ruletype=BasicConstraint.MAX_VAL, rule="foo")
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound of "{}" on numeric value, '
'which is not a number'.format(constr, "foo")),
constr.clean)
# Propagation check.
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound of "{}" on numeric value, '
'which is not a number'.format(constr, "foo")),
constr_DT.clean)
def test_clean_second_gen_min_length_bool_bad(self):
"""
Testing clean() on a badly-defined MIN_LENGTH constraint (second-gen Boolean).
"""
parent_DT = Datatype(name="Middleman DT", description="Middleman DT", user=kive_user())
parent_DT.full_clean()
parent_DT.save()
parent_DT.restricts.add(self.BOOL)
constr_DT = Datatype(name="BooleanWithLengthConstraint",
description="Incorrectly length-constrained Datatype",
user=kive_user())
constr_DT.full_clean()
constr_DT.save()
constr_DT.restricts.add(parent_DT)
constr = constr_DT.basic_constraints.create(ruletype=BasicConstraint.MIN_LENGTH, rule="{}".format(12))
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound on string length, '
'but its parent Datatype "{}" has builtin type {}'
.format(constr, constr_DT, self.BOOL)),
constr.clean)
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound on string length, '
'but its parent Datatype "{}" has builtin type {}'
.format(constr, constr_DT, self.BOOL)),
constr_DT.clean)
def test_clean_second_gen_str_max_length_bad(self):
"""
Testing clean() on a badly-defined (str) MIN_LENGTH constraint (second-gen Datatype).
"""
parent_DT = Datatype(name="Middleman DT", description="Middleman DT", user=kive_user())
parent_DT.full_clean()
parent_DT.save()
parent_DT.restricts.add(self.STR)
constr_DT = Datatype(name="NonIntegerLengthConstraint",
description="String with poorly-formed length constraint",
user=kive_user())
constr_DT.full_clean()
constr_DT.save()
constr_DT.restricts.add(parent_DT)
constr = constr_DT.basic_constraints.create(ruletype=BasicConstraint.MAX_LENGTH, rule="bar")
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound of "{}" on string length, '
'which is not an integer'.format(constr, "bar")),
constr.clean)
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound of "{}" on string length, '
'which is not an integer'.format(constr, "bar")),
constr_DT.clean)
def test_clean_second_gen_min_length_non_positive_edge(self):
"""
Testing clean() on an edge-case negative (0) MIN_LENGTH constraint (second-gen Datatype).
"""
parent_DT = Datatype(name="Middleman DT", description="Middleman DT", user=kive_user())
parent_DT.full_clean()
parent_DT.save()
parent_DT.restricts.add(self.STR)
constr_DT = Datatype(name="TooSmallLengthConstraint",
description="String with too-small length constraint",
user=kive_user())
constr_DT.full_clean()
constr_DT.save()
constr_DT.restricts.add(parent_DT)
constr = constr_DT.basic_constraints.create(ruletype=BasicConstraint.MIN_LENGTH, rule="{}".format(0))
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound of "{}" on string length, '
'which is not positive'.format(constr, 0)),
constr.clean)
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound of "{}" on string length, '
'which is not positive'.format(constr, 0)),
constr_DT.clean)
def test_clean_second_gen_regexp_good(self):
"""
Testing clean() on a second-gen Datatype with good REGEXP attached.
"""
mother_DT = Datatype(name="Mother", description="Mother", user=kive_user())
mother_DT.full_clean()
mother_DT.save()
mother_DT.restricts.add(self.STR)
father_DT = Datatype(name="Father", description="Father", user=kive_user())
father_DT.full_clean()
father_DT.save()
father_DT.restricts.add(self.STR)
milkman_DT = Datatype(name="Milkman", description="Milkman", user=kive_user())
milkman_DT.full_clean()
milkman_DT.save()
milkman_DT.restricts.add(self.STR)
regexped_DT = Datatype(name="RegexpedDT",
description="Datatype with good REGEXP attached",
user=kive_user())
regexped_DT.full_clean()
regexped_DT.save()
regexped_DT.restricts.add(mother_DT)
regexped_DT.restricts.add(father_DT)
regexped_DT.restricts.add(milkman_DT)
regexp_constr = regexped_DT.basic_constraints.create(ruletype=BasicConstraint.REGEXP,
rule="foo")
self.assertEquals(regexp_constr.clean(), None)
# Propagation check.
self.assertEquals(regexped_DT.clean(), None)
def test_clean_second_gen_regexp_bad(self):
"""
Testing clean() on a second-gen Datatype with a bad REGEXP constraint.
"""
Danny_DT = Datatype(name="Bob Saget", description="Ostensible father", user=kive_user())
# Danny_DT.full_house()
Danny_DT.full_clean()
Danny_DT.save()
Danny_DT.restricts.add(self.INT)
Joey_DT = Datatype(name="Dave Coulier", description="Popeye imitator", user=kive_user())
Joey_DT.full_clean()
Joey_DT.save()
Joey_DT.restricts.add(self.INT)
Jesse_DT = Datatype(name="John Stamos", description="Mercy-haver", user=kive_user())
Jesse_DT.full_clean()
Jesse_DT.save()
Jesse_DT.restricts.add(self.INT)
# The bad regexp pattern.
pattern = "(.+"
regexped_DT = Datatype(name="RegexpedDT",
description="Datatype with bad REGEXP attached",
user=kive_user())
regexped_DT.full_clean()
regexped_DT.save()
regexped_DT.restricts.add(Danny_DT)
regexped_DT.restricts.add(Joey_DT)
regexped_DT.restricts.add(Jesse_DT)
regexp_constr = regexped_DT.basic_constraints.create(ruletype=BasicConstraint.REGEXP,
rule=pattern)
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies an invalid regular expression "{}"'
.format(regexp_constr, pattern)),
regexp_constr.clean)
# Propagation check.
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies an invalid regular expression "{}"'
.format(regexp_constr, pattern)),
regexped_DT.clean)
def test_clean_second_gen_dtf_good(self):
"""
Testing clean() on a good DATETIMEFORMAT (second-gen Datatype).
"""
parent_DT = Datatype(name="Middleman DT", description="Middleman DT", user=kive_user())
parent_DT.full_clean()
parent_DT.save()
parent_DT.restricts.add(self.STR)
dtf_DT = Datatype(name="GoodDTF", description="String with a DTF constraint attached", user=kive_user())
dtf_DT.full_clean()
dtf_DT.save()
dtf_DT.restricts.add(parent_DT)
dtf = dtf_DT.basic_constraints.create(ruletype=BasicConstraint.DATETIMEFORMAT,
rule="%Y %b %d")
self.assertEquals(dtf.clean(), None)
# Propagation check.
self.assertEquals(dtf_DT.clean(), None)
def test_clean_second_gen_dtf_bad_h(self):
"""
Testing clean() on a DATETIMEFORMATs applied to a float (second-gen).
"""
parent_DT = Datatype(name="Middleman DT", description="Middleman DT", user=kive_user())
parent_DT.full_clean()
parent_DT.save()
parent_DT.restricts.add(self.FLOAT)
dtf_DT = Datatype(name="BadDTF", description="Float with a DTF constraint attached", user=kive_user())
dtf_DT.full_clean()
dtf_DT.save()
dtf_DT.restricts.add(parent_DT)
dtf = dtf_DT.basic_constraints.create(ruletype=BasicConstraint.DATETIMEFORMAT,
rule="%Y %b %d")
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a date/time format, but its parent Datatype '
'"{}" has builtin type "{}"'.format(dtf, dtf_DT, self.FLOAT)),
dtf.clean)
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a date/time format, but its parent Datatype '
'"{}" has builtin type "{}"'.format(dtf, dtf_DT, self.FLOAT)),
dtf_DT.clean)
class BasicConstraintGetEffectiveNumConstraintTests(BasicConstraintTestSetup):
    """Tests of Datatype.get_effective_num_constraint()."""

    def test_get_effective_min_val_builtins(self):
        """
        get_effective_num_constraint, when used to retrieve MIN_VAL restrictions,
        should give (None, -float("Inf")) for all builtins.
        """
        for builtin_DT in (self.STR, self.INT, self.FLOAT, self.BOOL):
            self.assertEqual(builtin_DT.get_effective_num_constraint(BasicConstraint.MIN_VAL),
                             (None, -float("inf")))

    def test_get_effective_max_val_builtins(self):
        """
        get_effective_num_constraint, when used to retrieve MAX_VAL restrictions,
        should give (None, float("Inf")) for all builtins.
        """
        for builtin_DT in (self.STR, self.INT, self.FLOAT, self.BOOL):
            self.assertEqual(builtin_DT.get_effective_num_constraint(BasicConstraint.MAX_VAL),
                             (None, float("inf")))

    def test_get_effective_min_length_builtins(self):
        """
        get_effective_num_constraint, when used to retrieve MIN_LENGTH restrictions,
        should give (None, 0) for all builtins.
        """
        for builtin_DT in (self.STR, self.INT, self.FLOAT, self.BOOL):
            self.assertEqual(builtin_DT.get_effective_num_constraint(BasicConstraint.MIN_LENGTH),
                             (None, 0))

    def test_get_effective_max_length_builtins(self):
        """
        get_effective_num_constraint, when used to retrieve MAX_LENGTH restrictions,
        should give (None, float("Inf")) for all builtins.
        """
        for builtin_DT in (self.STR, self.INT, self.FLOAT, self.BOOL):
            self.assertEqual(builtin_DT.get_effective_num_constraint(BasicConstraint.MAX_LENGTH),
                             (None, float("inf")))
########
def __test_get_effective_num_constraint_no_constraint_h(self, builtin_type, BC_type):
"""
Helper to test get_effective_num_constraint for several different builtin types
and constraint types in the no-constraint case
"""
no_constr_set = Datatype(name="NoConstrSet", description="No constraint set", user=kive_user())
no_constr_set.clean()
no_constr_set.save()
no_constr_set.restricts.add(builtin_type)
restriction_val = None
if BC_type == BasicConstraint.MIN_VAL:
restriction_val = -float("inf")
elif BC_type in (BasicConstraint.MAX_VAL, BasicConstraint.MAX_LENGTH):
restriction_val = float("inf")
elif BC_type == BasicConstraint.MIN_LENGTH:
restriction_val = 0
else:
# Pathological case: should never happen.
print("WTF this shouldn't happen")
self.assertEquals(no_constr_set.get_effective_num_constraint(BC_type), (None, restriction_val))
def test_get_effective_min_val_int_no_constraint(self):
"""
Datatype (integer) with no MIN_VAL set should have -\infty as its effective min val.
"""
self.__test_get_effective_num_constraint_no_constraint_h(self.INT, BasicConstraint.MIN_VAL)
def test_get_effective_min_val_float_no_constraint(self):
"""
Datatype (float) with no MIN_VAL set should have -\infty as its effective min val.
"""
self.__test_get_effective_num_constraint_no_constraint_h(self.FLOAT, BasicConstraint.MIN_VAL)
def test_get_effective_max_val_int_no_constraint(self):
"""
Datatype (integer) with no MAX_VAL set should have \infty as its effective max val.
"""
self.__test_get_effective_num_constraint_no_constraint_h(self.INT, BasicConstraint.MAX_VAL)
def test_get_effective_max_val_float_no_constraint(self):
"""
Datatype (float) with no MAX_VAL set should have \infty as its effective max val.
"""
self.__test_get_effective_num_constraint_no_constraint_h(self.FLOAT, BasicConstraint.MIN_VAL)
def test_get_effective_min_length_no_constraint(self):
"""
Datatype (string) with no MIN_LENGTH set should have 0 as its effective min length.
"""
self.__test_get_effective_num_constraint_no_constraint_h(self.STR, BasicConstraint.MIN_LENGTH)
def test_get_effective_max_length_no_constraint(self):
"""
Datatype (string) with no MAX_LENGTH set should have \infty as its effective max length.
"""
self.__test_get_effective_num_constraint_no_constraint_h(self.STR, BasicConstraint.MAX_LENGTH)
########
def __test_get_effective_num_constraint_with_constraint_h(self, builtin_type, BC_type, constr_val):
"""
Helper to check retrieving constraints set directly on a Datatype.
"""
constr_DT = Datatype(name="Constrained DT", description="Datatype with numerical constraint",
user=kive_user())
constr_DT.clean()
constr_DT.save()
constr_DT.restricts.add(builtin_type)
constr = constr_DT.basic_constraints.create(ruletype=BC_type, rule="{}".format(constr_val))
constr.full_clean()
self.assertEquals(constr_DT.get_effective_num_constraint(BC_type), (constr, constr_val))
def test_get_effective_min_val_int_with_constraint(self):
"""
MIN_VAL constraint set directly on the (integer) Datatype.
"""
self.__test_get_effective_num_constraint_with_constraint_h(self.INT, BasicConstraint.MIN_VAL, -5)
def test_get_effective_min_val_float_with_constraint(self):
"""
MIN_VAL constraint set directly on the (float) Datatype.
"""
self.__test_get_effective_num_constraint_with_constraint_h(self.FLOAT, BasicConstraint.MIN_VAL, 2.5)
def test_get_effective_max_val_int_with_constraint(self):
"""
MAX_VAL constraint set directly on the (integer) Datatype.
"""
self.__test_get_effective_num_constraint_with_constraint_h(self.INT, BasicConstraint.MAX_VAL, 133.7)
def test_get_effective_max_val_float_with_constraint(self):
"""
MAX_VAL constraint set directly on the (float) Datatype.
"""
self.__test_get_effective_num_constraint_with_constraint_h(self.FLOAT, BasicConstraint.MAX_VAL, -3)
def test_get_effective_min_length_with_constraint(self):
"""
MIN_LENGTH constraint set directly on the (string) Datatype.
"""
self.__test_get_effective_num_constraint_with_constraint_h(self.STR, BasicConstraint.MIN_LENGTH, 4)
def test_get_effective_max_length_with_constraint(self):
"""
MAX_LENGTH constraint set directly on the (string) Datatype.
"""
self.__test_get_effective_num_constraint_with_constraint_h(self.STR, BasicConstraint.MAX_LENGTH, 4)
########
def __test_get_effective_num_constraint_inherits_constraint_h(self, builtin_type, BC_type, constr_val):
"""
Helper for testing the inheritance of numerical constraints from a single parent.
"""
constr_parent = Datatype(name="ConstrParent", description="Constrained parent", user=kive_user())
constr_parent.clean()
constr_parent.save()
constr_parent.restricts.add(builtin_type)
constr = constr_parent.basic_constraints.create(ruletype=BC_type, rule="{}".format(constr_val))
constr.full_clean()
heir = Datatype(name="Heir", description="Inherits BC from parent", user=kive_user())
heir.clean()
heir.save()
heir.restricts.add(constr_parent)
heir.complete_clean()
self.assertEquals(heir.get_effective_num_constraint(BC_type), (constr, constr_val))
def test_get_effective_min_val_int_inherits_constraint(self):
"""
Datatype (integer) with no MIN_VAL of its own but whose parent has one should inherit it.
"""
self.__test_get_effective_num_constraint_inherits_constraint_h(self.INT, BasicConstraint.MIN_VAL, 4)
def test_get_effective_min_val_float_inherits_constraint(self):
"""
Datatype (float) with no MIN_VAL of its own but whose parent has one should inherit it.
"""
self.__test_get_effective_num_constraint_inherits_constraint_h(self.FLOAT, BasicConstraint.MIN_VAL, 7.5)
def test_get_effective_max_val_int_inherits_constraint(self):
"""
Datatype (integer) with no MAX_VAL of its own but whose parent has one should inherit it.
"""
self.__test_get_effective_num_constraint_inherits_constraint_h(self.INT, BasicConstraint.MAX_VAL, 7.5)
def test_get_effective_max_val_float_inherits_constraint(self):
"""
Datatype (float) with no MAX_VAL of its own but whose parent has one should inherit it.
"""
self.__test_get_effective_num_constraint_inherits_constraint_h(self.FLOAT, BasicConstraint.MAX_VAL, 4)
def test_get_effective_min_length_inherits_constraint(self):
"""
Datatype (string) with no MIN_LENGTH of its own but whose parent has one should inherit it.
"""
self.__test_get_effective_num_constraint_inherits_constraint_h(self.STR, BasicConstraint.MIN_LENGTH, 4)
def test_get_effective_max_length_inherits_constraint(self):
"""
Datatype (string) with no MAX_LENGTH of its own but whose parent has one should inherit it.
"""
self.__test_get_effective_num_constraint_inherits_constraint_h(self.STR, BasicConstraint.MAX_LENGTH, 4)
########
def __test_get_effective_num_constraint_inherits_several_constraints_h(
self, dominant_builtin, other_builtin, BC_type, dominant_constr_val, other_constr_val=None):
"""
Helper for testing the inheritance of constraints from several supertypes.
"""
dominant_parent = Datatype(name="DominantParent", description="Parent with dominant constraint",
user=kive_user())
dominant_parent.full_clean()
dominant_parent.save()
dominant_parent.restricts.add(dominant_builtin)
dominant_constr = dominant_parent.basic_constraints.create(
ruletype=BC_type, rule="{}".format(dominant_constr_val))
other_parent = Datatype(name="OtherParent", description="Parent whose constraint is overruled",
user=kive_user())
other_parent.full_clean()
other_parent.save()
other_parent.restricts.add(other_builtin)
other_constr = None
if other_constr_val != None:
other_constr = other_parent.basic_constraints.create(ruletype=BC_type, rule="{}".format(other_constr_val))
heir = Datatype(name="Heir", description="Inherits from two parents", user=kive_user())
heir.full_clean()
heir.save()
heir.restricts.add(dominant_parent)
heir.restricts.add(other_parent)
self.assertEquals(heir.get_effective_num_constraint(BC_type), (dominant_constr, dominant_constr_val))
# Try swapping the order....
heir.restricts.remove(dominant_parent)
heir.restricts.remove(other_parent)
heir.restricts.add(other_parent)
heir.restricts.add(dominant_parent)
self.assertEquals(heir.get_effective_num_constraint(BC_type), (dominant_constr, dominant_constr_val))
def test_get_effective_min_val_inherits_several_constraints_int_int(self):
"""
Datatype (integer) inheriting several MIN_VALs should inherit the largest one.
"""
self.__test_get_effective_num_constraint_inherits_several_constraints_h(
self.INT, self.INT, BasicConstraint.MIN_VAL, 5, 3.2)
def test_get_effective_max_val_inherits_several_constraints_int_float(self):
"""
Datatype (integer) inheriting several MIN_VALs should inherit the largest one.
"""
self.__test_get_effective_num_constraint_inherits_several_constraints_h(
self.INT, self.FLOAT, BasicConstraint.MAX_VAL, 3.2, 7)
def test_get_effective_min_val_inherits_several_constraints_float_int(self):
"""
Datatype (integer) inheriting several MIN_VALs should inherit the largest one.
"""
self.__test_get_effective_num_constraint_inherits_several_constraints_h(
self.FLOAT, self.INT, BasicConstraint.MIN_VAL, 19, 18)
def test_get_effective_max_val_inherits_several_constraints_float_float(self):
"""
Datatype (float) inheriting several MAX_VALs should inherit the largest one.
"""
self.__test_get_effective_num_constraint_inherits_several_constraints_h(
self.FLOAT, self.FLOAT, BasicConstraint.MAX_VAL, 100, 180)
def test_get_effective_min_val_inherits_from_several_with_one_trivial_int_int(self):
"""
Datatype (integer) inheriting from several supertypes but with only one MIN_VAL should inherit that one.
"""
self.__test_get_effective_num_constraint_inherits_several_constraints_h(
self.INT, self.INT, BasicConstraint.MIN_VAL, 5, None)
def test_get_effective_max_val_inherits_from_several_with_one_trivial_int_float(self):
"""
Datatype (integer) inheriting from several supertypes but with only one MIN_VAL should inherit that one.
"""
self.__test_get_effective_num_constraint_inherits_several_constraints_h(
self.INT, self.FLOAT, BasicConstraint.MAX_VAL, 5, None)
def test_get_effective_min_val_inherits_from_several_with_one_trivial_float_int(self):
"""
Datatype (integer) inheriting from several supertypes but with only one MIN_VAL should inherit that one.
"""
self.__test_get_effective_num_constraint_inherits_several_constraints_h(
self.FLOAT, self.INT, BasicConstraint.MIN_VAL, 5, None)
def test_get_effective_max_val_inherits_from_several_with_one_trivial_float_float(self):
"""
Datatype (float) inheriting from several supertypes but with only one MIN_VAL should inherit that one.
"""
self.__test_get_effective_num_constraint_inherits_several_constraints_h(
self.FLOAT, self.FLOAT, BasicConstraint.MAX_VAL, 5, None)
def test_get_effective_min_length_inherits_from_several(self):
"""
Datatype (string) inheriting from several supertypes with MIN_LENGTHs should inherit the largest.
"""
self.__test_get_effective_num_constraint_inherits_several_constraints_h(
self.STR, self.STR, BasicConstraint.MIN_LENGTH, 50, 2)
def test_get_effective_max_length_inherits_from_several(self):
"""
Datatype (string) inheriting from several supertypes with MAX_LENGTHs should inherit the smallest.
"""
self.__test_get_effective_num_constraint_inherits_several_constraints_h(
self.STR, self.STR, BasicConstraint.MAX_LENGTH, 2, 50)
def test_get_effective_min_length_inherits_from_several_with_one_trivial(self):
"""
Datatype (string) inheriting from several supertypes, only one of which has a MIN_LENGTH, inherits that one.
"""
self.__test_get_effective_num_constraint_inherits_several_constraints_h(
self.STR, self.STR, BasicConstraint.MIN_LENGTH, 2, None)
def test_get_effective_max_length_inherits_from_several_with_one_trivial(self):
"""
Datatype (string) inheriting from several supertypes, only one of which has a MAX_LENGTH, inherits that one.
"""
self.__test_get_effective_num_constraint_inherits_several_constraints_h(
self.STR, self.STR, BasicConstraint.MAX_LENGTH, 20, None)
########
def test_get_effective_min_val_on_bool(self):
"""
Datatype that inherits from BOOL should not have an effective MIN_VAL.
"""
min_zero = Datatype(name="MinZero", description="Integer >= 0", user=kive_user())
min_zero.full_clean()
min_zero.save()
min_zero.restricts.add(self.INT)
min_zero.basic_constraints.create(ruletype=BasicConstraint.MIN_VAL, rule="0")
heir = Datatype(name="Heir", description="Inherits from MinZero and BOOL", user=kive_user())
heir.full_clean()
heir.save()
heir.restricts.add(min_zero)
heir.restricts.add(self.BOOL)
self.assertEquals(heir.get_effective_num_constraint(BasicConstraint.MIN_VAL), (None, -float("inf")))
def test_get_effective_max_length_on_float(self):
"""
Datatype that inherits from FLOAT should not have an effective MAX_LENGTH.
"""
max_50 = Datatype(name="Max50", description="String of length <= 50", user=kive_user())
max_50.full_clean()
max_50.save()
max_50.restricts.add(self.STR)
max_50.basic_constraints.create(ruletype=BasicConstraint.MAX_LENGTH, rule="50")
heir = Datatype(name="Heir", description="Inherits from Max50 and FLOAT", user=kive_user())
heir.full_clean()
heir.save()
heir.restricts.add(max_50)
heir.restricts.add(self.FLOAT)
self.assertEquals(heir.get_effective_num_constraint(BasicConstraint.MAX_LENGTH), (None, float("inf")))
def test_get_effective_min_length_on_bool(self):
"""
Datatype that inherits from BOOL should not have an effective MIN_LENGTH.
"""
min_50 = Datatype(name="Min50", description="String of length <= 50", user=kive_user())
min_50.full_clean()
min_50.save()
min_50.restricts.add(self.STR)
min_50.basic_constraints.create(ruletype=BasicConstraint.MIN_LENGTH, rule="50")
heir = Datatype(name="Heir", description="Inherits from Min50 and BOOL", user=kive_user())
heir.full_clean()
heir.save()
heir.restricts.add(min_50)
heir.restricts.add(self.BOOL)
self.assertEquals(heir.get_effective_num_constraint(BasicConstraint.MIN_LENGTH), (None, 0))
########
def __test_get_effective_num_constraint_BC_overrides_inherited_h(self, builtin_type, supertype_builtin_type,
BC_type, constr_val, supertype_constr_val):
"""
Helper for testing cases where a Datatype overrides its supertypes' constraints.
"""
super_DT = Datatype(name="SuperDT", description="Supertype with constraint", user=kive_user())
super_DT.full_clean()
super_DT.save()
super_DT.restricts.add(supertype_builtin_type)
super_DT.basic_constraints.create(ruletype=BC_type, rule="{}".format(supertype_constr_val))
heir_DT = Datatype(name="Heir", description="Heir of supertype with overriding constraint", user=kive_user())
heir_DT.full_clean()
heir_DT.save()
heir_DT.restricts.add(builtin_type)
override = heir_DT.basic_constraints.create(ruletype=BC_type, rule="{}".format(constr_val))
self.assertEquals(heir_DT.get_effective_num_constraint(BC_type), (override, constr_val))
# We just pick a few cases to test for this situation.
def test_get_effective_min_val_float_overrides_inherited(self):
"""
Get MIN_VAL from Datatype that overrides its inherited MIN_VAL.
"""
self.__test_get_effective_num_constraint_BC_overrides_inherited_h(
self.FLOAT, self.FLOAT, BasicConstraint.MIN_VAL, 33, 30
)
def test_get_effective_max_val_int_overrides_inherited(self):
"""
Get MAX_VAL from Datatype that overrides its inherited MAX_VAL.
"""
self.__test_get_effective_num_constraint_BC_overrides_inherited_h(
self.INT, self.FLOAT, BasicConstraint.MAX_VAL, 22, 37
)
def test_get_effective_min_length_overrides_inherited(self):
"""
Get MIN_LENGTH from Datatype that overrides its inherited MIN_LENGTH.
"""
self.__test_get_effective_num_constraint_BC_overrides_inherited_h(
self.STR, self.STR, BasicConstraint.MIN_LENGTH, 30, 5
)
def test_get_effective_max_length_overrides_inherited(self):
"""
Get MAX_LENGTH from Datatype that overrides its inherited MAX_LENGTH.
"""
self.__test_get_effective_num_constraint_BC_overrides_inherited_h(
self.STR, self.STR, BasicConstraint.MAX_LENGTH, 16, 17
)
class BasicConstraintGetAllRegexpTests(BasicConstraintTestSetup):
# There should be no distinction on what builtin types a Datatype
# inherits from, so we just shuffle through them.
def test_no_regexps(self):
"""
Case where Datatype has no regexps defined on it.
"""
my_DT = Datatype(name="NoRegexpDT", description="Unfettered DT", user=kive_user())
my_DT.full_clean()
my_DT.save()
my_DT.restricts.add(self.STR)
self.assertEquals(my_DT.get_all_regexps(), [])
def test_no_regexps_second_gen(self):
"""
Case where Datatype has no regexps defined on it and neither do its supertypes.
"""
super_DT = Datatype(name="SuperDT", description="Unfettered FLOAT", user=kive_user())
super_DT.save()
super_DT.restricts.add(self.FLOAT)
second_DT = Datatype(name="SecondDT", description="Unfettered INT", user=kive_user())
second_DT.save()
second_DT.restricts.add(self.INT)
my_DT = Datatype(name="NoRegexpDT", description="Unfettered DT", user=kive_user())
my_DT.full_clean()
my_DT.save()
my_DT.restricts.add(second_DT)
self.assertEquals(second_DT.get_all_regexps(), [])
self.assertEquals(my_DT.get_all_regexps(), [])
def test_one_direct_regexp(self):
"""
Case where Datatype has one regexp defined on it.
"""
my_DT = Datatype(name="RegexpedDT", description="Regexped Boolean", user=kive_user())
my_DT.full_clean()
my_DT.save()
my_DT.restricts.add(self.BOOL)
regexp_BC = my_DT.basic_constraints.create(ruletype=BasicConstraint.REGEXP,
rule="T|F")
self.assertEquals(my_DT.get_all_regexps(), [regexp_BC])
def test_several_direct_regexps(self):
"""
Case where Datatype has several regexps defined on it.
"""
my_DT = Datatype(name="RegexpedDT", description="Regexped Boolean", user=kive_user())
my_DT.full_clean()
my_DT.save()
my_DT.restricts.add(self.BOOL)
regexp_BC = my_DT.basic_constraints.create(ruletype=BasicConstraint.REGEXP,
rule="T|F")
regexp2_BC = my_DT.basic_constraints.create(ruletype=BasicConstraint.REGEXP,
rule="T")
regexp3_BC = my_DT.basic_constraints.create(ruletype=BasicConstraint.REGEXP,
rule=".*")
self.assertSetEqual(set(my_DT.get_all_regexps()), {regexp_BC, regexp2_BC, regexp3_BC})
def test_one_inherited_regexp(self):
"""
Case where Datatype has no regexps defined on it but its supertypes do.
"""
super_DT = Datatype(name="SuperDT", description="Regexped STR", user=kive_user())
super_DT.save()
super_DT.restricts.add(self.STR)
regexp_BC = super_DT.basic_constraints.create(ruletype=BasicConstraint.REGEXP,
rule="1e.+")
second_DT = Datatype(name="SecondDT", description="FLOAT inheriting a REGEXP", user=kive_user())
second_DT.save()
second_DT.restricts.add(super_DT)
second_DT.restricts.add(self.FLOAT)
my_DT = Datatype(name="InheritingDT", description="Third-gen inheriting DT", user=kive_user())
my_DT.full_clean()
my_DT.save()
my_DT.restricts.add(second_DT)
self.assertEquals(second_DT.get_all_regexps(), [regexp_BC])
self.assertEquals(my_DT.get_all_regexps(), [regexp_BC])
def test_several_inherited_regexps(self):
"""
Case where Datatype inherits several regexps and has none of its own.
"""
super_DT = Datatype(name="SuperDT", description="Regexped FLOAT", user=kive_user())
super_DT.save()
super_DT.restricts.add(self.FLOAT)
regexp_BC = super_DT.basic_constraints.create(ruletype=BasicConstraint.REGEXP,
rule="1999393939.....")
second_DT = Datatype(name="SecondDT", description="FLOAT inheriting a REGEXP", user=kive_user())
second_DT.save()
second_DT.restricts.add(super_DT)
regexp2_BC = super_DT.basic_constraints.create(ruletype=BasicConstraint.REGEXP,
rule="[1-9]+")
my_DT = Datatype(name="InheritingDT", description="Third-gen inheriting DT", user=kive_user())
my_DT.full_clean()
my_DT.save()
my_DT.restricts.add(second_DT)
self.assertSetEqual(set(second_DT.get_all_regexps()), {regexp_BC, regexp2_BC})
self.assertSetEqual(set(my_DT.get_all_regexps()), {regexp_BC, regexp2_BC})
def test_several_once_removed_inherited_regexps(self):
"""
Case where Datatype inherits several regexps from direct ancestors and has none of its own.
"""
super_DT = Datatype(name="SuperDT", description="Regexped FLOAT", user=kive_user())
super_DT.save()
super_DT.restricts.add(self.FLOAT)
regexp_BC = super_DT.basic_constraints.create(ruletype=BasicConstraint.REGEXP,
rule="1999393939.....")
second_DT = Datatype(name="SecondDT", description="FLOAT inheriting a REGEXP", user=kive_user())
second_DT.save()
second_DT.restricts.add(self.FLOAT)
regexp2_BC = super_DT.basic_constraints.create(ruletype=BasicConstraint.REGEXP,
rule="[1-9]+")
my_DT = Datatype(name="InheritingDT", description="Third-gen inheriting DT", user=kive_user())
my_DT.full_clean()
my_DT.save()
my_DT.restricts.add(super_DT)
my_DT.restricts.add(second_DT)
self.assertSetEqual(set(my_DT.get_all_regexps()), {regexp_BC, regexp2_BC})
def test_several_regexps_multiple_sources(self):
"""
Case where Datatype inherits several regexps from ancestors and has some of its own.
"""
super_DT = Datatype(name="SuperDT", description="Regexped FLOAT", user=kive_user())
super_DT.save()
super_DT.restricts.add(self.STR)
regexp_BC = super_DT.basic_constraints.create(ruletype=BasicConstraint.REGEXP,
rule=".*")
second_DT = Datatype(name="SecondDT", description="STR inheriting a REGEXP", user=kive_user())
second_DT.save()
second_DT.restricts.add(super_DT)
regexp2_BC = second_DT.basic_constraints.create(ruletype=BasicConstraint.REGEXP,
rule="[0-9]*")
regexp3_BC = second_DT.basic_constraints.create(ruletype=BasicConstraint.REGEXP,
rule="[1-7]*")
third_DT = Datatype(name="ThirdDT", description="STR inheriting a REGEXP", user=kive_user())
third_DT.save()
third_DT.restricts.add(self.STR)
regexp4_BC = third_DT.basic_constraints.create(ruletype=BasicConstraint.REGEXP,
rule=".+")
my_DT = Datatype(name="InheritingDT", description="Third-gen inheriting DT", user=kive_user())
my_DT.full_clean()
my_DT.save()
my_DT.restricts.add(second_DT)
my_DT.restricts.add(third_DT)
regexp5_BC = my_DT.basic_constraints.create(ruletype=BasicConstraint.REGEXP,
rule="[4-7]+")
second_DT_regexps = second_DT.get_all_regexps()
self.assertEquals(len(second_DT_regexps), 3)
self.assertEquals(regexp_BC in second_DT_regexps, True)
self.assertEquals(regexp2_BC in second_DT_regexps, True)
self.assertEquals(regexp3_BC in second_DT_regexps, True)
my_DT_regexps = my_DT.get_all_regexps()
self.assertEquals(len(my_DT_regexps), 5)
self.assertEquals(regexp_BC in my_DT_regexps, True)
self.assertEquals(regexp2_BC in my_DT_regexps, True)
self.assertEquals(regexp3_BC in my_DT_regexps, True)
self.assertEquals(regexp4_BC in my_DT_regexps, True)
self.assertEquals(regexp5_BC in my_DT_regexps, True)
class BasicConstraintGetEffectiveDatetimeformatTests(BasicConstraintTestSetup):
def test_on_builtins(self):
"""
Test on the builtin types.
"""
self.assertEquals(self.STR.get_effective_datetimeformat(), None)
self.assertEquals(self.INT.get_effective_datetimeformat(), None)
self.assertEquals(self.FLOAT.get_effective_datetimeformat(), None)
self.assertEquals(self.BOOL.get_effective_datetimeformat(), None)
def __test_no_dtf_h(self, builtin_type):
"""
Helper to test the cases where a non-builtin Datatype has no DTF defined.
"""
constr_DT = Datatype(name="DTwithoutDTF", description="Datatype with no DTF", user=kive_user())
constr_DT.full_clean()
constr_DT.save()
constr_DT.restricts.add(builtin_type)
self.assertEquals(constr_DT.get_effective_datetimeformat(), None)
def test_no_dtf_str(self):
"""
Test case where a non-builtin string Datatype has no DATETIMEFORMAT.
"""
self.__test_no_dtf_h(self.STR)
def test_no_dtf_int(self):
"""
Test case where a non-builtin integer Datatype has no DATETIMEFORMAT.
"""
self.__test_no_dtf_h(self.INT)
def test_no_dtf_float(self):
"""
Test case where a non-builtin float Datatype has no DATETIMEFORMAT.
"""
self.__test_no_dtf_h(self.FLOAT)
def test_no_dtf_bool(self):
"""
Test case where a non-builtin Boolean Datatype has no DATETIMEFORMAT.
"""
self.__test_no_dtf_h(self.BOOL)
def test_direct_dtf_str(self):
"""
Testing the case where a string has a direct DTF defined.
"""
constr_DT = Datatype(name="DTwithDTF", description="Datatype with a DTF", user=kive_user())
constr_DT.full_clean()
constr_DT.save()
constr_DT.restricts.add(self.STR)
new_DTF = constr_DT.basic_constraints.create(ruletype=BasicConstraint.DATETIMEFORMAT,
rule="%Y %m %d %H:%M:%S")
self.assertEquals(constr_DT.get_effective_datetimeformat(), new_DTF)
def test_inherited_dtf(self):
"""
Testing the case where a string has one supertype and inherits its DTF.
"""
super_DT = Datatype(name="DTwithDTF", description="Datatype with a DTF", user=kive_user())
super_DT.full_clean()
super_DT.save()
super_DT.restricts.add(self.STR)
new_DTF = super_DT.basic_constraints.create(ruletype=BasicConstraint.DATETIMEFORMAT,
rule="%Y %m %d %H:%M:%S %z")
constr_DT = Datatype(name="InheritingDT", description="Datatype with inherited DTF", user=kive_user())
constr_DT.full_clean()
constr_DT.save()
constr_DT.restricts.add(super_DT)
self.assertEquals(constr_DT.get_effective_datetimeformat(), new_DTF)
def test_inherited_dtf_non_str(self):
"""
Testing the case where a non-string has a supertype with a DTF.
"""
super_DT = Datatype(name="DTwithDTF", description="Datatype with a DTF", user=kive_user())
super_DT.full_clean()
super_DT.save()
super_DT.restricts.add(self.STR)
new_DTF = super_DT.basic_constraints.create(ruletype=BasicConstraint.DATETIMEFORMAT,
rule="%Y")
constr_DT = Datatype(name="InheritingDT", description="Non-string Datatype with inherited DTF",
user=kive_user())
constr_DT.full_clean()
constr_DT.save()
constr_DT.restricts.add(super_DT)
constr_DT.restricts.add(self.INT)
self.assertEquals(constr_DT.get_effective_datetimeformat(), None)
def test_distantly_inherited_dtf(self):
"""
Testing the case where a non-string has several supertypes and inherits a DTF from an indirect ancestor.
"""
super_DT = Datatype(name="AncestorDT", description="Ancestor Datatype with no DTF", user=kive_user())
super_DT.full_clean()
super_DT.save()
super_DT.restricts.add(self.STR)
super2_DT = Datatype(name="DTwithDTF", description="Datatype with a DTF", user=kive_user())
super2_DT.full_clean()
super2_DT.save()
super2_DT.restricts.add(self.STR)
new_DTF = super2_DT.basic_constraints.create(ruletype=BasicConstraint.DATETIMEFORMAT,
rule="%Y")
super3_DT = Datatype(name="DTwithREGEXP", description="Datatype with a REGEXP but no DTF", user=kive_user())
super3_DT.full_clean()
super3_DT.save()
super3_DT.restricts.add(self.STR)
super3_DT.basic_constraints.create(ruletype=BasicConstraint.MIN_LENGTH, rule="4")
super4_DT = Datatype(name="DirectAncestor", description="Datatype with no DTF", user=kive_user())
super4_DT.full_clean()
super4_DT.save()
super4_DT.restricts.add(super2_DT)
constr_DT = Datatype(name="InheritingDT", description="Non-string Datatype with inherited DTF",
user=kive_user())
constr_DT.full_clean()
constr_DT.save()
constr_DT.restricts.add(super2_DT)
constr_DT.restricts.add(super4_DT)
self.assertEquals(constr_DT.get_effective_datetimeformat(), new_DTF)
| 45.445652 | 120 | 0.628797 | 8,625 | 75,258 | 5.179478 | 0.041971 | 0.020684 | 0.029011 | 0.038614 | 0.872563 | 0.841761 | 0.80749 | 0.769794 | 0.72771 | 0.686992 | 0 | 0.004922 | 0.276436 | 75,258 | 1,655 | 121 | 45.473112 | 0.815459 | 0.145021 | 0 | 0.504792 | 0 | 0 | 0.111823 | 0.004176 | 0 | 0 | 0 | 0 | 0.112886 | 1 | 0.142705 | false | 0 | 0.00426 | 0 | 0.15442 | 0.001065 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
b5fa8ce6091903841f44f5209afb75eb6fd39cc3 | 121 | py | Python | admin_views/conf.py | estyxx/django-admin-views | f370a69a658301647033f778c472e044283d82e7 | [
"BSD-3-Clause"
] | 85 | 2015-01-18T05:52:43.000Z | 2021-06-21T15:58:59.000Z | admin_views/conf.py | estyxx/django-admin-views | f370a69a658301647033f778c472e044283d82e7 | [
"BSD-3-Clause"
] | 23 | 2015-02-01T16:53:59.000Z | 2021-12-02T15:41:36.000Z | admin_views/conf.py | estyxx/django-admin-views | f370a69a658301647033f778c472e044283d82e7 | [
"BSD-3-Clause"
] | 26 | 2015-07-26T20:54:55.000Z | 2020-12-23T11:48:11.000Z | from django.conf import settings
ADMIN_VIEWS_SITE = getattr(settings, 'ADMIN_VIEWS_SITE', 'django.contrib.admin.site')
| 24.2 | 85 | 0.801653 | 17 | 121 | 5.470588 | 0.588235 | 0.27957 | 0.387097 | 0.473118 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 121 | 4 | 86 | 30.25 | 0.845455 | 0 | 0 | 0 | 0 | 0 | 0.338843 | 0.206612 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
bd16b8696db67aa02712fc33e34da5961dee5ecd | 32 | py | Python | modulo 02/sys/simples.py | p-g-krish/CursoSecurityToolsPython | 7b2205a33d23166a37a6b8105b9ca5863855aa85 | [
"Apache-2.0"
] | 10 | 2020-02-13T03:14:29.000Z | 2021-09-16T04:32:40.000Z | modulo 02/sys/simples.py | p-g-krish/CursoSecurityToolsPython | 7b2205a33d23166a37a6b8105b9ca5863855aa85 | [
"Apache-2.0"
] | null | null | null | modulo 02/sys/simples.py | p-g-krish/CursoSecurityToolsPython | 7b2205a33d23166a37a6b8105b9ca5863855aa85 | [
"Apache-2.0"
] | 4 | 2020-02-18T23:42:23.000Z | 2021-09-10T05:52:09.000Z | import sys
print(sys.argv[1]) | 6.4 | 18 | 0.6875 | 6 | 32 | 3.666667 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037037 | 0.15625 | 32 | 5 | 18 | 6.4 | 0.777778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0.5 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 6 |
bd2099e1704f432f385914a66cd86b159f294eb6 | 3,866 | py | Python | tests/test_orders_import.py | Nwke/rest-api-couriers-service | 123b362fc51b5ff62403cf7567982513caa5bb5a | [
"Apache-1.1"
] | null | null | null | tests/test_orders_import.py | Nwke/rest-api-couriers-service | 123b362fc51b5ff62403cf7567982513caa5bb5a | [
"Apache-1.1"
] | null | null | null | tests/test_orders_import.py | Nwke/rest-api-couriers-service | 123b362fc51b5ff62403cf7567982513caa5bb5a | [
"Apache-1.1"
] | null | null | null | import requests
async def test_valid_post_data(rebuild_db_tables):
data = {
"data": [
{
"order_id": 1,
"weight": 0.23,
"region": 12,
"delivery_hours": ["09:00-18:00"]
},
{
"order_id": 2,
"weight": 15,
"region": 1,
"delivery_hours": ["09:00-18:00"]
},
{
"order_id": 3,
"weight": 0.01,
"region": 22,
"delivery_hours": ["09:00-12:00", "16:00-21:30"]
}]
}
r = requests.post("http://0.0.0.0:80/orders", json=data)
print(r.status_code)
assert r.status_code == 201
assert r.json() == {
'orders': [{'id': 1}, {'id': 2}, {'id': 3}]
}
async def test_invalid_post_data(rebuild_db_tables):
data = {"data": [
{
"order_id": 2,
"weight": 15,
"region": 1,
"delivery_hours": ["09:00-18:00"]
},
{
"order_id": 3,
"weight": 0.01,
"region": 22,
"delivery_hours_bitch_wrong_field": ["09:00-12:00", "16:00-21:30"]
}
]
}
r = requests.post("http://0.0.0.0:80/orders", json=data)
print(r.status_code)
assert r.status_code == 400
assert r.json() == {
'validation_error': {
'orders': [{'id': 3}]
}
}
async def test_invalid_multiple_post_data(rebuild_db_tables):
data = {"data": [
{
"order_id": 2,
# "weight": 0.11,
"region": 1,
"delivery_hours": ["09:00-18:00"]
},
{
"order_id": 3,
"weight": 0.01,
"region": 22,
"delivery_hours_bitch_wrong_field": ["09:00-12:00", "16:00-21:30"]
}
]
}
r = requests.post("http://0.0.0.0:80/orders", json=data)
print(r.status_code)
assert r.status_code == 400
assert r.json() == {
'validation_error': {
'orders': [{'id': 2}, {'id': 3}]
}
}
async def test_missing_field_in_post_data(rebuild_db_tables):
data = {"data": [
{
"order_id": 3,
"weight": 0.01,
# "region": 22,
"delivery_hours": ["09:00-12:00", "16:00-21:30"]
}]
}
r = requests.post("http://0.0.0.0:80/orders", json=data)
print(r.status_code)
assert r.status_code == 400
assert r.json() == {
'validation_error': {
'orders': [{'id': 3}]
}
}
async def test_undeclared_field_in_post_data(rebuild_db_tables):
data = {"data": [
{
"order_id": 3,
"weight": 0.01,
"region": 22,
"delivery_hours": ["09:00-12:00", "16:00-21:30"],
"sense_of_life": "undefined"
}]
}
r = requests.post("http://0.0.0.0:80/orders", json=data)
print(r.status_code)
assert r.status_code == 400
assert r.json() == {
'validation_error': {
'orders': [{'id': 3}]
}
}
async def test_replaced_field_in_post_data(rebuild_db_tables):
data = {"data": [
{
"order_id": 3,
"weight": 0.01,
"delivery_hours": ["09:00-12:00", "16:00-21:30"],
"sense_of_life": "undefined"
}]
}
r = requests.post("http://0.0.0.0:80/orders", json=data)
print(r.status_code)
assert r.status_code == 400
assert r.json() == {'validation_error': {
'orders': [{'id': 3}]
}}
async def test_empty_post_data(rebuild_db_tables):
data = {"data": [
]
}
r = requests.post("http://0.0.0.0:80/orders", json=data)
print(r.status_code)
assert r.status_code == 201
assert r.json() == {'orders': []}
| 23.573171 | 78 | 0.460683 | 463 | 3,866 | 3.650108 | 0.12959 | 0.024852 | 0.024852 | 0.080473 | 0.94497 | 0.94497 | 0.936686 | 0.898225 | 0.885799 | 0.863314 | 0 | 0.102554 | 0.361873 | 3,866 | 163 | 79 | 23.717791 | 0.582489 | 0.007501 | 0 | 0.601504 | 0 | 0 | 0.238393 | 0.016693 | 0 | 0 | 0 | 0 | 0.105263 | 1 | 0 | false | 0 | 0.007519 | 0 | 0.007519 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
1f2379e3e17b3da850cf5cc83694299db334bbbe | 266 | py | Python | hmtl/training/metrics/__init__.py | rahular/joint-coref-srl | cd85fb4e11af1a1ea400ed657d0a4511c1d6c6be | [
"MIT"
] | null | null | null | hmtl/training/metrics/__init__.py | rahular/joint-coref-srl | cd85fb4e11af1a1ea400ed657d0a4511c1d6c6be | [
"MIT"
] | null | null | null | hmtl/training/metrics/__init__.py | rahular/joint-coref-srl | cd85fb4e11af1a1ea400ed657d0a4511c1d6c6be | [
"MIT"
] | null | null | null | # coding: utf-8
from hmtl.training.metrics.relation_f1_measure import RelationF1Measure
from hmtl.training.metrics.conll_coref_full_scores import ConllCorefFullScores
from hmtl.training.metrics.accuracy import Accuracy
from hmtl.training.metrics.f1_score import F1
| 38 | 78 | 0.868421 | 37 | 266 | 6.081081 | 0.513514 | 0.142222 | 0.284444 | 0.408889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020325 | 0.075188 | 266 | 6 | 79 | 44.333333 | 0.894309 | 0.048872 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
1f2a91092b3ac19fb4922770fdbdd689be821418 | 74,159 | py | Python | SBaaS_isotopomer/stage01_isotopomer_MQResultsTable_query.py | dmccloskey/SBaaS_isotopomer | b669abd6e41034739a2c53d855005753e658c436 | [
"MIT"
] | null | null | null | SBaaS_isotopomer/stage01_isotopomer_MQResultsTable_query.py | dmccloskey/SBaaS_isotopomer | b669abd6e41034739a2c53d855005753e658c436 | [
"MIT"
] | null | null | null | SBaaS_isotopomer/stage01_isotopomer_MQResultsTable_query.py | dmccloskey/SBaaS_isotopomer | b669abd6e41034739a2c53d855005753e658c436 | [
"MIT"
] | null | null | null | from .stage01_isotopomer_MQResultsTable_postgresql_models import *
from SBaaS_LIMS.lims_experiment_postgresql_models import *
from SBaaS_LIMS.lims_sample_postgresql_models import *
from SBaaS_LIMS.lims_msMethod_postgresql_models import *
from SBaaS_base.sbaas_base_query_update import sbaas_base_query_update
from SBaaS_base.sbaas_base_query_drop import sbaas_base_query_drop
from SBaaS_base.sbaas_base_query_initialize import sbaas_base_query_initialize
from SBaaS_base.sbaas_base_query_insert import sbaas_base_query_insert
from SBaaS_base.sbaas_base_query_select import sbaas_base_query_select
from SBaaS_base.sbaas_base_query_delete import sbaas_base_query_delete
from SBaaS_base.sbaas_template_query import sbaas_template_query
class stage01_isotopomer_MQResultsTable_query(sbaas_template_query):
def initialize_supportedTables(self):
'''Set the supported tables dict for
'''
tables_supported = {'data_stage01_isotopomer_mqresultstable':data_stage01_isotopomer_mqresultstable,
};
self.set_supportedTables(tables_supported);
    def initialize_dataStage01_isotopomer_MQResultsTable(self):
        """Create the MQResultsTable table on self.engine (checkfirst=True,
        the positional True argument). SQLAlchemy errors are printed, not
        re-raised.

        NOTE(review): this uses the CamelCase model name
        data_stage01_isotopomer_MQResultsTable while
        initialize_supportedTables registers the lowercase
        data_stage01_isotopomer_mqresultstable -- confirm both names exist
        in the star-imported models module.
        """
        try:
            data_stage01_isotopomer_MQResultsTable.__table__.create(self.engine,True);
        except SQLAlchemyError as e:
            print(e);
    def drop_dataStage01_isotopomer_MQResultsTable(self):
        """Drop the MQResultsTable table from self.engine (checkfirst=True,
        the positional True argument). SQLAlchemy errors are printed, not
        re-raised."""
        try:
            data_stage01_isotopomer_MQResultsTable.__table__.drop(self.engine,True);
        except SQLAlchemyError as e:
            print(e);
# query sample names from data_stage01_isotopomer_mqresultstable
def get_sampleNames_experimentIDAndSampleType(self,experiment_id_I,sample_type_I,exp_type_I=5):
'''Querry sample names (i.e. unknowns) that are used from
the experiment'''
try:
sample_names = self.session.query(data_stage01_isotopomer_MQResultsTable.sample_name).filter(
data_stage01_isotopomer_MQResultsTable.sample_type.like(sample_type_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
data_stage01_isotopomer_MQResultsTable.used_.is_(True),
experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name)).group_by(
data_stage01_isotopomer_MQResultsTable.sample_name).order_by(
data_stage01_isotopomer_MQResultsTable.sample_name.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.sample_name);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
def get_allSampleNames_experimentIDAndSampleType(self,experiment_id_I,sample_type_I,exp_type_I=5):
'''Querry sample names (i.e. unknowns) that are used from
the experiment'''
try:
sample_names = self.session.query(data_stage01_isotopomer_MQResultsTable.sample_name).filter(
data_stage01_isotopomer_MQResultsTable.sample_type.like(sample_type_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name)).group_by(
data_stage01_isotopomer_MQResultsTable.sample_name).order_by(
data_stage01_isotopomer_MQResultsTable.sample_name.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.sample_name);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
    def get_sampleNames_experimentIDAndSampleID(self,experiment_id_I,sample_id_I,exp_type_I=5):
        """Return the distinct used (used_=True) sample names for the given
        experiment id and sample id, sorted ascending.

        Returns a list of strings, or None on SQLAlchemyError (printed).
        """
        try:
            sample_names = self.session.query(sample.sample_name).filter(
                sample.sample_id.like(sample_id_I),
                experiment.id.like(experiment_id_I),
                experiment.exp_type_id == exp_type_I,
                data_stage01_isotopomer_MQResultsTable.used_.is_(True),
                # implicit joins: experiment <-> sample <-> results table
                experiment.sample_name.like(sample.sample_name),
                experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name)).group_by(
                sample.sample_name).order_by(
                sample.sample_name.asc()).all();
            sample_names_O = [];
            for sn in sample_names: sample_names_O.append(sn.sample_name);
            return sample_names_O;
        except SQLAlchemyError as e:
            print(e);
    def get_sampleNames_experimentIDAndSampleIDAndSampleDilution(self,experiment_id_I,sample_id_I,sample_dilution_I,exp_type_I=5):
        """Return the distinct used (used_=True) sample names for the given
        experiment id, sample id, and sample dilution, sorted ascending.

        Returns a list of strings, or None on SQLAlchemyError (printed).
        """
        try:
            sample_names = self.session.query(sample.sample_name).filter(
                sample.sample_id.like(sample_id_I),
                sample.sample_dilution == sample_dilution_I,
                experiment.id.like(experiment_id_I),
                experiment.exp_type_id == exp_type_I,
                data_stage01_isotopomer_MQResultsTable.used_.is_(True),
                # implicit joins: experiment <-> sample <-> results table
                experiment.sample_name.like(sample.sample_name),
                experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name)).group_by(
                sample.sample_name).order_by(
                sample.sample_name.asc()).all();
            sample_names_O = [];
            for sn in sample_names: sample_names_O.append(sn.sample_name);
            return sample_names_O;
        except SQLAlchemyError as e:
            print(e);
    def get_sampleNames_experimentIDAndSampleNameShortAndSampleDescription(self,experiment_id_I,sample_name_short_I,sample_decription_I,exp_type_I=5):
        """Return the distinct used sample names for the given experiment id,
        short sample name, and sample description, sorted ascending.

        Returns a list of strings, or None on SQLAlchemyError (printed).
        """
        try:
            sample_names = self.session.query(sample.sample_name).filter(
                sample_description.sample_name_short.like(sample_name_short_I),
                sample_description.sample_desc.like(sample_decription_I),
                experiment.id.like(experiment_id_I),
                experiment.exp_type_id == exp_type_I,
                # implicit joins: sample_description <-> sample <-> experiment <-> results
                sample.sample_id.like(sample_description.sample_id),
                experiment.sample_name.like(sample.sample_name),
                experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name),
                data_stage01_isotopomer_MQResultsTable.used_.is_(True)).group_by(
                sample.sample_name).order_by(
                sample.sample_name.asc()).all();
            sample_names_O = [];
            for sn in sample_names: sample_names_O.append(sn.sample_name);
            return sample_names_O;
        except SQLAlchemyError as e:
            print(e);
    def get_sampleNames_experimentIDAndSampleNameAbbreviationAndSampleDescription(self,experiment_id_I,sample_name_abbreviation_I,sample_decription_I,exp_type_I=5):
        """Return the distinct used sample names for the given experiment id,
        sample name abbreviation, and sample description, sorted ascending.

        Returns a list of strings, or None on SQLAlchemyError (printed).
        """
        try:
            sample_names = self.session.query(sample.sample_name).filter(
                sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
                sample_description.sample_desc.like(sample_decription_I),
                experiment.id.like(experiment_id_I),
                experiment.exp_type_id == exp_type_I,
                # implicit joins: sample_description <-> sample <-> experiment <-> results
                sample.sample_id.like(sample_description.sample_id),
                experiment.sample_name.like(sample.sample_name),
                experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name),
                data_stage01_isotopomer_MQResultsTable.used_.is_(True)).group_by(
                sample.sample_name).order_by(
                sample.sample_name.asc()).all();
            sample_names_O = [];
            for sn in sample_names: sample_names_O.append(sn.sample_name);
            return sample_names_O;
        except SQLAlchemyError as e:
            print(e);
    def get_sampleNames_experimentIDAndSampleNameAbbreviationAndSampleDilution(self,experiment_id_I,sample_name_abbreviation_I,sample_dilution_I,exp_type_I=5):
        """Return the distinct used sample names for the given experiment id,
        sample name abbreviation, and sample dilution, sorted ascending.

        Returns a list of strings, or None on SQLAlchemyError (printed).
        """
        try:
            sample_names = self.session.query(sample.sample_name).filter(
                sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
                sample.sample_dilution == sample_dilution_I,
                experiment.id.like(experiment_id_I),
                experiment.exp_type_id == exp_type_I,
                # implicit joins: sample_description <-> sample <-> experiment <-> results
                sample.sample_id.like(sample_description.sample_id),
                experiment.sample_name.like(sample.sample_name),
                experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name),
                data_stage01_isotopomer_MQResultsTable.used_.is_(True)).group_by(
                sample.sample_name).order_by(
                sample.sample_name.asc()).all();
            sample_names_O = [];
            for sn in sample_names: sample_names_O.append(sn.sample_name);
            return sample_names_O;
        except SQLAlchemyError as e:
            print(e);
    def get_sampleNamesAndReplicateNumbersAndSampleTypes_experimentIDAndSampleNameAbbreviationAndSampleDescriptionAndComponentNameAndTimePointAndDilution(\
        self,experiment_id_I,sample_name_abbreviation_I,sample_description_I,component_name_I,time_point_I,sample_dilution_I,exp_type_I=5):
        """Return three parallel lists -- sample names, replicate numbers, and
        sample types -- for the used rows matching the given experiment id,
        abbreviation, description, component name, time point, and dilution,
        sorted by sample name ascending.

        Returns (sample_names, sample_replicates, sample_types), or None on
        SQLAlchemyError (printed).
        """
        try:
            sample_names = self.session.query(sample.sample_name,
                sample_description.sample_replicate,
                sample.sample_type).filter(
                sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
                sample_description.time_point.like(time_point_I),
                sample_description.sample_desc.like(sample_description_I),
                # implicit joins: sample_description <-> sample <-> experiment <-> results
                sample.sample_id.like(sample_description.sample_id),
                sample.sample_dilution == sample_dilution_I,
                experiment.sample_name.like(sample.sample_name),
                experiment.id.like(experiment_id_I),
                experiment.exp_type_id == exp_type_I,
                experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name),
                data_stage01_isotopomer_MQResultsTable.component_name.like(component_name_I),
                data_stage01_isotopomer_MQResultsTable.used_.is_(True)).group_by(
                sample.sample_name,
                sample_description.sample_replicate,
                sample.sample_type).order_by(
                sample.sample_name.asc()).all();
            sample_names_O = [];
            sample_replicates_O = [];
            sample_types_O = [];
            for sn in sample_names:
                sample_names_O.append(sn.sample_name);
                sample_replicates_O.append(sn.sample_replicate);
                sample_types_O.append(sn.sample_type);
            return sample_names_O,sample_replicates_O,sample_types_O;
        except SQLAlchemyError as e:
            print(e);
    def get_sampleNamesAndReplicateNumbersAndSampleTypes_experimentIDAndSampleNameAbbreviationAndSampleDescriptionAndTimePointAndDilution(\
        self,experiment_id_I,sample_name_abbreviation_I,sample_description_I,time_point_I,sample_dilution_I,exp_type_I=5):
        """Return three parallel lists -- sample names, replicate numbers, and
        sample types -- for the used rows matching the given experiment id,
        abbreviation, description, time point, and dilution, sorted by sample
        name ascending.  Same as the ...AndComponentName... variant but
        without the component_name filter.

        Returns (sample_names, sample_replicates, sample_types), or None on
        SQLAlchemyError (printed).
        """
        try:
            sample_names = self.session.query(sample.sample_name,
                sample_description.sample_replicate,
                sample.sample_type).filter(
                sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
                sample_description.time_point.like(time_point_I),
                sample_description.sample_desc.like(sample_description_I),
                # implicit joins: sample_description <-> sample <-> experiment <-> results
                sample.sample_id.like(sample_description.sample_id),
                sample.sample_dilution == sample_dilution_I,
                experiment.sample_name.like(sample.sample_name),
                experiment.id.like(experiment_id_I),
                experiment.exp_type_id == exp_type_I,
                experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name),
                data_stage01_isotopomer_MQResultsTable.used_.is_(True)).group_by(
                sample.sample_name,
                sample_description.sample_replicate,
                sample.sample_type).order_by(
                sample.sample_name.asc()).all();
            sample_names_O = [];
            sample_replicates_O = [];
            sample_types_O = [];
            for sn in sample_names:
                sample_names_O.append(sn.sample_name);
                sample_replicates_O.append(sn.sample_replicate);
                sample_types_O.append(sn.sample_type);
            return sample_names_O,sample_replicates_O,sample_types_O;
        except SQLAlchemyError as e:
            print(e);
# query sample ids from data_stage01_isotopomer_mqresultstable
def get_sampleIDs_experimentIDAndSampleType(self,experiment_id_I,sample_type_I,exp_type_I=5):
'''Querry sample names (i.e. unknowns) that are used from
the experiment'''
try:
sample_ids = self.session.query(sample.sample_id).filter(
data_stage01_isotopomer_MQResultsTable.sample_type.like(sample_type_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
data_stage01_isotopomer_MQResultsTable.used_.is_(True),
experiment.sample_name.like(sample.sample_name),
experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name)).group_by(
sample.sample_id).order_by(
sample.sample_id.asc()).all();
sample_ids_O = [];
for si in sample_ids: sample_ids_O.append(si.sample_id);
return sample_ids_O;
except SQLAlchemyError as e:
print(e);
def get_sampleIDs_experimentID(self,experiment_id_I,exp_type_I=5):
'''Querry sample names that are used from the experiment'''
try:
sample_ids = self.session.query(sample.sample_id).filter(
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
data_stage01_isotopomer_MQResultsTable.used_.is_(True),
experiment.sample_name.like(sample.sample_name),
experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name)).group_by(
sample.sample_id).order_by(
sample.sample_id.asc()).all();
sample_ids_O = [];
for si in sample_ids: sample_ids_O.append(si.sample_id);
return sample_ids_O;
except SQLAlchemyError as e:
print(e);
def get_sampleID_experimentIDAndSampleName(self,experiment_id_I,sample_name_I,exp_type_I=5):
'''Querry sample names (i.e. unknowns) that are used from
the experiment'''
try:
sample_id = self.session.query(sample.sample_id).filter(
sample.sample_name.like(sample_name_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
data_stage01_isotopomer_MQResultsTable.used_.is_(True),
experiment.sample_name.like(sample.sample_name),
experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name)).group_by(
sample.sample_id).all();
sample_id_O = sample_id[0];
return sample_id_O;
except SQLAlchemyError as e:
print(e);
# query sample names and sample name short
def get_sampleNamesAndShortName_experimentIDAndSampleTypeAndTimePointAndDilution(experiment_id_I,sample_type_I,tp,dilution_I,exp_type_I=5):
'''Querry sample name and sample name short that are used from
the experimentfor specific time-points and dilutions'''
try:
sample_name_short = self.session.query(sample.sample_name,
sample_description.sample_name_short).filter(
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
data_stage01_isotopomer_MQResultsTable.sample_name.like(experiment.sample_name),
data_stage01_isotopomer_MQResultsTable.sample_type.like(sample_type_I),
data_stage01_isotopomer_MQResultsTable.used_.is_(True),
sample.sample_name.like(experiment.sample_name),
sample.sample_type.like(sample_type_I),
sample.sample_dilution == dilution_I,
sample_description.sample_id.like(sample.sample_id),
sample_description.time_point.like(time_point_I)).group_by(
sample.sample_name).order_by(
sample.sample_name).all();
sample_name_O = [];
sample_name_short_O = [];
for sn in sample_name_short:
sample_name_O.append(sn.sample_name);
sample_name_short_O.append(sn.sample_name_short);
return sample_name_short_O;
except SQLAlchemyError as e:
print(e);
# query sample names and sample name abbreviations
def get_sampleNamesAndAbbreviations_experimentIDAndSampleTypeAndTimePointAndDilution(experiment_id_I,sample_type_I,tp,dilution_I,exp_type_I=5):
'''Querry sample name and sample abbreviation that are used from
the experimentfor specific time-points and dilutions'''
try:
sample_name_abbreviation = self.session.query(sample.sample_name,
sample_description.sample_name_abbreviation).filter(
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
data_stage01_isotopomer_MQResultsTable.sample_name.like(experiment.sample_name),
data_stage01_isotopomer_MQResultsTable.sample_type.like(sample_type_I),
data_stage01_isotopomer_MQResultsTable.used_.is_(True),
sample.sample_name.like(experiment.sample_name),
sample.sample_type.like(sample_type_I),
sample.sample_dilution == dilution_I,
sample_description.sample_id.like(sample.sample_id),
sample_description.time_point.like(time_point_I)).group_by(
sample.sample_name).order_by(
sample.sample_name).all();
sample_name_O = [];
sample_name_abbreviation_O = [];
for sn in sample_name_abbreviation:
sample_name_O.append(sn.sample_name);
sample_name_abbreviation_O.append(sn.sample_name_abbreviation);
return sample_name_O, sample_name_abbreviation_O;
except SQLAlchemyError as e:
print(e);
# query sample name short from data_stage01_isotopomer_mqresultstable
    def get_sampleNameShort_experimentIDAndSampleType(self,experiment_id_I,sample_type_I,exp_type_I=5):
        """Return the distinct short sample names for the used rows matching
        the given experiment id and sample type, sorted ascending.

        Returns a list of strings, or None on SQLAlchemyError (printed).
        """
        try:
            sample_name_short = self.session.query(sample_description.sample_name_short).filter(
                sample.sample_type.like(sample_type_I),
                experiment.exp_type_id == exp_type_I,
                experiment.id.like(experiment_id_I),
                data_stage01_isotopomer_MQResultsTable.used_.is_(True),
                # implicit joins: experiment <-> sample <-> sample_description <-> results
                experiment.sample_name.like(sample.sample_name),
                sample.sample_id.like(sample_description.sample_id),
                experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name)).group_by(
                sample_description.sample_name_short).order_by(
                sample_description.sample_name_short.asc()).all();
            sample_name_short_O = [];
            for sns in sample_name_short: sample_name_short_O.append(sns.sample_name_short);
            return sample_name_short_O;
        except SQLAlchemyError as e:
            print(e);
def get_sampleNameShort_experimentIDAndSampleName(self,experiment_id_I,sample_name_I,exp_type_I=5):
'''Querry sample name short that are used from
the experiment'''
try:
sample_name_short = self.session.query(sample_description.sample_name_short).filter(
sample.sample_name.like(sample_name_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
data_stage01_isotopomer_MQResultsTable.used_.is_(True),
experiment.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id),
experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name)).group_by(
sample_description.sample_name_short).all();
sample_name_short_O = sample_name_short[0];
return sample_name_short_O;
except SQLAlchemyError as e:
print(e);
# query sample name abbreviations from data_stage01_isotopomer_mqresultstable
def get_sampleNameAbbreviations_experimentID(self,experiment_id_I,exp_type_I=5):
'''Querry sample name abbreviations that are used from
the experiment'''
try:
sample_name_abbreviations = self.session.query(sample_description.sample_name_abbreviation).filter(
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
data_stage01_isotopomer_MQResultsTable.used_.is_(True),
experiment.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id),
experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name)).group_by(
sample_description.sample_name_abbreviation).order_by(
sample_description.sample_name_abbreviation.asc()).all();
sample_name_abbreviations_O = [];
for sna in sample_name_abbreviations: sample_name_abbreviations_O.append(sna.sample_name_abbreviation);
return sample_name_abbreviations_O;
except SQLAlchemyError as e:
print(e);
    def get_sampleNameAbbreviations_experimentIDAndSampleType(self,experiment_id_I,sample_type_I,exp_type_I=5):
        """Return the distinct sample name abbreviations for the used rows of
        the given experiment id and sample type, sorted ascending.

        Returns a list of strings, or None on SQLAlchemyError (printed).
        """
        try:
            sample_name_abbreviations = self.session.query(sample_description.sample_name_abbreviation).filter(
                sample.sample_type.like(sample_type_I),
                experiment.exp_type_id == exp_type_I,
                experiment.id.like(experiment_id_I),
                data_stage01_isotopomer_MQResultsTable.used_.is_(True),
                # implicit joins: experiment <-> sample <-> sample_description <-> results
                experiment.sample_name.like(sample.sample_name),
                sample.sample_id.like(sample_description.sample_id),
                experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name)).group_by(
                sample_description.sample_name_abbreviation).order_by(
                sample_description.sample_name_abbreviation.asc()).all();
            sample_name_abbreviations_O = [];
            for sna in sample_name_abbreviations: sample_name_abbreviations_O.append(sna.sample_name_abbreviation);
            return sample_name_abbreviations_O;
        except SQLAlchemyError as e:
            print(e);
    def get_sampleNameAbbreviations_experimentIDAndSampleName(self,experiment_id_I,sample_name_I,exp_type_I=5):
        """Return the distinct sample name abbreviations for the used rows of
        the given experiment id and sample name, sorted ascending.

        Returns a list of strings, or None on SQLAlchemyError (printed).
        """
        try:
            sample_name_abbreviations = self.session.query(sample_description.sample_name_abbreviation).filter(
                sample.sample_name.like(sample_name_I),
                experiment.id.like(experiment_id_I),
                experiment.exp_type_id == exp_type_I,
                data_stage01_isotopomer_MQResultsTable.used_.is_(True),
                # implicit joins: experiment <-> sample <-> sample_description <-> results
                experiment.sample_name.like(sample.sample_name),
                sample.sample_id.like(sample_description.sample_id),
                experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name)).group_by(
                sample_description.sample_name_abbreviation).order_by(
                sample_description.sample_name_abbreviation.asc()).all();
            sample_name_abbreviations_O = [];
            for sna in sample_name_abbreviations: sample_name_abbreviations_O.append(sna.sample_name_abbreviation);
            return sample_name_abbreviations_O;
        except SQLAlchemyError as e:
            print(e);
    def get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePointAndDilution(self,experiment_id_I,sample_type_I,time_point_I,dilution_I,exp_type_I=5):
        """Return the distinct sample name abbreviations for the used rows of
        the given experiment id, sample type, time point, and dilution,
        sorted ascending.

        Returns a list of strings, or None on SQLAlchemyError (printed).
        """
        try:
            sample_name_abbreviations = self.session.query(
                sample_description.sample_name_abbreviation).filter(
                experiment.id.like(experiment_id_I),
                experiment.exp_type_id == exp_type_I,
                # implicit joins: results <-> experiment <-> sample <-> sample_description
                data_stage01_isotopomer_MQResultsTable.sample_name.like(experiment.sample_name),
                data_stage01_isotopomer_MQResultsTable.used_.is_(True),
                sample.sample_name.like(experiment.sample_name),
                sample.sample_type.like(sample_type_I),
                sample.sample_dilution == dilution_I,
                sample_description.sample_id.like(sample.sample_id),
                sample_description.time_point.like(time_point_I)).group_by(
                sample_description.sample_name_abbreviation).order_by(
                sample_description.sample_name_abbreviation).all();
            sample_name_abbreviations_O = [];
            for sn in sample_name_abbreviations:
                # rows are single-column tuples here, hence sn[0]
                sample_name_abbreviations_O.append(sn[0]);
            return sample_name_abbreviations_O;
        except SQLAlchemyError as e:
            print(e);
# query dilutions from data_stage01_isotopomer_mqresultstable
def get_sampleDilution_experimentIDAndSampleID(self,experiment_id_I,sample_id_I,exp_type_I=5):
'''Querry dilutions that are used from the experiment'''
try:
sample_dilutions = self.session.query(sample.sample_dilution).filter(
sample.sample_id.like(sample_id_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
data_stage01_isotopomer_MQResultsTable.used_.is_(True),
experiment.sample_name.like(sample.sample_name),
experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name)).group_by(
sample.sample_dilution).order_by(
sample.sample_dilution.asc()).all();
sample_dilutions_O = [];
for sd in sample_dilutions: sample_dilutions_O.append(sd.sample_dilution);
return sample_dilutions_O;
except SQLAlchemyError as e:
print(e);
    def get_sampleDilution_experimentIDAndSampleNameAbbreviation(self,experiment_id_I,sample_name_abbreviation_I,exp_type_I=5):
        """Return the distinct sample dilutions for the used rows of the given
        experiment id and sample name abbreviation, sorted ascending.

        Returns a list, or None on SQLAlchemyError (printed).
        """
        try:
            sample_dilutions = self.session.query(sample.sample_dilution).filter(
                experiment.id.like(experiment_id_I),
                experiment.exp_type_id == exp_type_I,
                # implicit joins: experiment <-> sample <-> sample_description <-> results
                experiment.sample_name.like(sample.sample_name),
                sample.sample_id.like(sample_description.sample_id),
                sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
                data_stage01_isotopomer_MQResultsTable.used_.is_(True),
                experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name)).group_by(
                sample.sample_dilution).order_by(
                sample.sample_dilution.asc()).all();
            sample_dilutions_O = [];
            for sd in sample_dilutions: sample_dilutions_O.append(sd.sample_dilution);
            return sample_dilutions_O;
        except SQLAlchemyError as e:
            print(e);
    def get_sampleDilution_experimentIDAndTimePoint(self,experiment_id_I,time_point_I,exp_type_I=5):
        """Return the distinct sample dilutions for the used rows of the given
        experiment id and time point, sorted ascending.

        Returns a list, or None on SQLAlchemyError (printed).
        """
        try:
            sample_dilutions = self.session.query(sample.sample_dilution).filter(
                experiment.id.like(experiment_id_I),
                experiment.exp_type_id == exp_type_I,
                # implicit joins: experiment <-> sample <-> results <-> sample_description
                experiment.sample_name.like(sample.sample_name),
                data_stage01_isotopomer_MQResultsTable.sample_name.like(experiment.sample_name),
                data_stage01_isotopomer_MQResultsTable.used_.is_(True),
                sample.sample_id.like(sample_description.sample_id),
                sample_description.time_point.like(time_point_I)).group_by(
                sample.sample_dilution).order_by(
                sample.sample_dilution.asc()).all();
            sample_dilutions_O = [];
            for sd in sample_dilutions: sample_dilutions_O.append(sd.sample_dilution);
            return sample_dilutions_O;
        except SQLAlchemyError as e:
            print(e);
# query time points from data_stage01_isotopomer_mqresultstable
    def get_timePoint_experimentIDAndSampleNameAbbreviation(self,experiment_id_I,sample_name_abbreviation_I,exp_type_I=5):
        """Return the distinct time points for the used rows of the given
        experiment id and sample name abbreviation, sorted ascending.

        Returns a list, or None on SQLAlchemyError (printed).
        """
        try:
            time_points = self.session.query(sample_description.time_point).filter(
                sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
                experiment.id.like(experiment_id_I),
                experiment.exp_type_id == exp_type_I,
                data_stage01_isotopomer_MQResultsTable.used_.is_(True),
                # implicit joins: experiment <-> sample <-> sample_description <-> results
                experiment.sample_name.like(sample.sample_name),
                sample.sample_id.like(sample_description.sample_id),
                experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name)).group_by(
                sample_description.time_point).order_by(
                sample_description.time_point.asc()).all();
            time_points_O = [];
            for tp in time_points: time_points_O.append(tp.time_point);
            return time_points_O;
        except SQLAlchemyError as e:
            print(e);
    def get_timePoint_experimentIDAndSampleName(self,experiment_id_I,sample_name_I,exp_type_I=5):
        """Return the distinct time points for the used rows of the given
        experiment id and sample name, sorted ascending.

        Returns a list, or None on SQLAlchemyError (printed).
        """
        try:
            time_points = self.session.query(sample_description.time_point).filter(
                experiment.id.like(experiment_id_I),
                experiment.exp_type_id == exp_type_I,
                experiment.sample_name.like(sample_name_I),
                # implicit joins: experiment <-> sample <-> results <-> sample_description
                experiment.sample_name.like(sample.sample_name),
                data_stage01_isotopomer_MQResultsTable.sample_name.like(experiment.sample_name),
                data_stage01_isotopomer_MQResultsTable.used_.is_(True),
                sample.sample_id.like(sample_description.sample_id)).group_by(
                sample_description.time_point).order_by(
                sample_description.time_point.asc()).all();
            time_points_O = [];
            for tp in time_points: time_points_O.append(tp.time_point);
            return time_points_O;
        except SQLAlchemyError as e:
            print(e);
def get_timePoint_experimentID(self,experiment_id_I,exp_type_I=5):
'''Querry time points that are used from the experiment and sample name'''
try:
time_points = self.session.query(sample_description.time_point).filter(
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
experiment.sample_name.like(sample.sample_name),
data_stage01_isotopomer_MQResultsTable.sample_name.like(experiment.sample_name),
data_stage01_isotopomer_MQResultsTable.used_.is_(True),
sample.sample_id.like(sample_description.sample_id)).group_by(
sample_description.time_point).order_by(
sample_description.time_point.asc()).all();
time_points_O = [];
for tp in time_points: time_points_O.append(tp.time_point);
return time_points_O;
except SQLAlchemyError as e:
print(e);
# query component names from data_stage01_isotopomer_mqresultstable
    def get_componentsNames_experimentIDAndSampleID(self,experiment_id_I,sample_id_I,exp_type_I=5):
        """Return the distinct component names that are used (used_=True) and
        not internal standards (is_=False) for the given experiment id and
        sample id, sorted ascending.

        Returns a list of strings, or None on SQLAlchemyError (printed).
        """
        try:
            component_names = self.session.query(data_stage01_isotopomer_MQResultsTable.component_name).filter(
                experiment.id.like(experiment_id_I),
                experiment.exp_type_id == exp_type_I,
                data_stage01_isotopomer_MQResultsTable.used_.is_(True),
                # is_ = internal-standard flag; exclude internal standards
                data_stage01_isotopomer_MQResultsTable.is_.is_(False),
                # implicit joins: experiment <-> sample <-> results table
                experiment.sample_name.like(sample.sample_name),
                sample.sample_id.like(sample_id_I),
                experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name)).group_by(
                data_stage01_isotopomer_MQResultsTable.component_name).order_by(
                data_stage01_isotopomer_MQResultsTable.component_name.asc()).all();
            component_names_O = [];
            for cn in component_names: component_names_O.append(cn.component_name);
            return component_names_O;
        except SQLAlchemyError as e:
            print(e);
    def get_componentsNames_experimentIDAndSampleNameAbbreviation(self,experiment_id_I,sample_name_abbreviation_I,exp_type_I=5):
        """Return the distinct component names that are used (used_=True) and
        not internal standards (is_=False) for the given experiment id and
        sample name abbreviation, sorted ascending.

        Returns a list of strings, or None on SQLAlchemyError (printed).
        """
        try:
            component_names = self.session.query(data_stage01_isotopomer_MQResultsTable.component_name).filter(
                sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
                experiment.id.like(experiment_id_I),
                experiment.exp_type_id == exp_type_I,
                # implicit joins: sample_description <-> sample <-> experiment <-> results
                sample.sample_id.like(sample_description.sample_id),
                experiment.sample_name.like(sample.sample_name),
                experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name),
                data_stage01_isotopomer_MQResultsTable.used_.is_(True),
                # is_ = internal-standard flag; exclude internal standards
                data_stage01_isotopomer_MQResultsTable.is_.is_(False)).group_by(
                data_stage01_isotopomer_MQResultsTable.component_name).order_by(
                data_stage01_isotopomer_MQResultsTable.component_name.asc()).all();
            component_names_O = [];
            for cn in component_names: component_names_O.append(cn.component_name);
            return component_names_O;
        except SQLAlchemyError as e:
            print(e);
def get_componentsNames_experimentIDAndSampleName(self,experiment_id_I,sample_name_I,exp_type_I=5):
'''Querry component names that are used and not internal standards from
the experiment and sample_name'''
try:
component_names = self.session.query(data_stage01_isotopomer_MQResultsTable.component_name).filter(
experiment.sample_name.like(sample_name_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name),
data_stage01_isotopomer_MQResultsTable.used_.is_(True),
data_stage01_isotopomer_MQResultsTable.is_.is_(False)).group_by(
data_stage01_isotopomer_MQResultsTable.component_name).order_by(
data_stage01_isotopomer_MQResultsTable.component_name.asc()).all();
component_names_O = [];
for cn in component_names: component_names_O.append(cn.component_name);
return component_names_O;
except SQLAlchemyError as e:
print(e);
# query component group names from data_stage01_isotopomer_mqresultstable
def get_componentGroupNames_sampleName(self,sample_name_I):
'''Querry component group names that are used from the sample name
NOTE: intended to be used within a for loop'''
try:
component_group_names = self.session.query(data_stage01_isotopomer_MQResultsTable.component_group_name).filter(
data_stage01_isotopomer_MQResultsTable.sample_name.like(sample_name_I)).group_by(
data_stage01_isotopomer_MQResultsTable.component_group_name).order_by(
data_stage01_isotopomer_MQResultsTable.component_group_name.asc()).all();
component_group_names_O = [];
for cgn in component_group_names: component_group_names_O.append(cgn.component_group_name);
return component_group_names_O;
except SQLAlchemyError as e:
print(e);
def get_componentGroupName_experimentIDAndComponentName(self,experiment_id_I,component_name_I,exp_type_I=5):
'''Querry component group names that are used from the component name
NOTE: intended to be used within a for loop'''
try:
component_group_name = self.session.query(data_stage01_isotopomer_MQResultsTable.component_group_name).filter(
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name),
data_stage01_isotopomer_MQResultsTable.component_name.like(component_name_I)).group_by(
data_stage01_isotopomer_MQResultsTable.component_group_name).all();
if len(component_group_name)>1:
print('more than 1 component_group_name retrieved per component_name')
component_group_name_O = component_group_name[0];
return component_group_name_O;
except SQLAlchemyError as e:
print(e);
# query component names, group names, intensity,
# precursor formula, product formula, precursor mass, product mass
def get_componentsNamesAndData_experimentIDAndSampleNameAndMSMethodType(self, experiment_id_I, sample_name_I, ms_methodtype_I, exp_type_I=5):
    '''Query component names, group names, peak heights, fragment formulas,
    and fragment masses used by the given experiment and sample_name.'''
    try:
        rows = self.session.query(data_stage01_isotopomer_MQResultsTable.component_name,
                                  data_stage01_isotopomer_MQResultsTable.component_group_name,
                                  data_stage01_isotopomer_MQResultsTable.height,  # peak height
                                  MS_components.precursor_formula,
                                  MS_components.precursor_exactmass,
                                  MS_components.product_formula,
                                  MS_components.product_exactmass).filter(
            data_stage01_isotopomer_MQResultsTable.sample_name.like(sample_name_I),
            data_stage01_isotopomer_MQResultsTable.used_.is_(True),
            experiment.id.like(experiment_id_I),
            experiment.exp_type_id == exp_type_I,
            experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name),
            MS_components.component_name.like(data_stage01_isotopomer_MQResultsTable.component_name),
            MS_components.ms_methodtype.like(ms_methodtype_I)).group_by(
            data_stage01_isotopomer_MQResultsTable.component_name).order_by(
            data_stage01_isotopomer_MQResultsTable.component_name.asc()).all()
        # unzip the result rows into seven parallel lists
        component_names_O = [r.component_name for r in rows]
        component_group_names_O = [r.component_group_name for r in rows]
        intensities_O = [r.height for r in rows]
        precursor_formulas_O = [r.precursor_formula for r in rows]
        precursor_masses_O = [r.precursor_exactmass for r in rows]
        product_formulas_O = [r.product_formula for r in rows]
        product_masses_O = [r.product_exactmass for r in rows]
        return component_names_O, component_group_names_O, intensities_O, \
            precursor_formulas_O, precursor_masses_O, product_formulas_O, product_masses_O
    except SQLAlchemyError as e:
        print(e)
# query component names, group names, precursor formula, product formula, precursor mass, product mass
def get_componentsNamesAndOther_experimentIDAndSampleNameAndMSMethodTypeAndTimePointAndDilution(self, experiment_id_I, sample_name_abbreviation_I, ms_methodtype_I, time_point_I, dilution_I, exp_type_I=5):
    '''Query component names, group names, fragment formulas, and fragment
    masses used by the experiment.'''
    try:
        rows = self.session.query(data_stage01_isotopomer_MQResultsTable.component_name,
                                  data_stage01_isotopomer_MQResultsTable.component_group_name,
                                  MS_components.precursor_formula,
                                  MS_components.precursor_exactmass,
                                  MS_components.product_formula,
                                  MS_components.product_exactmass).filter(
            sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
            sample_description.time_point.like(time_point_I),
            sample.sample_id.like(sample_description.sample_id),
            sample.sample_dilution == dilution_I,
            experiment.sample_name.like(sample.sample_name),
            experiment.id.like(experiment_id_I),
            experiment.exp_type_id == exp_type_I,
            data_stage01_isotopomer_MQResultsTable.sample_name.like(experiment.sample_name),
            data_stage01_isotopomer_MQResultsTable.used_.is_(True),
            MS_components.component_name.like(data_stage01_isotopomer_MQResultsTable.component_name),
            MS_components.ms_methodtype.like(ms_methodtype_I)).group_by(
            data_stage01_isotopomer_MQResultsTable.component_name,
            data_stage01_isotopomer_MQResultsTable.component_group_name,
            MS_components.precursor_formula,
            MS_components.precursor_exactmass,
            MS_components.product_formula,
            MS_components.product_exactmass).order_by(
            data_stage01_isotopomer_MQResultsTable.component_name.asc()).all()
        if not rows:
            print('No component information found for:')
            print('experiment_id\tsample_name_abbreviation\tms_methodtype\ttime_point,dilution')
            print(experiment_id_I, sample_name_abbreviation_I, ms_methodtype_I, time_point_I, dilution_I)
        # when rows is empty, the comprehensions yield six empty lists,
        # which matches the original not-found return value
        component_names_O = [r.component_name for r in rows]
        component_group_names_O = [r.component_group_name for r in rows]
        precursor_formulas_O = [r.precursor_formula for r in rows]
        precursor_masses_O = [r.precursor_exactmass for r in rows]
        product_formulas_O = [r.product_formula for r in rows]
        product_masses_O = [r.product_exactmass for r in rows]
        return component_names_O, component_group_names_O, \
            precursor_formulas_O, precursor_masses_O, product_formulas_O, product_masses_O
    except SQLAlchemyError as e:
        print(e)
# query component names, group names, precursor formula, product formula, precursor mass, product mass
def get_componentsNamesAndOther_experimentIDAndSampleNameAndMSMethodTypeAndTimePointAndDilutionAndMetID(self, experiment_id_I, sample_name_abbreviation_I, ms_methodtype_I, time_point_I, dilution_I, met_id_I, exp_type_I=5):
    '''Query component names, group names, fragment formulas, and fragment
    masses used by the experiment, restricted to a single metabolite id.'''
    try:
        rows = self.session.query(data_stage01_isotopomer_MQResultsTable.component_name,
                                  data_stage01_isotopomer_MQResultsTable.component_group_name,
                                  MS_components.precursor_formula,
                                  MS_components.precursor_exactmass,
                                  MS_components.product_formula,
                                  MS_components.product_exactmass).filter(
            sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
            sample_description.time_point.like(time_point_I),
            sample.sample_id.like(sample_description.sample_id),
            sample.sample_dilution == dilution_I,
            experiment.sample_name.like(sample.sample_name),
            experiment.id.like(experiment_id_I),
            experiment.exp_type_id == exp_type_I,
            data_stage01_isotopomer_MQResultsTable.sample_name.like(experiment.sample_name),
            data_stage01_isotopomer_MQResultsTable.used_.is_(True),
            MS_components.component_name.like(data_stage01_isotopomer_MQResultsTable.component_name),
            MS_components.ms_methodtype.like(ms_methodtype_I),
            MS_components.met_id.like(met_id_I)).group_by(
            data_stage01_isotopomer_MQResultsTable.component_name,
            data_stage01_isotopomer_MQResultsTable.component_group_name,
            MS_components.precursor_formula,
            MS_components.precursor_exactmass,
            MS_components.product_formula,
            MS_components.product_exactmass).order_by(
            data_stage01_isotopomer_MQResultsTable.component_name.asc()).all()
        if not rows:
            print('No component information found for:')
            print('experiment_id\tsample_name_abbreviation\tms_methodtype\ttime_point\tdilution\tmet_id')
            print(experiment_id_I, sample_name_abbreviation_I, ms_methodtype_I, time_point_I, dilution_I, met_id_I)
        # when rows is empty, the comprehensions yield six empty lists,
        # which matches the original not-found return value
        component_names_O = [r.component_name for r in rows]
        component_group_names_O = [r.component_group_name for r in rows]
        precursor_formulas_O = [r.precursor_formula for r in rows]
        precursor_masses_O = [r.precursor_exactmass for r in rows]
        product_formulas_O = [r.product_formula for r in rows]
        product_masses_O = [r.product_exactmass for r in rows]
        return component_names_O, component_group_names_O, \
            precursor_formulas_O, precursor_masses_O, product_formulas_O, product_masses_O
    except SQLAlchemyError as e:
        print(e)
# query physiological parameters from data_stage01_isotopomer_mqresultstable
def get_CVSAndCVSUnitsAndODAndDilAndDilUnits_sampleName(self, sample_name_I):
    '''Query culture volume sampled, its units, OD600, reconstitution volume,
    and its units from a sample name.
    NOTE: intended to be used within a for loop'''
    try:
        rows = self.session.query(sample_physiologicalParameters.culture_volume_sampled,
                                  sample_physiologicalParameters.culture_volume_sampled_units,
                                  sample_physiologicalParameters.od600,
                                  sample_description.reconstitution_volume,
                                  sample_description.reconstitution_volume_units).filter(
            sample.sample_name.like(sample_name_I),
            sample.sample_id.like(sample_physiologicalParameters.sample_id),
            sample.sample_id.like(sample_description.sample_id)).all()
        # only the first matching row is used; unpack its five columns
        cvs_O, cvs_units_O, od600_O, dil_O, dil_units_O = rows[0]
        return cvs_O, cvs_units_O, od600_O, dil_O, dil_units_O
    except SQLAlchemyError as e:
        print(e)
def get_CVSAndCVSUnitsAndODAndDilAndDilUnits_sampleNameShort(self, sample_name_short_I):
    '''Query culture volume sampled, its units, OD600, reconstitution volume,
    and its units from a short sample name.
    NOTE: intended to be used within a for loop'''
    try:
        rows = self.session.query(sample_physiologicalParameters.culture_volume_sampled,
                                  sample_physiologicalParameters.culture_volume_sampled_units,
                                  sample_physiologicalParameters.od600,
                                  sample_description.reconstitution_volume,
                                  sample_description.reconstitution_volume_units).filter(
            sample_description.sample_name_short.like(sample_name_short_I),
            sample_description.sample_id.like(sample_physiologicalParameters.sample_id)).all()
        # only the first matching row is used; unpack its five columns
        cvs_O, cvs_units_O, od600_O, dil_O, dil_units_O = rows[0]
        return cvs_O, cvs_units_O, od600_O, dil_O, dil_units_O
    except SQLAlchemyError as e:
        print(e)
def get_conversionAndConversionUnits_biologicalMaterialAndConversionName(self, biological_material_I, conversion_name_I):
    '''Query a mass/volume conversion factor and its units from a
    biological material and conversion name.
    NOTE: intended to be used within a for loop'''
    try:
        rows = self.session.query(biologicalMaterial_massVolumeConversion.conversion_factor,
                                  biologicalMaterial_massVolumeConversion.conversion_units).filter(
            biologicalMaterial_massVolumeConversion.biological_material.like(biological_material_I),
            biologicalMaterial_massVolumeConversion.conversion_name.like(conversion_name_I)).all()
        # only the first matching row is used; unpack its two columns
        conversion_O, conversion_units_O = rows[0]
        return conversion_O, conversion_units_O
    except SQLAlchemyError as e:
        print(e)
# query data from data_stage01_isotopomer_mqresultstable
def get_concAndConcUnits_sampleNameAndComponentName(self, sample_name_I, component_name_I):
    '''Query data (i.e. concentration, area/peak height ratio) from sample name and component name.
    NOTE: intended to be used within a for loop

    Returns a (conc, conc_units) tuple; both elements are None when no
    data is found, and the implicit return is None if the final query fails.'''
    # check for absolute or relative quantitation (i.e. area/peak height ratio)
    # BUGFIX: use_conc_O is pre-initialized; previously a SQLAlchemyError in the
    # first query left it unbound and the 'if use_conc_O:' test raised NameError.
    use_conc_O = None
    try:
        use_conc = self.session.query(data_stage01_isotopomer_MQResultsTable.use_calculated_concentration).filter(
            data_stage01_isotopomer_MQResultsTable.sample_name.like(sample_name_I),
            data_stage01_isotopomer_MQResultsTable.component_name.like(component_name_I),
            data_stage01_isotopomer_MQResultsTable.used_.is_(True)).all()
        if use_conc:
            use_conc_O = use_conc[0][0]
    except SQLAlchemyError as e:
        print(e)
    if use_conc_O:
        # absolute quantitation: return the calculated concentration and units
        try:
            data = self.session.query(data_stage01_isotopomer_MQResultsTable.calculated_concentration,
                                      data_stage01_isotopomer_MQResultsTable.conc_units).filter(
                data_stage01_isotopomer_MQResultsTable.sample_name.like(sample_name_I),
                data_stage01_isotopomer_MQResultsTable.component_name.like(component_name_I),
                data_stage01_isotopomer_MQResultsTable.used_.is_(True)).all()
            if data:
                conc_O = data[0][0]
                conc_units_O = data[0][1]
            else:
                conc_O = None
                conc_units_O = None
            return conc_O, conc_units_O
        except SQLAlchemyError as e:
            print(e)
    else:
        # relative quantitation: check for area or peak height ratio from quantitation_method
        # BUGFIX: ratio_O is pre-initialized for the same unbound-name reason as above.
        ratio_O = None
        try:
            data = self.session.query(quantitation_method.use_area).filter(
                experiment.sample_name.like(sample_name_I),
                experiment.quantitation_method_id.like(quantitation_method.id),
                quantitation_method.component_name.like(component_name_I)).all()
            if data:
                ratio_O = data[0][0]
        except SQLAlchemyError as e:
            print(e)
        if ratio_O:
            try:
                data = self.session.query(data_stage01_isotopomer_MQResultsTable.area_ratio).filter(
                    data_stage01_isotopomer_MQResultsTable.sample_name.like(sample_name_I),
                    data_stage01_isotopomer_MQResultsTable.component_name.like(component_name_I),
                    data_stage01_isotopomer_MQResultsTable.used_.is_(True)).all()
                if data:
                    conc_O = data[0][0]
                    conc_units_O = 'area_ratio'
                else:
                    conc_O = None
                    conc_units_O = None
                return conc_O, conc_units_O
            except SQLAlchemyError as e:
                print(e)
        else:
            try:
                data = self.session.query(data_stage01_isotopomer_MQResultsTable.height_ratio).filter(
                    data_stage01_isotopomer_MQResultsTable.sample_name.like(sample_name_I),
                    data_stage01_isotopomer_MQResultsTable.component_name.like(component_name_I),
                    data_stage01_isotopomer_MQResultsTable.used_.is_(True)).all()
                if data:
                    conc_O = data[0][0]
                    conc_units_O = 'height_ratio'
                else:
                    conc_O = None
                    conc_units_O = None
                return conc_O, conc_units_O
            except SQLAlchemyError as e:
                print(e)
def get_peakHeight_sampleNameAndComponentName(self, sample_name_I, component_name_I):
    '''Query the peak height for a sample name / component name pair.
    NOTE: intended to be used within a for loop'''
    try:
        rows = self.session.query(data_stage01_isotopomer_MQResultsTable.height).filter(
            data_stage01_isotopomer_MQResultsTable.sample_name.like(sample_name_I),
            data_stage01_isotopomer_MQResultsTable.component_name.like(component_name_I),
            data_stage01_isotopomer_MQResultsTable.used_.is_(True)).all()
        # first matching height, or None when nothing matched
        return rows[0][0] if rows else None
    except SQLAlchemyError as e:
        print(e)
# query if used
def get_used_sampleNameAndComponentName(self, sample_name_I, component_name_I):
    '''Query the used_ flag for a sample name / component name pair.
    NOTE: intended to be used within a for loop

    Returns the used_ value of the first matching row, or None.'''
    try:
        # BUGFIX: the filter previously referenced the non-existent column
        # 'component_name_name' and the undefined name 'component_name_name_I'
        # (the parameter is component_name_I), so every call raised NameError.
        data = self.session.query(data_stage01_isotopomer_MQResultsTable.used_).filter(
            data_stage01_isotopomer_MQResultsTable.sample_name.like(sample_name_I),
            data_stage01_isotopomer_MQResultsTable.component_name.like(component_name_I)).all()
        if data:
            # BUGFIX: return the scalar used_ value (data[0][0]), not the row
            # tuple, matching the convention of the sibling getters.
            used_O = data[0][0]
        else:
            used_O = None
        return used_O
    except SQLAlchemyError as e:
        print(e)
# delete data from data_stage01_isotopomer_mqresultstable
def delete_row_sampleName(self, sampleNames_I):
    '''Delete specific samples from an experiment by their sample name'''
    deletes = []
    for entry in sampleNames_I:
        try:
            n_deleted = self.session.query(data_stage01_isotopomer_MQResultsTable).filter(
                data_stage01_isotopomer_MQResultsTable.sample_name.like(entry['sample_name'])).delete(
                synchronize_session=False)
            # a count of 0 means no row matched this sample name
            if n_deleted == 0:
                print('row not found')
                print(entry)
            deletes.append(n_deleted)
        except SQLAlchemyError as e:
            print(e)
    self.session.commit()
def add_dataStage01IsotopomerMQResultsTable(self, data_I):
    '''add rows of data_stage01_isotopomer_MQResultsTable

    data_I: list of dicts, one per row. Each dict is keyed either by the
    export-style column headers (detected by the presence of 'Index') or
    by the table's own column names (detected by 'index_'). Rows are
    committed in batches of 1000 and once more at the end.

    NOTE(review): if a dict contains neither 'Index' nor 'index_',
    data_add is left unbound and session.add raises NameError (not caught
    by the IntegrityError/SQLAlchemyError handlers) — confirm inputs
    always match one of the two schemas.'''
    if data_I:
        cnt = 0;
        for d in data_I:
            try:
                if 'Index' in d:
                    # export-style headers: positional construction in the
                    # exact column order of the table definition
                    data_add = data_stage01_isotopomer_MQResultsTable(d['Index'],
                        d['Sample Index'],
                        d['Original Filename'],
                        d['Sample Name'],
                        d['Sample ID'],
                        d['Sample Comment'],
                        d['Sample Type'],
                        d['Acquisition Date & Time'],
                        d['Rack Number'],
                        d['Plate Number'],
                        d['Vial Number'],
                        d['Dilution Factor'],
                        d['Injection Volume'],
                        d['Operator Name'],
                        d['Acq. Method Name'],
                        d['IS'],
                        d['Component Name'],
                        d['Component Index'],
                        d['Component Comment'],
                        d['IS Comment'],
                        d['Mass Info'],
                        d['IS Mass Info'],
                        d['IS Name'],
                        d['Component Group Name'],
                        d['Conc. Units'],
                        d['Failed Query'],
                        d['IS Failed Query'],
                        d['Peak Comment'],
                        d['IS Peak Comment'],
                        d['Actual Concentration'],
                        d['IS Actual Concentration'],
                        d['Concentration Ratio'],
                        d['Expected RT'],
                        d['IS Expected RT'],
                        d['Integration Type'],
                        d['IS Integration Type'],
                        d['Area'],
                        d['IS Area'],
                        d['Corrected Area'],
                        d['IS Corrected Area'],
                        d['Area Ratio'],
                        d['Height'],
                        d['IS Height'],
                        d['Corrected Height'],
                        d['IS Corrected Height'],
                        d['Height Ratio'],
                        d['Area / Height'],
                        d['IS Area / Height'],
                        d['Corrected Area/Height'],
                        d['IS Corrected Area/Height'],
                        d['Region Height'],
                        d['IS Region Height'],
                        d['Quality'],
                        d['IS Quality'],
                        d['Retention Time'],
                        d['IS Retention Time'],
                        d['Start Time'],
                        d['IS Start Time'],
                        d['End Time'],
                        d['IS End Time'],
                        d['Total Width'],
                        d['IS Total Width'],
                        d['Width at 50%'],
                        d['IS Width at 50%'],
                        d['Signal / Noise'],
                        d['IS Signal / Noise'],
                        d['Baseline Delta / Height'],
                        d['IS Baseline Delta / Height'],
                        d['Modified'],
                        d['Relative RT'],
                        d['Used'],
                        d['Calculated Concentration'],
                        d['Accuracy'],
                        d['Comment'],
                        d['Use_Calculated_Concentration']);
                elif 'index_' in d:
                    # table-style keys: same positional order as above
                    data_add = data_stage01_isotopomer_MQResultsTable(d['index_'],
                        d['sample_index'],
                        d['original_filename'],
                        d['sample_name'],
                        d['sample_id'],
                        d['sample_comment'],
                        d['sample_type'],
                        d['acquisition_date_and_time'],
                        d['rack_number'],
                        d['plate_number'],
                        d['vial_number'],
                        d['dilution_factor'],
                        d['injection_volume'],
                        d['operator_name'],
                        d['acq_method_name'],
                        d['is_'],
                        d['component_name'],
                        d['component_index'],
                        d['component_comment'],
                        d['is_comment'],
                        d['mass_info'],
                        d['is_mass'],
                        d['is_name'],
                        d['component_group_name'],
                        d['conc_units'],
                        d['failed_query'],
                        d['is_failed_query'],
                        d['peak_comment'],
                        d['is_peak_comment'],
                        d['actual_concentration'],
                        d['is_actual_concentration'],
                        d['concentration_ratio'],
                        d['expected_rt'],
                        d['is_expected_rt'],
                        d['integration_type'],
                        d['is_integration_type'],
                        d['area'],
                        d['is_area'],
                        d['corrected_area'],
                        d['is_corrected_area'],
                        d['area_ratio'],
                        d['height'],
                        d['is_height'],
                        d['corrected_height'],
                        d['is_corrected_height'],
                        d['height_ratio'],
                        d['area_2_height'],
                        d['is_area_2_height'],
                        d['corrected_area2height'],
                        d['is_corrected_area2height'],
                        d['region_height'],
                        d['is_region_height'],
                        d['quality'],
                        d['is_quality'],
                        d['retention_time'],
                        d['is_retention_time'],
                        d['start_time'],
                        d['is_start_time'],
                        d['end_time'],
                        d['is_end_time'],
                        d['total_width'],
                        d['is_total_width'],
                        d['width_at_50'],
                        d['is_width_at_50'],
                        d['signal_2_noise'],
                        d['is_signal_2_noise'],
                        d['baseline_delta_2_height'],
                        d['is_baseline_delta_2_height'],
                        d['modified_'],
                        d['relative_rt'],
                        d['used_'],
                        d['calculated_concentration'],
                        d['accuracy_'],
                        d['comment_'],
                        d['use_calculated_concentration'],
                        );
                self.session.add(data_add);
                cnt = cnt + 1;
                # flush to the database every 1000 rows to bound memory use
                if cnt > 1000:
                    self.session.commit();
                    cnt = 0;
            except IntegrityError as e:
                print(e);
            except SQLAlchemyError as e:
                print(e);
        # final commit for the last partial batch
        self.session.commit();
def update_dataStage01IsotopomerMQResultsTable(self, data_I):
    '''update rows of data_stage01_isotopomer_MQResultsTable

    data_I: list of dicts keyed by the export-style column headers
    (e.g. 'Component Name', 'Sample Name'). Rows are matched on
    component name, sample name, and acquisition date & time; all
    columns of every matching row are overwritten.'''
    if data_I:
        for d in data_I:
            try:
                data_update = self.session.query(data_stage01_isotopomer_MQResultsTable).filter(
                    data_stage01_isotopomer_MQResultsTable.component_name.like(d['Component Name']),
                    data_stage01_isotopomer_MQResultsTable.sample_name.like(d['Sample Name']),
                    data_stage01_isotopomer_MQResultsTable.acquisition_date_and_time == d['Acquisition Date & Time']).update(
                    # map export-style headers onto the table's column names
                    {'index_':d['Index'],
                    'sample_index':d['Sample Index'],
                    'original_filename':d['Original Filename'],
                    'sample_name':d['Sample Name'],
                    'sample_id':d['Sample ID'],
                    'sample_comment':d['Sample Comment'],
                    'sample_type':d['Sample Type'],
                    'acquisition_date_and_time':d['Acquisition Date & Time'],
                    'rack_number':d['Rack Number'],
                    'plate_number':d['Plate Number'],
                    'vial_number':d['Vial Number'],
                    'dilution_factor':d['Dilution Factor'],
                    'injection_volume':d['Injection Volume'],
                    'operator_name':d['Operator Name'],
                    'acq_method_name':d['Acq. Method Name'],
                    'is_':d['IS'],
                    'component_name':d['Component Name'],
                    'component_index':d['Component Index'],
                    'component_comment':d['Component Comment'],
                    'is_comment':d['IS Comment'],
                    'mass_info':d['Mass Info'],
                    'is_mass':d['IS Mass Info'],
                    'is_name':d['IS Name'],
                    'component_group_name':d['Component Group Name'],
                    'conc_units':d['Conc. Units'],
                    'failed_query':d['Failed Query'],
                    'is_failed_query':d['IS Failed Query'],
                    'peak_comment':d['Peak Comment'],
                    'is_peak_comment':d['IS Peak Comment'],
                    'actual_concentration':d['Actual Concentration'],
                    'is_actual_concentration':d['IS Actual Concentration'],
                    'concentration_ratio':d['Concentration Ratio'],
                    'expected_rt':d['Expected RT'],
                    'is_expected_rt':d['IS Expected RT'],
                    'integration_type':d['Integration Type'],
                    'is_integration_type':d['IS Integration Type'],
                    'area':d['Area'],
                    'is_area':d['IS Area'],
                    'corrected_area':d['Corrected Area'],
                    'is_corrected_area':d['IS Corrected Area'],
                    'area_ratio':d['Area Ratio'],
                    'height':d['Height'],
                    'is_height':d['IS Height'],
                    'corrected_height':d['Corrected Height'],
                    'is_corrected_height':d['IS Corrected Height'],
                    'height_ratio':d['Height Ratio'],
                    'area_2_height':d['Area / Height'],
                    'is_area_2_height':d['IS Area / Height'],
                    'corrected_area2height':d['Corrected Area/Height'],
                    'is_corrected_area2height':d['IS Corrected Area/Height'],
                    'region_height':d['Region Height'],
                    'is_region_height':d['IS Region Height'],
                    'quality':d['Quality'],
                    'is_quality':d['IS Quality'],
                    'retention_time':d['Retention Time'],
                    'is_retention_time':d['IS Retention Time'],
                    'start_time':d['Start Time'],
                    'is_start_time':d['IS Start Time'],
                    'end_time':d['End Time'],
                    'is_end_time':d['IS End Time'],
                    'total_width':d['Total Width'],
                    'is_total_width':d['IS Total Width'],
                    'width_at_50':d['Width at 50%'],
                    'is_width_at_50':d['IS Width at 50%'],
                    'signal_2_noise':d['Signal / Noise'],
                    'is_signal_2_noise':d['IS Signal / Noise'],
                    'baseline_delta_2_height':d['Baseline Delta / Height'],
                    'is_baseline_delta_2_height':d['IS Baseline Delta / Height'],
                    'modified_':d['Modified'],
                    'relative_rt':d['Relative RT'],
                    'used_':d['Used'],
                    'calculated_concentration':d['Calculated Concentration'],
                    'accuracy_':d['Accuracy'],
                    'comment_':d['Comment'],
                    'use_calculated_concentration':d['Use_Calculated_Concentration']},
                    synchronize_session=False);
                # update() returns the number of rows affected
                if data_update == 0:
                    print('row not found.')
                    print(d);
            except SQLAlchemyError as e:
                print(e);
        self.session.commit();
1f823c259718d70cf2828b1adb1a797022f7a97c | 133,964 | py | Python | veriloggen/types/axi.py | jesseclin/veriloggen | a645f2c53f04e5b88213eef17779d212192ea2b5 | [
"Apache-2.0"
] | 232 | 2015-09-01T16:07:48.000Z | 2022-03-28T14:53:28.000Z | veriloggen/types/axi.py | jesseclin/veriloggen | a645f2c53f04e5b88213eef17779d212192ea2b5 | [
"Apache-2.0"
] | 34 | 2015-08-21T09:13:03.000Z | 2022-03-21T23:52:44.000Z | veriloggen/types/axi.py | jesseclin/veriloggen | a645f2c53f04e5b88213eef17779d212192ea2b5 | [
"Apache-2.0"
] | 46 | 2015-09-24T14:39:57.000Z | 2022-02-23T21:59:56.000Z | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import functools
import math
from collections import defaultdict
import veriloggen.core.vtypes as vtypes
from veriloggen.seq.seq import Seq
from veriloggen.fsm.fsm import FSM
import veriloggen.dataflow as _df
from veriloggen.dataflow.dataflow import DataflowManager
from veriloggen.dataflow.dtypes import make_condition, read_multi
from veriloggen.dataflow.dtypes import _Numeric as df_numeric
from . import util
# AxBURST burst-type encodings (2-bit value driven on awburst/arburst)
BURST_FIXED = 0b00
BURST_INCR = 0b01
BURST_WRAP = 0b10

# AxCACHE encodings for non-coherent vs. coherent transactions
AxCACHE_NONCOHERENT = 0b0011
AxCACHE_COHERENT = 0b1111

# AxPROT encodings for non-coherent vs. coherent transactions
AxPROT_NONCOHERENT = 0b000
AxPROT_COHERENT = 0b010

# AxUSER sideband encodings for non-coherent vs. coherent transactions
AxUSER_NONCOHERENT = 0b00
AxUSER_COHERENT = 0b01

# default value for the other xUSER sideband signals
xUSER_DEFAULT = 0b00
def _connect_ready(m, var, val):
    """OR `val` into the continuous assignment driving `var`.

    If `var` has no assignment yet, create one; otherwise extend the
    existing right-hand side with an OR term and move the assignment to
    the end of module `m`'s item list.
    """
    assign = var._get_assign()
    if not assign:
        var.assign(val)
        return
    assign.overwrite_right(vtypes.Ors(assign.statement.right, val))
    # re-append so the rewritten assignment appears after its drivers
    m.remove(assign)
    m.append(assign)
class AxiInterfaceBase(object):
    """Common base for AXI channel wrappers.

    Stores the target module, signal widths, and the input/output port
    constructors used by the subclasses to declare their channel ports.
    """

    # default port constructors; subclasses may override these
    _I = util.t_Input
    _O = util.t_OutputReg

    def __init__(self, m, name=None,
                 datawidth=32, addrwidth=32,
                 id_width=0, user_width=0,
                 itype=None, otype=None):
        self.m = m
        self.name = name
        self.datawidth = datawidth
        self.addrwidth = addrwidth
        self.id_width = id_width
        self.user_width = user_width
        # fall back to the class-level constructors when none are given
        self.itype = self._I if itype is None else itype
        self.otype = self._O if otype is None else otype
class AxiLiteInterfaceBase(AxiInterfaceBase):
    """Base for AXI4-Lite channels: no ID or user sideband widths."""

    _I = util.t_Input
    _O = util.t_OutputReg

    def __init__(self, m, name=None,
                 datawidth=32, addrwidth=32,
                 itype=None, otype=None):
        # id_width and user_width are fixed to None for AXI4-Lite
        AxiInterfaceBase.__init__(self, m, name, datawidth, addrwidth,
                                  None, None, itype, otype)
class AxiStreamInterfaceBase(AxiInterfaceBase):
    """Base for AXI4-Stream channels: adds a destination width, no address."""

    _I = util.t_Input
    _O = util.t_OutputReg

    def __init__(self, m, name=None,
                 datawidth=32,
                 id_width=0, user_width=0, dest_width=0,
                 itype=None, otype=None):
        # streams carry no address, so addrwidth is fixed to None
        AxiInterfaceBase.__init__(self, m, name, datawidth, None,
                                  id_width, user_width, itype, otype)
        self.dest_width = dest_width
class AxiWriteAddress(AxiInterfaceBase):
    """AXI4 write-address (AW) channel: declares the aw* ports on `m`."""

    def __init__(self, m, name=None, datawidth=32, addrwidth=32,
                 id_width=0, user_width=2,
                 itype=None, otype=None):
        AxiInterfaceBase.__init__(self, m, name, datawidth, addrwidth,
                                  id_width, user_width, itype, otype)

        # an integer ID width of 0 means the awid port is omitted entirely
        self.awid = (None if isinstance(id_width, int) and id_width == 0 else
                     util.make_port(m, self.otype, name + '_awid',
                                    self.id_width, initval=0))

        self.awaddr = util.make_port(m, self.otype, name + '_awaddr', self.addrwidth, initval=0)
        self.awlen = util.make_port(m, self.otype, name + '_awlen', 8, initval=0)
        self.awsize = util.make_port(m, self.otype, name + '_awsize', 3, initval=0, no_reg=True)
        self.awburst = util.make_port(m, self.otype, name + '_awburst', 2, initval=0, no_reg=True)
        self.awlock = util.make_port(m, self.otype, name + '_awlock', 1, initval=0, no_reg=True)
        self.awcache = util.make_port(m, self.otype, name + '_awcache', 4, initval=0, no_reg=True)
        self.awprot = util.make_port(m, self.otype, name + '_awprot', 3, initval=0, no_reg=True)
        self.awqos = util.make_port(m, self.otype, name + '_awqos', 4, initval=0, no_reg=True)

        # likewise, an integer user width of 0 omits the awuser port
        self.awuser = (None if isinstance(user_width, int) and user_width == 0 else
                       util.make_port(m, self.otype, name + '_awuser',
                                      self.user_width, initval=0, no_reg=True))

        self.awvalid = util.make_port(m, self.otype, name + '_awvalid', None, initval=0)
        self.awready = util.make_port(m, self.itype, name + '_awready', None, initval=0)
class AxiLiteWriteAddress(AxiLiteInterfaceBase):
    """AXI4-Lite write-address (AW) channel: declares the aw* ports on `m`."""

    def __init__(self, m, name=None, datawidth=32, addrwidth=32,
                 itype=None, otype=None):
        AxiLiteInterfaceBase.__init__(self, m, name, datawidth, addrwidth,
                                      itype, otype)
        self.awaddr = util.make_port(m, self.otype, name + '_awaddr', self.addrwidth, initval=0)
        self.awcache = util.make_port(m, self.otype, name + '_awcache', 4, initval=0, no_reg=True)
        self.awprot = util.make_port(m, self.otype, name + '_awprot', 3, initval=0, no_reg=True)
        self.awvalid = util.make_port(m, self.otype, name + '_awvalid', None, initval=0)
        self.awready = util.make_port(m, self.itype, name + '_awready', None, initval=0)
class AxiWriteData(AxiInterfaceBase):
    """AXI4 write-data (W) channel: declares the w* ports on `m`."""

    def __init__(self, m, name=None, datawidth=32, addrwidth=32,
                 id_width=0, user_width=0,
                 itype=None, otype=None):
        AxiInterfaceBase.__init__(self, m, name, datawidth, addrwidth,
                                  id_width, user_width, itype, otype)
        self.wdata = util.make_port(m, self.otype, name + '_wdata', self.datawidth, initval=0)
        # one strobe bit per data byte
        self.wstrb = util.make_port(m, self.otype, name + '_wstrb', self.datawidth // 8, initval=0)
        self.wlast = util.make_port(m, self.otype, name + '_wlast', None, initval=0)

        # an integer user width of 0 omits the wuser port
        self.wuser = (None if isinstance(user_width, int) and user_width == 0 else
                      util.make_port(m, self.otype, name + '_wuser',
                                     self.user_width, initval=0, no_reg=True))

        self.wvalid = util.make_port(m, self.otype, name + '_wvalid', None, initval=0)
        self.wready = util.make_port(m, self.itype, name + '_wready', None, initval=0)
class AxiLiteWriteData(AxiLiteInterfaceBase):
    """AXI4-Lite write-data (W) channel: declares the w* ports on `m`."""

    def __init__(self, m, name=None, datawidth=32, addrwidth=32,
                 itype=None, otype=None):
        AxiLiteInterfaceBase.__init__(self, m, name, datawidth, addrwidth,
                                      itype, otype)
        self.wdata = util.make_port(m, self.otype, name + '_wdata', self.datawidth, initval=0)
        # one strobe bit per data byte
        self.wstrb = util.make_port(m, self.otype, name + '_wstrb', self.datawidth // 8, initval=0)
        self.wvalid = util.make_port(m, self.otype, name + '_wvalid', None, initval=0)
        self.wready = util.make_port(m, self.itype, name + '_wready', None, initval=0)
class AxiWriteResponse(AxiInterfaceBase):
    """AXI4 write-response (B) channel: declares the b* ports on `m`.

    Response signals are inputs here; only bready is driven as an output.
    """

    def __init__(self, m, name=None, datawidth=32, addrwidth=32,
                 id_width=0, user_width=0,
                 itype=None, otype=None):
        AxiInterfaceBase.__init__(self, m, name, datawidth, addrwidth,
                                  id_width, user_width, itype, otype)

        # an integer ID width of 0 omits the bid port
        self.bid = (None if isinstance(id_width, int) and id_width == 0 else
                    util.make_port(m, self.itype, name + '_bid',
                                   self.id_width, initval=0))

        self.bresp = util.make_port(m, self.itype, name + '_bresp', 2, initval=0, no_reg=True)

        # an integer user width of 0 omits the buser port
        self.buser = (None if isinstance(user_width, int) and user_width == 0 else
                      util.make_port(m, self.itype, name + '_buser',
                                     self.user_width, initval=0, no_reg=True))

        self.bvalid = util.make_port(m, self.itype, name + '_bvalid', None, initval=0)
        self.bready = util.make_port(m, self.otype, name + '_bready', None, initval=0, no_reg=True)
class AxiLiteWriteResponse(AxiLiteInterfaceBase):
    """AXI4-Lite write-response (B) channel: declares the b* ports on `m`."""

    def __init__(self, m, name=None, datawidth=32, addrwidth=32,
                 itype=None, otype=None):
        AxiLiteInterfaceBase.__init__(self, m, name, datawidth, addrwidth,
                                      itype, otype)
        self.bresp = util.make_port(m, self.itype, name + '_bresp', 2, initval=0, no_reg=True)
        self.bvalid = util.make_port(m, self.itype, name + '_bvalid', None, initval=0)
        self.bready = util.make_port(m, self.otype, name + '_bready', None, initval=0, no_reg=True)
class AxiReadAddress(AxiInterfaceBase):
    """AXI4 read-address (AR) channel: declares the ar* ports on `m`."""

    def __init__(self, m, name=None, datawidth=32, addrwidth=32,
                 id_width=0, user_width=2,
                 itype=None, otype=None):
        AxiInterfaceBase.__init__(self, m, name, datawidth, addrwidth,
                                  id_width, user_width, itype, otype)

        # an integer ID width of 0 omits the arid port
        self.arid = (None if isinstance(id_width, int) and id_width == 0 else
                     util.make_port(m, self.otype, name + '_arid',
                                    self.id_width, initval=0))

        self.araddr = util.make_port(m, self.otype, name + '_araddr', self.addrwidth, initval=0)
        self.arlen = util.make_port(m, self.otype, name + '_arlen', 8, initval=0)
        self.arsize = util.make_port(m, self.otype, name + '_arsize', 3, initval=0, no_reg=True)
        self.arburst = util.make_port(m, self.otype, name + '_arburst', 2, initval=0, no_reg=True)
        self.arlock = util.make_port(m, self.otype, name + '_arlock', 1, initval=0, no_reg=True)
        self.arcache = util.make_port(m, self.otype, name + '_arcache', 4, initval=0, no_reg=True)
        self.arprot = util.make_port(m, self.otype, name + '_arprot', 3, initval=0, no_reg=True)
        self.arqos = util.make_port(m, self.otype, name + '_arqos', 4, initval=0, no_reg=True)

        # likewise, an integer user width of 0 omits the aruser port
        self.aruser = (None if isinstance(user_width, int) and user_width == 0 else
                       util.make_port(m, self.otype, name + '_aruser',
                                      self.user_width, initval=0, no_reg=True))

        self.arvalid = util.make_port(m, self.otype, name + '_arvalid', None, initval=0)
        self.arready = util.make_port(m, self.itype, name + '_arready', None, initval=0)
class AxiLiteReadAddress(AxiLiteInterfaceBase):
    """AXI4-Lite read-address (AR) channel: declares the ar* ports on `m`."""

    def __init__(self, m, name=None, datawidth=32, addrwidth=32,
                 itype=None, otype=None):
        AxiLiteInterfaceBase.__init__(self, m, name, datawidth, addrwidth,
                                      itype, otype)
        self.araddr = util.make_port(m, self.otype, name + '_araddr', self.addrwidth, initval=0)
        self.arcache = util.make_port(m, self.otype, name + '_arcache', 4, initval=0, no_reg=True)
        self.arprot = util.make_port(m, self.otype, name + '_arprot', 3, initval=0, no_reg=True)
        self.arvalid = util.make_port(m, self.otype, name + '_arvalid', None, initval=0)
        self.arready = util.make_port(m, self.itype, name + '_arready', None, initval=0)
class AxiReadData(AxiInterfaceBase):
    """AXI4 (full) read-data (R) channel: data/resp/last in, ready out."""

    _O = util.t_Output

    def __init__(self, m, name=None, datawidth=32, addrwidth=32,
                 id_width=0, user_width=0,
                 itype=None, otype=None):
        AxiInterfaceBase.__init__(self, m, name, datawidth, addrwidth,
                                  id_width, user_width, itype, otype)

        # rid is omitted entirely when the ID width is the literal int 0.
        self.rid = (None
                    if isinstance(id_width, int) and id_width == 0
                    else util.make_port(m, self.itype, name + '_rid',
                                        self.id_width, initval=0))
        self.rdata = util.make_port(
            m, self.itype, name + '_rdata', self.datawidth, initval=0)
        self.rresp = util.make_port(
            m, self.itype, name + '_rresp', 2, initval=0, no_reg=True)
        self.rlast = util.make_port(
            m, self.itype, name + '_rlast', None, initval=0)
        # ruser is likewise optional, keyed on a literal zero width.
        self.ruser = (None
                      if isinstance(user_width, int) and user_width == 0
                      else util.make_port(m, self.itype, name + '_ruser',
                                          self.user_width, initval=0,
                                          no_reg=True))
        self.rvalid = util.make_port(
            m, self.itype, name + '_rvalid', None, initval=0)
        self.rready = util.make_port(
            m, self.otype, name + '_rready', None, initval=0)
class AxiLiteReadData(AxiLiteInterfaceBase):
    """AXI4-Lite read-data (R) channel: rdata/rresp in, rready out."""

    _O = util.t_Output

    def __init__(self, m, name=None, datawidth=32, addrwidth=32,
                 itype=None, otype=None):
        AxiLiteInterfaceBase.__init__(self, m, name, datawidth, addrwidth,
                                      itype, otype)

        prefix = name
        self.rdata = util.make_port(
            m, self.itype, prefix + '_rdata', self.datawidth, initval=0)
        self.rresp = util.make_port(
            m, self.itype, prefix + '_rresp', 2, initval=0, no_reg=True)
        self.rvalid = util.make_port(
            m, self.itype, prefix + '_rvalid', None, initval=0)
        self.rready = util.make_port(
            m, self.otype, prefix + '_rready', None, initval=0)
# AXI-Full Master
class AxiMasterWriteAddress(AxiWriteAddress):
    """AXI4-full master AW channel (base directions are already master-side)."""


class AxiMasterWriteData(AxiWriteData):
    """AXI4-full master W channel."""


class AxiMasterWriteResponse(AxiWriteResponse):
    """AXI4-full master B channel."""


class AxiMasterReadAddress(AxiReadAddress):
    """AXI4-full master AR channel."""


class AxiMasterReadData(AxiReadData):
    """AXI4-full master R channel."""
# AXI-Lite Master
class AxiLiteMasterWriteAddress(AxiLiteWriteAddress):
    """AXI4-Lite master AW channel (base directions are already master-side)."""


class AxiLiteMasterWriteData(AxiLiteWriteData):
    """AXI4-Lite master W channel."""


class AxiLiteMasterWriteResponse(AxiLiteWriteResponse):
    """AXI4-Lite master B channel."""


class AxiLiteMasterReadAddress(AxiLiteReadAddress):
    """AXI4-Lite master AR channel."""


class AxiLiteMasterReadData(AxiLiteReadData):
    """AXI4-Lite master R channel."""
# AXI-Full Slave
class AxiSlaveWriteAddress(AxiWriteAddress):
    """AXI4-full slave AW channel: port directions flipped vs. the master."""
    _I = util.t_Output
    _O = util.t_Input


class AxiSlaveWriteData(AxiWriteData):
    """AXI4-full slave W channel: directions flipped vs. the master."""
    _I = util.t_Output
    _O = util.t_Input


class AxiSlaveWriteResponse(AxiWriteResponse):
    """AXI4-full slave B channel: slave drives its outputs from registers."""
    _I = util.t_OutputReg
    _O = util.t_Input


class AxiSlaveReadAddress(AxiReadAddress):
    """AXI4-full slave AR channel: directions flipped vs. the master."""
    _I = util.t_Output
    _O = util.t_Input


class AxiSlaveReadData(AxiReadData):
    """AXI4-full slave R channel: slave drives its outputs from registers."""
    _I = util.t_OutputReg
    _O = util.t_Input
# AXI-Lite Slave
class AxiLiteSlaveWriteAddress(AxiLiteWriteAddress):
    """AXI4-Lite slave AW channel: port directions flipped vs. the master."""
    _I = util.t_Output
    _O = util.t_Input


class AxiLiteSlaveWriteData(AxiLiteWriteData):
    """AXI4-Lite slave W channel: directions flipped vs. the master."""
    _I = util.t_Output
    _O = util.t_Input


class AxiLiteSlaveWriteResponse(AxiLiteWriteResponse):
    """AXI4-Lite slave B channel: slave drives its outputs from registers."""
    _I = util.t_OutputReg
    _O = util.t_Input


class AxiLiteSlaveReadAddress(AxiLiteReadAddress):
    """AXI4-Lite slave AR channel: directions flipped vs. the master."""
    _I = util.t_Output
    _O = util.t_Input


class AxiLiteSlaveReadData(AxiLiteReadData):
    """AXI4-Lite slave R channel: slave drives its outputs from registers."""
    _I = util.t_OutputReg
    _O = util.t_Input
class AxiStreamInData(AxiStreamInterfaceBase):
    """AXI-Stream sink-side data channel: tdata/tvalid in, tready out.

    tlast/tstrb are created only when requested; tid/tuser/tdest are
    created only when their widths are non-zero (each absent signal is
    left as None).
    """

    _O = util.t_Output

    def __init__(self, m, name=None, datawidth=32,
                 with_last=True, with_strb=False,
                 id_width=0, user_width=0, dest_width=0,
                 itype=None, otype=None):
        AxiStreamInterfaceBase.__init__(self, m, name, datawidth,
                                        id_width, user_width, dest_width,
                                        itype, otype)

        self.tdata = util.make_port(
            m, self.itype, name + '_tdata', self.datawidth, initval=0)
        self.tvalid = util.make_port(
            m, self.itype, name + '_tvalid', None, initval=0)
        self.tready = util.make_port(
            m, self.otype, name + '_tready', None, initval=0)

        # Optional framing / byte-enable sideband signals.
        self.tlast = (util.make_port(m, self.itype, name + '_tlast',
                                     initval=0)
                      if with_last else None)
        self.tstrb = (util.make_port(m, self.itype, name + '_tstrb',
                                     self.datawidth // 8, initval=0)
                      if with_strb else None)

        # Width-gated sideband signals: a literal 0 width disables each port.
        self.tuser = (None
                      if isinstance(user_width, int) and user_width == 0
                      else util.make_port(m, self.itype, name + '_tuser',
                                          self.user_width, initval=0))
        self.tid = (None
                    if isinstance(id_width, int) and id_width == 0
                    else util.make_port(m, self.itype, name + '_tid',
                                        self.id_width, initval=0))
        self.tdest = (None
                      if isinstance(dest_width, int) and dest_width == 0
                      else util.make_port(m, self.itype, name + '_tdest',
                                          self.dest_width, initval=0))
class AxiStreamOutData(AxiStreamInData):
    """AXI-Stream source-side data channel: AxiStreamInData with the
    directions flipped (outputs driven from registers)."""
    _I = util.t_OutputReg
    _O = util.t_Input
# AXI-Full
class AxiMaster(object):
    """AXI4 (full) master: channel port bundles plus request/data helpers.

    Builds the AW/W/B/AR/R channel groups on module `m`, a Seq block for
    the sequential handshake logic, and an outstanding-write counter that
    gates write data until write responses catch up.
    """

    # awlen/arlen are 8 bits wide -> bursts of up to 256 beats.
    burst_size_width = 8
    # AXI bursts must not cross a 4KB address boundary.
    boundary_size = 4096

    def __init__(self, m, name, clk, rst, datawidth=32, addrwidth=32,
                 waddr_id_width=0, wdata_id_width=0, wresp_id_width=0,
                 raddr_id_width=0, rdata_id_width=0,
                 waddr_user_width=2, wdata_user_width=0, wresp_user_width=0,
                 raddr_user_width=2, rdata_user_width=0,
                 waddr_burst_mode=BURST_INCR, raddr_burst_mode=BURST_INCR,
                 waddr_cache_mode=AxCACHE_NONCOHERENT, raddr_cache_mode=AxCACHE_NONCOHERENT,
                 waddr_prot_mode=AxPROT_NONCOHERENT, raddr_prot_mode=AxPROT_NONCOHERENT,
                 waddr_user_mode=AxUSER_NONCOHERENT, wdata_user_mode=xUSER_DEFAULT,
                 raddr_user_mode=AxUSER_NONCOHERENT,
                 noio=False, nodataflow=False, outstanding_wcount_width=3):
        self.m = m
        self.name = name
        self.clk = clk
        self.rst = rst
        self.datawidth = datawidth
        self.addrwidth = addrwidth
        self.noio = noio

        # Register this bus on the module so it can be enumerated later.
        if not hasattr(self.m, 'masterbus'):
            self.m.masterbus = []
        self.m.masterbus.append(self)

        # With noio, channel signals become internal Wire/Reg instead of
        # module-level I/O ports (they are wired up later via connect()).
        itype = util.t_Wire if noio else None
        otype = util.t_Reg if noio else None

        self.waddr = AxiMasterWriteAddress(m, name, datawidth, addrwidth,
                                           waddr_id_width, waddr_user_width, itype, otype)
        self.wdata = AxiMasterWriteData(m, name, datawidth, addrwidth,
                                        wdata_id_width, wdata_user_width, itype, otype)
        self.wresp = AxiMasterWriteResponse(m, name, datawidth, addrwidth,
                                            wresp_id_width, wresp_user_width, itype, otype)
        self.raddr = AxiMasterReadAddress(m, name, datawidth, addrwidth,
                                          raddr_id_width, raddr_user_width, itype, otype)

        # Read-data outputs (rready) are combinational, hence Wire not Reg.
        otype = util.t_Wire if noio else None
        self.rdata = AxiMasterReadData(m, name, datawidth, addrwidth,
                                       rdata_id_width, rdata_user_width, itype, otype)

        self.seq = Seq(m, name, clk, rst)

        # default values
        # awsize/arsize encode bytes-per-beat as log2(datawidth/8).
        self.waddr.awsize.assign(int(math.log(self.datawidth / 8, 2)))
        self.waddr.awburst.assign(waddr_burst_mode)
        self.waddr.awlock.assign(0)
        self.waddr.awcache.assign(waddr_cache_mode)
        self.waddr.awprot.assign(waddr_prot_mode)
        self.waddr.awqos.assign(0)
        if self.waddr.awuser is not None:
            self.waddr.awuser.assign(waddr_user_mode)
        if self.wdata.wuser is not None:
            self.wdata.wuser.assign(wdata_user_mode)
        # Always ready to accept write responses.
        self.wresp.bready.assign(1)
        self.raddr.arsize.assign(int(math.log(self.datawidth / 8, 2)))
        self.raddr.arburst.assign(raddr_burst_mode)
        self.raddr.arlock.assign(0)
        self.raddr.arcache.assign(raddr_cache_mode)
        self.raddr.arprot.assign(raddr_prot_mode)
        self.raddr.arqos.assign(0)
        if self.raddr.aruser is not None:
            self.raddr.aruser.assign(raddr_user_mode)

        # Burst-beat counters allocated by *_request_counter helpers.
        self.write_counters = []
        self.read_counters = []

        # outstanding write request
        # Counts write bursts issued (last beat accepted) but not yet
        # acknowledged via the B channel; saturates at the counter maximum.
        if outstanding_wcount_width < 2:
            raise ValueError("outstanding_wcount_width must be 2 or more.")
        self.outstanding_wcount_width = outstanding_wcount_width
        self.outstanding_wcount = self.m.TmpReg(self.outstanding_wcount_width, initval=0,
                                                prefix='outstanding_wcount')

        # Increment on last-beat acceptance without a simultaneous response.
        self.seq.If(vtypes.Ands(self.wdata.wlast, self.wdata.wvalid, self.wdata.wready),
                    vtypes.Not(vtypes.Ands(self.wresp.bvalid, self.wresp.bready)),
                    self.outstanding_wcount < 2 ** self.outstanding_wcount_width - 1)(
            self.outstanding_wcount.inc()
        )
        # Decrement on a response without a simultaneous last-beat acceptance.
        self.seq.If(vtypes.Not(vtypes.Ands(self.wdata.wlast, self.wdata.wvalid, self.wdata.wready)),
                    vtypes.Ands(self.wresp.bvalid, self.wresp.bready),
                    self.outstanding_wcount > 0)(
            self.outstanding_wcount.dec()
        )

        if nodataflow:
            self.df = None
        else:
            self.df = DataflowManager(self.m, self.clk, self.rst)

        self._write_disabled = False
        self._read_disabled = False

    def disable_write(self):
        """Permanently tie the write channels to zero and mark writes off."""
        ports = [self.waddr.awaddr(0),
                 self.waddr.awlen(0),
                 self.waddr.awvalid(0),
                 self.wdata.wdata(0),
                 self.wdata.wstrb(0),
                 self.wdata.wlast(0),
                 self.wdata.wvalid(0)]

        # awid exists only when the ID width is non-zero.
        if self.waddr.awid is not None:
            ports.insert(0, self.waddr.awid(0))

        self.seq(
            *ports
        )

        self._write_disabled = True

    def disable_read(self):
        """Permanently tie the read channels to zero and mark reads off."""
        ports = [self.raddr.araddr(0),
                 self.raddr.arlen(0),
                 self.raddr.arvalid(0)]

        if self.raddr.arid is not None:
            ports.insert(0, self.raddr.arid(0))

        self.seq(
            *ports
        )

        self.rdata.rready.assign(0)

        self._read_disabled = True

    def mask_addr(self, addr):
        """Align `addr` down to the bus word size (clear the low bits)."""
        s = util.log2(self.datawidth // 8)
        return (addr >> s) << s

    def check_boundary(self, addr, length, datawidth=None, boundary_size=None):
        """Return an expression that is true when a burst of `length` beats
        starting at `addr` would reach or cross the 4KB boundary."""
        if datawidth is None:
            datawidth = self.datawidth

        if boundary_size is None:
            boundary_size = self.boundary_size

        mask = boundary_size - 1
        return ((addr & mask) + (length << util.log2(datawidth // 8))) >= boundary_size

    def rest_boundary(self, addr, datawidth=None, boundary_size=None):
        """Return the number of beats remaining before the next boundary."""
        if datawidth is None:
            datawidth = self.datawidth

        if boundary_size is None:
            boundary_size = self.boundary_size

        mask = boundary_size - 1
        return (vtypes.Int(boundary_size) - (addr & mask)) >> util.log2(datawidth // 8)

    def write_acceptable(self):
        """True while the outstanding-write counter has headroom (keeps a
        margin of 2 below the counter maximum)."""
        return self.outstanding_wcount < 2 ** self.outstanding_wcount_width - 2

    def write_request(self, addr, length=1, cond=None):
        """Issue a write-address request of `length` beats at `addr`.

        @return ack
        """
        if self._write_disabled:
            raise TypeError('Write disabled.')

        if isinstance(length, int) and length > 2 ** self.burst_size_width:
            raise ValueError("length must be less than 257.")

        if isinstance(length, int) and length < 1:
            raise ValueError("length must be more than 0.")

        if cond is not None:
            self.seq.If(cond)

        # Accept a new request when AW is idle or being consumed.
        ack = vtypes.Ors(self.waddr.awready, vtypes.Not(self.waddr.awvalid))

        self.seq.If(ack)(
            self.waddr.awid(0) if self.waddr.awid is not None else (),
            self.waddr.awaddr(addr),
            self.waddr.awlen(length - 1),
            self.waddr.awvalid(1)
        )
        self.seq.Then().If(length == 0)(
            self.waddr.awvalid(0)
        )

        # de-assert
        self.seq.Delay(1)(
            self.waddr.awvalid(0)
        )

        # retry
        # Hold awvalid while the slave has not yet accepted the address.
        self.seq.If(vtypes.Ands(self.waddr.awvalid, vtypes.Not(self.waddr.awready)))(
            self.waddr.awvalid(self.waddr.awvalid)
        )

        return ack

    def write_request_counter(self, addr, length=1, cond=None, counter=None):
        """Issue a write-address request and load a beat counter with `length`.

        @return ack, counter
        """
        if self._write_disabled:
            raise TypeError('Write disabled.')

        if isinstance(length, int) and length > 2 ** self.burst_size_width:
            raise ValueError("length must be less than 257.")

        if isinstance(length, int) and length < 1:
            raise ValueError("length must be more than 0.")

        if counter is not None and not isinstance(counter, vtypes.Reg):
            raise TypeError("counter must be Reg or None.")

        if cond is not None:
            self.seq.If(cond)

        ack = vtypes.Ors(self.waddr.awready, vtypes.Not(self.waddr.awvalid))

        if counter is None:
            counter = self.m.TmpReg(self.burst_size_width + 1, initval=0,
                                    prefix='counter')

        self.write_counters.append(counter)

        # Only start a new burst once the previous counter has drained.
        self.seq.If(vtypes.Ands(ack, counter == 0))(
            self.waddr.awid(0) if self.waddr.awid is not None else (),
            self.waddr.awaddr(addr),
            self.waddr.awlen(length - 1),
            self.waddr.awvalid(1),
            counter(length)
        )
        self.seq.Then().If(length == 0)(
            self.waddr.awvalid(0)
        )

        # de-assert
        self.seq.Delay(1)(
            self.waddr.awvalid(0)
        )

        # retry
        self.seq.If(vtypes.Ands(self.waddr.awvalid, vtypes.Not(self.waddr.awready)))(
            self.waddr.awvalid(self.waddr.awvalid)
        )

        return ack, counter

    def write_data(self, data, counter=None, cond=None):
        """Drive one write-data beat; asserts wlast on the final beat.

        @return ack, last
        """
        if self._write_disabled:
            raise TypeError('Write disabled.')

        if counter is not None and not isinstance(counter, vtypes.Reg):
            raise TypeError("counter must be Reg or None.")

        if counter is None:
            # Default to the counter from the most recent write request.
            counter = self.write_counters[-1]

        if cond is not None:
            self.seq.If(cond)

        # Accept a beat while beats remain, responses have headroom, and
        # the W channel is idle or being consumed.
        ack = vtypes.Ands(counter > 0,
                          self.write_acceptable(),
                          vtypes.Ors(self.wdata.wready, vtypes.Not(self.wdata.wvalid)))
        last = self.m.TmpReg(initval=0, prefix='last')

        self.seq.If(vtypes.Ands(ack, counter > 0))(
            self.wdata.wdata(data),
            self.wdata.wvalid(1),
            self.wdata.wlast(0),
            # All byte lanes enabled.
            self.wdata.wstrb(vtypes.Repeat(
                vtypes.Int(1, 1), (self.wdata.datawidth // 8))),
            counter.dec()
        )
        self.seq.Then().If(counter == 1)(
            self.wdata.wlast(1),
            last(1)
        )

        # de-assert
        self.seq.Delay(1)(
            self.wdata.wvalid(0),
            self.wdata.wlast(0),
            last(0)
        )

        # retry
        # Hold valid/last until the slave accepts the beat.
        self.seq.If(vtypes.Ands(self.wdata.wvalid, vtypes.Not(self.wdata.wready)))(
            self.wdata.wvalid(self.wdata.wvalid),
            self.wdata.wlast(self.wdata.wlast),
            last(last)
        )

        return ack, last

    def write_dataflow(self, data, counter=None, cond=None, when=None):
        """Stream write-data beats from a dataflow source.

        @return done
        'data' and 'when' must be dataflow variables
        """
        if self._write_disabled:
            raise TypeError('Write disabled.')

        if counter is not None and not isinstance(counter, vtypes.Reg):
            raise TypeError("counter must be Reg or None.")

        if counter is None:
            counter = self.write_counters[-1]

        ack = vtypes.Ands(counter > 0,
                          self.write_acceptable(),
                          vtypes.Ors(self.wdata.wready, vtypes.Not(self.wdata.wvalid)))
        last = self.m.TmpReg(initval=0, prefix='last')

        # Pop from the dataflow source only when the bus can accept a beat.
        if cond is None:
            cond = ack
        else:
            cond = (cond, ack)

        if when is None or not isinstance(when, df_numeric):
            raw_data, raw_valid = data.read(cond=cond)
        else:
            # Read data and its gating condition in lockstep.
            data_list, raw_valid = read_multi(self.m, data, when, cond=cond)
            raw_data = data_list[0]
            when = data_list[1]

        when_cond = make_condition(when, ready=cond)
        if when_cond is not None:
            raw_valid = vtypes.Ands(when_cond, raw_valid)

        # write condition
        self.seq.If(raw_valid)

        self.seq.If(vtypes.Ands(ack, counter > 0))(
            self.wdata.wdata(raw_data),
            self.wdata.wvalid(1),
            self.wdata.wlast(0),
            self.wdata.wstrb(vtypes.Repeat(
                vtypes.Int(1, 1), (self.wdata.datawidth // 8))),
            counter.dec()
        )
        self.seq.Then().If(counter == 1)(
            self.wdata.wlast(1),
            last(1)
        )

        # de-assert
        self.seq.Delay(1)(
            self.wdata.wvalid(0),
            self.wdata.wlast(0),
            last(0)
        )

        # retry
        self.seq.If(vtypes.Ands(self.wdata.wvalid, vtypes.Not(self.wdata.wready)))(
            self.wdata.wvalid(self.wdata.wvalid),
            self.wdata.wlast(self.wdata.wlast),
            last(last)
        )

        # Done when the final beat is handed to the slave.
        done = vtypes.Ands(last, self.wdata.wvalid, self.wdata.wready)

        return done

    def write_completed(self):
        """True when no write bursts are awaiting a B-channel response."""
        return self.outstanding_wcount == 0

    def read_request(self, addr, length=1, cond=None):
        """Issue a read-address request of `length` beats at `addr`.

        @return ack
        """
        if self._read_disabled:
            raise TypeError('Read disabled.')

        if isinstance(length, int) and length > 2 ** self.burst_size_width:
            raise ValueError("length must be less than 257.")

        if isinstance(length, int) and length < 1:
            raise ValueError("length must be more than 0.")

        if cond is not None:
            self.seq.If(cond)

        ack = vtypes.Ors(self.raddr.arready, vtypes.Not(self.raddr.arvalid))

        self.seq.If(ack)(
            self.raddr.arid(0) if self.raddr.arid is not None else (),
            self.raddr.araddr(addr),
            self.raddr.arlen(length - 1),
            self.raddr.arvalid(1)
        )

        # de-assert
        self.seq.Delay(1)(
            self.raddr.arvalid(0)
        )

        # retry
        self.seq.If(vtypes.Ands(self.raddr.arvalid, vtypes.Not(self.raddr.arready)))(
            self.raddr.arvalid(self.raddr.arvalid)
        )

        return ack

    def read_request_counter(self, addr, length=1, cond=None, counter=None):
        """Issue a read-address request and load a beat counter with `length`.

        @return ack, counter
        """
        if self._read_disabled:
            raise TypeError('Read disabled.')

        if isinstance(length, int) and length > 2 ** self.burst_size_width:
            raise ValueError("length must be less than 257.")

        if isinstance(length, int) and length < 1:
            raise ValueError("length must be more than 0.")

        if counter is not None and not isinstance(counter, vtypes.Reg):
            raise TypeError("counter must be Reg or None.")

        if cond is not None:
            self.seq.If(cond)

        ack = vtypes.Ors(self.raddr.arready, vtypes.Not(self.raddr.arvalid))

        if counter is None:
            counter = self.m.TmpReg(self.burst_size_width + 1, initval=0, prefix='counter')

        self.read_counters.append(counter)

        self.seq.If(vtypes.Ands(ack, counter == 0))(
            self.raddr.arid(0) if self.raddr.arid is not None else (),
            self.raddr.araddr(addr),
            self.raddr.arlen(length - 1),
            self.raddr.arvalid(1),
            counter(length)
        )

        # de-assert
        self.seq.Delay(1)(
            self.raddr.arvalid(0)
        )

        # retry
        self.seq.If(vtypes.Ands(self.raddr.arvalid, vtypes.Not(self.raddr.arready)))(
            self.raddr.arvalid(self.raddr.arvalid)
        )

        return ack, counter

    def read_data(self, counter=None, cond=None):
        """Receive one read-data beat; `cond` gates rready.

        @return data, valid, last
        """
        if self._read_disabled:
            raise TypeError('Read disabled.')

        if counter is not None and not isinstance(counter, vtypes.Reg):
            raise TypeError("counter must be Reg or None.")

        if counter is None:
            counter = self.read_counters[-1]

        ready = make_condition(cond)
        val = 1 if ready is None else ready

        # rready is shared; OR this condition into its driver.
        _connect_ready(self.rdata.rready._get_module(), self.rdata.rready, val)

        ack = vtypes.Ands(self.rdata.rready, self.rdata.rvalid)
        data = self.rdata.rdata
        valid = ack
        last = self.rdata.rlast

        self.seq.If(vtypes.Ands(ack, counter > 0))(
            counter.dec()
        )

        return data, valid, last

    def read_dataflow(self, counter=None, cond=None, point=0, signed=True):
        """Expose the read-data stream as dataflow variables.

        @return data, last, done
        """
        if self._read_disabled:
            raise TypeError('Read disabled.')

        if counter is not None and not isinstance(counter, vtypes.Reg):
            raise TypeError("counter must be Reg or None.")

        if counter is None:
            counter = self.read_counters[-1]

        # Back-pressure wires from the two dataflow sinks (data and last).
        data_ready = self.m.TmpWire(prefix='data_ready')
        last_ready = self.m.TmpWire(prefix='last_ready')
        data_ready.assign(1)
        last_ready.assign(1)

        if cond is None:
            cond = (data_ready, last_ready)
        elif isinstance(cond, (tuple, list)):
            cond = tuple(list(cond) + [data_ready, last_ready])
        else:
            cond = (cond, data_ready, last_ready)

        ready = make_condition(*cond)
        val = 1 if ready is None else ready

        _connect_ready(self.rdata.rready._get_module(), self.rdata.rready, val)

        ack = vtypes.Ands(self.rdata.rready, self.rdata.rvalid)
        data = self.rdata.rdata
        valid = self.rdata.rvalid
        last = self.rdata.rlast

        self.seq.If(vtypes.Ands(ack, counter > 0))(
            counter.dec()
        )

        # Fall back to the module-level dataflow manager when nodataflow.
        df = self.df if self.df is not None else _df

        df_data = df.Variable(data, valid, data_ready,
                              width=self.datawidth, point=point, signed=signed)
        df_last = df.Variable(last, valid, last_ready, width=1, signed=False)
        done = vtypes.Ands(last, self.rdata.rvalid, self.rdata.rready)

        return df_data, df_last, done

    def connect(self, ports, name):
        """Wire this bus's internal signals (noio mode) to external `ports`,
        a dict of port objects keyed by '<name>_<signal>'.

        Optional signals (ID/user) are tied to 0 when only one side has them.
        """
        if not self.noio:
            raise ValueError('I/O ports can not be connected to others.')

        # --- write address (AW) ---
        if '_'.join([name, 'awid']) in ports:
            awid = ports['_'.join([name, 'awid'])]
        else:
            awid = None

        awaddr = ports['_'.join([name, 'awaddr'])]
        awlen = ports['_'.join([name, 'awlen'])]
        awsize = ports['_'.join([name, 'awsize'])]
        awburst = ports['_'.join([name, 'awburst'])]
        awlock = ports['_'.join([name, 'awlock'])]
        awcache = ports['_'.join([name, 'awcache'])]
        awprot = ports['_'.join([name, 'awprot'])]
        awqos = ports['_'.join([name, 'awqos'])]

        if '_'.join([name, 'awuser']) in ports:
            awuser = ports['_'.join([name, 'awuser'])]
        else:
            awuser = None

        awvalid = ports['_'.join([name, 'awvalid'])]
        awready = ports['_'.join([name, 'awready'])]

        if awid is not None:
            awid.connect(self.waddr.awid if self.waddr.awid is not None else 0)
        awaddr.connect(self.waddr.awaddr)
        awlen.connect(self.waddr.awlen)
        awsize.connect(self.waddr.awsize)
        awburst.connect(self.waddr.awburst)
        awlock.connect(self.waddr.awlock)
        awcache.connect(self.waddr.awcache)
        awprot.connect(self.waddr.awprot)
        awqos.connect(self.waddr.awqos)
        if awuser is not None:
            awuser.connect(self.waddr.awuser if self.waddr.awuser is not None else 0)
        awvalid.connect(self.waddr.awvalid)
        self.waddr.awready.connect(awready)

        # --- write data (W) ---
        wdata = ports['_'.join([name, 'wdata'])]
        wstrb = ports['_'.join([name, 'wstrb'])]
        wlast = ports['_'.join([name, 'wlast'])]

        if '_'.join([name, 'wuser']) in ports:
            wuser = ports['_'.join([name, 'wuser'])]
        else:
            wuser = None

        wvalid = ports['_'.join([name, 'wvalid'])]
        wready = ports['_'.join([name, 'wready'])]

        wdata.connect(self.wdata.wdata)
        wstrb.connect(self.wdata.wstrb)
        wlast.connect(self.wdata.wlast)
        if wuser is not None:
            wuser.connect(self.wdata.wuser if self.wdata.wuser is not None else 0)
        wvalid.connect(self.wdata.wvalid)
        self.wdata.wready.connect(wready)

        # --- write response (B) ---
        if '_'.join([name, 'bid']) in ports:
            bid = ports['_'.join([name, 'bid'])]
        else:
            bid = None

        bresp = ports['_'.join([name, 'bresp'])]

        if '_'.join([name, 'buser']) in ports:
            buser = ports['_'.join([name, 'buser'])]
        else:
            buser = None

        bvalid = ports['_'.join([name, 'bvalid'])]
        bready = ports['_'.join([name, 'bready'])]

        if self.wresp.bid is not None:
            self.wresp.bid.connect(bid if bid is not None else 0)
        self.wresp.bresp.connect(bresp)
        if self.wresp.buser is not None:
            self.wresp.buser.connect(buser if buser is not None else 0)
        self.wresp.bvalid.connect(bvalid)
        bready.connect(self.wresp.bready)

        # --- read address (AR) ---
        if '_'.join([name, 'arid']) in ports:
            arid = ports['_'.join([name, 'arid'])]
        else:
            arid = None

        araddr = ports['_'.join([name, 'araddr'])]
        arlen = ports['_'.join([name, 'arlen'])]
        arsize = ports['_'.join([name, 'arsize'])]
        arburst = ports['_'.join([name, 'arburst'])]
        arlock = ports['_'.join([name, 'arlock'])]
        arcache = ports['_'.join([name, 'arcache'])]
        arprot = ports['_'.join([name, 'arprot'])]
        arqos = ports['_'.join([name, 'arqos'])]

        if '_'.join([name, 'aruser']) in ports:
            aruser = ports['_'.join([name, 'aruser'])]
        else:
            aruser = None

        arvalid = ports['_'.join([name, 'arvalid'])]
        arready = ports['_'.join([name, 'arready'])]

        if arid is not None:
            arid.connect(self.raddr.arid if self.raddr.arid is not None else 0)
        araddr.connect(self.raddr.araddr)
        arlen.connect(self.raddr.arlen)
        arsize.connect(self.raddr.arsize)
        arburst.connect(self.raddr.arburst)
        arlock.connect(self.raddr.arlock)
        arcache.connect(self.raddr.arcache)
        arprot.connect(self.raddr.arprot)
        arqos.connect(self.raddr.arqos)
        if aruser is not None:
            aruser.connect(self.raddr.aruser if self.raddr.aruser is not None else 0)
        arvalid.connect(self.raddr.arvalid)
        self.raddr.arready.connect(arready)

        # --- read data (R) ---
        if '_'.join([name, 'rid']) in ports:
            rid = ports['_'.join([name, 'rid'])]
        else:
            rid = None

        rdata = ports['_'.join([name, 'rdata'])]
        rresp = ports['_'.join([name, 'rresp'])]
        rlast = ports['_'.join([name, 'rlast'])]

        if '_'.join([name, 'ruser']) in ports:
            ruser = ports['_'.join([name, 'ruser'])]
        else:
            ruser = None

        rvalid = ports['_'.join([name, 'rvalid'])]
        rready = ports['_'.join([name, 'rready'])]

        if self.rdata.rid is not None:
            self.rdata.rid.connect(rid if rid is not None else 0)
        self.rdata.rdata.connect(rdata)
        self.rdata.rresp.connect(rresp)
        self.rdata.rlast.connect(rlast)
        if self.rdata.ruser is not None:
            self.rdata.ruser.connect(ruser if ruser is not None else 0)
        self.rdata.rvalid.connect(rvalid)
        rready.connect(self.rdata.rready)
# AXI-Lite
class AxiLiteMaster(AxiMaster):
    """AXI4-Lite master: single-beat transfers, no burst/ID/user signals.

    Overrides the AXI-full helpers to enforce length == 1 and to reject
    dataflow operations, which require bursts.
    """

    def __init__(self, m, name, clk, rst, datawidth=32, addrwidth=32,
                 waddr_cache_mode=AxCACHE_NONCOHERENT, raddr_cache_mode=AxCACHE_NONCOHERENT,
                 waddr_prot_mode=AxPROT_NONCOHERENT, raddr_prot_mode=AxPROT_NONCOHERENT,
                 noio=False, nodataflow=False, outstanding_wcount_width=3):
        # NOTE: intentionally does NOT call AxiMaster.__init__; the Lite
        # channel classes have different constructor signatures.
        self.m = m
        self.name = name
        self.clk = clk
        self.rst = rst
        self.datawidth = datawidth
        self.addrwidth = addrwidth
        self.noio = noio

        # Register this bus on the module so it can be enumerated later.
        if not hasattr(self.m, 'masterbus'):
            self.m.masterbus = []
        self.m.masterbus.append(self)

        # With noio, channel signals become internal Wire/Reg instead of
        # module-level I/O ports (they are wired up later via connect()).
        itype = util.t_Wire if noio else None
        otype = util.t_Reg if noio else None

        self.waddr = AxiLiteMasterWriteAddress(m, name, datawidth, addrwidth,
                                               itype, otype)
        self.wdata = AxiLiteMasterWriteData(m, name, datawidth, addrwidth,
                                            itype, otype)
        self.wresp = AxiLiteMasterWriteResponse(m, name, datawidth, addrwidth,
                                                itype, otype)
        self.raddr = AxiLiteMasterReadAddress(m, name, datawidth, addrwidth,
                                              itype, otype)

        # Read-data outputs (rready) are combinational, hence Wire not Reg.
        otype = util.t_Wire if noio else None
        self.rdata = AxiLiteMasterReadData(m, name, datawidth, addrwidth,
                                           itype, otype)

        self.seq = Seq(m, name, clk, rst)

        # default values
        self.waddr.awcache.assign(waddr_cache_mode)
        self.waddr.awprot.assign(waddr_prot_mode)
        # Always ready to accept write responses.
        self.wresp.bready.assign(1)
        self.raddr.arcache.assign(raddr_cache_mode)
        self.raddr.arprot.assign(raddr_prot_mode)

        # outstanding write request
        # Counts writes issued (W beat accepted; no wlast on Lite) but not
        # yet acknowledged on the B channel.
        if outstanding_wcount_width < 2:
            raise ValueError("outstanding_wcount_width must be 2 or more.")
        self.outstanding_wcount_width = outstanding_wcount_width
        self.outstanding_wcount = self.m.TmpReg(self.outstanding_wcount_width, initval=0,
                                                prefix='outstanding_wcount')

        # Increment on a data beat without a simultaneous response.
        self.seq.If(vtypes.Ands(self.wdata.wvalid, self.wdata.wready),
                    vtypes.Not(vtypes.Ands(self.wresp.bvalid, self.wresp.bready)),
                    self.outstanding_wcount < (2 ** self.outstanding_wcount_width - 1))(
            self.outstanding_wcount.inc()
        )
        # Decrement on a response without a simultaneous data beat.
        self.seq.If(vtypes.Not(vtypes.Ands(self.wdata.wvalid, self.wdata.wready)),
                    vtypes.Ands(self.wresp.bvalid, self.wresp.bready),
                    self.outstanding_wcount > 0)(
            self.outstanding_wcount.dec()
        )

        if nodataflow:
            self.df = None
        else:
            self.df = DataflowManager(self.m, self.clk, self.rst)

        self._write_disabled = False
        self._read_disabled = False

    def disable_write(self):
        """Permanently tie the write channels to zero and mark writes off."""
        ports = [self.waddr.awaddr(0),
                 self.waddr.awvalid(0),
                 self.wdata.wdata(0),
                 self.wdata.wstrb(0),
                 self.wdata.wvalid(0)]

        self.seq(
            *ports
        )

        self._write_disabled = True

    def disable_read(self):
        """Permanently tie the read channels to zero and mark reads off."""
        ports = [self.raddr.araddr(0),
                 self.raddr.arvalid(0)]

        self.seq(
            *ports
        )

        self.rdata.rready.assign(0)

        self._read_disabled = True

    def write_request(self, addr, length=1, cond=None):
        """Issue a single-beat write-address request at `addr`.

        @return ack
        """
        if self._write_disabled:
            raise TypeError('Write disabled.')

        # AXI-Lite has no burst support.
        if length != 1:
            raise ValueError('length must be 1 for lite-interface.')

        if cond is not None:
            self.seq.If(cond)

        # Accept a new request when AW is idle or being consumed.
        ack = vtypes.Ors(self.waddr.awready, vtypes.Not(self.waddr.awvalid))

        self.seq.If(ack)(
            self.waddr.awaddr(addr),
            self.waddr.awvalid(1),
        )

        # de-assert
        self.seq.Delay(1)(
            self.waddr.awvalid(0)
        )

        # retry
        # Hold awvalid while the slave has not yet accepted the address.
        self.seq.If(vtypes.Ands(self.waddr.awvalid, vtypes.Not(self.waddr.awready)))(
            self.waddr.awvalid(self.waddr.awvalid)
        )

        return ack

    def write_data(self, data, cond=None):
        """Drive the single write-data beat (all byte lanes enabled).

        @return ack
        """
        if self._write_disabled:
            raise TypeError('Write disabled.')

        if cond is not None:
            self.seq.If(cond)

        # Accept while responses have headroom and W is idle or consumed.
        ack = vtypes.Ands(self.write_acceptable(),
                          vtypes.Ors(self.wdata.wready, vtypes.Not(self.wdata.wvalid)))

        self.seq.If(ack)(
            self.wdata.wdata(data),
            self.wdata.wvalid(1),
            self.wdata.wstrb(vtypes.Repeat(
                vtypes.Int(1, 1), (self.wdata.datawidth // 8)))
        )

        # de-assert
        self.seq.Delay(1)(
            self.wdata.wvalid(0),
        )

        # retry
        self.seq.If(vtypes.Ands(self.wdata.wvalid, vtypes.Not(self.wdata.wready)))(
            self.wdata.wvalid(self.wdata.wvalid)
        )

        return ack

    def write_dataflow(self, data, counter=None, cond=None, when=None):
        """Unsupported on AXI-Lite: dataflow writes require bursts.

        @return done
        'data' and 'when' must be dataflow variables
        """
        raise TypeError('lite interface support no dataflow operation.')

    def write_completed(self):
        """True when no writes are awaiting a B-channel response."""
        return self.outstanding_wcount == 0

    def read_request(self, addr, length=1, cond=None):
        """Issue a single-beat read-address request at `addr`.

        @return ack
        """
        if self._read_disabled:
            raise TypeError('Read disabled.')

        # AXI-Lite has no burst support.
        if length != 1:
            raise ValueError('length must be 1 for lite-interface.')

        if cond is not None:
            self.seq.If(cond)

        ack = vtypes.Ors(self.raddr.arready, vtypes.Not(self.raddr.arvalid))

        self.seq.If(ack)(
            self.raddr.araddr(addr),
            self.raddr.arvalid(1)
        )

        # de-assert
        self.seq.Delay(1)(
            self.raddr.arvalid(0)
        )

        # retry
        self.seq.If(vtypes.Ands(self.raddr.arvalid, vtypes.Not(self.raddr.arready)))(
            self.raddr.arvalid(self.raddr.arvalid)
        )

        return ack

    def read_data(self, cond=None):
        """Receive the read-data beat; `cond` gates rready.

        @return data, valid
        """
        if self._read_disabled:
            raise TypeError('Read disabled.')

        ready = make_condition(cond)
        val = 1 if ready is None else ready

        # rready is shared; OR this condition into its driver.
        _connect_ready(self.rdata.rready._get_module(), self.rdata.rready, val)

        ack = vtypes.Ands(self.rdata.rready, self.rdata.rvalid)
        data = self.rdata.rdata
        valid = ack

        return data, valid

    def read_dataflow(self, counter=None, cond=None, point=0, signed=True):
        """Unsupported on AXI-Lite: dataflow reads require bursts.

        @return data, last, done
        """
        raise TypeError('lite interface support no dataflow operation.')

    def connect(self, ports, name):
        """Wire this bus's internal signals (noio mode) to external `ports`,
        a dict of port objects keyed by '<name>_<signal>'."""
        if not self.noio:
            raise ValueError('I/O ports can not be connected to others.')

        # --- write address (AW) ---
        awaddr = ports['_'.join([name, 'awaddr'])]
        awcache = ports['_'.join([name, 'awcache'])]
        awprot = ports['_'.join([name, 'awprot'])]
        awvalid = ports['_'.join([name, 'awvalid'])]
        awready = ports['_'.join([name, 'awready'])]

        awaddr.connect(self.waddr.awaddr)
        awcache.connect(self.waddr.awcache)
        awprot.connect(self.waddr.awprot)
        awvalid.connect(self.waddr.awvalid)
        self.waddr.awready.connect(awready)

        # --- write data (W) ---
        wdata = ports['_'.join([name, 'wdata'])]
        wstrb = ports['_'.join([name, 'wstrb'])]
        wvalid = ports['_'.join([name, 'wvalid'])]
        wready = ports['_'.join([name, 'wready'])]

        wdata.connect(self.wdata.wdata)
        wstrb.connect(self.wdata.wstrb)
        wvalid.connect(self.wdata.wvalid)
        self.wdata.wready.connect(wready)

        # --- write response (B) ---
        bresp = ports['_'.join([name, 'bresp'])]
        bvalid = ports['_'.join([name, 'bvalid'])]
        bready = ports['_'.join([name, 'bready'])]

        self.wresp.bresp.connect(bresp)
        self.wresp.bvalid.connect(bvalid)
        bready.connect(self.wresp.bready)

        # --- read address (AR) ---
        araddr = ports['_'.join([name, 'araddr'])]
        arcache = ports['_'.join([name, 'arcache'])]
        arprot = ports['_'.join([name, 'arprot'])]
        arvalid = ports['_'.join([name, 'arvalid'])]
        arready = ports['_'.join([name, 'arready'])]

        araddr.connect(self.raddr.araddr)
        arcache.connect(self.raddr.arcache)
        arprot.connect(self.raddr.arprot)
        arvalid.connect(self.raddr.arvalid)
        self.raddr.arready.connect(arready)

        # --- read data (R) ---
        rdata = ports['_'.join([name, 'rdata'])]
        rresp = ports['_'.join([name, 'rresp'])]
        rvalid = ports['_'.join([name, 'rvalid'])]
        rready = ports['_'.join([name, 'rready'])]

        self.rdata.rdata.connect(rdata)
        self.rdata.rresp.connect(rresp)
        self.rdata.rvalid.connect(rvalid)
        rready.connect(self.rdata.rready)
class AxiSlave(object):
burst_size_width = 8
def __init__(self, m, name, clk, rst, datawidth=32, addrwidth=32,
waddr_id_width=0, wdata_id_width=0, wresp_id_width=0,
raddr_id_width=0, rdata_id_width=0,
waddr_user_width=2, wdata_user_width=0, wresp_user_width=0,
raddr_user_width=2, rdata_user_width=0,
wresp_user_mode=xUSER_DEFAULT,
rdata_user_mode=xUSER_DEFAULT,
noio=False, nodataflow=False):
self.m = m
self.name = name
self.clk = clk
self.rst = rst
self.datawidth = datawidth
self.addrwidth = addrwidth
self.noio = noio
if not hasattr(self.m, 'slavebus'):
self.m.slavebus = []
self.m.slavebus.append(self)
itype = util.t_Wire if noio else None
otype = util.t_Wire if noio else None
self.waddr = AxiSlaveWriteAddress(m, name, datawidth, addrwidth,
waddr_id_width, waddr_user_width, itype, otype)
self.wdata = AxiSlaveWriteData(m, name, datawidth, addrwidth,
wdata_id_width, wdata_user_width, itype, otype)
self.wresp = AxiSlaveWriteResponse(m, name, datawidth, addrwidth,
wresp_id_width, wresp_user_width, itype, otype)
self.raddr = AxiSlaveReadAddress(m, name, datawidth, addrwidth,
raddr_id_width, raddr_user_width, itype, otype)
itype = util.t_Reg if noio else None
self.rdata = AxiSlaveReadData(m, name, datawidth, addrwidth,
rdata_id_width, rdata_user_width, itype, otype)
self.seq = Seq(m, name, clk, rst)
# default values
self.wresp.bresp.assign(0)
if self.wresp.buser is not None:
self.wresp.buser.assign(wresp_user_mode)
self.rdata.rresp.assign(0)
if self.rdata.ruser is not None:
self.rdata.ruser.assign(rdata_user_mode)
# write response
if self.wresp.bid is not None:
self.seq.If(self.waddr.awvalid, self.waddr.awready, vtypes.Not(self.wresp.bvalid))(
self.wresp.bid(self.waddr.awid if self.waddr.awid is not None else 0)
)
if self.rdata.rid is not None:
self.seq.If(self.raddr.arvalid, self.raddr.arready)(
self.rdata.rid(self.raddr.arid if self.raddr.arid is not None else 0)
)
self.seq.If(self.wresp.bvalid, self.wresp.bready)(
self.wresp.bvalid(0)
)
self.seq.If(self.wdata.wvalid, self.wdata.wready, self.wdata.wlast)(
self.wresp.bvalid(1)
)
self.write_counters = []
self.read_counters = []
if nodataflow:
self.df = None
else:
self.df = DataflowManager(self.m, self.clk, self.rst)
self._write_disabled = False
self._read_disabled = False
def disable_write(self):
self.waddr.awready.assign(0)
self.wdata.wready.assign(0)
self._write_disabled = True
def disable_read(self):
self.raddr.arready.assign(0)
ports = [self.rdata.rvalid(0),
self.rdata.rlast(0)]
self.seq(
*ports
)
self._read_disabled = True
def pull_request_counter(self, cond, counter=None):
    """
    Accept either a write or a read request (write has priority) and
    latch its address and burst length into temporary registers.

    @return addr, counter, readvalid, writevalid
    """
    if counter is not None and not isinstance(counter, vtypes.Reg):
        raise TypeError("counter must be Reg or None.")
    if counter is None:
        # burst length counter: awlen/arlen + 1 beats
        counter = self.m.TmpReg(self.burst_size_width + 1, initval=0, prefix='counter')
    ready = make_condition(cond)
    # write request handshake; held off while a write response is still
    # outstanding (bvalid high)
    write_ack = vtypes.Ands(self.waddr.awready, self.waddr.awvalid,
                            vtypes.Not(self.wresp.bvalid))
    read_ack = vtypes.Ands(self.raddr.arready, self.raddr.arvalid)
    addr = self.m.TmpReg(self.addrwidth, initval=0, prefix='addr')
    writevalid = self.m.TmpReg(initval=0, prefix='writevalid')
    readvalid = self.m.TmpReg(initval=0, prefix='readvalid')
    # register the valid signals so that ready is asserted only one cycle
    # after valid was observed high (breaks the combinational ready/valid loop)
    prev_awvalid = self.m.TmpReg(initval=0, prefix='prev_awvalid')
    self.seq(
        prev_awvalid(self.waddr.awvalid)
    )
    prev_arvalid = self.m.TmpReg(initval=0, prefix='prev_arvalid')
    self.seq(
        prev_arvalid(self.raddr.arvalid)
    )
    # awready condition: no request already accepted, no pending response
    writeval = vtypes.Ands(vtypes.Not(writevalid), vtypes.Not(readvalid),
                           vtypes.Not(self.wresp.bvalid),
                           prev_awvalid)
    if ready is not None:
        writeval = vtypes.Ands(ready, writeval)
    # arready condition: write side wins when both are pending
    readval = vtypes.Ands(vtypes.Not(readvalid), vtypes.Not(writevalid),
                          prev_arvalid, vtypes.Not(prev_awvalid))
    if ready is not None:
        readval = vtypes.Ands(ready, readval)
    _connect_ready(self.waddr.awready._get_module(),
                   self.waddr.awready, writeval)
    _connect_ready(self.raddr.arready._get_module(),
                   self.raddr.arready, readval)
    # default: both valid flags deasserted every cycle (one-cycle pulses)
    self.seq(
        writevalid(0),
        readvalid(0)
    )
    self.seq.If(write_ack)(
        addr(self.waddr.awaddr),
        counter(self.waddr.awlen + 1),
        writevalid(1)
    ).Elif(read_ack)(
        addr(self.raddr.araddr),
        counter(self.raddr.arlen + 1),
        readvalid(1)
    )
    return addr, counter, readvalid, writevalid
def pull_write_request_counter(self, cond=None, counter=None):
    """
    Accept a write request and latch its address and burst length.

    @return addr, counter, valid
    """
    if self._write_disabled:
        raise TypeError('Write disabled.')
    if counter is not None and not isinstance(counter, vtypes.Reg):
        raise TypeError("counter must be Reg or None.")
    if counter is None:
        counter = self.m.TmpReg(self.burst_size_width + 1, initval=0,
                                prefix='counter')
    # remember the counter so pull_write_data can default to it
    self.write_counters.append(counter)
    ready = make_condition(cond)
    # not accepted while a write response is still outstanding
    ack = vtypes.Ands(self.waddr.awready, self.waddr.awvalid,
                      vtypes.Not(self.wresp.bvalid))
    addr = self.m.TmpReg(self.addrwidth, initval=0, prefix='addr')
    valid = self.m.TmpReg(initval=0, prefix='valid')
    # registered awvalid: ready follows valid by one cycle
    prev_awvalid = self.m.TmpReg(initval=0, prefix='prev_awvalid')
    self.seq(
        prev_awvalid(self.waddr.awvalid)
    )
    val = (vtypes.Ands(vtypes.Not(valid),
                       vtypes.Not(self.wresp.bvalid),
                       prev_awvalid) if ready is None else
           vtypes.Ands(ready, vtypes.Not(valid),
                       vtypes.Not(self.wresp.bvalid),
                       prev_awvalid))
    _connect_ready(self.waddr.awready._get_module(),
                   self.waddr.awready, val)
    self.seq.If(ack)(
        addr(self.waddr.awaddr),
        counter(self.waddr.awlen + 1)
    )
    # valid is a one-cycle pulse mirroring the handshake
    self.seq(
        valid(ack)
    )
    return addr, counter, valid
def pull_write_data(self, counter=None, cond=None):
    """Receive one write-data beat and decrement the burst counter.

    @return data, mask, valid, last
    """
    if self._write_disabled:
        raise TypeError('Write disabled.')
    if counter is not None and not isinstance(counter, vtypes.Reg):
        raise TypeError("counter must be Reg or None.")
    if counter is None:
        # default to the counter of the most recent write request
        counter = self.write_counters[-1]

    # wready follows the user condition; always ready when no condition given
    ready_cond = make_condition(cond)
    wready_value = ready_cond if ready_cond is not None else 1
    _connect_ready(self.wdata.wready._get_module(),
                   self.wdata.wready, wready_value)

    # a beat is transferred when wready and wvalid are both high
    ack = vtypes.Ands(self.wdata.wready, self.wdata.wvalid)
    self.seq.If(vtypes.Ands(ack, counter > 0))(
        counter.dec()
    )
    return self.wdata.wdata, self.wdata.wstrb, ack, self.wdata.wlast
def pull_write_dataflow(self, counter=None, cond=None):
    """
    Receive write data as dataflow variables (data, strobe mask, last).

    @return data, mask, last, done
    """
    if self._write_disabled:
        raise TypeError('Write disabled.')
    if counter is not None and not isinstance(counter, vtypes.Reg):
        raise TypeError("counter must be Reg or None.")
    if counter is None:
        counter = self.write_counters[-1]
    # per-variable back-pressure wires for the dataflow sinks
    data_ready = self.m.TmpWire(prefix='data_ready')
    mask_ready = self.m.TmpWire(prefix='mask_ready')
    last_ready = self.m.TmpWire(prefix='last_ready')
    data_ready.assign(1)
    mask_ready.assign(1)
    last_ready.assign(1)
    # NOTE(review): mask_ready is not folded into 'cond' below (only
    # data_ready and last_ready are) — confirm this is intentional.
    if cond is None:
        cond = (data_ready, last_ready)
    elif isinstance(cond, (tuple, list)):
        cond = tuple(list(cond) + [data_ready, last_ready])
    else:
        cond = (cond, data_ready, last_ready)
    ready = make_condition(*cond)
    val = 1 if ready is None else ready
    _connect_ready(self.wdata.wready._get_module(), self.wdata.wready, val)
    ack = vtypes.Ands(self.wdata.wready, self.wdata.wvalid)
    data = self.wdata.wdata
    mask = self.wdata.wstrb
    valid = self.wdata.wvalid
    last = self.wdata.wlast
    self.seq.If(vtypes.Ands(ack, counter > 0))(
        counter.dec()
    )
    # wrap the raw signals as dataflow variables
    df_data = self.df.Variable(data, valid, data_ready,
                               width=self.datawidth, signed=False)
    df_mask = self.df.Variable(mask, valid, mask_ready,
                               width=self.datawidth // 4, signed=False)
    df_last = self.df.Variable(last, valid, last_ready,
                               width=1, signed=False)
    # done pulses on the final accepted beat of the burst
    done = vtypes.Ands(last, self.wdata.wvalid, self.wdata.wready)
    return df_data, df_mask, df_last, done
def pull_read_request_counter(self, cond=None, counter=None):
    """
    Accept a read request and latch its address and burst length.

    @return addr, counter, valid
    """
    if self._read_disabled:
        raise TypeError('Read disabled.')
    if counter is not None and not isinstance(counter, vtypes.Reg):
        raise TypeError("counter must be Reg or None.")
    if counter is None:
        counter = self.m.TmpReg(self.burst_size_width + 1, initval=0, prefix='counter')
    # remember the counter so push_read_data can default to it
    self.read_counters.append(counter)
    ready = make_condition(cond)
    ack = vtypes.Ands(self.raddr.arready, self.raddr.arvalid)
    addr = self.m.TmpReg(self.addrwidth, initval=0, prefix='addr')
    valid = self.m.TmpReg(initval=0, prefix='valid')
    # registered arvalid: ready follows valid by one cycle
    prev_arvalid = self.m.TmpReg(initval=0, prefix='prev_arvalid')
    self.seq(
        prev_arvalid(self.raddr.arvalid)
    )
    val = (vtypes.Ands(vtypes.Not(valid), prev_arvalid) if ready is None else
           vtypes.Ands(ready, vtypes.Not(valid), prev_arvalid))
    _connect_ready(self.raddr.arready._get_module(),
                   self.raddr.arready, val)
    self.seq.If(ack)(
        addr(self.raddr.araddr),
        counter(self.raddr.arlen + 1)
    )
    # valid is a one-cycle pulse mirroring the handshake
    self.seq(
        valid(ack)
    )
    return addr, counter, valid
def push_read_data(self, data, counter=None, cond=None):
    """
    Drive one read-data beat onto the R channel; rlast is asserted on
    the final beat of the burst.

    @return ack, valid, last
    """
    if self._read_disabled:
        raise TypeError('Read disabled.')
    if counter is not None and not isinstance(counter, vtypes.Reg):
        raise TypeError("counter must be Reg or None.")
    if counter is None:
        counter = self.read_counters[-1]
    if cond is not None:
        # prepend the user condition to the following seq statements
        self.seq.If(cond)
    # a new beat may be driven when the burst is active and either the
    # master is ready or no beat is currently presented
    ack = vtypes.Ands(counter > 0,
                      vtypes.Ors(self.rdata.rready, vtypes.Not(self.rdata.rvalid)))
    valid = vtypes.Ands(self.rdata.rready, self.rdata.rvalid)
    last = self.rdata.rlast
    self.seq.If(vtypes.Ands(ack, counter > 0))(
        self.rdata.rdata(data),
        self.rdata.rvalid(1),
        self.rdata.rlast(0),
        counter.dec()
    )
    # under the same condition: assert rlast on the final beat
    self.seq.Then().If(counter == 1)(
        self.rdata.rlast(1)
    )
    # de-assert
    self.seq.Delay(1)(
        self.rdata.rvalid(0),
        self.rdata.rlast(0)
    )
    # retry: hold the beat while the master is not ready
    self.seq.If(vtypes.Ands(self.rdata.rvalid, vtypes.Not(self.rdata.rready)))(
        self.rdata.rvalid(self.rdata.rvalid),
        self.rdata.rlast(self.rdata.rlast)
    )
    return ack, valid, last
def push_read_dataflow(self, data, counter=None, cond=None):
    """
    Drive read data from a dataflow variable onto the R channel.

    @return done
    """
    if self._read_disabled:
        raise TypeError('Read disabled.')
    if counter is not None and not isinstance(counter, vtypes.Reg):
        raise TypeError("counter must be Reg or None.")
    if counter is None:
        counter = self.read_counters[-1]
    # a new beat may be driven when the burst is active and either the
    # master is ready or no beat is currently presented
    ack = vtypes.Ands(counter > 0,
                      vtypes.Ors(self.rdata.rready, vtypes.Not(self.rdata.rvalid)))
    if cond is None:
        cond = ack
    else:
        cond = (cond, ack)
    # pop one value from the dataflow source under the ack condition
    raw_data, raw_valid = data.read(cond=cond)
    # write condition
    self.seq.If(raw_valid)
    self.seq.If(vtypes.Ands(ack, counter > 0))(
        self.rdata.rdata(raw_data),
        self.rdata.rvalid(1),
        self.rdata.rlast(0),
        counter.dec()
    )
    # under the same condition: assert rlast on the final beat
    self.seq.Then().If(counter == 1)(
        self.rdata.rlast(1)
    )
    # de-assert
    self.seq.Delay(1)(
        self.rdata.rvalid(0),
        self.rdata.rlast(0)
    )
    # retry: hold the beat while the master is not ready
    self.seq.If(vtypes.Ands(self.rdata.rvalid, vtypes.Not(self.rdata.rready)))(
        self.rdata.rvalid(self.rdata.rvalid),
        self.rdata.rlast(self.rdata.rlast)
    )
    # done pulses on the final accepted beat of the burst
    done = vtypes.Ands(self.rdata.rlast, self.rdata.rvalid, self.rdata.rready)
    return done
def connect(self, ports, name):
    """
    Wire this slave's internal channels to externally supplied ports.

    ports : mapping from '<name>_<signal>' to port objects; optional
            signals that are absent read as None via the defaultdict.
    name  : prefix of the port names on the master side.
    """
    if not self.noio:
        raise ValueError('I/O ports can not be connected to others.')
    # absent optional ports read as None
    ports = defaultdict(lambda: None, ports)

    # --- write address channel ---
    if '_'.join([name, 'awid']) in ports:
        awid = ports['_'.join([name, 'awid'])]
    else:
        awid = None
    awaddr = ports['_'.join([name, 'awaddr'])]
    awlen = ports['_'.join([name, 'awlen'])]
    awsize = ports['_'.join([name, 'awsize'])]
    awburst = ports['_'.join([name, 'awburst'])]
    awlock = ports['_'.join([name, 'awlock'])]
    awcache = ports['_'.join([name, 'awcache'])]
    awprot = ports['_'.join([name, 'awprot'])]
    awqos = ports['_'.join([name, 'awqos'])]
    if '_'.join([name, 'awuser']) in ports:
        awuser = ports['_'.join([name, 'awuser'])]
    else:
        awuser = None
    awvalid = ports['_'.join([name, 'awvalid'])]
    awready = ports['_'.join([name, 'awready'])]

    # missing optional inputs fall back to protocol defaults
    if self.waddr.awid is not None:
        self.waddr.awid.connect(awid if awid is not None else 0)
    self.waddr.awaddr.connect(awaddr)
    self.waddr.awlen.connect(awlen if awlen is not None else 0)
    # default awsize encodes the full bus width in bytes
    self.waddr.awsize.connect(awsize if awsize is not None else
                              int(math.log(self.datawidth // 8)))
    self.waddr.awburst.connect(awburst if awburst is not None else BURST_INCR)
    self.waddr.awlock.connect(awlock if awlock is not None else 0)
    self.waddr.awcache.connect(awcache)
    self.waddr.awprot.connect(awprot)
    self.waddr.awqos.connect(awqos if awqos is not None else 0)
    if self.waddr.awuser is not None:
        self.waddr.awuser.connect(awuser if awuser is not None else 0)
    self.waddr.awvalid.connect(awvalid)
    awready.connect(self.waddr.awready)

    # --- write data channel ---
    wdata = ports['_'.join([name, 'wdata'])]
    wstrb = ports['_'.join([name, 'wstrb'])]
    wlast = ports['_'.join([name, 'wlast'])]
    if '_'.join([name, 'wuser']) in ports:
        wuser = ports['_'.join([name, 'wuser'])]
    else:
        wuser = None
    wvalid = ports['_'.join([name, 'wvalid'])]
    wready = ports['_'.join([name, 'wready'])]
    self.wdata.wdata.connect(wdata)
    self.wdata.wstrb.connect(wstrb)
    self.wdata.wlast.connect(wlast if wlast is not None else 1)
    if self.wdata.wuser is not None:
        self.wdata.wuser.connect(wuser if wuser is not None else 0)
    self.wdata.wvalid.connect(wvalid)
    wready.connect(self.wdata.wready)

    # --- write response channel (slave drives, master receives) ---
    if '_'.join([name, 'bid']) in ports:
        bid = ports['_'.join([name, 'bid'])]
    else:
        bid = None
    bresp = ports['_'.join([name, 'bresp'])]
    if '_'.join([name, 'buser']) in ports:
        buser = ports['_'.join([name, 'buser'])]
    else:
        buser = None
    bvalid = ports['_'.join([name, 'bvalid'])]
    bready = ports['_'.join([name, 'bready'])]
    if bid is not None:
        bid.connect(self.wresp.bid if self.wresp.bid is not None else 0)
    bresp.connect(self.wresp.bresp)
    if buser is not None:
        buser.connect(self.wresp.buser if self.wresp.buser is not None else 0)
    bvalid.connect(self.wresp.bvalid)
    self.wresp.bready.connect(bready)

    # --- read address channel ---
    if '_'.join([name, 'arid']) in ports:
        arid = ports['_'.join([name, 'arid'])]
    else:
        arid = None
    araddr = ports['_'.join([name, 'araddr'])]
    arlen = ports['_'.join([name, 'arlen'])]
    arsize = ports['_'.join([name, 'arsize'])]
    arburst = ports['_'.join([name, 'arburst'])]
    arlock = ports['_'.join([name, 'arlock'])]
    arcache = ports['_'.join([name, 'arcache'])]
    arprot = ports['_'.join([name, 'arprot'])]
    arqos = ports['_'.join([name, 'arqos'])]
    if '_'.join([name, 'aruser']) in ports:
        aruser = ports['_'.join([name, 'aruser'])]
    else:
        aruser = None
    arvalid = ports['_'.join([name, 'arvalid'])]
    arready = ports['_'.join([name, 'arready'])]
    if self.raddr.arid is not None:
        self.raddr.arid.connect(arid if arid is not None else 0)
    self.raddr.araddr.connect(araddr)
    self.raddr.arlen.connect(arlen if arlen is not None else 0)
    self.raddr.arsize.connect(arsize if arsize is not None else
                              int(math.log(self.datawidth // 8)))
    self.raddr.arburst.connect(arburst if arburst is not None else BURST_INCR)
    self.raddr.arlock.connect(arlock if arlock is not None else 0)
    self.raddr.arcache.connect(arcache)
    self.raddr.arprot.connect(arprot)
    self.raddr.arqos.connect(arqos if arqos is not None else 0)
    if self.raddr.aruser is not None:
        self.raddr.aruser.connect(aruser if aruser is not None else 0)
    self.raddr.arvalid.connect(arvalid)
    arready.connect(self.raddr.arready)

    # --- read data channel (slave drives, master receives) ---
    if '_'.join([name, 'rid']) in ports:
        rid = ports['_'.join([name, 'rid'])]
    else:
        rid = None
    rdata = ports['_'.join([name, 'rdata'])]
    rresp = ports['_'.join([name, 'rresp'])]
    rlast = ports['_'.join([name, 'rlast'])]
    if '_'.join([name, 'ruser']) in ports:
        ruser = ports['_'.join([name, 'ruser'])]
    else:
        ruser = None
    rvalid = ports['_'.join([name, 'rvalid'])]
    rready = ports['_'.join([name, 'rready'])]
    if rid is not None:
        rid.connect(self.rdata.rid if self.rdata.rid is not None else 0)
    rdata.connect(self.rdata.rdata)
    rresp.connect(self.rdata.rresp)
    if rlast is not None:
        rlast.connect(self.rdata.rlast)
    if ruser is not None:
        ruser.connect(self.rdata.ruser if self.rdata.ruser is not None else 0)
    rvalid.connect(self.rdata.rvalid)
    self.rdata.rready.connect(rready)
class AxiLiteSlave(AxiSlave):
    """AXI4-Lite slave interface.

    Single-beat transfers only: no burst length, no IDs, no user
    signals, and no dataflow support. Dataflow entry points raise
    TypeError.
    """

    def __init__(self, m, name, clk, rst, datawidth=32, addrwidth=32,
                 noio=False, nodataflow=False):
        self.m = m
        self.name = name
        self.clk = clk
        self.rst = rst
        self.datawidth = datawidth
        self.addrwidth = addrwidth
        self.noio = noio

        # register this bus on the module for later lookup
        if not hasattr(self.m, 'slavebus'):
            self.m.slavebus = []
        self.m.slavebus.append(self)

        # with noio, channels become internal Wire/Reg instead of I/O ports
        itype = util.t_Wire if noio else None
        otype = util.t_Wire if noio else None

        self.waddr = AxiLiteSlaveWriteAddress(m, name, datawidth, addrwidth,
                                              itype, otype)
        self.wdata = AxiLiteSlaveWriteData(m, name, datawidth, addrwidth,
                                           itype, otype)
        self.wresp = AxiLiteSlaveWriteResponse(m, name, datawidth, addrwidth,
                                               itype, otype)
        self.raddr = AxiLiteSlaveReadAddress(m, name, datawidth, addrwidth,
                                             itype, otype)

        # read data outputs are registered when internal
        itype = util.t_Reg if noio else None
        self.rdata = AxiLiteSlaveReadData(m, name, datawidth, addrwidth,
                                          itype, otype)

        self.seq = Seq(m, name, clk, rst)

        # default values
        self.wresp.bresp.assign(0)
        self.rdata.rresp.assign(0)

        # write response: assert bvalid after the data beat,
        # deassert once the master has taken it
        self.seq.If(self.wresp.bvalid, self.wresp.bready)(
            self.wresp.bvalid(0)
        )
        self.seq.If(self.wdata.wvalid, self.wdata.wready)(
            self.wresp.bvalid(1)
        )

        if nodataflow:
            self.df = None
        else:
            self.df = DataflowManager(self.m, self.clk, self.rst)

        self._write_disabled = False
        self._read_disabled = False

    def disable_write(self):
        """Permanently tie awready/wready to 0 and block write methods."""
        self.waddr.awready.assign(0)
        self.wdata.wready.assign(0)
        self._write_disabled = True

    def disable_read(self):
        """Permanently tie arready to 0, drive rvalid low, block read methods."""
        self.raddr.arready.assign(0)
        ports = [self.rdata.rvalid(0)]
        self.seq(
            *ports
        )
        self._read_disabled = True

    def pull_request(self, cond):
        """
        Accept either a write or a read request (write has priority).

        @return addr, readvalid, writevalid
        """
        ready = make_condition(cond)
        # write requests are held off while a response is outstanding
        write_ack = vtypes.Ands(self.waddr.awready, self.waddr.awvalid,
                                vtypes.Not(self.wresp.bvalid))
        read_ack = vtypes.Ands(self.raddr.arready, self.raddr.arvalid)
        addr = self.m.TmpReg(self.addrwidth, initval=0, prefix='addr')
        writevalid = self.m.TmpReg(initval=0, prefix='writevalid')
        readvalid = self.m.TmpReg(initval=0, prefix='readvalid')
        # register valids so ready follows valid by one cycle
        prev_awvalid = self.m.TmpReg(initval=0, prefix='prev_awvalid')
        self.seq(
            prev_awvalid(self.waddr.awvalid)
        )
        prev_arvalid = self.m.TmpReg(initval=0, prefix='prev_arvalid')
        self.seq(
            prev_arvalid(self.raddr.arvalid)
        )
        writeval = vtypes.Ands(vtypes.Not(writevalid), vtypes.Not(readvalid),
                               vtypes.Not(self.wresp.bvalid),
                               prev_awvalid)
        if ready is not None:
            writeval = vtypes.Ands(ready, writeval)
        # write side wins when both requests are pending
        readval = vtypes.Ands(vtypes.Not(readvalid), vtypes.Not(writevalid),
                              prev_arvalid, vtypes.Not(prev_awvalid))
        if ready is not None:
            readval = vtypes.Ands(ready, readval)
        _connect_ready(self.waddr.awready._get_module(),
                       self.waddr.awready, writeval)
        _connect_ready(self.raddr.arready._get_module(),
                       self.raddr.arready, readval)
        # valid flags are one-cycle pulses
        self.seq(
            writevalid(0),
            readvalid(0)
        )
        self.seq.If(write_ack)(
            addr(self.waddr.awaddr),
            writevalid(1)
        ).Elif(read_ack)(
            addr(self.raddr.araddr),
            readvalid(1)
        )
        return addr, readvalid, writevalid

    def pull_write_request(self, cond=None):
        """
        Accept a write request and latch its address.

        @return addr, valid
        """
        if self._write_disabled:
            raise TypeError('Write disabled.')
        ready = make_condition(cond)
        ack = vtypes.Ands(self.waddr.awready, self.waddr.awvalid,
                          vtypes.Not(self.wresp.bvalid))
        addr = self.m.TmpReg(self.addrwidth, initval=0, prefix='addr')
        valid = self.m.TmpReg(initval=0, prefix='valid')
        prev_awvalid = self.m.TmpReg(initval=0, prefix='prev_awvalid')
        self.seq(
            prev_awvalid(self.waddr.awvalid)
        )
        val = (vtypes.Ands(vtypes.Not(valid),
                           vtypes.Not(self.wresp.bvalid),
                           prev_awvalid) if ready is None else
               vtypes.Ands(ready, vtypes.Not(valid),
                           vtypes.Not(self.wresp.bvalid),
                           prev_awvalid))
        _connect_ready(self.waddr.awready._get_module(),
                       self.waddr.awready, val)
        self.seq.If(ack)(
            addr(self.waddr.awaddr),
        )
        self.seq(
            valid(ack)
        )
        return addr, valid

    def pull_write_data(self, cond=None):
        """
        Receive one write-data beat.

        @return data, mask, valid
        """
        if self._write_disabled:
            raise TypeError('Write disabled.')
        ready = make_condition(cond)
        val = 1 if ready is None else ready
        _connect_ready(self.wdata.wready._get_module(), self.wdata.wready, val)
        ack = vtypes.Ands(self.wdata.wready, self.wdata.wvalid)
        data = self.wdata.wdata
        mask = self.wdata.wstrb
        valid = ack
        return data, mask, valid

    def pull_write_dataflow(self, counter=None, cond=None):
        """
        Unsupported on AXI4-Lite; always raises TypeError.
        """
        raise TypeError('lite interface supports no dataflow operation.')

    def pull_read_request(self, cond=None):
        """
        Accept a read request and latch its address.

        @return addr, valid
        """
        if self._read_disabled:
            raise TypeError('Read disabled.')
        ready = make_condition(cond)
        ack = vtypes.Ands(self.raddr.arready, self.raddr.arvalid)
        addr = self.m.TmpReg(self.addrwidth, initval=0, prefix='addr')
        valid = self.m.TmpReg(initval=0, prefix='valid')
        prev_arvalid = self.m.TmpReg(initval=0, prefix='prev_arvalid')
        self.seq(
            prev_arvalid(self.raddr.arvalid)
        )
        val = (vtypes.Ands(vtypes.Not(valid), prev_arvalid) if ready is None else
               vtypes.Ands(ready, vtypes.Not(valid), prev_arvalid))
        _connect_ready(self.raddr.arready._get_module(),
                       self.raddr.arready, val)
        self.seq.If(ack)(
            addr(self.raddr.araddr)
        )
        self.seq(
            valid(ack)
        )
        return addr, valid

    def push_read_data(self, data, cond=None):
        """
        Drive one read-data beat onto the R channel.

        @return ack, valid
        """
        if self._read_disabled:
            raise TypeError('Read disabled.')
        if cond is not None:
            # prepend the user condition to the following seq statements
            self.seq.If(cond)
        ack = vtypes.Ors(self.rdata.rready, vtypes.Not(self.rdata.rvalid))
        valid = vtypes.Ands(self.rdata.rready, self.rdata.rvalid)
        self.seq.If(ack)(
            self.rdata.rdata(data),
            self.rdata.rvalid(1)
        )
        # de-assert
        self.seq.Delay(1)(
            self.rdata.rvalid(0)
        )
        # retry: hold the beat while the master is not ready
        self.seq.If(vtypes.Ands(self.rdata.rvalid, vtypes.Not(self.rdata.rready)))(
            self.rdata.rvalid(self.rdata.rvalid)
        )
        return ack, valid

    def push_read_dataflow(self, data, counter=None, cond=None):
        """
        Unsupported on AXI4-Lite; always raises TypeError.
        """
        raise TypeError('lite interface supports no dataflow operation.')

    def connect(self, ports, name):
        """
        Wire this slave's internal channels to externally supplied ports.

        ports : mapping from '<name>_<signal>' to port objects.
        name  : prefix of the port names on the master side.
        """
        if not self.noio:
            raise ValueError('I/O ports can not be connected to others.')

        # write address channel
        awaddr = ports['_'.join([name, 'awaddr'])]
        awcache = ports['_'.join([name, 'awcache'])]
        awprot = ports['_'.join([name, 'awprot'])]
        awvalid = ports['_'.join([name, 'awvalid'])]
        awready = ports['_'.join([name, 'awready'])]
        self.waddr.awaddr.connect(awaddr)
        self.waddr.awcache.connect(awcache)
        self.waddr.awprot.connect(awprot)
        self.waddr.awvalid.connect(awvalid)
        awready.connect(self.waddr.awready)

        # write data channel
        wdata = ports['_'.join([name, 'wdata'])]
        wstrb = ports['_'.join([name, 'wstrb'])]
        wvalid = ports['_'.join([name, 'wvalid'])]
        wready = ports['_'.join([name, 'wready'])]
        self.wdata.wdata.connect(wdata)
        self.wdata.wstrb.connect(wstrb)
        self.wdata.wvalid.connect(wvalid)
        wready.connect(self.wdata.wready)

        # write response channel (slave drives)
        bresp = ports['_'.join([name, 'bresp'])]
        bvalid = ports['_'.join([name, 'bvalid'])]
        bready = ports['_'.join([name, 'bready'])]
        bresp.connect(self.wresp.bresp)
        bvalid.connect(self.wresp.bvalid)
        self.wresp.bready.connect(bready)

        # read address channel
        araddr = ports['_'.join([name, 'araddr'])]
        arcache = ports['_'.join([name, 'arcache'])]
        arprot = ports['_'.join([name, 'arprot'])]
        arvalid = ports['_'.join([name, 'arvalid'])]
        arready = ports['_'.join([name, 'arready'])]
        self.raddr.araddr.connect(araddr)
        self.raddr.arcache.connect(arcache)
        self.raddr.arprot.connect(arprot)
        self.raddr.arvalid.connect(arvalid)
        arready.connect(self.raddr.arready)

        # read data channel (slave drives)
        rdata = ports['_'.join([name, 'rdata'])]
        rresp = ports['_'.join([name, 'rresp'])]
        rvalid = ports['_'.join([name, 'rvalid'])]
        rready = ports['_'.join([name, 'rready'])]
        rdata.connect(self.rdata.rdata)
        rresp.connect(self.rdata.rresp)
        rvalid.connect(self.rdata.rvalid)
        self.rdata.rready.connect(rready)
class AxiStreamIn(object):
    """AXI4-Stream sink: receives tdata (plus optional tlast/tid/tuser/tdest)
    and exposes the beats directly or as dataflow variables."""

    def __init__(self, m, name, clk, rst, datawidth=32,
                 with_last=True, with_strb=False,
                 id_width=0, user_width=0, dest_width=0,
                 noio=False, nodataflow=False):
        self.m = m
        self.name = name
        self.clk = clk
        self.rst = rst
        self.datawidth = datawidth
        self.noio = noio
        # register this bus on the module for later lookup
        if not hasattr(self.m, 'streaminbus'):
            self.m.streaminbus = []
        self.m.streaminbus.append(self)
        # with noio, channels become internal wires instead of I/O ports
        itype = util.t_Wire if noio else None
        otype = util.t_Wire if noio else None
        self.tdata = AxiStreamInData(m, name, datawidth,
                                     with_last, with_strb,
                                     id_width, user_width, dest_width,
                                     itype, otype)
        self.seq = Seq(m, name, clk, rst)
        if nodataflow:
            self.df = None
        else:
            self.df = DataflowManager(self.m, self.clk, self.rst)

    def read_data(self, cond=None):
        """
        Receive one stream beat; tready follows 'cond' (always ready if None).

        @return data, last, _id, user, dest, valid
        """
        ready = make_condition(cond)
        val = 1 if ready is None else ready
        _connect_ready(self.tdata.tready._get_module(), self.tdata.tready, val)
        # a beat is transferred when tready and tvalid are both high
        ack = vtypes.Ands(self.tdata.tready, self.tdata.tvalid)
        data = self.tdata.tdata
        valid = ack
        last = self.tdata.tlast
        _id = self.tdata.tid
        user = self.tdata.tuser
        dest = self.tdata.tdest
        return data, last, _id, user, dest, valid

    def read_dataflow(self, cond=None, point=0, signed=True):
        """
        Receive stream beats as dataflow variables; absent optional
        signals yield None in the returned tuple.

        @return data, last, _id, user, dest, done
        """
        # per-variable back-pressure wires for the dataflow sinks
        data_ready = self.m.TmpWire(prefix='data_ready')
        last_ready = self.m.TmpWire(prefix='last_ready')
        id_ready = self.m.TmpWire(prefix='id_ready')
        user_ready = self.m.TmpWire(prefix='user_ready')
        dest_ready = self.m.TmpWire(prefix='dest_ready')
        data_ready.assign(1)
        id_ready.assign(1)
        last_ready.assign(1)
        user_ready.assign(1)
        dest_ready.assign(1)
        if cond is None:
            cond = (data_ready, last_ready, id_ready, user_ready, dest_ready)
        elif isinstance(cond, (tuple, list)):
            cond = tuple(list(cond) + [data_ready, last_ready, id_ready, user_ready, dest_ready])
        else:
            cond = (cond, data_ready, last_ready, id_ready, user_ready, dest_ready)
        ready = make_condition(*cond)
        val = 1 if ready is None else ready
        _connect_ready(self.tdata.tready._get_module(), self.tdata.tready, val)
        ack = vtypes.Ands(self.tdata.tready, self.tdata.tvalid)
        data = self.tdata.tdata
        valid = self.tdata.tvalid
        _id = self.tdata.tid
        last = self.tdata.tlast
        user = self.tdata.tuser
        dest = self.tdata.tdest
        # fall back to the module-level dataflow manager when nodataflow was set
        df = self.df if self.df is not None else _df
        df_data = df.Variable(data, valid, data_ready,
                              width=self.datawidth, point=point, signed=signed)
        if last is not None:
            df_last = df.Variable(last, valid, last_ready, width=1, signed=False)
            # done pulses on the final accepted beat
            done = vtypes.Ands(last, self.tdata.tvalid, self.tdata.tready)
        else:
            df_last = None
            # without tlast, every accepted beat counts as done
            done = vtypes.Ands(self.tdata.tvalid, self.tdata.tready)
        if _id is not None:
            df_id = df.Variable(_id, valid, id_ready, width=_id.width, signed=False)
        else:
            df_id = None
        if user is not None:
            df_user = df.Variable(user, valid, user_ready, width=user.width, signed=False)
        else:
            df_user = None
        if dest is not None:
            df_dest = df.Variable(dest, valid, dest_ready, width=dest.width, signed=False)
        else:
            df_dest = None
        return df_data, df_last, df_id, df_user, df_dest, done

    def connect(self, ports, name):
        """
        Wire this stream sink to externally supplied ports; absent
        optional signals get protocol defaults (tlast=1, others=0).
        """
        if not self.noio:
            raise ValueError('I/O ports can not be connected to others.')
        tdata = ports['_'.join([name, 'tdata'])]
        tvalid = ports['_'.join([name, 'tvalid'])]
        tready = ports['_'.join([name, 'tready'])]
        if '_'.join([name, 'tlast']) in ports:
            tlast = ports['_'.join([name, 'tlast'])]
        else:
            tlast = None
        if '_'.join([name, 'tid']) in ports:
            tid = ports['_'.join([name, 'tid'])]
        else:
            tid = None
        if '_'.join([name, 'tuser']) in ports:
            tuser = ports['_'.join([name, 'tuser'])]
        else:
            tuser = None
        if '_'.join([name, 'tdest']) in ports:
            tdest = ports['_'.join([name, 'tdest'])]
        else:
            tdest = None
        self.tdata.tdata.connect(tdata)
        self.tdata.tvalid.connect(tvalid)
        tready.connect(self.tdata.tready)
        if self.tdata.tlast is not None:
            self.tdata.tlast.connect(tlast if tlast is not None else 1)
        if self.tdata.tid is not None:
            self.tdata.tid.connect(tid if tid is not None else 0)
        if self.tdata.tuser is not None:
            self.tdata.tuser.connect(tuser if tuser is not None else 0)
        if self.tdata.tdest is not None:
            self.tdata.tdest.connect(tdest if tdest is not None else 0)

    def connect_stream(self, stream):
        """Connect this sink directly to an AxiStreamOut source."""
        if not isinstance(stream, AxiStreamOut):
            raise TypeError('stream must be an instance of AxiStreamOut.')
        if not self.noio:
            raise ValueError('I/O ports can not be connected to others.')
        tdata = stream.tdata.tdata
        tvalid = stream.tdata.tvalid
        tready = stream.tdata.tready
        if stream.tdata.tlast is not None:
            tlast = stream.tdata.tlast
        else:
            tlast = None
        if stream.tdata.tid is not None:
            tid = stream.tdata.tid
        else:
            tid = None
        if stream.tdata.tuser is not None:
            tuser = stream.tdata.tuser
        else:
            tuser = None
        if stream.tdata.tdest is not None:
            tdest = stream.tdata.tdest
        else:
            tdest = None
        self.tdata.tdata.connect(tdata)
        self.tdata.tvalid.connect(tvalid)
        tready.connect(self.tdata.tready)
        if self.tdata.tlast is not None:
            self.tdata.tlast.connect(tlast if tlast is not None else 1)
        if self.tdata.tid is not None:
            self.tdata.tid.connect(tid if tid is not None else 0)
        if self.tdata.tuser is not None:
            self.tdata.tuser.connect(tuser if tuser is not None else 0)
        if self.tdata.tdest is not None:
            self.tdata.tdest.connect(tdest if tdest is not None else 0)

    def connect_master_rdata(self, master):
        """Feed this stream sink from an AxiMaster's read-data channel."""
        if not isinstance(master, AxiMaster):
            raise TypeError('master must be an instance of AxiMaster.')
        if not self.noio:
            raise ValueError('I/O ports can not be connected to others.')
        tdata = master.rdata.rdata
        tvalid = master.rdata.rvalid
        tready = master.rdata.rready
        # NOTE(review): tlast is hard-wired to 0 here, so the connect below
        # drives a constant 0 — confirm master.rdata.rlast is not intended.
        tlast = 0
        if master.rdata.rid is not None:
            tid = master.rdata.rid
        else:
            tid = None
        if master.rdata.ruser is not None:
            tuser = master.rdata.ruser
        else:
            tuser = None
        tdest = None
        self.tdata.tdata.connect(tdata)
        self.tdata.tvalid.connect(tvalid)
        tready.connect(self.tdata.tready)
        if self.tdata.tlast is not None:
            self.tdata.tlast.connect(tlast if tlast is not None else 1)
        if self.tdata.tid is not None:
            self.tdata.tid.connect(tid if tid is not None else 0)
        if self.tdata.tuser is not None:
            self.tdata.tuser.connect(tuser if tuser is not None else 0)
        if self.tdata.tdest is not None:
            self.tdata.tdest.connect(tdest if tdest is not None else 0)
class AxiStreamOut(object):
    """AXI4-Stream source: drives tdata (plus optional tlast/tid/tuser/tdest)
    from direct writes or from dataflow variables."""

    def __init__(self, m, name, clk, rst, datawidth=32,
                 with_last=True, with_strb=False,
                 id_width=0, user_width=0, dest_width=0,
                 noio=False, nodataflow=False):
        self.m = m
        self.name = name
        self.clk = clk
        self.rst = rst
        self.datawidth = datawidth
        self.noio = noio
        # register this bus on the module for later lookup
        if not hasattr(self.m, 'streamoutbus'):
            self.m.streamoutbus = []
        self.m.streamoutbus.append(self)
        # outputs are registered when internal; tready is a wire
        itype = util.t_Reg if noio else None
        otype = util.t_Wire if noio else None
        self.tdata = AxiStreamOutData(m, name, datawidth,
                                      with_last, with_strb,
                                      id_width, user_width, dest_width,
                                      itype, otype)
        self.seq = Seq(m, name, clk, rst)
        # default values for the optional sideband signals
        if self.tdata.tuser is not None:
            self.tdata.tuser.assign(0)
        if self.tdata.tid is not None:
            self.tdata.tid.assign(0)
        if nodataflow:
            self.df = None
        else:
            self.df = DataflowManager(self.m, self.clk, self.rst)

    def write_data(self, data, last=None, _id=None, user=None, dest=None, cond=None):
        """
        Drive one stream beat; optional sideband values are driven only
        when the corresponding signal exists.

        @return ack
        """
        if cond is not None:
            # prepend the user condition to the following seq statements
            self.seq.If(cond)
        # a new beat may be driven when the sink is ready or nothing is presented
        ack = vtypes.Ors(self.tdata.tready, vtypes.Not(self.tdata.tvalid))
        self.seq.If(ack)(
            self.tdata.tdata(data),
            self.tdata.tvalid(1),
            self.tdata.tlast(last) if self.tdata.tlast is not None else (),
            self.tdata.tid(_id) if self.tdata.tid is not None else (),
            self.tdata.tuser(user) if self.tdata.tuser is not None else (),
            self.tdata.tdest(dest) if self.tdata.tdest is not None else (),
        )
        # de-assert
        self.seq.Delay(1)(
            self.tdata.tvalid(0),
            self.tdata.tlast(0) if self.tdata.tlast is not None else ()
        )
        # retry: hold the beat while the sink is not ready
        self.seq.If(vtypes.Ands(self.tdata.tvalid, vtypes.Not(self.tdata.tready)))(
            self.tdata.tvalid(self.tdata.tvalid),
            self.tdata.tlast(self.tdata.tlast) if self.tdata.tlast is not None else ()
        )
        return ack

    def write_dataflow(self, data, last=None, _id=None, user=None, dest=None, cond=None, when=None):
        """
        Drive stream beats from dataflow variables.

        @return ack
        'data', 'last', '_id', 'user', 'dest', and 'when' must be dataflow variables
        """
        ack = vtypes.Ors(self.tdata.tready, vtypes.Not(self.tdata.tvalid))
        if cond is None:
            cond = ack
        else:
            cond = (cond, ack)
        # collect the supplied dataflow variables; index 0 means "not supplied"
        args = [data]
        last_index = 0
        id_index = 0
        user_index = 0
        dest_index = 0
        when_index = 0
        if last is not None:
            args.append(last)
            last_index = len(args) - 1
        if _id is not None:
            args.append(_id)
            id_index = len(args) - 1
        if user is not None:
            args.append(user)
            user_index = len(args) - 1
        if dest is not None:
            args.append(dest)
            dest_index = len(args) - 1
        if when is not None:
            args.append(when)
            when_index = len(args) - 1
        # pop one value from each variable in lockstep
        data_list, raw_valid = read_multi(self.m, *args, cond=cond)
        raw_data = data_list[0]
        raw_last = data_list[last_index] if last_index > 0 else None
        raw_id = data_list[id_index] if id_index > 0 else None
        raw_user = data_list[user_index] if user_index > 0 else None
        raw_dest = data_list[dest_index] if dest_index > 0 else None
        raw_when = data_list[when_index] if when_index > 0 else None
        # 'when' gates which popped values are actually emitted
        when_cond = make_condition(raw_when, ready=cond)
        if when_cond is not None:
            raw_valid = vtypes.Ands(when_cond, raw_valid)
        # write condition
        self.seq.If(raw_valid)
        self.seq.If(ack)(
            self.tdata.tdata(raw_data),
            self.tdata.tvalid(1),
            self.tdata.tlast(raw_last) if self.tdata.tlast is not None else (),
            self.tdata.tid(raw_id) if self.tdata.tid is not None else (),
            self.tdata.tuser(raw_user) if self.tdata.tuser is not None else (),
            self.tdata.tdest(raw_dest) if self.tdata.tdest is not None else (),
        )
        # de-assert
        # bug fix: guard tlast like write_data and the retry block below do;
        # calling self.tdata.tlast(0) raised TypeError when with_last=False
        self.seq.Delay(1)(
            self.tdata.tvalid(0),
            self.tdata.tlast(0) if self.tdata.tlast is not None else ()
        )
        # retry: hold the beat while the sink is not ready
        self.seq.If(vtypes.Ands(self.tdata.tvalid, vtypes.Not(self.tdata.tready)))(
            self.tdata.tvalid(self.tdata.tvalid),
            self.tdata.tlast(self.tdata.tlast) if self.tdata.tlast is not None else ()
        )
        # return the actual transfer handshake
        ack = vtypes.Ands(self.tdata.tvalid, self.tdata.tready)
        return ack

    def connect(self, ports, name):
        """
        Wire this stream source to externally supplied ports; absent
        internal signals drive defaults (tlast=1, others=0).
        """
        if not self.noio:
            raise ValueError('I/O ports can not be connected to others.')
        tdata = ports['_'.join([name, 'tdata'])]
        tvalid = ports['_'.join([name, 'tvalid'])]
        tready = ports['_'.join([name, 'tready'])]
        if '_'.join([name, 'tlast']) in ports:
            tlast = ports['_'.join([name, 'tlast'])]
        else:
            tlast = None
        if '_'.join([name, 'tid']) in ports:
            tid = ports['_'.join([name, 'tid'])]
        else:
            tid = None
        if '_'.join([name, 'tuser']) in ports:
            tuser = ports['_'.join([name, 'tuser'])]
        else:
            tuser = None
        if '_'.join([name, 'tdest']) in ports:
            tdest = ports['_'.join([name, 'tdest'])]
        else:
            tdest = None
        tdata.connect(self.tdata.tdata)
        tvalid.connect(self.tdata.tvalid)
        self.tdata.tready.connect(tready)
        if tlast is not None:
            tlast.connect(self.tdata.tlast if self.tdata.tlast is not None else 1)
        if tuser is not None:
            tuser.connect(self.tdata.tuser if self.tdata.tuser is not None else 0)
        if tid is not None:
            tid.connect(self.tdata.tid if self.tdata.tid is not None else 0)
        if tdest is not None:
            tdest.connect(self.tdata.tdest if self.tdata.tdest is not None else 0)

    def connect_stream(self, stream):
        """Connect this source directly to an AxiStreamIn sink."""
        if not isinstance(stream, AxiStreamIn):
            raise TypeError('stream must be an instance of AxiStreamIn.')
        if not self.noio:
            raise ValueError('I/O ports can not be connected to others.')
        tdata = stream.tdata.tdata
        tvalid = stream.tdata.tvalid
        tready = stream.tdata.tready
        if stream.tdata.tlast is not None:
            tlast = stream.tdata.tlast
        else:
            tlast = None
        if stream.tdata.tid is not None:
            tid = stream.tdata.tid
        else:
            tid = None
        if stream.tdata.tuser is not None:
            tuser = stream.tdata.tuser
        else:
            tuser = None
        if stream.tdata.tdest is not None:
            tdest = stream.tdata.tdest
        else:
            tdest = None
        tdata.connect(self.tdata.tdata)
        tvalid.connect(self.tdata.tvalid)
        self.tdata.tready.connect(tready)
        if tlast is not None:
            tlast.connect(self.tdata.tlast if self.tdata.tlast is not None else 1)
        if tuser is not None:
            tuser.connect(self.tdata.tuser if self.tdata.tuser is not None else 0)
        if tid is not None:
            tid.connect(self.tdata.tid if self.tdata.tid is not None else 0)
        if tdest is not None:
            tdest.connect(self.tdata.tdest if self.tdata.tdest is not None else 0)
class AxiMemoryModel(AxiSlave):
__intrinsics__ = ('read', 'write',
'read_word', 'write_word')
def __init__(self, m, name, clk, rst, datawidth=32, addrwidth=32,
mem_datawidth=32, mem_addrwidth=20,
memimg=None, memimg_name=None,
memimg_datawidth=None,
write_delay=10, read_delay=10, sleep=4, sub_sleep=4,
waddr_id_width=0, wdata_id_width=0, wresp_id_width=0,
raddr_id_width=0, rdata_id_width=0,
waddr_user_width=2, wdata_user_width=0, wresp_user_width=0,
raddr_user_width=2, rdata_user_width=0,
wresp_user_mode=xUSER_DEFAULT,
rdata_user_mode=xUSER_DEFAULT):
if mem_datawidth % 8 != 0:
raise ValueError('mem_datawidth must be a multiple of 8')
self.m = m
self.name = name
self.clk = clk
self.rst = rst
self.datawidth = datawidth
self.addrwidth = addrwidth
self.noio = True
self.mem_datawidth = mem_datawidth
self.mem_addrwidth = mem_addrwidth
itype = util.t_Reg
otype = util.t_Wire
self.waddr = AxiSlaveWriteAddress(m, name, datawidth, addrwidth,
waddr_id_width, waddr_user_width, itype, otype)
self.wdata = AxiSlaveWriteData(m, name, datawidth, addrwidth,
wdata_id_width, wdata_user_width, itype, otype)
self.wresp = AxiSlaveWriteResponse(m, name, datawidth, addrwidth,
wresp_id_width, wresp_user_width, itype, otype)
self.raddr = AxiSlaveReadAddress(m, name, datawidth, addrwidth,
raddr_id_width, raddr_user_width, itype, otype)
self.rdata = AxiSlaveReadData(m, name, datawidth, addrwidth,
rdata_id_width, rdata_user_width, itype, otype)
# default values
self.wresp.bresp.assign(0)
if self.wresp.buser is not None:
self.wresp.buser.assign(wresp_user_mode)
self.rdata.rresp.assign(0)
if self.rdata.ruser is not None:
self.rdata.ruser.assign(rdata_user_mode)
self.fsm = FSM(self.m, '_'.join(['', self.name, 'fsm']), clk, rst)
self.seq = self.fsm.seq
# write response
if self.wresp.bid is not None:
self.seq.If(self.waddr.awvalid, self.waddr.awready,
vtypes.Not(self.wresp.bvalid))(
self.wresp.bid(self.waddr.awid if self.waddr.awid is not None else 0)
)
if self.rdata.rid is not None:
self.seq.If(self.raddr.arvalid, self.raddr.arready)(
self.rdata.rid(self.raddr.arid if self.raddr.arid is not None else 0)
)
self.seq.If(self.wresp.bvalid, self.wresp.bready)(
self.wresp.bvalid(0)
)
self.seq.If(self.wdata.wvalid, self.wdata.wready, self.wdata.wlast)(
self.wresp.bvalid(1)
)
if memimg is None:
if memimg_name is None:
memimg_name = '_'.join(['', self.name, 'memimg', '.out'])
size = 2 ** self.mem_addrwidth
width = self.mem_datawidth
self._make_img(memimg_name, size, width)
elif isinstance(memimg, str):
memimg_name = memimg
num_words = sum(1 for line in open(memimg, 'r'))
# resize mem_addrwidth according to the memimg size
self.mem_addrwidth = max(self.mem_addrwidth,
int(math.ceil(math.log(num_words, 2))))
else:
if memimg_datawidth is None:
memimg_datawidth = mem_datawidth
if memimg_name is None:
memimg_name = '_'.join(['', self.name, 'memimg', '.out'])
num_words = to_memory_image(memimg_name, memimg, datawidth=memimg_datawidth)
# resize mem_addrwidth according to the memimg size
self.mem_addrwidth = max(self.mem_addrwidth,
int(math.ceil(math.log(num_words, 2))))
self.mem = self.m.Reg(
'_'.join(['', self.name, 'mem']), 8, vtypes.Int(2) ** self.mem_addrwidth)
self.m.Initial(
vtypes.Systask('readmemh', memimg_name, self.mem)
)
self._make_fsm(write_delay, read_delay, sleep, sub_sleep)
@staticmethod
def _make_img(filename, size, width, blksize=4096):
import numpy as np
wordsize = width // 8
zero = np.zeros([size // wordsize, wordsize], dtype=np.int64)
base = np.arange(size // wordsize, dtype=np.int64).reshape([-1, 1])
shamt = np.arange(wordsize, dtype=np.int64) * [8]
mask = np.full([1], 2 ** 8 - 1, dtype=np.int64)
data = (((zero + base) >> shamt) & mask).reshape([-1])
fmt = '%02x\n'
with open(filename, 'w') as f:
for i in range(0, len(data), blksize):
blk = data[i:i + blksize]
s = ''.join([fmt % d for d in blk])
f.write(s)
def _make_fsm(self, write_delay=10, read_delay=10, sleep=4, sub_sleep=4):
write_mode = 100
read_mode = 200
while read_mode <= write_mode + write_delay + 10:
read_mode += 100
self.fsm.If(self.waddr.awvalid).goto(write_mode)
self.fsm.If(self.raddr.arvalid).goto(read_mode)
write_count = self.m.Reg(
'_'.join(['', 'write_count']), self.addrwidth + 1, initval=0)
write_addr = self.m.Reg(
'_'.join(['', 'write_addr']), self.addrwidth, initval=0)
read_count = self.m.Reg(
'_'.join(['', 'read_count']), self.addrwidth + 1, initval=0)
read_addr = self.m.Reg(
'_'.join(['', 'read_addr']), self.addrwidth, initval=0)
if sleep > 0:
sleep_count = self.m.Reg(
'_'.join(['', 'sleep_count']), self.addrwidth + 1, initval=0)
if sub_sleep > 0:
sub_sleep_count = self.m.Reg(
'_'.join(['', 'sub_sleep_count']), self.addrwidth + 1, initval=0)
self.seq.If(sleep_count == sleep - 1)(
sub_sleep_count.inc()
)
self.seq.If(sleep_count == sleep - 1,
sub_sleep_count == sub_sleep - 1)(
sub_sleep_count(0)
)
cond = sub_sleep_count == sub_sleep - 1
else:
cond = None
self.seq.If(sleep_count < sleep - 1)(
sleep_count.inc()
)
self.seq.If(cond, sleep_count == sleep - 1)(
sleep_count(0)
)
# write mode
self.fsm._set_index(write_mode)
# awvalid and awready
self.fsm.If(self.waddr.awvalid, vtypes.Not(self.wresp.bvalid))(
self.waddr.awready(1),
write_addr(self.waddr.awaddr),
write_count(self.waddr.awlen + 1)
)
self.fsm.Delay(1)(
self.waddr.awready(0)
)
self.fsm.If(vtypes.Not(self.waddr.awvalid)).goto_init()
self.fsm.If(self.waddr.awvalid).goto_next()
# delay
for _ in range(write_delay):
self.fsm.goto_next()
# wready
self.fsm(
self.wdata.wready(1)
)
self.fsm.goto_next()
# wdata -> mem
for i in range(int(self.datawidth / 8)):
self.fsm.If(self.wdata.wvalid, self.wdata.wstrb[i])(
self.mem[write_addr + i](self.wdata.wdata[i * 8:i * 8 + 8])
)
self.fsm.If(self.wdata.wvalid, self.wdata.wready)(
write_addr.add(int(self.datawidth / 8)),
write_count.dec()
)
# sleep
if sleep > 0:
self.fsm.If(sleep_count == sleep - 1)(
self.wdata.wready(0)
).Else(
self.wdata.wready(1)
)
# write complete
self.fsm.If(self.wdata.wvalid, self.wdata.wready, write_count == 1)(
self.wdata.wready(0)
)
self.fsm.Then().goto_init()
# read mode
self.fsm._set_index(read_mode)
# arvalid and arready
self.fsm.If(self.raddr.arvalid)(
self.raddr.arready(1),
read_addr(self.raddr.araddr),
read_count(self.raddr.arlen + 1)
)
self.fsm.Delay(1)(
self.raddr.arready(0)
)
self.fsm.If(vtypes.Not(self.raddr.arvalid)).goto_init()
self.fsm.If(self.raddr.arvalid).goto_next()
# delay
for _ in range(read_delay):
self.fsm.goto_next()
# mem -> rdata
for i in range(int(self.datawidth / 8)):
self.fsm.If(vtypes.Or(self.rdata.rready, vtypes.Not(self.rdata.rvalid)))(
self.rdata.rdata[i * 8:i * 8 + 8](self.mem[read_addr + i])
)
if sleep > 0:
self.fsm.If(sleep_count < sleep - 1, read_count > 0,
vtypes.Or(self.rdata.rready, vtypes.Not(self.rdata.rvalid)))(
self.rdata.rvalid(1),
read_addr.add(int(self.datawidth / 8)),
read_count.dec()
)
self.fsm.If(sleep_count < sleep - 1, read_count == 1,
vtypes.Or(self.rdata.rready, vtypes.Not(self.rdata.rvalid)))(
self.rdata.rlast(1)
)
else:
self.fsm.If(read_count > 0,
vtypes.Or(self.rdata.rready, vtypes.Not(self.rdata.rvalid)))(
self.rdata.rvalid(1),
read_addr.add(int(self.datawidth / 8)),
read_count.dec()
)
self.fsm.If(read_count == 1,
vtypes.Or(self.rdata.rready, vtypes.Not(self.rdata.rvalid)))(
self.rdata.rlast(1)
)
# de-assert
self.fsm.Delay(1)(
self.rdata.rvalid(0),
self.rdata.rlast(0)
)
# retry
self.fsm.If(self.rdata.rvalid, vtypes.Not(self.rdata.rready))(
self.rdata.rvalid(self.rdata.rvalid),
self.rdata.rdata(self.rdata.rdata),
self.rdata.rlast(self.rdata.rlast)
)
# read complete
self.fsm.If(self.rdata.rvalid, self.rdata.rready,
read_count == 0).goto_init()
def read(self, fsm, addr):
""" intrinsic for thread """
cond = fsm.state == fsm.current
rdata = self.m.TmpReg(self.mem_datawidth, initval=0, signed=True, prefix='rdata')
num_bytes = self.mem_datawidth // 8
fsm.If(cond)(
rdata(vtypes.Cat(*reversed([self.mem[addr + i]
for i in range(num_bytes)])))
)
fsm.goto_next()
return rdata
def write(self, fsm, addr, wdata):
""" intrinsic for thread """
cond = fsm.state == fsm.current
num_bytes = self.mem_datawidth // 8
wdata_wire = self.m.TmpWire(self.mem_datawidth, prefix='wdata_wire')
wdata_wire.assign(wdata)
for i in range(num_bytes):
self.seq.If(cond)(
self.mem[addr + i](wdata_wire[i * 8:i * 8 + 8])
)
fsm.goto_next()
return 0
def read_word(self, fsm, word_index, byte_offset, bits=8):
""" intrinsic method word-indexed read """
cond = fsm.state == fsm.current
rdata = self.m.TmpReg(bits, initval=0, signed=True, prefix='rdata')
num_bytes = int(math.ceil(bits / 8))
addr = vtypes.Add(byte_offset,
vtypes.Div(vtypes.Mul(word_index, bits), 8))
shift = word_index * bits % 8
raw_data = vtypes.Cat(*reversed([self.mem[addr + i]
for i in range(num_bytes)]))
fsm.If(cond)(
rdata(raw_data >> shift)
)
fsm.goto_next()
return rdata
def write_word(self, fsm, word_index, byte_offset, wdata, bits=8):
""" intrinsic method word-indexed write """
cond = fsm.state == fsm.current
rdata = self.m.TmpReg(bits, initval=0, signed=True, prefix='rdata')
num_bytes = int(math.ceil(bits / 8))
addr = vtypes.Add(byte_offset,
vtypes.Div(vtypes.Mul(word_index, bits), 8))
shift = word_index * bits % 8
wdata_wire = self.m.TmpWire(bits, prefix='wdata_wire')
wdata_wire.assign(wdata)
mem_data = vtypes.Cat(*reversed([self.mem[addr + i]
for i in range(num_bytes)]))
mem_data_wire = self.m.TmpWire(8 * num_bytes, prefix='mem_data_wire')
mem_data_wire.assign(mem_data)
inv_mask = self.m.TmpWire(8 * num_bytes, prefix='inv_mask')
inv_mask.assign(vtypes.Repeat(vtypes.Int(1, 1), bits) << shift)
mask = self.m.TmpWire(8 * num_bytes, prefix='mask')
mask.assign(vtypes.Unot(inv_mask))
raw_data = vtypes.Or(wdata_wire << shift,
vtypes.And(mem_data_wire, mask))
raw_data_wire = self.m.TmpWire(8 * num_bytes, prefix='raw_data_wire')
raw_data_wire.assign(raw_data)
for i in range(num_bytes):
self.seq.If(cond)(
self.mem[addr + i](raw_data_wire[i * 8:i * 8 + 8])
)
fsm.goto_next()
return 0
class AxiMultiportMemoryModel(AxiMemoryModel):
__intrinsics__ = ('read', 'write',
'read_word', 'write_word')
def __init__(self, m, name, clk, rst, datawidth=32, addrwidth=32, numports=2,
mem_datawidth=32, mem_addrwidth=20,
memimg=None, memimg_name=None,
memimg_datawidth=None,
write_delay=10, read_delay=10, sleep=4, sub_sleep=4,
waddr_id_width=0, wdata_id_width=0, wresp_id_width=0,
raddr_id_width=0, rdata_id_width=0,
waddr_user_width=2, wdata_user_width=0, wresp_user_width=0,
raddr_user_width=2, rdata_user_width=0,
wresp_user_mode=xUSER_DEFAULT,
rdata_user_mode=xUSER_DEFAULT):
if mem_datawidth % 8 != 0:
raise ValueError('mem_datawidth must be a multiple of 8')
self.m = m
self.name = name
self.clk = clk
self.rst = rst
self.datawidth = datawidth
self.addrwidth = addrwidth
self.numports = numports
self.noio = True
self.mem_datawidth = mem_datawidth
self.mem_addrwidth = mem_addrwidth
itype = util.t_Reg
otype = util.t_Wire
self.waddrs = [AxiSlaveWriteAddress(m, name + '_%d' % i, datawidth, addrwidth,
waddr_id_width, waddr_user_width, itype, otype)
for i in range(numports)]
self.wdatas = [AxiSlaveWriteData(m, name + '_%d' % i, datawidth, addrwidth,
wdata_id_width, wdata_user_width, itype, otype)
for i in range(numports)]
self.wresps = [AxiSlaveWriteResponse(m, name + '%d' % i, datawidth, addrwidth,
wresp_id_width, wresp_user_width, itype, otype)
for i in range(numports)]
self.raddrs = [AxiSlaveReadAddress(m, name + '_%d' % i, datawidth, addrwidth,
raddr_id_width, raddr_user_width, itype, otype)
for i in range(numports)]
self.rdatas = [AxiSlaveReadData(m, name + '_%d' % i, datawidth, addrwidth,
rdata_id_width, rdata_user_width, itype, otype)
for i in range(numports)]
# default values
for wresp in self.wresps:
wresp.bresp.assign(0)
if wresp.buser is not None:
wresp.buser.assign(wresp_user_mode)
for rdata in self.rdatas:
rdata.rresp.assign(0)
if rdata.ruser is not None:
rdata.ruser.assign(rdata_user_mode)
self.seq = Seq(self.m, '_'.join(['', self.name, 'seq']), clk, rst)
self.fsms = [FSM(self.m, '_'.join(['', self.name, 'fsm_%d' % i]), clk, rst)
for i in range(numports)]
# all FSM shares an indentical Seq
for fsm in self.fsms:
fsm.seq = self.seq
# write response
for wresp, waddr in zip(self.wresps, self.waddrs):
if wresp.bid is not None:
self.seq.If(waddr.awvalid, waddr.awready,
vtypes.Not(wresp.bvalid))(
wresp.bid(waddr.awid if waddr.awid is not None else 0)
)
for rdata, raddr in zip(self.rdatas, self.raddrs):
if rdata.rid is not None:
self.seq.If(raddr.arvalid, raddr.arready)(
rdata.rid(raddr.arid if raddr.arid is not None else 0)
)
for wresp, wdata in zip(self.wresps, self.wdatas):
self.seq.If(wresp.bvalid, wresp.bready)(
wresp.bvalid(0)
)
self.seq.If(wdata.wvalid, wdata.wready, wdata.wlast)(
wresp.bvalid(1)
)
if memimg is None:
if memimg_name is None:
memimg_name = '_'.join(['', self.name, 'memimg', '.out'])
size = 2 ** self.mem_addrwidth
width = self.mem_datawidth
self._make_img(memimg_name, size, width)
elif isinstance(memimg, str):
memimg_name = memimg
num_words = sum(1 for line in open(memimg, 'r'))
# resize mem_addrwidth according to the memimg size
self.mem_addrwidth = max(self.mem_addrwidth,
int(math.ceil(math.log(num_words, 2))))
else:
if memimg_datawidth is None:
memimg_datawidth = mem_datawidth
if memimg_name is None:
memimg_name = '_'.join(['', self.name, 'memimg', '.out'])
num_words = to_memory_image(memimg_name, memimg, datawidth=memimg_datawidth)
# resize mem_addrwidth according to the memimg size
self.mem_addrwidth = max(self.mem_addrwidth,
int(math.ceil(math.log(num_words, 2))))
self.mem = self.m.Reg(
'_'.join(['', self.name, 'mem']), 8, vtypes.Int(2) ** self.mem_addrwidth)
self.m.Initial(
vtypes.Systask('readmemh', memimg_name, self.mem)
)
self._make_fsms(write_delay, read_delay, sleep, sub_sleep)
def _make_fsms(self, write_delay=10, read_delay=10, sleep=4, sub_sleep=4):
for i, (fsm, waddr, wdata, wresp, raddr, rdata) in enumerate(
zip(self.fsms, self.waddrs, self.wdatas, self.wresps, self.raddrs, self.rdatas)):
write_count = self.m.Reg(
'_'.join(['', 'write_count_%d' % i]), self.addrwidth + 1, initval=0)
write_addr = self.m.Reg(
'_'.join(['', 'write_addr_%d' % i]), self.addrwidth, initval=0)
read_count = self.m.Reg(
'_'.join(['', 'read_count_%d' % i]), self.addrwidth + 1, initval=0)
read_addr = self.m.Reg(
'_'.join(['', 'read_addr_%d' % i]), self.addrwidth, initval=0)
if sleep > 0:
sleep_count = self.m.Reg(
'_'.join(['', 'sleep_count_%d' % i]), self.addrwidth + 1, initval=0)
if sub_sleep > 0:
sub_sleep_count = self.m.Reg(
'_'.join(['', 'sub_sleep_count_%d' % i]), self.addrwidth + 1, initval=0)
fsm.seq.If(sleep_count == sleep - 1)(
sub_sleep_count.inc()
)
fsm.seq.If(sleep_count == sleep - 1,
sub_sleep_count == sub_sleep - 1)(
sub_sleep_count(0)
)
cond = sub_sleep_count == sub_sleep - 1
else:
cond = None
fsm.seq.If(sleep_count < sleep - 1)(
sleep_count.inc()
)
fsm.seq.If(cond, sleep_count == sleep - 1)(
sleep_count(0)
)
write_mode = 100
read_mode = 200
while read_mode <= write_mode + write_delay + 10:
read_mode += 100
fsm.If(waddr.awvalid).goto(write_mode)
fsm.If(raddr.arvalid).goto(read_mode)
# write mode
fsm._set_index(write_mode)
# awvalid and awready
fsm.If(waddr.awvalid, vtypes.Not(wresp.bvalid))(
waddr.awready(1),
write_addr(waddr.awaddr),
write_count(waddr.awlen + 1)
)
fsm.Delay(1)(
waddr.awready(0)
)
fsm.If(vtypes.Not(waddr.awvalid)).goto_init()
fsm.If(waddr.awvalid).goto_next()
# delay
for _ in range(write_delay):
fsm.goto_next()
# wready
fsm(
wdata.wready(1)
)
fsm.goto_next()
# wdata -> mem
for i in range(int(self.datawidth / 8)):
fsm.If(wdata.wvalid, wdata.wstrb[i])(
self.mem[write_addr + i](wdata.wdata[i * 8:i * 8 + 8])
)
fsm.If(wdata.wvalid, wdata.wready)(
write_addr.add(int(self.datawidth / 8)),
write_count.dec()
)
# sleep
if sleep > 0:
fsm.If(sleep_count == sleep - 1)(
wdata.wready(0)
).Else(
wdata.wready(1)
)
# write complete
fsm.If(wdata.wvalid, wdata.wready, write_count == 1)(
wdata.wready(0)
)
fsm.Then().goto_init()
# read mode
fsm._set_index(read_mode)
# arvalid and arready
fsm.If(raddr.arvalid)(
raddr.arready(1),
read_addr(raddr.araddr),
read_count(raddr.arlen + 1)
)
fsm.Delay(1)(
raddr.arready(0)
)
fsm.If(vtypes.Not(raddr.arvalid)).goto_init()
fsm.If(raddr.arvalid).goto_next()
# delay
for _ in range(read_delay):
fsm.goto_next()
# mem -> rdata
for i in range(int(self.datawidth / 8)):
fsm.If(vtypes.Or(rdata.rready, vtypes.Not(rdata.rvalid)))(
rdata.rdata[i * 8:i * 8 + 8](self.mem[read_addr + i])
)
if sleep > 0:
fsm.If(sleep_count < sleep - 1, read_count > 0,
vtypes.Or(rdata.rready, vtypes.Not(rdata.rvalid)))(
rdata.rvalid(1),
read_addr.add(int(self.datawidth / 8)),
read_count.dec()
)
fsm.If(sleep_count < sleep - 1, read_count == 1,
vtypes.Or(rdata.rready, vtypes.Not(rdata.rvalid)))(
rdata.rlast(1)
)
else:
fsm.If(read_count > 0,
vtypes.Or(rdata.rready, vtypes.Not(rdata.rvalid)))(
rdata.rvalid(1),
read_addr.add(int(self.datawidth / 8)),
read_count.dec()
)
fsm.If(read_count == 1,
vtypes.Or(rdata.rready, vtypes.Not(rdata.rvalid)))(
rdata.rlast(1)
)
# de-assert
fsm.Delay(1)(
rdata.rvalid(0),
rdata.rlast(0)
)
# retry
fsm.If(rdata.rvalid, vtypes.Not(rdata.rready))(
rdata.rvalid(rdata.rvalid),
rdata.rdata(rdata.rdata),
rdata.rlast(rdata.rlast)
)
# read complete
fsm.If(rdata.rvalid, rdata.rready,
read_count == 0).goto_init()
def connect(self, index, ports, name):
if not self.noio:
raise ValueError('I/O ports can not be connected to others.')
ports = defaultdict(lambda: None, ports)
if '_'.join([name, 'awid']) in ports:
awid = ports['_'.join([name, 'awid'])]
else:
awid = None
awaddr = ports['_'.join([name, 'awaddr'])]
awlen = ports['_'.join([name, 'awlen'])]
awsize = ports['_'.join([name, 'awsize'])]
awburst = ports['_'.join([name, 'awburst'])]
awlock = ports['_'.join([name, 'awlock'])]
awcache = ports['_'.join([name, 'awcache'])]
awprot = ports['_'.join([name, 'awprot'])]
awqos = ports['_'.join([name, 'awqos'])]
if '_'.join([name, 'awuser']) in ports:
awuser = ports['_'.join([name, 'awuser'])]
else:
awuser = None
awvalid = ports['_'.join([name, 'awvalid'])]
awready = ports['_'.join([name, 'awready'])]
if self.waddrs[index].awid is not None:
self.waddrs[index].awid.connect(awid if awid is not None else 0)
self.waddrs[index].awaddr.connect(awaddr)
self.waddrs[index].awlen.connect(awlen if awlen is not None else 0)
self.waddrs[index].awsize.connect(awsize if awsize is not None else
int(math.log(self.datawidth // 8)))
self.waddrs[index].awburst.connect(awburst if awburst is not None else BURST_INCR)
self.waddrs[index].awlock.connect(awlock if awlock is not None else 0)
self.waddrs[index].awcache.connect(awcache)
self.waddrs[index].awprot.connect(awprot)
self.waddrs[index].awqos.connect(awqos if awqos is not None else 0)
if self.waddrs[index].awuser is not None:
self.waddrs[index].awuser.connect(awuser if awuser is not None else 0)
self.waddrs[index].awvalid.connect(awvalid)
awready.connect(self.waddrs[index].awready)
wdata = ports['_'.join([name, 'wdata'])]
wstrb = ports['_'.join([name, 'wstrb'])]
wlast = ports['_'.join([name, 'wlast'])]
if '_'.join([name, 'wuser']) in ports:
wuser = ports['_'.join([name, 'wuser'])]
else:
wuser = None
wvalid = ports['_'.join([name, 'wvalid'])]
wready = ports['_'.join([name, 'wready'])]
self.wdatas[index].wdata.connect(wdata)
self.wdatas[index].wstrb.connect(wstrb)
self.wdatas[index].wlast.connect(wlast if wlast is not None else 1)
if self.wdatas[index].wuser is not None:
self.wdatas[index].wuser.connect(wuser if wuser is not None else 0)
self.wdatas[index].wvalid.connect(wvalid)
wready.connect(self.wdatas[index].wready)
if '_'.join([name, 'bid']) in ports:
bid = ports['_'.join([name, 'bid'])]
else:
bid = None
bresp = ports['_'.join([name, 'bresp'])]
if '_'.join([name, 'buser']) in ports:
buser = ports['_'.join([name, 'buser'])]
else:
buser = None
bvalid = ports['_'.join([name, 'bvalid'])]
bready = ports['_'.join([name, 'bready'])]
if bid is not None:
bid.connect(self.wresps[index].bid if self.wresps[index].bid is not None else 0)
bresp.connect(self.wresps[index].bresp)
if buser is not None:
buser.connect(self.wresps[index].buser if self.wresps[index].buser is not None else 0)
bvalid.connect(self.wresps[index].bvalid)
self.wresps[index].bready.connect(bready)
if '_'.join([name, 'arid']) in ports:
arid = ports['_'.join([name, 'arid'])]
else:
arid = None
araddr = ports['_'.join([name, 'araddr'])]
arlen = ports['_'.join([name, 'arlen'])]
arsize = ports['_'.join([name, 'arsize'])]
arburst = ports['_'.join([name, 'arburst'])]
arlock = ports['_'.join([name, 'arlock'])]
arcache = ports['_'.join([name, 'arcache'])]
arprot = ports['_'.join([name, 'arprot'])]
arqos = ports['_'.join([name, 'arqos'])]
if '_'.join([name, 'aruser']) in ports:
aruser = ports['_'.join([name, 'aruser'])]
else:
aruser = None
arvalid = ports['_'.join([name, 'arvalid'])]
arready = ports['_'.join([name, 'arready'])]
if self.raddrs[index].arid is not None:
self.raddrs[index].arid.connect(arid if arid is not None else 0)
self.raddrs[index].araddr.connect(araddr)
self.raddrs[index].arlen.connect(arlen if arlen is not None else 0)
self.raddrs[index].arsize.connect(arsize if arsize is not None else
int(math.log(self.datawidth // 8)))
self.raddrs[index].arburst.connect(arburst if arburst is not None else BURST_INCR)
self.raddrs[index].arlock.connect(arlock if arlock is not None else 0)
self.raddrs[index].arcache.connect(arcache)
self.raddrs[index].arprot.connect(arprot)
self.raddrs[index].arqos.connect(arqos if arqos is not None else 0)
if self.raddrs[index].aruser is not None:
self.raddrs[index].aruser.connect(aruser if aruser is not None else 0)
self.raddrs[index].arvalid.connect(arvalid)
arready.connect(self.raddrs[index].arready)
if '_'.join([name, 'rid']) in ports:
rid = ports['_'.join([name, 'rid'])]
else:
rid = None
rdata = ports['_'.join([name, 'rdata'])]
rresp = ports['_'.join([name, 'rresp'])]
rlast = ports['_'.join([name, 'rlast'])]
if '_'.join([name, 'ruser']) in ports:
ruser = ports['_'.join([name, 'ruser'])]
else:
ruser = None
rvalid = ports['_'.join([name, 'rvalid'])]
rready = ports['_'.join([name, 'rready'])]
if rid is not None:
rid.connect(self.rdatas[index].rid if self.rdatas[index].rid is not None else 0)
rdata.connect(self.rdatas[index].rdata)
rresp.connect(self.rdatas[index].rresp)
if rlast is not None:
rlast.connect(self.rdatas[index].rlast)
if ruser is not None:
ruser.connect(self.rdatas[index].ruser if self.rdatas[index].ruser is not None else 0)
rvalid.connect(self.rdatas[index].rvalid)
self.rdatas[index].rready.connect(rready)
def make_memory_image(filename, length, pattern='inc', dtype=None,
datawidth=32, wordwidth=8, endian='little'):
import numpy as np
if dtype is None:
dtype = np.int64
if pattern == 'inc':
l = list(range(length))
array = np.array(l, dtype=dtype)
else:
array = np.zeros([length], dtype=dtype)
to_memory_image(filename, array,
datawidth=datawidth, wordwidth=wordwidth,
endian=endian)
def to_memory_image(filename, array, length=None,
datawidth=32, wordwidth=8, endian='little', blksize=4096):
import numpy as np
if not isinstance(array, np.ndarray):
array = np.array(array)
array = np.reshape(array, [-1])
if not isinstance(array[0], (int, np.int64, np.int32)):
raise TypeError("not supported type: '%s'" %
str(type(array[0])))
if length is not None:
if len(array) > length:
array = array[:length]
elif len(array) < length:
np.append(array, np.zeros([length - len(array)],
dtype=array.dtype))
num_hex = int(math.ceil(wordwidth / 4))
fmt = ''.join(['%0', str(num_hex), 'x\n'])
if datawidth >= wordwidth:
num = int(math.ceil(datawidth / wordwidth))
zero = np.zeros(list(array.shape) + [num], dtype=np.int64)
base = array.reshape([-1, 1])
shamt = np.arange(num, dtype=np.int64) * [wordwidth]
if endian == 'big':
shamt.reverse()
mask = np.full([1], 2 ** wordwidth - 1, dtype=np.int64)
data = (((zero + base) >> shamt) & mask).reshape([-1])
with open(filename, 'w') as f:
for i in range(0, len(data), blksize):
blk = data[i:i + blksize]
s = ''.join([fmt % d for d in blk])
f.write(s)
return len(data)
else:
num = int(math.ceil(wordwidth / datawidth))
base = array.reshape([-1, num])
shamt = np.arange(num, dtype=np.int64) * [datawidth]
if endian == 'big':
shamt.reverse()
mask = np.full([1], 2 ** datawidth - 1, dtype=np.int64)
data = (base.reshape([-1, num]) & mask) << shamt
data = np.bitwise_or.reduce(data, -1).reshape([-1])
with open(filename, 'w') as f:
for i in range(0, len(data), blksize):
blk = data[i:i + blksize]
s = ''.join([fmt % d for d in blk])
f.write(s)
return len(data)
def aligned_shape(shape, datawidth, mem_datawidth):
aligned_shape = list(shape[:])
if datawidth == mem_datawidth or datawidth > mem_datawidth:
return aligned_shape
chunk = mem_datawidth // datawidth
new_size = int(math.ceil(aligned_shape[-1] / chunk)) * chunk
aligned_shape[-1] = new_size
return aligned_shape
def shape_to_length(shape):
return functools.reduce(lambda x, y: x * y, shape, 1)
def shape_to_memory_size(shape, datawidth, mem_datawidth=None, block_size=4096):
if mem_datawidth is not None:
shape = aligned_shape(shape, datawidth, mem_datawidth)
bytes = int(math.ceil(datawidth / 8))
length = shape_to_length(shape)
return ((block_size // bytes) *
int(math.ceil(length / (block_size // bytes))))
def set_memory(mem, src, mem_datawidth, src_datawidth, mem_offset,
num_align_words=None):
if mem_datawidth < src_datawidth:
return _set_memory_narrow(mem, src, mem_datawidth, src_datawidth, mem_offset,
num_align_words)
return _set_memory_wide(mem, src, mem_datawidth, src_datawidth, mem_offset,
num_align_words)
def _set_memory_wide(mem, src, mem_datawidth, src_datawidth, mem_offset,
num_align_words=None):
if mem_datawidth > 64:
raise ValueError('not supported')
import numpy as np
if num_align_words is not None:
src = align(src, num_align_words)
num_pack = int(math.ceil(mem_datawidth / src_datawidth))
src_mask = np.full([1], 2 ** src_datawidth - 1, dtype=np.int64)
mem_mask = np.full([1], 2 ** mem_datawidth - 1, dtype=np.int64)
offset = mem_offset // int(math.ceil(mem_datawidth / 8))
if src.shape[-1] % num_pack != 0:
pads = []
for s in src.shape[:-1]:
pads.append((0, 0))
pads.append((0, num_pack - src.shape[-1]))
src = np.pad(src, pads, 'constant')
masked_data = src.astype(np.int64) & src_mask
pack = np.arange(src.shape[-1], dtype=np.int64) % [num_pack]
shift = [src_datawidth] * pack
v = (masked_data << shift) & mem_mask
v = np.reshape(v, [-1, num_pack])
v = np.bitwise_or.reduce(v, -1)
dst_size = mem[offset:offset + v.shape[-1]].size
if v.size > dst_size:
raise ValueError("""too large source data: """
"""destination size (%d) < source size (%d)""" %
(dst_size, v.size))
mem[offset:offset + v.shape[-1]] = v
def _set_memory_narrow(mem, src, mem_datawidth, src_datawidth, mem_offset,
num_align_words=None):
if mem_datawidth > 64:
raise ValueError('not supported')
import numpy as np
if num_align_words is not None:
src = align(src, num_align_words)
num_pack = int(math.ceil(src_datawidth / mem_datawidth))
src_mask = np.full([1], 2 ** src_datawidth - 1, dtype=np.int64)
mem_mask = np.full([1], 2 ** mem_datawidth - 1, dtype=np.int64)
offset = mem_offset // int(math.ceil(mem_datawidth / 8))
pack = np.arange(num_pack, dtype=np.int64)
shift = [mem_datawidth] * pack
dup_src_based = np.zeros(list(src.shape) + [num_pack], dtype=np.int64)
dup_src = dup_src_based + np.reshape(src, list(src.shape) + [1])
v = dup_src >> shift
v = np.reshape(v, [-1])
v = v & mem_mask
dst_size = mem[offset:offset + v.shape[-1]].size
if v.size > dst_size:
raise ValueError("""too large source data: """
"""destination size (%d) < source size (%d)""" %
(dst_size, v.size))
mem[offset:offset + v.shape[-1]] = v
def align(src, num_align_words):
if num_align_words == 1:
return src
import numpy as np
src_aligned_shape = aligned_shape(src.shape, 1, num_align_words)
ret = np.zeros(src_aligned_shape, dtype=np.int64).reshape([-1])
offset = 0
index = 0
res = num_align_words - src.shape[-1] % num_align_words
for data in src.reshape([-1]):
ret[offset] = data
offset += 1
index += 1
if index == src.shape[-1]:
index = 0
if res < num_align_words:
offset += res
return ret
def split_read_write(m, ports, prefix,
read_prefix='r_', write_prefix='w_'):
# Read (AR, R)
r_ports = {}
for name, port in ports.items():
r_name = read_prefix + port.name
if name.startswith(prefix + '_ar') or name.startswith(prefix + '_r'):
if isinstance(port, vtypes.Reg):
r_port = m.RegLike(port, name=r_name)
port.connect(r_port)
else:
r_port = m.WireLike(port, name=r_name)
r_port.connect(port)
else:
r_port = m.WireLike(port, name=r_name)
if isinstance(port, vtypes.Wire):
r_port.assign(0)
r_ports[r_name] = r_port
# Write (AW, W, B)
w_ports = {}
for name, port in ports.items():
w_name = write_prefix + port.name
if (name.startswith(prefix + '_aw') or
name.startswith(prefix + '_w') or name.startswith(prefix + '_b')):
if isinstance(port, vtypes.Reg):
w_port = m.RegLike(port, name=w_name)
port.connect(w_port)
else:
w_port = m.WireLike(port, name=w_name)
w_port.connect(port)
else:
w_port = m.WireLike(port, name=w_name)
if isinstance(port, vtypes.Wire):
w_port.assign(0)
w_ports[w_name] = w_port
return r_ports, w_ports
| 34.087532 | 100 | 0.557978 | 16,345 | 133,964 | 4.443867 | 0.027225 | 0.0239 | 0.026392 | 0.016466 | 0.85803 | 0.824864 | 0.804061 | 0.767798 | 0.74259 | 0.720644 | 0 | 0.011522 | 0.321676 | 133,964 | 3,929 | 101 | 34.096208 | 0.787798 | 0.016049 | 0 | 0.693955 | 0 | 0 | 0.035888 | 0.000367 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033161 | false | 0.003454 | 0.006908 | 0.001382 | 0.084283 | 0.000345 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
2f14f5e23fff3bac38661fe59818c67ccc51a28d | 205 | py | Python | sssgraph/__init__.py | Luxxii/sssgraph_solver | c7c8a41baccb4aba0e70dbbcaaf61bcdcd0c3283 | [
"MIT"
] | null | null | null | sssgraph/__init__.py | Luxxii/sssgraph_solver | c7c8a41baccb4aba0e70dbbcaaf61bcdcd0c3283 | [
"MIT"
] | null | null | null | sssgraph/__init__.py | Luxxii/sssgraph_solver | c7c8a41baccb4aba0e70dbbcaaf61bcdcd0c3283 | [
"MIT"
] | null | null | null | from .sssgraph import create_fully_connected_graph, \
create_partially_fully_connected_graph, \
query_graph, \
query_with_fully_connected_graph, \
query_with_paritally_fully_connected_graph | 41 | 53 | 0.82439 | 25 | 205 | 6.08 | 0.44 | 0.368421 | 0.5 | 0.315789 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.131707 | 205 | 5 | 54 | 41 | 0.853933 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.2 | 0 | 0.2 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
2f32d8ef3ab4e3b739630b1ce164905dc33c60a1 | 27 | py | Python | miopy/__init__.py | icbi-lab/miopy | 1bf23d9c69347070fa6b57f02de9cc1d259f50cc | [
"MIT"
] | null | null | null | miopy/__init__.py | icbi-lab/miopy | 1bf23d9c69347070fa6b57f02de9cc1d259f50cc | [
"MIT"
] | null | null | null | miopy/__init__.py | icbi-lab/miopy | 1bf23d9c69347070fa6b57f02de9cc1d259f50cc | [
"MIT"
] | null | null | null | from .correlation import *
| 13.5 | 26 | 0.777778 | 3 | 27 | 7 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.148148 | 27 | 1 | 27 | 27 | 0.913043 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
85dac5b47da7cf3dbbcfa6506247dcb71c6f298a | 395 | py | Python | Codewars/8kyu/formatting-decimal-places-number-0/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | 7 | 2017-09-20T16:40:39.000Z | 2021-08-31T18:15:08.000Z | Codewars/8kyu/formatting-decimal-places-number-0/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | null | null | null | Codewars/8kyu/formatting-decimal-places-number-0/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | null | null | null | # Python - 3.6.0
Test.describe('two_decimal_places')
Test.it('works for some examples')
Test.assert_equals(two_decimal_places(4.659725356), 4.66, "didn't work for 4.659725356")
Test.assert_equals(two_decimal_places(173735326.3783732637948948), 173735326.38, "didn't work for 173735326.3783732637948948")
Test.assert_equals(two_decimal_places(4.653725356), 4.65, "didn't work for 4.653725356")
| 43.888889 | 126 | 0.792405 | 63 | 395 | 4.793651 | 0.428571 | 0.13245 | 0.211921 | 0.188742 | 0.410596 | 0.324503 | 0.218543 | 0 | 0 | 0 | 0 | 0.300546 | 0.073418 | 395 | 8 | 127 | 49.375 | 0.52459 | 0.035443 | 0 | 0 | 0 | 0 | 0.361478 | 0.068602 | 0 | 0 | 0 | 0 | 0.6 | 1 | 0 | true | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
c80fc399f2da0620ff979ccee9182cff5e1e1475 | 10,975 | py | Python | bot/reviewbot/tools/tests/test_cppcheck.py | reviewboard/ReviewBot | 6c529706229da647cc8cdef27db75cebc0abf216 | [
"MIT"
] | 91 | 2015-04-30T21:00:40.000Z | 2022-03-30T07:19:03.000Z | bot/reviewbot/tools/tests/test_cppcheck.py | reviewboard/ReviewBot | 6c529706229da647cc8cdef27db75cebc0abf216 | [
"MIT"
] | 11 | 2015-01-08T13:48:21.000Z | 2018-07-03T13:18:35.000Z | bot/reviewbot/tools/tests/test_cppcheck.py | reviewboard/ReviewBot | 6c529706229da647cc8cdef27db75cebc0abf216 | [
"MIT"
] | 23 | 2015-04-03T17:17:00.000Z | 2022-03-07T08:14:27.000Z | """Unit tests for reviewbot.tools.cppcheck."""
from __future__ import unicode_literals
import os
import kgb
import six
from reviewbot.tools.cppcheck import CPPCheckTool
from reviewbot.tools.testing import (BaseToolTestCase,
ToolTestCaseMetaclass,
integration_test,
simulation_test)
from reviewbot.utils.filesystem import tmpdirs
from reviewbot.utils.process import execute
@six.add_metaclass(ToolTestCaseMetaclass)
class CPPCheckToolTests(BaseToolTestCase):
"""Unit tests for reviewbot.tools.cppcheck.CPPCheckTool."""
tool_class = CPPCheckTool
tool_exe_config_key = 'cppcheck'
tool_exe_path = '/path/to/cppcheck'
sample_cpp_code = (
b'template <int i>\n'
b'int test() {\n'
b' int buf[10];\n'
b' buf[100] = 0;\n'
b'\n'
b' return test<i + 1>();\n'
b'}\n'
b'\n'
b'int main() {\n'
b' return test<0>();\n'
b'\n'
b' int i = 42;\n'
b'}'
)
@integration_test()
@simulation_test(output=(
"4::8::error::arrayIndexOutOfBounds::Array 'buf[10]' accessed at "
"index 100, which is out of bounds.\n"
))
def test_execute(self):
"""Testing CPPCheckTool.execute"""
review, review_file = self.run_tool_execute(
filename='test.cc',
file_contents=self.sample_cpp_code)
self.assertEqual(review.comments, [
{
'filediff_id': review_file.id,
'first_line': 4,
'num_lines': 1,
'text': (
"Array 'buf[10]' accessed at index 100, which is out "
"of bounds.\n"
"\n"
"Column: 8\n"
"Severity: error\n"
"Error code: arrayIndexOutOfBounds"
),
'issue_opened': True,
'rich_text': False,
},
])
self.assertSpyCalledWith(
execute,
[
self.tool_exe_path,
'-q',
'--template={line}::{column}::{severity}::{id}::{message}',
os.path.join(tmpdirs[-1], 'test.cc'),
],
ignore_errors=True)
@integration_test()
@simulation_test(output=(
"4::8::error::arrayIndexOutOfBounds::Array 'buf[10]' accessed at "
"index 100, which is out of bounds.\n"
"12::11::style::unreadVariable::Variable 'i' is assigned a value that "
"is never used.\n"
"4::14::style::unreadVariable::Variable 'buf[100]' is assigned a "
"value that is never used.\n"
))
def test_execute_with_style_checks_enabled(self):
"""Testing CPPCheckTool.execute with style_checks_enabled setting"""
review, review_file = self.run_tool_execute(
filename='test.cc',
file_contents=self.sample_cpp_code,
tool_settings={
'style_checks_enabled': True,
})
self.assertEqual(review.comments, [
{
'filediff_id': review_file.id,
'first_line': 4,
'num_lines': 1,
'text': (
"Array 'buf[10]' accessed at index 100, which is out "
"of bounds.\n"
"\n"
"Column: 8\n"
"Severity: error\n"
"Error code: arrayIndexOutOfBounds"
),
'issue_opened': True,
'rich_text': False,
},
{
'filediff_id': review_file.id,
'first_line': 12,
'num_lines': 1,
'text': (
"Variable 'i' is assigned a value that is never used.\n"
"\n"
"Column: 11\n"
"Severity: style\n"
"Error code: unreadVariable"
),
'issue_opened': True,
'rich_text': False,
},
{
'filediff_id': review_file.id,
'first_line': 4,
'num_lines': 1,
'text': (
"Variable 'buf[100]' is assigned a value that is never "
"used.\n"
"\n"
"Column: 14\n"
"Severity: style\n"
"Error code: unreadVariable"
),
'issue_opened': True,
'rich_text': False,
},
])
self.assertSpyCalledWith(
execute,
[
self.tool_exe_path,
'-q',
'--template={line}::{column}::{severity}::{id}::{message}',
'--enable=style',
os.path.join(tmpdirs[-1], 'test.cc'),
],
ignore_errors=True)
@integration_test()
@simulation_test(output=(
"6::12::information::templateRecursion::TemplateSimplifier: max "
"template recursion (100) reached for template 'test<101>'. You might "
"want to limit Cppcheck recursion.\n"
"4::8::error::arrayIndexOutOfBounds::Array 'buf[10]' accessed at "
"index 100, which is out of bounds.\n"
"12::11::style::unreadVariable::Variable 'i' is assigned a value that "
"is never used.\n"
"4::14::style::unreadVariable::Variable 'buf[100]' is assigned a "
"value that is never used.\n"
))
def test_execute_with_all_checks_enabled(self):
"""Testing CPPCheckTool.execute with all_checks_enabled setting"""
review, review_file = self.run_tool_execute(
filename='test.cc',
file_contents=self.sample_cpp_code,
tool_settings={
'all_checks_enabled': True,
})
self.assertEqual(review.comments, [
{
'filediff_id': review_file.id,
'first_line': 6,
'num_lines': 1,
'text': (
"TemplateSimplifier: max template recursion (100) "
"reached for template 'test<101>'. You might want to "
"limit Cppcheck recursion.\n"
"\n"
"Column: 12\n"
"Severity: information\n"
"Error code: templateRecursion"
),
'issue_opened': True,
'rich_text': False,
},
{
'filediff_id': review_file.id,
'first_line': 4,
'num_lines': 1,
'text': (
"Array 'buf[10]' accessed at index 100, which is out "
"of bounds.\n"
"\n"
"Column: 8\n"
"Severity: error\n"
"Error code: arrayIndexOutOfBounds"
),
'issue_opened': True,
'rich_text': False,
},
{
'filediff_id': review_file.id,
'first_line': 12,
'num_lines': 1,
'text': (
"Variable 'i' is assigned a value that is never used.\n"
"\n"
"Column: 11\n"
"Severity: style\n"
"Error code: unreadVariable"
),
'issue_opened': True,
'rich_text': False,
},
{
'filediff_id': review_file.id,
'first_line': 4,
'num_lines': 1,
'text': (
"Variable 'buf[100]' is assigned a value that is never "
"used.\n"
"\n"
"Column: 14\n"
"Severity: style\n"
"Error code: unreadVariable"
),
'issue_opened': True,
'rich_text': False,
},
])
self.assertSpyCalledWith(
execute,
[
self.tool_exe_path,
'-q',
'--template={line}::{column}::{severity}::{id}::{message}',
'--enable=all',
os.path.join(tmpdirs[-1], 'test.cc'),
],
ignore_errors=True)
@integration_test()
@simulation_test(output=(
"6::22::error::syntaxError::syntax error: >()\n"
))
def test_execute_with_force_language(self):
"""Testing CPPCheckTool.execute with force_language setting"""
review, review_file = self.run_tool_execute(
filename='test.cc',
file_contents=self.sample_cpp_code,
tool_settings={
'force_language': 'c',
})
self.assertEqual(review.comments, [
{
'filediff_id': review_file.id,
'first_line': 6,
'num_lines': 1,
'text': (
"syntax error: >()\n"
"\n"
"Column: 22\n"
"Severity: error\n"
"Error code: syntaxError"
),
'issue_opened': True,
'rich_text': False,
},
])
self.assertSpyCalledWith(
execute,
[
self.tool_exe_path,
'-q',
'--template={line}::{column}::{severity}::{id}::{message}',
'--language=c',
os.path.join(tmpdirs[-1], 'test.cc'),
],
ignore_errors=True)
@integration_test()
@simulation_test(output='')
def test_execute_with_success(self):
"""Testing CPPCheckTool.execute with no warnings or errors"""
review, review_file = self.run_tool_execute(
filename='test.cc',
file_contents=(
b'int main() {\n'
b' return 0;\n'
b'}\n'
))
self.assertEqual(review.comments, [])
self.assertSpyCalledWith(
execute,
[
self.tool_exe_path,
'-q',
'--template={line}::{column}::{severity}::{id}::{message}',
os.path.join(tmpdirs[-1], 'test.cc'),
],
ignore_errors=True)
def setup_simulation_test(self, output):
"""Set up the simulation test for pyflakes.
This will spy on :py:func:`~reviewbot.utils.process.execute`, making
it return the provided stdout and stderr results.
Args:
output (unicode):
The outputted results from cppcheck.
"""
self.spy_on(execute, op=kgb.SpyOpReturn(output))
| 32.859281 | 79 | 0.458041 | 1,014 | 10,975 | 4.802761 | 0.156805 | 0.005749 | 0.029569 | 0.036961 | 0.777002 | 0.752567 | 0.727105 | 0.707803 | 0.707803 | 0.707803 | 0 | 0.020698 | 0.423326 | 10,975 | 333 | 80 | 32.957958 | 0.748775 | 0.054214 | 0 | 0.722222 | 0 | 0 | 0.304568 | 0.069096 | 0 | 0 | 0 | 0 | 0.034722 | 1 | 0.020833 | false | 0 | 0.027778 | 0 | 0.065972 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.