Schema (113 columns):

| Field | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
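The records below follow this schema positionally. As a minimal sketch of how rows with this schema might be loaded and inspected, assuming they are exported to a Parquet file (the file name `code_sample.parquet` and the filter thresholds are hypothetical, not part of the dataset):

```python
import pandas as pd

# Hypothetical dump of the rows below; any Parquet/JSONL export with the
# schema above would work the same way.
df = pd.read_parquet("code_sample.parquet")

# Inspect a few of the schema columns.
cols = ["hexsha", "lang", "size", "avg_line_length",
        "max_line_length", "alphanum_fraction"]
print(df[cols].head())

# Illustrative filter on two of the per-file statistics; the thresholds
# are made up for this example.
mask = (
    (df["lang"] == "Python")
    & (df["max_line_length"] <= 120)
    & (df["alphanum_fraction"] >= 0.5)
)
print(f"{mask.sum()} of {len(df)} rows pass the illustrative filter")
```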
Record 1
hexsha: 963cbbfe45462196e9d6936961c8d5e68098ef6e | size: 5405 | ext: py | lang: Python
max_stars: thenewboston_node/blockchain/tests/test_list_blockchain_state_meta.py @ nishp77/thenewboston-node (head 158b1f1739b2c6c9c21c80e9da854ca141f1cf8f), licenses ["MIT"], count 30, events 2021-03-05T22:08:17.000Z to 2021-09-23T02:45:45.000Z
max_issues: same path/repo/head, licenses ["MIT"], count 148, events 2021-03-05T23:37:50.000Z to 2021-11-02T02:18:58.000Z
max_forks: same path/repo/head, licenses ["MIT"], count 14, events 2021-03-05T21:58:46.000Z to 2021-10-15T17:27:52.000Z
content:
from django.test import override_settings
from rest_framework import status

from thenewboston_node.business_logic.tests.base import as_primary_validator, force_blockchain

API_V1_LIST_BLOCKCHAIN_STATE_URL = '/api/v1/blockchain-states-meta/'


def test_memory_blockchain_supported(api_client, memory_blockchain, primary_validator_key_pair):
    with force_blockchain(memory_blockchain):
        with override_settings(NODE_SIGNING_KEY=primary_validator_key_pair.private):
            response = api_client.get(API_V1_LIST_BLOCKCHAIN_STATE_URL)
            assert response.status_code == status.HTTP_200_OK


def test_can_list_blockchain_state_meta(api_client, file_blockchain_with_two_blockchain_states, pv_network_address):
    with force_blockchain(file_blockchain_with_two_blockchain_states), as_primary_validator():
        response = api_client.get(API_V1_LIST_BLOCKCHAIN_STATE_URL)
        assert response.status_code == 200
        data = response.json()
        assert data['count'] == 2
        blockchain_state_0, blockchain_state_1 = data['results']
        expected = file_blockchain_with_two_blockchain_states.get_first_blockchain_state().last_block_number
        assert blockchain_state_0['last_block_number'] == expected
        assert blockchain_state_0['url_path'] == (
            '/blockchain/blockchain-states/0/0/0/0/0/0/0/0/0000000000000000000!-blockchain-state.msgpack.gz'
        )
        assert len(blockchain_state_0['urls']) == 1
        assert blockchain_state_0['urls'][0] == (
            f'{pv_network_address}blockchain/blockchain-states'
            '/0/0/0/0/0/0/0/0/0000000000000000000!-blockchain-state.msgpack.gz'
        )
        assert blockchain_state_1['last_block_number'] == 1
        # TODO(dmu) CRITICAL: Stabilize unittests and remove `or`
        assert blockchain_state_1['url_path'] == (
            '/blockchain/blockchain-states/0/0/0/0/0/0/0/0/00000000000000000001-blockchain-state.msgpack'
        ) or blockchain_state_1['url_path'] == (
            '/blockchain/blockchain-states/0/0/0/0/0/0/0/0/00000000000000000001-blockchain-state.msgpack.gz'
        )
        assert len(blockchain_state_1['urls']) == 1
        assert blockchain_state_1['urls'][0] == (
            f'{pv_network_address}blockchain/blockchain-states'
            '/0/0/0/0/0/0/0/0/00000000000000000001-blockchain-state.msgpack'
        ) or blockchain_state_1['urls'][0] == (
            f'{pv_network_address}blockchain/blockchain-states'
            '/0/0/0/0/0/0/0/0/00000000000000000001-blockchain-state.msgpack.gz'
        )


def test_can_sort_ascending_blockchain_states_meta(api_client, file_blockchain_with_two_blockchain_states):
    with force_blockchain(file_blockchain_with_two_blockchain_states), as_primary_validator():
        response = api_client.get(API_V1_LIST_BLOCKCHAIN_STATE_URL + '?ordering=last_block_number')
        assert response.status_code == 200
        data = response.json()
        assert data['count'] == 2
        blockchain_state_0, blockchain_state_1 = data['results']
        expected = file_blockchain_with_two_blockchain_states.get_first_blockchain_state().last_block_number
        assert blockchain_state_0['last_block_number'] == expected
        assert blockchain_state_1['last_block_number'] == 1


def test_can_sort_descending_blockchain_states_meta(api_client, file_blockchain_with_two_blockchain_states):
    with force_blockchain(file_blockchain_with_two_blockchain_states), as_primary_validator():
        response = api_client.get(API_V1_LIST_BLOCKCHAIN_STATE_URL + '?ordering=-last_block_number')
        assert response.status_code == 200
        data = response.json()
        assert data['count'] == 2
        blockchain_state_0, blockchain_state_1 = data['results']
        assert blockchain_state_0['last_block_number'] == 1
        expected = file_blockchain_with_two_blockchain_states.get_first_blockchain_state().last_block_number
        assert blockchain_state_1['last_block_number'] == expected


def test_can_get_blockchain_states_meta_w_limit(api_client, file_blockchain_with_two_blockchain_states):
    with force_blockchain(file_blockchain_with_two_blockchain_states), as_primary_validator():
        response = api_client.get(API_V1_LIST_BLOCKCHAIN_STATE_URL + '?limit=1')
        assert response.status_code == 200
        data = response.json()
        assert data['count'] == 2
        assert len(data['results']) == 1
        expected = file_blockchain_with_two_blockchain_states.get_first_blockchain_state().last_block_number
        assert data['results'][0]['last_block_number'] == expected


def test_can_get_blockchain_states_meta_w_offset(api_client, file_blockchain_with_two_blockchain_states):
    with force_blockchain(file_blockchain_with_two_blockchain_states), as_primary_validator():
        response = api_client.get(API_V1_LIST_BLOCKCHAIN_STATE_URL + '?limit=1&offset=1')
        assert response.status_code == 200
        data = response.json()
        assert data['count'] == 2
        assert len(data['results']) == 1
        assert data['results'][0]['last_block_number'] == 1


def test_pagination_is_applied_after_ordering(api_client, file_blockchain_with_two_blockchain_states):
    with force_blockchain(file_blockchain_with_two_blockchain_states), as_primary_validator():
        response = api_client.get(API_V1_LIST_BLOCKCHAIN_STATE_URL + '?offset=1&ordering=-last_block_number')
        assert response.status_code == status.HTTP_200_OK
        data = response.json()
        assert data['count'] == 2
        assert len(data['results']) == 1
        assert data['results'][0]['last_block_number'] == -1
avg_line_length: 45.805085 | max_line_length: 116 | alphanum_fraction: 0.768733
qsc_*_quality_signal columns (41 values, schema order): 739, 5405, 5.194858, 0.11908, 0.021881, 0.028132, 0.031258, 0.858036, 0.848398, 0.837718, 0.824694, 0.780933, 0.767908, 0, 0.051347, 0.12803, 5405, 117, 117, 46.196581, 0.763208, 0.010176, 0, 0.517647, 0, 0.070588, 0.196896, 0.137996, 0, 0, 0, 0.008547, 0.364706, 1, 0.082353, false, 0, 0.035294, 0, 0.117647, 0
qsc_* columns without the _quality_signal suffix (41 values, schema order): 0, 0, 0, null, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
effective: 0 | hits: 6
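Each record is one flat row in the column order given by the schema table, so pairing names with values is a plain zip. A small illustration using the first four columns of the record above (the truncated `fields` list stands in for the full 113 names):

```python
# Illustration only: the full schema has 113 fields; only the first four
# are spelled out here, with values taken from Record 1 above.
fields = ["hexsha", "size", "ext", "lang"]  # ...truncated
row = ["963cbbfe45462196e9d6936961c8d5e68098ef6e", 5405, "py", "Python"]

record = dict(zip(fields, row))
print(record["lang"], record["size"])  # -> Python 5405
```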
Record 2
hexsha: 9643f3228870311b5dbe5501cd2989dbcaa65ef0 | size: 292 | ext: py | lang: Python
max_stars: hes/views.py @ dwcaraway/homeschoolring (head 6f1dec0eba83c759352c2e39863f2ff28a689c0d), licenses ["BSD-3-Clause"], count null, events null
max_issues: same path/repo/head, licenses ["BSD-3-Clause"], count null, events null
max_forks: same path/repo/head, licenses ["BSD-3-Clause"], count null, events null
content:
__author__ = 'dave'

from django.shortcuts import render


def ajax(request, ajax_code):
    return render(request=request, template_name="hes/ajax/%s.html" % ajax_code, context={})


def coming_soon(request):
    return render(request=request, template_name="hes/coming-soon.html", context={})
avg_line_length: 32.444444 | max_line_length: 92 | alphanum_fraction: 0.75
qsc_*_quality_signal columns (41 values, schema order): 40, 292, 5.25, 0.5, 0.07619, 0.180952, 0.247619, 0.390476, 0.390476, 0.390476, 0, 0, 0, 0, 0, 0.109589, 292, 8, 93, 36.5, 0.807692, 0, 0, 0, 0, 0, 0.136986, 0, 0, 0, 0, 0, 0, 1, 0.333333, false, 0, 0.166667, 0.333333, 0.833333, 0
qsc_* columns without the _quality_signal suffix (41 values, schema order): 0, 0, 0, null, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0
effective: 0 | hits: 6
Record 3
hexsha: 9663cc391558e78c4587755ac3f3d84861f0e47b | size: 110 | ext: py | lang: Python
max_stars: tools/evaluation/__init__.py @ destinyls/MonoFlex (head 6e85bb16b60b21041621a759cd3fd48d9a783ff9), licenses ["MIT"], count 86, events 2021-03-24T02:10:17.000Z to 2022-03-30T03:35:41.000Z
max_issues: same path/repo/head, licenses ["MIT"], count 5, events 2021-06-03T09:23:30.000Z to 2022-03-30T09:13:26.000Z
max_forks: same path/repo/head, licenses ["MIT"], count 10, events 2021-05-18T04:15:39.000Z to 2021-11-25T09:32:05.000Z
content:
from .kitti_utils import kitti_eval, kitti_eval_coco_style
__all__ = ['kitti_eval_coco_style', 'kitti_eval']
avg_line_length: 27.5 | max_line_length: 58 | alphanum_fraction: 0.818182
qsc_*_quality_signal columns (41 values, schema order): 17, 110, 4.529412, 0.470588, 0.467532, 0.337662, 0.467532, 0, 0, 0, 0, 0, 0, 0, 0, 0.090909, 110, 3, 59, 36.666667, 0.77, 0, 0, 0, 0, 0, 0.281818, 0.190909, 0, 0, 0, 0, 0, 1, 0, false, 0, 0.5, 0, 0.5, 0
qsc_* columns without the _quality_signal suffix (41 values, schema order): 1, 0, 0, null, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
effective: 0 | hits: 6
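Some of the per-file statistics can be sanity-checked directly against a record's `content`. The dataset does not document its exact definitions, so the sketch below is reverse-engineered from Record 3 alone and hedged accordingly:

```python
import re

# Rough recomputation of two statistics for Record 3's content.
content = (
    "from .kitti_utils import kitti_eval, kitti_eval_coco_style\n"
    "__all__ = ['kitti_eval_coco_style', 'kitti_eval']\n"
)

lines = content.splitlines()
max_line_length = max(len(line) for line in lines)       # 58, matches the row
word_chars = len(re.findall(r"[A-Za-z0-9_]", content))   # 90

# 90 / 110 == 0.818182, the stored alphanum_fraction, if the denominator is
# the stored size of 110 bytes (one more than len(content) here, presumably
# a trailing newline in the original file). This is an inference from one
# record, not a documented definition.
print(max_line_length, word_chars / 110)
```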
Record 4
hexsha: 969e87508d1257a77d97f51151a28a9c3293b919 | size: 1316 | ext: py | lang: Python
max_stars: Ago-Dic-2019/ERIK EDUARDO MONTOYA MARTINEZ/2doParcial/ORM.py @ Arbupa/DAS_Sistemas (head 52263ab91436b2e5a24ce6f8493aaa2e2fe92fb1), licenses ["MIT"], count 41, events 2017-09-26T09:36:32.000Z to 2022-03-19T18:05:25.000Z
max_issues: same path/repo/head, licenses ["MIT"], count 67, events 2017-09-11T05:06:12.000Z to 2022-02-14T04:44:04.000Z
max_forks: same path/repo/head, licenses ["MIT"], count 210, events 2017-09-01T00:10:08.000Z to 2022-03-19T18:05:12.000Z
content:
import sqlite3
from ObjetoArtista import Artista


def showArt():
    try:
        conexion = sqlite3.connect('musicBrainzDB.db')
        cursor = conexion.cursor()
        uMostrar = cursor.execute("SELECT * from Artistas").fetchall()
        Art = []
        for u in uMostrar:
            u = Artista(id=u[0],area=u[1],TypeC=u[2],name=u[3],sort=u[4],id2=u[5],extScore=u[6])
            Art.append(u)
        conexion.commit()
        cursor.close()
        for i in Art:
            print(i)
    except sqlite3.Error as error:
        print('Error con la conexión!', error)
    finally:
        if (conexion):
            conexion.close()


def NArt():
    try:
        conexion = sqlite3.connect('musicBrainzDB.db')
        cursor = conexion.cursor()
        uMostrar = cursor.execute("SELECT * from Artistas").fetchall()
        Art = []
        for u in uMostrar:
            u = Artista(id=u[0],area=u[1],TypeC=u[2],name=u[3],sort=u[4],id2=u[5],extScore=u[6])
            Art.append(u._name)
        conexion.commit()
        cursor.close()
        return len(Art)
    except sqlite3.Error as error:
        print('Error con la conexión!', error)
    finally:
        if (conexion):
            conexion.close()


def main():
    print(showArt())
    print(showArt())


if __name__ == '__main__':
    main()
avg_line_length: 25.307692 | max_line_length: 96 | alphanum_fraction: 0.556991
qsc_*_quality_signal columns (41 values, schema order): 164, 1316, 4.414634, 0.323171, 0.030387, 0.049724, 0.069061, 0.751381, 0.751381, 0.751381, 0.751381, 0.751381, 0.751381, 0, 0.022752, 0.298632, 1316, 52, 97, 25.307692, 0.761647, 0, 0, 0.714286, 0, 0, 0.097191, 0, 0, 0, 0, 0, 0, 1, 0.071429, false, 0, 0.047619, 0, 0.142857, 0.119048
qsc_* columns without the _quality_signal suffix (41 values, schema order): 0, 0, 0, null, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
effective: 0 | hits: 6
Record 5
hexsha: 736f8fb4d47f90586a3e584c15db3b5e97393730 | size: 6281 | ext: py | lang: Python
max_stars: 16-GUI/partial-projects/tetris-gui/core_test.py @ BaseCampCoding/python-fundamentals (head 3804c07841d6604b1e5a1c15126b3301aa8ae306), licenses ["MIT"], count null, events null
max_issues: same path/repo/head, licenses ["MIT"], count 1, events 2018-07-18T18:01:22.000Z to 2019-06-14T15:06:28.000Z
max_forks: same path/repo/head, licenses ["MIT"], count null, events null
content:
from core import *
import pytest


def test_empty_grid_drop_dot():
    ''' drops an active dot block in an empty grid'''
    assert Grid([], ActiveBlock(3, 5, Block([(0,0)]))).drop() == \
        Grid([], ActiveBlock(3, 4, Block([(0,0)])))


@pytest.mark.skip
def test_occupied_grid_drop_dot():
    ''' drops an active dot block in a grid with another dot '''
    assert Grid([Block([(0,0)])], ActiveBlock(3, 5, Block([(0,0)]))).drop() == \
        Grid([Block([(0,0)])], ActiveBlock(3, 4, Block([(0,0)])))


@pytest.mark.skip
def test_empty_grid_move_dot_left():
    ''' moves an active dot block to the left in an empty grid'''
    assert Grid([], ActiveBlock(3, 5, Block([(0,0)]))).move('left') == \
        Grid([], ActiveBlock(2, 5, Block([(0,0)])))


@pytest.mark.skip
def test_empty_grid_move_dot_right():
    ''' moves an active dot block to the right in an empty grid'''
    assert Grid([], ActiveBlock(3, 5, Block([(0,0)]))).move('right') == \
        Grid([], ActiveBlock(4, 5, Block([(0,0)])))


@pytest.mark.skip
def test_empty_grid_move_nonsense_direction():
    ''' calls move with an invalid direction argument. '''
    assert Grid([], ActiveBlock(3, 5, Block([(0,0)]))).move('not left or right') is None


@pytest.mark.skip
def test_rotate_dot():
    g = Grid([], ActiveBlock(3, 5, Block([(0,0)])))
    assert g.rotate() == g


@pytest.mark.skip
def test_rotate_L():
    assert Grid([], ActiveBlock(3, 5, Block([(0, 2), (0, 1), (0, 0), (1, 0)]))).rotate() == \
        Grid([], ActiveBlock(3, 5, Block([(2, 0), (1, 0), (0, 0), (0, -1)])))


@pytest.mark.skip
def test_four_rotations_is_identity():
    g = Grid([], ActiveBlock(3, 5, Block([(0, 2), (0, 1), (0, 0), (1, 0)])))
    assert g.rotate().rotate().rotate().rotate() == g


@pytest.mark.skip
def test_valid_corners_current():
    # bottom left
    assert not Grid([], ActiveBlock(-1, 0, Block([(0, 0)]))).is_valid()
    assert not Grid([], ActiveBlock(-1, -1, Block([(0, 0)]))).is_valid()
    assert not Grid([], ActiveBlock(0, -1, Block([(0, 0)]))).is_valid()
    assert Grid([], ActiveBlock(0, 0, Block([(0, 0)]))).is_valid()
    # bottom right
    assert not Grid([], ActiveBlock(WIDTH, 0, Block([(0, 0)]))).is_valid()
    assert not Grid([], ActiveBlock(WIDTH, -1, Block([(0, 0)]))).is_valid()
    assert not Grid([], ActiveBlock(WIDTH-1, -1, Block([(0, 0)]))).is_valid()
    assert Grid([], ActiveBlock(WIDTH-1, 0, Block([(0, 0)]))).is_valid()
    # top right
    assert not Grid([], ActiveBlock(WIDTH, HEIGHT, Block([(0, 0)]))).is_valid()
    assert Grid([], ActiveBlock(WIDTH-1, HEIGHT, Block([(0, 0)]))).is_valid()
    assert not Grid([], ActiveBlock(WIDTH, HEIGHT-1, Block([(0, 0)]))).is_valid()
    assert Grid([], ActiveBlock(WIDTH-1, HEIGHT-1, Block([(0, 0)]))).is_valid()
    # top left
    assert Grid([], ActiveBlock(0, HEIGHT, Block([(0, 0)]))).is_valid()
    assert not Grid([], ActiveBlock(-1, HEIGHT, Block([(0, 0)]))).is_valid()
    assert not Grid([], ActiveBlock(-1, HEIGHT-1, Block([(0, 0)]))).is_valid()
    assert Grid([], ActiveBlock(0, HEIGHT-1, Block([(0, 0)]))).is_valid()


@pytest.mark.skip
def test_valid_corners_placed():
    # bottom left
    assert not Grid([Block([(-1, 0)])], ActiveBlock(WIDTH // 2, HEIGHT // 2, Block([(0,0)]))).is_valid()
    assert not Grid([Block([(0, -1)])], ActiveBlock(WIDTH // 2, HEIGHT // 2, Block([(0,0)]))).is_valid()
    assert not Grid([Block([(-1, -1)])], ActiveBlock(WIDTH // 2, HEIGHT // 2, Block([(0,0)]))).is_valid()
    assert Grid([Block([(0, 0)])], ActiveBlock(WIDTH // 2, HEIGHT // 2, Block([(0,0)]))).is_valid()
    # bottom right
    assert not Grid([Block([(WIDTH, 0)])], ActiveBlock(WIDTH // 2, HEIGHT // 2, Block([(0,0)]))).is_valid()
    assert not Grid([Block([(WIDTH, -1)])], ActiveBlock(WIDTH // 2, HEIGHT // 2, Block([(0,0)]))).is_valid()
    assert not Grid([Block([(WIDTH-1, -1)])], ActiveBlock(WIDTH // 2, HEIGHT // 2, Block([(0,0)]))).is_valid()
    assert Grid([Block([(WIDTH-1, 0)])], ActiveBlock(WIDTH // 2, HEIGHT // 2, Block([(0,0)]))).is_valid()
    # top right
    assert not Grid([Block([(WIDTH, HEIGHT)])], ActiveBlock(WIDTH // 2, HEIGHT // 2, Block([(0,0)]))).is_valid()
    assert Grid([Block([(WIDTH-1, HEIGHT)])], ActiveBlock(WIDTH // 2, HEIGHT // 2, Block([(0,0)]))).is_valid()
    assert not Grid([Block([(WIDTH, HEIGHT-1)])], ActiveBlock(WIDTH // 2, HEIGHT // 2, Block([(0,0)]))).is_valid()
    assert Grid([Block([(WIDTH-1, HEIGHT-1)])], ActiveBlock(WIDTH // 2, HEIGHT // 2, Block([(0,0)]))).is_valid()
    # top left
    assert Grid([Block([(0, HEIGHT)])], ActiveBlock(WIDTH // 2, HEIGHT // 2, Block([(0,0)]))).is_valid()
    assert not Grid([Block([(-1, HEIGHT)])], ActiveBlock(WIDTH // 2, HEIGHT // 2, Block([(0,0)]))).is_valid()
    assert not Grid([Block([(-1, HEIGHT-1)])], ActiveBlock(WIDTH // 2, HEIGHT // 2, Block([(0,0)]))).is_valid()
    assert Grid([Block([(0, HEIGHT-1)])], ActiveBlock(WIDTH // 2, HEIGHT // 2, Block([(0,0)]))).is_valid()


@pytest.mark.skip
def test_empty_is_occupied_all():
    g = Grid([], ActiveBlock(0, 0, Block([(0,0)])))
    assert not any(g.is_occupied((x, y)) for x in range(WIDTH) for y in range(HEIGHT))


@pytest.mark.skip
def test_dot_is_occupied_all():
    occupied_posn = (WIDTH // 2, HEIGHT // 2)
    unoccupied_posns = {(x, y) for x in range(WIDTH) for y in range(HEIGHT)} - {occupied_posn}
    g = Grid([Block([occupied_posn])], ActiveBlock(0, 0, Block([(0,0)])))
    assert g.is_occupied(occupied_posn)
    assert not any(g.is_occupied(p) for p in unoccupied_posns)


@pytest.mark.skip
def test_empty_clear_full_rows():
    g = Grid([], ActiveBlock(0, 0, Block([(0,0)])))
    assert g.clear_full_rows() == g


@pytest.mark.skip
def test_bottom_full_dots_clear_full_rows():
    g = Grid([Block([(x, 0)]) for x in range(WIDTH)], ActiveBlock(0, 0, Block([(0,0)])))
    assert g.clear_full_rows() == Grid([], ActiveBlock(0, 0, Block([(0,0)])))


@pytest.mark.skip
def test_bottom_two_full_dots_clear_full_rows():
    g = Grid([Block([(x, y)]) for x in range(WIDTH) for y in (0, 1)], ActiveBlock(0, 0, Block([(0,0)])))
    assert g.clear_full_rows() == Grid([], ActiveBlock(0, 0, Block([(0,0)])))


@pytest.mark.skip
def test_place_block_dot():
    g = Grid([], ActiveBlock(1, 2, Block([(3,4)])))
    assert g.place_block() == Grid([Block([(4, 6)])], None)
avg_line_length: 50.653226 | max_line_length: 114 | alphanum_fraction: 0.604203
qsc_*_quality_signal columns (41 values, schema order): 967, 6281, 3.807653, 0.077559, 0.035307, 0.098859, 0.078218, 0.864476, 0.814231, 0.756111, 0.707224, 0.689842, 0.623574, 0, 0.048536, 0.156981, 6281, 124, 115, 50.653226, 0.646837, 0.054768, 0, 0.2, 0, 0, 0.004407, 0, 0, 0, 0, 0, 0.494737, 1, 0.168421, false, 0, 0.021053, 0, 0.189474, 0
qsc_* columns without the _quality_signal suffix (41 values, schema order): 0, 0, 0, null, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0
effective: 0 | hits: 6
Record 6
hexsha: 737540640755126dd3b311ad0edf7679232da30f | size: 189 | ext: py | lang: Python
max_stars: Server/Python/src/dbs/dao/MySQL/DataTier/Insert.py @ vkuznet/DBS (head 14df8bbe8ee8f874fe423399b18afef911fe78c7), licenses ["Apache-2.0"], count 8, events 2015-08-14T04:01:32.000Z to 2021-06-03T00:56:42.000Z
max_issues: same path, repo yuyiguo/DBS, same head, licenses ["Apache-2.0"], count 162, events 2015-01-07T21:34:47.000Z to 2021-10-13T09:42:41.000Z
max_forks: same path, repo yuyiguo/DBS, same head, licenses ["Apache-2.0"], count 16, events 2015-01-22T15:27:29.000Z to 2021-04-28T09:23:28.000Z
content:
#!/usr/bin/env python
""" DAO Object for DataTiers table """
from dbs.dao.Oracle.DataTier.Insert import Insert as OraDataTierInsert


class Insert(OraDataTierInsert):
    pass
avg_line_length: 23.625 | max_line_length: 70 | alphanum_fraction: 0.708995
qsc_*_quality_signal columns (41 values, schema order): 23, 189, 5.826087, 0.826087, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.195767, 189, 7, 71, 27, 0.881579, 0.275132, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0.333333, 0.333333, 0, 0.666667, 0
qsc_* columns without the _quality_signal suffix (41 values, schema order): 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0
effective: 0 | hits: 6
Record 7
hexsha: 73b09e786099c72524a211c3c60285ec1613867a | size: 639 | ext: py | lang: Python
max_stars: sendSms.py @ rahuljain1311/AWS-S3 (head 8a6d18bd3ca956d132f32094b8fc36b74b41532f), licenses ["Apache-2.0"], count null, events null
max_issues: same path/repo/head, licenses ["Apache-2.0"], count null, events null
max_forks: same path/repo/head, licenses ["Apache-2.0"], count null, events null
content:
import boto3

# Create an SNS client
client = boto3.client(
    "sns",
    aws_access_key_id="AKIAI7R2ADC4LQS5CAEA",
    aws_secret_access_key="kSYA0ew6Tk5bUCt3MDUtESFPxORVGzV15iBNfkHE",
    region_name="us-west-2"
)

# Send your sms message.
client.publish(
    PhoneNumber="+919741381041",
    Message="123456789a123456789b123456789c123456789d123456789e123456789f123456789g123456789h123456789i123456789j123456789k123456789l123456789m123456789n123456789o123456789p123456789q123456789r123456789s123456789t123456789u123456789v123456789w123456789x123456789y123456789z123456789a123456789b123456789c123456789d123456789e123456789f123456789g"
)
avg_line_length: 39.9375 | max_line_length: 344 | alphanum_fraction: 0.868545
qsc_*_quality_signal columns (41 values, schema order): 35, 639, 15.657143, 0.742857, 0.032847, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.546689, 0.078247, 639, 15, 345, 42.6, 0.383701, 0.067293, 0, 0, 0, 0, 0.699831, 0.623946, 0, 1, 0, 0, 0, 1, 0, false, 0, 0.090909, 0, 0.090909, 0
qsc_* columns without the _quality_signal suffix (41 values, schema order): 0, 0, 1, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, null, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
effective: 0 | hits: 6
Record 8
hexsha: 73c1899a09c2a77d5a74722194a106cb075d04fe | size: 30 | ext: py | lang: Python
max_stars: src/comment/__init__.py @ mingyu-si/weibo (head f7193b076086741827af749b318094cc483994fc), licenses ["MIT"], count null, events null
max_issues: same path/repo/head, licenses ["MIT"], count null, events null
max_forks: same path/repo/head, licenses ["MIT"], count null, events null
content:
from .views import comment_bp
avg_line_length: 15 | max_line_length: 29 | alphanum_fraction: 0.833333
qsc_*_quality_signal columns (41 values, schema order): 5, 30, 4.8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.133333, 30, 1, 30, 30, 0.923077, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 0
qsc_* columns without the _quality_signal suffix (41 values, schema order): 1, 1, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
effective: 0 | hits: 6
Record 9
hexsha: 73cde88013873cdb44cd45761137ab745bb59b62 | size: 133 | ext: py | lang: Python
max_stars: pyleecan/Methods/Machine/LamSlotMag/__init__.py @ IrakozeFD/pyleecan (head 5a93bd98755d880176c1ce8ac90f36ca1b907055), licenses ["Apache-2.0"], count 95, events 2019-01-23T04:19:45.000Z to 2022-03-17T18:22:10.000Z
max_issues: same path/repo/head, licenses ["Apache-2.0"], count 366, events 2019-02-20T07:15:08.000Z to 2022-03-31T13:37:23.000Z
max_forks: same path/repo/head, licenses ["Apache-2.0"], count 74, events 2019-01-24T01:47:31.000Z to 2022-02-25T05:44:42.000Z
content:
from ....Methods.Machine.Lamination import LaminationCheckError


class LMC_SlotTooLong(LaminationCheckError):
    """ """

    pass
avg_line_length: 16.625 | max_line_length: 63 | alphanum_fraction: 0.736842
qsc_*_quality_signal columns (41 values, schema order): 11, 133, 8.818182, 0.909091, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.150376, 133, 7, 64, 19, 0.858407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0.333333, 0.333333, 0, 0.666667, 0
qsc_* columns without the _quality_signal suffix (41 values, schema order): 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0
effective: 0 | hits: 6
Record 10
hexsha: fba336bc5aadc27d733d5a6bbd122124684c22d8 | size: 33 | ext: py | lang: Python
max_stars: hashencoder/__init__.py @ VCAT19/torch-ngp (head dcbfe061b30808875a80f12a10a383b51b35f121), licenses ["MIT"], count 3, events 2022-03-04T09:16:20.000Z to 2022-03-19T02:57:01.000Z
max_issues: same path/repo/head, licenses ["MIT"], count 2, events 2022-03-08T10:54:47.000Z to 2022-03-11T08:58:18.000Z
max_forks: same path/repo/head, licenses ["MIT"], count 1, events 2022-03-21T13:43:48.000Z to 2022-03-21T13:43:48.000Z
content:
from .hashgrid import HashEncoder
avg_line_length: 33 | max_line_length: 33 | alphanum_fraction: 0.878788
qsc_*_quality_signal columns (41 values, schema order): 4, 33, 7.25, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.090909, 33, 1, 33, 33, 0.966667, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 0
qsc_* columns without the _quality_signal suffix (41 values, schema order): 1, 1, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
effective: 0 | hits: 6
Record 11
hexsha: fbc7b95cf148aede683204253b9116d11a00531a | size: 1686 | ext: py | lang: Python
max_stars: analyze/calc_run_20180115.py @ JeroenvO/pulsedpowerplasmaplots (head dd953359569826edfae321a039b6f9af2340d560), licenses ["MIT"], count null, events null
max_issues: same path/repo/head, licenses ["MIT"], count null, events null
max_forks: same path/repo/head, licenses ["MIT"], count null, events null
content:
from analyze.calc_run import *

# first final measurement for normal pulses.
d = -5  # delay
base = 'G:/Prive/MIJN-Documenten/TU/62-Stage/20180115-def1/'

# short quad nocoil
calc_run(base + 'run1',
         REACTOR_GLASS_SHORT_QUAD,
         scope_multiple=True,
         scope_file_name_index=1,
         meas=SHORT_MEAS_LEN,
         current_scaling=0.5,
         delay=d,
         voltage_offset=30)

# short quad 26uH
calc_run(base + 'run2',
         REACTOR_GLASS_SHORT_QUAD,
         scope_multiple=True,
         scope_file_name_index=1,
         meas=SHORT_MEAS_LEN,
         current_scaling=0.5,
         delay=d,
         voltage_offset=None)

# # short quad 8uH
# calc_run(base + 'run3',
#          REACTOR_GLASS_SHORT_QUAD,
#          scope_multiple=True,
#          scope_file_name_index=1,
#          meas=SHORT_MEAS_LEN,
#          current_scaling=0.5,
#          delay=d,
#          voltage_offset=None)

# short quad nocoil long meas
calc_run(base + 'run4',
         REACTOR_GLASS_SHORT_QUAD,
         scope_multiple=True,
         scope_file_name_index=1,
         meas=LONG_MEAS_LEN,
         current_scaling=0.5,
         delay=d,
         voltage_offset=30)

# long react 26uH
calc_run(base + 'run5',
         REACTOR_GLASS_LONG,
         scope_multiple=True,
         scope_file_name_index=1,
         meas=SHORT_MEAS_LEN,
         current_scaling=0.5,
         delay=d,
         voltage_offset=None)

# # long react 46 uh
# calc_run(base + 'run6',
#          REACTOR_GLASS_LONG,
#          scope_multiple=True,
#          scope_file_name_index=1,
#          meas=SHORT_MEAS_LEN,
#          current_scaling=0.5,
#          delay=d,
#          voltage_offset=None)
avg_line_length: 24.794118 | max_line_length: 60 | alphanum_fraction: 0.604389
qsc_*_quality_signal columns (41 values, schema order): 217, 1686, 4.368664, 0.262673, 0.075949, 0.06962, 0.139241, 0.707806, 0.707806, 0.707806, 0.707806, 0.707806, 0.707806, 0, 0.039932, 0.301898, 1686, 67, 61, 25.164179, 0.765506, 0.365362, 0, 0.742857, 0, 0, 0.064238, 0.048897, 0, 0, 0, 0, 0, 1, 0, false, 0, 0.028571, 0, 0.028571, 0
qsc_* columns without the _quality_signal suffix (41 values, schema order): 0, 0, 0, null, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
effective: 0 | hits: 6
Record 12
hexsha: fbde365aadbca534934b0103bb5959e0bf4ef892 | size: 274 | ext: py | lang: Python
max_stars: camp/UnstructuredGridOperators/BinaryOperators/__init__.py @ blakezim/CAMP (head a42a407dc62151ab8a7eb4be3aee1318b984502c), licenses ["MIT"], count 4, events 2021-03-02T05:18:06.000Z to 2021-11-29T16:06:39.000Z
max_issues: same path/repo/head, licenses ["MIT"], count null, events null
max_forks: same path/repo/head, licenses ["MIT"], count 1, events 2021-03-26T20:38:11.000Z to 2021-03-26T20:38:11.000Z
content:
from .CurrentsEnergyFilter import CurrentsEnergy
from .AffineCurrentsFilter import AffineCurrents
from .DeformableCurrentsFilter import DeformableCurrents
from .StitchingCurrentsFilter import StitchingCurrents
from .SingleAngleAffineCurrentsFilter import SingleAngleCurrents
avg_line_length: 54.8 | max_line_length: 64 | alphanum_fraction: 0.912409
qsc_*_quality_signal columns (41 values, schema order): 20, 274, 12.5, 0.6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.069343, 274, 5, 64, 54.8, 0.980392, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 0
qsc_* columns without the _quality_signal suffix (41 values, schema order): 1, 0, 1, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
effective: 0 | hits: 6
Record 13
hexsha: 83e1bbc75123fc04b7cd9f4dd338297a5d70a08e | size: 126431 | ext: py | lang: Python
max_stars: tests/test_nnn.py @ llinke1/TreeCorr (head 02f4c0547ac1917f77a9e1e3c55d7677fd2ec78f), licenses ["BSD-2-Clause-FreeBSD"], count 86, events 2015-02-09T05:46:13.000Z to 2022-01-12T17:00:33.000Z
max_issues: same path/repo/head, licenses ["BSD-2-Clause-FreeBSD"], count 102, events 2015-02-25T04:41:34.000Z to 2022-03-16T23:41:53.000Z
max_forks: same path/repo/head, licenses ["BSD-2-Clause-FreeBSD"], count 38, events 2015-07-20T15:14:12.000Z to 2022-03-24T06:37:01.000Z
content:
# Copyright (c) 2003-2019 by Mike Jarvis
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy as np
import treecorr
import os
import coord
import fitsio
from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog, timer
from test_helper import is_ccw, is_ccw_3d
@timer
def test_log_binning():
import math
# Test some basic properties of the base class
def check_arrays(nnn):
np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
np.testing.assert_almost_equal(nnn.ubin_size * nnn.nubins, nnn.max_u-nnn.min_u)
np.testing.assert_almost_equal(nnn.vbin_size * nnn.nvbins, nnn.max_v-nnn.min_v)
#print('logr = ',nnn.logr1d)
np.testing.assert_equal(nnn.logr1d.shape, (nnn.nbins,) )
np.testing.assert_almost_equal(nnn.logr1d[0], math.log(nnn.min_sep) + 0.5*nnn.bin_size)
np.testing.assert_almost_equal(nnn.logr1d[-1], math.log(nnn.max_sep) - 0.5*nnn.bin_size)
np.testing.assert_equal(nnn.logr.shape, (nnn.nbins, nnn.nubins, 2*nnn.nvbins) )
np.testing.assert_almost_equal(nnn.logr[:,0,0], nnn.logr1d)
np.testing.assert_almost_equal(nnn.logr[:,-1,-1], nnn.logr1d)
assert len(nnn.logr) == nnn.nbins
#print('u = ',nnn.u1d)
np.testing.assert_equal(nnn.u1d.shape, (nnn.nubins,) )
np.testing.assert_almost_equal(nnn.u1d[0], nnn.min_u + 0.5*nnn.ubin_size)
np.testing.assert_almost_equal(nnn.u1d[-1], nnn.max_u - 0.5*nnn.ubin_size)
np.testing.assert_equal(nnn.u.shape, (nnn.nbins, nnn.nubins, 2*nnn.nvbins) )
np.testing.assert_almost_equal(nnn.u[0,:,0], nnn.u1d)
np.testing.assert_almost_equal(nnn.u[-1,:,-1], nnn.u1d)
#print('v = ',nnn.v1d)
np.testing.assert_equal(nnn.v1d.shape, (2*nnn.nvbins,) )
np.testing.assert_almost_equal(nnn.v1d[0], -nnn.max_v + 0.5*nnn.vbin_size)
np.testing.assert_almost_equal(nnn.v1d[-1], nnn.max_v - 0.5*nnn.vbin_size)
np.testing.assert_almost_equal(nnn.v1d[nnn.nvbins], nnn.min_v + 0.5*nnn.vbin_size)
np.testing.assert_almost_equal(nnn.v1d[nnn.nvbins-1], -nnn.min_v - 0.5*nnn.vbin_size)
np.testing.assert_equal(nnn.v.shape, (nnn.nbins, nnn.nubins, 2*nnn.nvbins) )
np.testing.assert_almost_equal(nnn.v[0,0,:], nnn.v1d)
np.testing.assert_almost_equal(nnn.v[-1,-1,:], nnn.v1d)
def check_defaultuv(nnn):
assert nnn.min_u == 0.
assert nnn.max_u == 1.
assert nnn.nubins == np.ceil(1./nnn.ubin_size)
assert nnn.min_v == 0.
assert nnn.max_v == 1.
assert nnn.nvbins == np.ceil(1./nnn.vbin_size)
# Check the different ways to set up the binning:
# Omit bin_size
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, bin_type='LogRUV')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.nbins == 20
check_defaultuv(nnn)
check_arrays(nnn)
# Specify min, max, n for u,v too.
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20,
min_u=0.2, max_u=0.9, nubins=12,
min_v=0., max_v=0.2, nvbins=2)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.nbins == 20
assert nnn.min_u == 0.2
assert nnn.max_u == 0.9
assert nnn.nubins == 12
assert nnn.min_v == 0.
assert nnn.max_v == 0.2
assert nnn.nvbins == 2
check_arrays(nnn)
# Omit min_sep
nnn = treecorr.NNNCorrelation(max_sep=20, nbins=20, bin_size=0.1)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size == 0.1
assert nnn.max_sep == 20.
assert nnn.nbins == 20
check_defaultuv(nnn)
check_arrays(nnn)
# Specify max, n, bs for u,v too.
nnn = treecorr.NNNCorrelation(max_sep=20, nbins=20, bin_size=0.1,
max_u=0.9, nubins=3, ubin_size=0.05,
max_v=0.4, nvbins=4, vbin_size=0.05)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size == 0.1
assert nnn.max_sep == 20.
assert nnn.nbins == 20
assert np.isclose(nnn.ubin_size, 0.05)
assert np.isclose(nnn.min_u, 0.75)
assert nnn.max_u == 0.9
assert nnn.nubins == 3
assert np.isclose(nnn.vbin_size, 0.05)
assert np.isclose(nnn.min_v, 0.2)
assert nnn.max_v == 0.4
assert nnn.nvbins == 4
check_arrays(nnn)
# Omit max_sep
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=20, bin_size=0.1)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size == 0.1
assert nnn.min_sep == 5.
assert nnn.nbins == 20
check_defaultuv(nnn)
check_arrays(nnn)
# Specify min, n, bs for u,v too.
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=20, bin_size=0.1,
min_u=0.7, nubins=4, ubin_size=0.05,
min_v=0.2, nvbins=4, vbin_size=0.05)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_sep == 5.
assert nnn.bin_size == 0.1
assert nnn.nbins == 20
assert nnn.min_u == 0.7
assert np.isclose(nnn.ubin_size, 0.05)
assert nnn.nubins == 4
assert nnn.min_v == 0.2
assert nnn.max_v == 0.4
assert np.isclose(nnn.vbin_size, 0.05)
assert nnn.nvbins == 4
check_arrays(nnn)
# Omit nbins
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size <= 0.1
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
check_defaultuv(nnn)
check_arrays(nnn)
# Specify min, max, bs for u,v too.
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.2, max_u=0.9, ubin_size=0.03,
min_v=0.1, max_v=0.3, vbin_size=0.07)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.bin_size <= 0.1
assert nnn.min_u == 0.2
assert nnn.max_u == 0.9
assert nnn.nubins == 24
assert np.isclose(nnn.ubin_size, 0.7/24)
assert nnn.min_v == 0.1
assert nnn.max_v == 0.3
assert nnn.nvbins == 3
assert np.isclose(nnn.vbin_size, 0.2/3)
check_arrays(nnn)
# If only one of min/max v are set, respect that
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.2, ubin_size=0.03,
min_v=0.2, vbin_size=0.07)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_u == 0.2
assert nnn.max_u == 1.
assert nnn.nubins == 27
assert np.isclose(nnn.ubin_size, 0.8/27)
assert nnn.min_v == 0.2
assert nnn.max_v == 1.
assert nnn.nvbins == 12
assert np.isclose(nnn.vbin_size, 0.8/12)
check_arrays(nnn)
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
max_u=0.2, ubin_size=0.03,
max_v=0.2, vbin_size=0.07)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_u == 0.
assert nnn.max_u == 0.2
assert nnn.nubins == 7
assert np.isclose(nnn.ubin_size, 0.2/7)
assert nnn.min_v == 0.
assert nnn.max_v == 0.2
assert nnn.nvbins == 3
assert np.isclose(nnn.vbin_size, 0.2/3)
check_arrays(nnn)
# If only vbin_size is set for v, automatically figure out others.
# (And if necessary adjust the bin_size down a bit.)
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
ubin_size=0.3, vbin_size=0.3)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size <= 0.1
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.min_u == 0.
assert nnn.max_u == 1.
assert nnn.nubins == 4
assert np.isclose(nnn.ubin_size, 0.25)
assert nnn.min_v == 0.
assert nnn.max_v == 1.
assert nnn.nvbins == 4
assert np.isclose(nnn.vbin_size, 0.25)
check_arrays(nnn)
# If only nvbins is set for v, automatically figure out others.
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
nubins=5, nvbins=5)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size <= 0.1
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.min_u == 0.
assert nnn.max_u == 1.
assert nnn.nubins == 5
assert np.isclose(nnn.ubin_size,0.2)
assert nnn.min_v == 0.
assert nnn.max_v == 1.
assert nnn.nvbins == 5
assert np.isclose(nnn.vbin_size,0.2)
check_arrays(nnn)
# If both nvbins and vbin_size are set, set min/max automatically
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
ubin_size=0.1, nubins=5,
vbin_size=0.1, nvbins=5)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size <= 0.1
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.ubin_size == 0.1
assert nnn.nubins == 5
assert nnn.max_u == 1.
assert np.isclose(nnn.min_u,0.5)
assert nnn.vbin_size == 0.1
assert nnn.nvbins == 5
assert nnn.min_v == 0.
assert np.isclose(nnn.max_v,0.5)
check_arrays(nnn)
assert_raises(TypeError, treecorr.NNNCorrelation)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5)
assert_raises(TypeError, treecorr.NNNCorrelation, max_sep=20)
assert_raises(TypeError, treecorr.NNNCorrelation, bin_size=0.1)
assert_raises(TypeError, treecorr.NNNCorrelation, nbins=20)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, max_sep=20)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, bin_size=0.1)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, nbins=20)
assert_raises(TypeError, treecorr.NNNCorrelation, max_sep=20, bin_size=0.1)
assert_raises(TypeError, treecorr.NNNCorrelation, max_sep=20, nbins=20)
assert_raises(TypeError, treecorr.NNNCorrelation, bin_size=0.1, nbins=20)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1, nbins=20)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, bin_size=0.1)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20,
bin_type='Log')
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20,
bin_type='Linear')
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20,
bin_type='TwoD')
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20,
bin_type='Invalid')
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.3, max_u=0.9, ubin_size=0.1, nubins=6)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.9, max_u=0.3)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_u=-0.1, max_u=0.3)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.1, max_u=1.3)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_v=0.1, max_v=0.9, vbin_size=0.1, nvbins=9)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_v=0.9, max_v=0.3)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_v=-0.1, max_v=0.3)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_v=0.1, max_v=1.3)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20,
split_method='invalid')
# Check the use of sep_units
# radians
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='radians')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
np.testing.assert_almost_equal(nnn.min_sep, 5.)
np.testing.assert_almost_equal(nnn.max_sep, 20.)
np.testing.assert_almost_equal(nnn._min_sep, 5.)
np.testing.assert_almost_equal(nnn._max_sep, 20.)
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.nbins == 20
check_defaultuv(nnn)
check_arrays(nnn)
# arcsec
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='arcsec')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
np.testing.assert_almost_equal(nnn.min_sep, 5.)
np.testing.assert_almost_equal(nnn.max_sep, 20.)
np.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/180/3600)
np.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/180/3600)
assert nnn.nbins == 20
np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
# Note that logr is in the separation units, not radians.
np.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size)
np.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size)
assert len(nnn.logr) == nnn.nbins
check_defaultuv(nnn)
# arcmin
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='arcmin')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
np.testing.assert_almost_equal(nnn.min_sep, 5.)
np.testing.assert_almost_equal(nnn.max_sep, 20.)
np.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/180/60)
np.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/180/60)
assert nnn.nbins == 20
np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
np.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size)
np.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size)
assert len(nnn.logr) == nnn.nbins
check_defaultuv(nnn)
# degrees
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='degrees')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
np.testing.assert_almost_equal(nnn.min_sep, 5.)
np.testing.assert_almost_equal(nnn.max_sep, 20.)
np.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/180)
np.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/180)
assert nnn.nbins == 20
np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
np.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size)
np.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size)
assert len(nnn.logr) == nnn.nbins
check_defaultuv(nnn)
# hours
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='hours')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
np.testing.assert_almost_equal(nnn.min_sep, 5.)
np.testing.assert_almost_equal(nnn.max_sep, 20.)
np.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/12)
np.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/12)
assert nnn.nbins == 20
np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
np.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size)
np.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size)
assert len(nnn.logr) == nnn.nbins
check_defaultuv(nnn)
# Check bin_slop
# Start with default behavior
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 1.0
assert nnn.bin_size == 0.1
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.1)
np.testing.assert_almost_equal(nnn.bu, 0.03)
np.testing.assert_almost_equal(nnn.bv, 0.07)
# Explicitly set bin_slop=1.0 does the same thing.
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, bin_slop=1.0,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 1.0
assert nnn.bin_size == 0.1
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.1)
np.testing.assert_almost_equal(nnn.bu, 0.03)
np.testing.assert_almost_equal(nnn.bv, 0.07)
# Use a smaller bin_slop
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, bin_slop=0.2,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 0.2
assert nnn.bin_size == 0.1
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.02)
np.testing.assert_almost_equal(nnn.bu, 0.006)
np.testing.assert_almost_equal(nnn.bv, 0.014)
# Use bin_slop == 0
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, bin_slop=0.0,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 0.0
assert nnn.bin_size == 0.1
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.0)
np.testing.assert_almost_equal(nnn.bu, 0.0)
np.testing.assert_almost_equal(nnn.bv, 0.0)
# Bigger bin_slop
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, bin_slop=2.0,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07, verbose=0)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 2.0
assert nnn.bin_size == 0.1
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.2)
np.testing.assert_almost_equal(nnn.bu, 0.06)
np.testing.assert_almost_equal(nnn.bv, 0.14)
# With bin_size > 0.1, explicit bin_slop=1.0 is accepted.
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.4, bin_slop=1.0,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07, verbose=0)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 1.0
assert nnn.bin_size == 0.4
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.4)
np.testing.assert_almost_equal(nnn.bu, 0.03)
np.testing.assert_almost_equal(nnn.bv, 0.07)
# But implicit bin_slop is reduced so that b = 0.1
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.4,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_size == 0.4
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.1)
np.testing.assert_almost_equal(nnn.bu, 0.03)
np.testing.assert_almost_equal(nnn.bv, 0.07)
np.testing.assert_almost_equal(nnn.bin_slop, 0.25)
# Separately for each of the three parameters
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.05,
min_u=0., max_u=0.9, ubin_size=0.3,
min_v=0., max_v=0.17, vbin_size=0.17)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_size == 0.05
assert np.isclose(nnn.ubin_size, 0.3)
assert np.isclose(nnn.vbin_size, 0.17)
np.testing.assert_almost_equal(nnn.b, 0.05)
np.testing.assert_almost_equal(nnn.bu, 0.1)
np.testing.assert_almost_equal(nnn.bv, 0.1)
np.testing.assert_almost_equal(nnn.bin_slop, 1.0) # The stored bin_slop is just for lnr
@timer
def test_direct_count_auto():
# If the catalogs are small enough, we can do a direct count of the number of triangles
# to see if comes out right. This should exactly match the treecorr code if bin_slop=0.
ngal = 50
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) )
cat = treecorr.Catalog(x=x, y=y)
min_sep = 1.
max_sep = 50.
nbins = 50
min_u = 0.13
max_u = 0.89
nubins = 10
min_v = 0.13
max_v = 0.59
nvbins = 10
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=1)
ddd.process(cat)
log_min_sep = np.log(min_sep)
log_max_sep = np.log(max_sep)
true_ntri = np.zeros( (nbins, nubins, 2*nvbins) )
bin_size = (log_max_sep - log_min_sep) / nbins
ubin_size = (max_u-min_u) / nubins
vbin_size = (max_v-min_v) / nvbins
for i in range(ngal):
for j in range(i+1,ngal):
for k in range(j+1,ngal):
dij = np.sqrt((x[i]-x[j])**2 + (y[i]-y[j])**2)
dik = np.sqrt((x[i]-x[k])**2 + (y[i]-y[k])**2)
djk = np.sqrt((x[j]-x[k])**2 + (y[j]-y[k])**2)
if dij == 0.: continue
if dik == 0.: continue
if djk == 0.: continue
if dij < dik:
if dik < djk:
d3 = dij; d2 = dik; d1 = djk
ccw = is_ccw(x[i],y[i],x[j],y[j],x[k],y[k])
elif dij < djk:
d3 = dij; d2 = djk; d1 = dik
ccw = is_ccw(x[j],y[j],x[i],y[i],x[k],y[k])
else:
d3 = djk; d2 = dij; d1 = dik
ccw = is_ccw(x[j],y[j],x[k],y[k],x[i],y[i])
else:
if dij < djk:
d3 = dik; d2 = dij; d1 = djk
ccw = is_ccw(x[i],y[i],x[k],y[k],x[j],y[j])
elif dik < djk:
d3 = dik; d2 = djk; d1 = dij
ccw = is_ccw(x[k],y[k],x[i],y[i],x[j],y[j])
else:
d3 = djk; d2 = dik; d1 = dij
ccw = is_ccw(x[k],y[k],x[j],y[j],x[i],y[i])
r = d2
u = d3/d2
v = (d1-d2)/d3
if r < min_sep or r >= max_sep: continue
if u < min_u or u >= max_u: continue
if v < min_v or v >= max_v: continue
if not ccw:
v = -v
kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size ))
ku = int(np.floor( (u-min_u) / ubin_size ))
if v > 0:
kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins
else:
kv = int(np.floor( (v-(-max_v)) / vbin_size ))
assert 0 <= kr < nbins
assert 0 <= ku < nubins
assert 0 <= kv < 2*nvbins
true_ntri[kr,ku,kv] += 1
nz = np.where((ddd.ntri > 0) | (true_ntri > 0))
print('non-zero at:')
print(nz)
print('d1 = ',ddd.meand1[nz])
print('d2 = ',ddd.meand2[nz])
print('d3 = ',ddd.meand3[nz])
print('rnom = ',ddd.rnom[nz])
print('u = ',ddd.u[nz])
print('v = ',ddd.v[nz])
print('ddd.ntri = ',ddd.ntri[nz])
print('true_ntri = ',true_ntri[nz])
print('diff = ',ddd.ntri[nz] - true_ntri[nz])
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# Check that running via the corr3 script works correctly.
file_name = os.path.join('data','nnn_direct_data.dat')
with open(file_name, 'w') as fid:
for i in range(ngal):
fid.write(('%.20f %.20f\n')%(x[i],y[i]))
L = 10*s
nrand = ngal
rx = (rng.random_sample(nrand)-0.5) * L
ry = (rng.random_sample(nrand)-0.5) * L
rcat = treecorr.Catalog(x=rx, y=ry)
rand_file_name = os.path.join('data','nnn_direct_rand.dat')
with open(rand_file_name, 'w') as fid:
for i in range(nrand):
fid.write(('%.20f %.20f\n')%(rx[i],ry[i]))
rrr = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=0, rng=rng)
rrr.process(rcat)
zeta, varzeta = ddd.calculateZeta(rrr)
# Semi-gratuitous check of BinnedCorr3.rng access.
assert rrr.rng is rng
assert ddd.rng is not rng
# First do this via the corr3 function.
config = treecorr.config.read_config('configs/nnn_direct.yaml')
logger = treecorr.config.setup_logger(0)
treecorr.corr3(config, logger)
corr3_output = np.genfromtxt(os.path.join('output','nnn_direct.out'), names=True,
skip_header=1)
print('corr3_output = ',corr3_output)
print('corr3_output.dtype = ',corr3_output.dtype)
print('rnom = ',ddd.rnom.flatten())
print(' ',corr3_output['r_nom'])
np.testing.assert_allclose(corr3_output['r_nom'], ddd.rnom.flatten(), rtol=1.e-3)
print('unom = ',ddd.u.flatten())
print(' ',corr3_output['u_nom'])
np.testing.assert_allclose(corr3_output['u_nom'], ddd.u.flatten(), rtol=1.e-3)
print('vnom = ',ddd.v.flatten())
print(' ',corr3_output['v_nom'])
np.testing.assert_allclose(corr3_output['v_nom'], ddd.v.flatten(), rtol=1.e-3)
print('DDD = ',ddd.ntri.flatten())
print(' ',corr3_output['DDD'])
np.testing.assert_allclose(corr3_output['DDD'], ddd.ntri.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['ntri'], ddd.ntri.flatten(), rtol=1.e-3)
print('RRR = ',rrr.ntri.flatten())
print(' ',corr3_output['RRR'])
np.testing.assert_allclose(corr3_output['RRR'], rrr.ntri.flatten(), rtol=1.e-3)
print('zeta = ',zeta.flatten())
print('from corr3 output = ',corr3_output['zeta'])
print('diff = ',corr3_output['zeta']-zeta.flatten())
diff_index = np.where(np.abs(corr3_output['zeta']-zeta.flatten()) > 1.e-5)[0]
print('different at ',diff_index)
print('zeta[diffs] = ',zeta.flatten()[diff_index])
print('corr3.zeta[diffs] = ',corr3_output['zeta'][diff_index])
print('diff[diffs] = ',zeta.flatten()[diff_index] - corr3_output['zeta'][diff_index])
np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['sigma_zeta'], np.sqrt(varzeta).flatten(), rtol=1.e-3)
# Now calling out to the external corr3 executable.
# This is the only time we test the corr3 executable. All other tests use the corr3 function.
import subprocess
corr3_exe = get_script_name('corr3')
p = subprocess.Popen( [corr3_exe,"configs/nnn_direct.yaml","verbose=0"] )
p.communicate()
corr3_output = np.genfromtxt(os.path.join('output','nnn_direct.out'), names=True,
skip_header=1)
np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=1.e-3)
# Also check compensated
drr = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=0)
rdd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=0)
drr.process(cat, rcat)
rdd.process(rcat, cat)
zeta, varzeta = ddd.calculateZeta(rrr,drr,rdd)
config['nnn_statistic'] = 'compensated'
treecorr.corr3(config, logger)
corr3_output = np.genfromtxt(os.path.join('output','nnn_direct.out'), names=True, skip_header=1)
np.testing.assert_allclose(corr3_output['r_nom'], ddd.rnom.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['u_nom'], ddd.u.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['v_nom'], ddd.v.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['DDD'], ddd.ntri.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['ntri'], ddd.ntri.flatten(), rtol=1.e-3)
print('rrr.tot = ',rrr.tot)
print('ddd.tot = ',ddd.tot)
print('drr.tot = ',drr.tot)
print('rdd.tot = ',rdd.tot)
rrrf = ddd.tot / rrr.tot
drrf = ddd.tot / drr.tot
rddf = ddd.tot / rdd.tot
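# The corr3 output rescales each raw count by ddd.tot/xxx.tot, so that DDD, DRR,
# RDD, and RRR all refer to the same effective total number of triples; apply the
# same factors here before comparing.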
np.testing.assert_allclose(corr3_output['RRR'], rrr.ntri.flatten() * rrrf, rtol=1.e-3)
np.testing.assert_allclose(corr3_output['DRR'], drr.ntri.flatten() * drrf, rtol=1.e-3)
np.testing.assert_allclose(corr3_output['RDD'], rdd.ntri.flatten() * rddf, rtol=1.e-3)
np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['sigma_zeta'], np.sqrt(varzeta).flatten(), rtol=1.e-3)
# Repeat with bin_slop = 0, since the code flow is different from brute=True
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1)
ddd.process(cat)
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# And again with no top-level recursion
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1, max_top=0)
ddd.process(cat)
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# And compare to the cross correlation
# Here, we get 6x as much, since each triangle is discovered 6 times.
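# (With three identical catalogs, the 3! = 6 orderings of the same three points
# across the catalog slots are all counted separately.)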
ddd.clear()
ddd.process(cat,cat,cat, num_threads=2)
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, 6*true_ntri)
# With the real CrossCorrelation class, each of the 6 correlations should end up being
# the same thing (without the extra factor of 6).
dddc = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1, max_top=0)
dddc.process(cat,cat,cat, num_threads=2)
# All 6 correlations are equal.
for d in [dddc.n1n2n3, dddc.n1n3n2, dddc.n2n1n3, dddc.n2n3n1, dddc.n3n1n2, dddc.n3n2n1]:
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(d.ntri, true_ntri)
# Or with the 2-argument version, which finds each triangle 3 times.
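# (The factor of 3 counts which vertex is taken from the first catalog; the
# other two both come from the second.)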
ddd.process(cat,cat, num_threads=2)
np.testing.assert_array_equal(ddd.ntri, 3*true_ntri)
# Again, NNNCrossCorrelation gets it right in each permutation.
dddc.process(cat,cat, num_threads=2)
for d in [dddc.n1n2n3, dddc.n1n3n2, dddc.n2n1n3, dddc.n2n3n1, dddc.n3n1n2, dddc.n3n2n1]:
np.testing.assert_array_equal(d.ntri, true_ntri)
# Invalid to omit file_name
config['verbose'] = 0
del config['file_name']
with assert_raises(TypeError):
treecorr.corr3(config)
config['file_name'] = 'data/nnn_direct_data.dat'
# OK to not have rand_file_name
# Also, check the automatic setting of output_dots=True when verbose=2.
# The dots output is not too annoying if we also set max_top = 0.
del config['rand_file_name']
config['verbose'] = 2
config['max_top'] = 0
treecorr.corr3(config)
data = np.genfromtxt(config['nnn_file_name'], names=True, skip_header=1)
np.testing.assert_array_equal(data['ntri'], true_ntri.flatten())
assert 'zeta' not in data.dtype.names
# Check a few basic operations with a NNNCorrelation object.
do_pickle(ddd)
ddd2 = ddd.copy()
ddd2 += ddd
np.testing.assert_allclose(ddd2.ntri, 2*ddd.ntri)
np.testing.assert_allclose(ddd2.weight, 2*ddd.weight)
np.testing.assert_allclose(ddd2.meand1, 2*ddd.meand1)
np.testing.assert_allclose(ddd2.meand2, 2*ddd.meand2)
np.testing.assert_allclose(ddd2.meand3, 2*ddd.meand3)
np.testing.assert_allclose(ddd2.meanlogd1, 2*ddd.meanlogd1)
np.testing.assert_allclose(ddd2.meanlogd2, 2*ddd.meanlogd2)
np.testing.assert_allclose(ddd2.meanlogd3, 2*ddd.meanlogd3)
np.testing.assert_allclose(ddd2.meanu, 2*ddd.meanu)
np.testing.assert_allclose(ddd2.meanv, 2*ddd.meanv)
ddd2.clear()
ddd2 += ddd
np.testing.assert_allclose(ddd2.ntri, ddd.ntri)
np.testing.assert_allclose(ddd2.weight, ddd.weight)
np.testing.assert_allclose(ddd2.meand1, ddd.meand1)
np.testing.assert_allclose(ddd2.meand2, ddd.meand2)
np.testing.assert_allclose(ddd2.meand3, ddd.meand3)
np.testing.assert_allclose(ddd2.meanlogd1, ddd.meanlogd1)
np.testing.assert_allclose(ddd2.meanlogd2, ddd.meanlogd2)
np.testing.assert_allclose(ddd2.meanlogd3, ddd.meanlogd3)
np.testing.assert_allclose(ddd2.meanu, ddd.meanu)
np.testing.assert_allclose(ddd2.meanv, ddd.meanv)
ascii_name = 'output/nnn_ascii.txt'
ddd.write(ascii_name, precision=16)
ddd3 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
ddd3.read(ascii_name)
np.testing.assert_allclose(ddd3.ntri, ddd.ntri)
np.testing.assert_allclose(ddd3.weight, ddd.weight)
np.testing.assert_allclose(ddd3.meand1, ddd.meand1)
np.testing.assert_allclose(ddd3.meand2, ddd.meand2)
np.testing.assert_allclose(ddd3.meand3, ddd.meand3)
np.testing.assert_allclose(ddd3.meanlogd1, ddd.meanlogd1)
np.testing.assert_allclose(ddd3.meanlogd2, ddd.meanlogd2)
np.testing.assert_allclose(ddd3.meanlogd3, ddd.meanlogd3)
np.testing.assert_allclose(ddd3.meanu, ddd.meanu)
np.testing.assert_allclose(ddd3.meanv, ddd.meanv)
with assert_raises(TypeError):
ddd2 += config
ddd4 = treecorr.NNNCorrelation(min_sep=min_sep/2, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd4
ddd5 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep*2, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd5
ddd6 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins*2,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd6
ddd7 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u-0.1, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd7
ddd8 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u+0.1, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd8
ddd9 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins*2,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd9
ddd10 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v-0.1, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd10
ddd11 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v+0.1, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd11
ddd12 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins*2)
with assert_raises(ValueError):
ddd2 += ddd12
# Check that adding results with different coords or metric emits a warning.
cat2 = treecorr.Catalog(x=x, y=y, z=x)
with CaptureLog() as cl:
ddd13 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
logger=cl.logger)
ddd13.process_auto(cat2)
ddd13 += ddd2
print(cl.output)
assert "Detected a change in catalog coordinate systems" in cl.output
with CaptureLog() as cl:
ddd14 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
logger=cl.logger)
ddd14.process_auto(cat2, metric='Arc')
ddd14 += ddd2
assert "Detected a change in metric" in cl.output
fits_name = 'output/nnn_fits.fits'
ddd.write(fits_name)
ddd15 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
ddd15.read(fits_name)
np.testing.assert_allclose(ddd15.ntri, ddd.ntri)
np.testing.assert_allclose(ddd15.weight, ddd.weight)
np.testing.assert_allclose(ddd15.meand1, ddd.meand1)
np.testing.assert_allclose(ddd15.meand2, ddd.meand2)
np.testing.assert_allclose(ddd15.meand3, ddd.meand3)
np.testing.assert_allclose(ddd15.meanlogd1, ddd.meanlogd1)
np.testing.assert_allclose(ddd15.meanlogd2, ddd.meanlogd2)
np.testing.assert_allclose(ddd15.meanlogd3, ddd.meanlogd3)
np.testing.assert_allclose(ddd15.meanu, ddd.meanu)
np.testing.assert_allclose(ddd15.meanv, ddd.meanv)
@timer
def test_direct_count_cross():
# If the catalogs are small enough, we can do a direct count of the number of triangles
# to see if it comes out right. This should exactly match the treecorr code if brute=True.
ngal = 50
s = 10.
rng = np.random.RandomState(8675309)
x1 = rng.normal(0,s, (ngal,) )
y1 = rng.normal(0,s, (ngal,) )
cat1 = treecorr.Catalog(x=x1, y=y1)
x2 = rng.normal(0,s, (ngal,) )
y2 = rng.normal(0,s, (ngal,) )
cat2 = treecorr.Catalog(x=x2, y=y2)
x3 = rng.normal(0,s, (ngal,) )
y3 = rng.normal(0,s, (ngal,) )
cat3 = treecorr.Catalog(x=x3, y=y3)
min_sep = 1.
max_sep = 50.
nbins = 50
min_u = 0.13
max_u = 0.89
nubins = 10
min_v = 0.13
max_v = 0.59
nvbins = 10
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=1)
ddd.process(cat1, cat2, cat3)
#print('ddd.ntri = ',ddd.ntri)
log_min_sep = np.log(min_sep)
log_max_sep = np.log(max_sep)
true_ntri_123 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_132 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_213 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_231 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_312 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_321 = np.zeros( (nbins, nubins, 2*nvbins) )
bin_size = (log_max_sep - log_min_sep) / nbins
ubin_size = (max_u-min_u) / nubins
vbin_size = (max_v-min_v) / nvbins
for i in range(ngal):
for j in range(ngal):
for k in range(ngal):
dij = np.sqrt((x1[i]-x2[j])**2 + (y1[i]-y2[j])**2)
dik = np.sqrt((x1[i]-x3[k])**2 + (y1[i]-y3[k])**2)
djk = np.sqrt((x2[j]-x3[k])**2 + (y2[j]-y3[k])**2)
if dij == 0.: continue
if dik == 0.: continue
if djk == 0.: continue
if dij < dik:
if dik < djk:
d3 = dij; d2 = dik; d1 = djk
ccw = is_ccw(x1[i],y1[i],x2[j],y2[j],x3[k],y3[k])
true_ntri = true_ntri_123
elif dij < djk:
d3 = dij; d2 = djk; d1 = dik
ccw = is_ccw(x2[j],y2[j],x1[i],y1[i],x3[k],y3[k])
true_ntri = true_ntri_213
else:
d3 = djk; d2 = dij; d1 = dik
ccw = is_ccw(x2[j],y2[j],x3[k],y3[k],x1[i],y1[i])
true_ntri = true_ntri_231
else:
if dij < djk:
d3 = dik; d2 = dij; d1 = djk
ccw = is_ccw(x1[i],y1[i],x3[k],y3[k],x2[j],y2[j])
true_ntri = true_ntri_132
elif dik < djk:
d3 = dik; d2 = djk; d1 = dij
ccw = is_ccw(x3[k],y3[k],x1[i],y1[i],x2[j],y2[j])
true_ntri = true_ntri_312
else:
d3 = djk; d2 = dik; d1 = dij
ccw = is_ccw(x3[k],y3[k],x2[j],y2[j],x1[i],y1[i])
true_ntri = true_ntri_321
r = d2
u = d3/d2
v = (d1-d2)/d3
if r < min_sep or r >= max_sep: continue
if u < min_u or u >= max_u: continue
if v < min_v or v >= max_v: continue
if not ccw:
v = -v
kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size ))
ku = int(np.floor( (u-min_u) / ubin_size ))
if v > 0:
kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins
else:
kv = int(np.floor( (v-(-max_v)) / vbin_size ))
assert 0 <= kr < nbins
assert 0 <= ku < nubins
assert 0 <= kv < 2*nvbins
true_ntri[kr,ku,kv] += 1
# With the regular NNNCorrelation class, we end up with the sum of all permutations.
true_ntri_sum = true_ntri_123 + true_ntri_132 + true_ntri_213 + true_ntri_231 +\
true_ntri_312 + true_ntri_321
#print('true_ntri = ',true_ntri_sum)
#print('diff = ',ddd.ntri - true_ntri_sum)
np.testing.assert_array_equal(ddd.ntri, true_ntri_sum)
# Now repeat with the full CrossCorrelation class, which distinguishes the permutations.
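# Its six attributes n1n2n3 ... n3n2n1 each hold the counts for one ordering of
# the three catalogs, matching the true_ntri_* arrays accumulated above.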
dddc = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=1)
dddc.process(cat1, cat2, cat3)
#print('true_ntri_123 = ',true_ntri_123)
#print('diff = ',dddc.n1n2n3.ntri - true_ntri_123)
np.testing.assert_array_equal(dddc.n1n2n3.ntri, true_ntri_123)
np.testing.assert_array_equal(dddc.n1n3n2.ntri, true_ntri_132)
np.testing.assert_array_equal(dddc.n2n1n3.ntri, true_ntri_213)
np.testing.assert_array_equal(dddc.n2n3n1.ntri, true_ntri_231)
np.testing.assert_array_equal(dddc.n3n1n2.ntri, true_ntri_312)
np.testing.assert_array_equal(dddc.n3n2n1.ntri, true_ntri_321)
# Repeat with bin_slop = 0
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1)
ddd.process(cat1, cat2, cat3)
#print('bin_slop = 0: ddd.ntri = ',ddd.ntri)
#print('diff = ',ddd.ntri - true_ntri_sum)
np.testing.assert_array_equal(ddd.ntri, true_ntri_sum)
# And again with no top-level recursion
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1, max_top=0)
ddd.process(cat1, cat2, cat3)
#print('max_top = 0: ddd.ntri = ',ddd.ntri)
#print('true_ntri = ',true_ntri_sum)
#print('diff = ',ddd.ntri - true_ntri_sum)
np.testing.assert_array_equal(ddd.ntri, true_ntri_sum)
# Error to have cat3, but not cat2
with assert_raises(ValueError):
ddd.process(cat1, cat3=cat3)
# Check a few basic operations with an NNNCrossCorrelation object.
do_pickle(dddc)
dddc2 = dddc.copy()
dddc2 += dddc
for perm in ['n1n2n3', 'n1n3n2', 'n2n1n3', 'n2n3n1', 'n3n1n2', 'n3n2n1']:
d2 = getattr(dddc2, perm)
d1 = getattr(dddc, perm)
np.testing.assert_allclose(d2.ntri, 2*d1.ntri)
np.testing.assert_allclose(d2.meand1, 2*d1.meand1)
np.testing.assert_allclose(d2.meand2, 2*d1.meand2)
np.testing.assert_allclose(d2.meand3, 2*d1.meand3)
np.testing.assert_allclose(d2.meanlogd1, 2*d1.meanlogd1)
np.testing.assert_allclose(d2.meanlogd2, 2*d1.meanlogd2)
np.testing.assert_allclose(d2.meanlogd3, 2*d1.meanlogd3)
np.testing.assert_allclose(d2.meanu, 2*d1.meanu)
np.testing.assert_allclose(d2.meanv, 2*d1.meanv)
dddc2.clear()
dddc2 += dddc
for perm in ['n1n2n3', 'n1n3n2', 'n2n1n3', 'n2n3n1', 'n3n1n2', 'n3n2n1']:
d2 = getattr(dddc2, perm)
d1 = getattr(dddc, perm)
np.testing.assert_allclose(d2.ntri, d1.ntri)
np.testing.assert_allclose(d2.meand1, d1.meand1)
np.testing.assert_allclose(d2.meand2, d1.meand2)
np.testing.assert_allclose(d2.meand3, d1.meand3)
np.testing.assert_allclose(d2.meanlogd1, d1.meanlogd1)
np.testing.assert_allclose(d2.meanlogd2, d1.meanlogd2)
np.testing.assert_allclose(d2.meanlogd3, d1.meanlogd3)
np.testing.assert_allclose(d2.meanu, d1.meanu)
np.testing.assert_allclose(d2.meanv, d1.meanv)
with assert_raises(TypeError):
dddc2 += {} # not an NNNCrossCorrelation
with assert_raises(TypeError):
dddc2 += ddd # not an NNNCrossCorrelation
dddc4 = treecorr.NNNCrossCorrelation(min_sep=min_sep/2, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
dddc2 += dddc4 # binning doesn't match
# Test I/O
ascii_name = 'output/nnnc_ascii.txt'
dddc.write(ascii_name, precision=16)
dddc3 = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
dddc3.read(ascii_name)
for perm in ['n1n2n3', 'n1n3n2', 'n2n1n3', 'n2n3n1', 'n3n1n2', 'n3n2n1']:
d2 = getattr(dddc3, perm)
d1 = getattr(dddc, perm)
np.testing.assert_allclose(d2.ntri, d1.ntri)
np.testing.assert_allclose(d2.meand1, d1.meand1)
np.testing.assert_allclose(d2.meand2, d1.meand2)
np.testing.assert_allclose(d2.meand3, d1.meand3)
np.testing.assert_allclose(d2.meanlogd1, d1.meanlogd1)
np.testing.assert_allclose(d2.meanlogd2, d1.meanlogd2)
np.testing.assert_allclose(d2.meanlogd3, d1.meanlogd3)
np.testing.assert_allclose(d2.meanu, d1.meanu)
np.testing.assert_allclose(d2.meanv, d1.meanv)
fits_name = 'output/nnnc_fits.fits'
dddc.write(fits_name)
dddc4 = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
dddc4.read(fits_name)
for perm in ['n1n2n3', 'n1n3n2', 'n2n1n3', 'n2n3n1', 'n3n1n2', 'n3n2n1']:
d2 = getattr(dddc4, perm)
d1 = getattr(dddc, perm)
np.testing.assert_allclose(d2.ntri, d1.ntri)
np.testing.assert_allclose(d2.meand1, d1.meand1)
np.testing.assert_allclose(d2.meand2, d1.meand2)
np.testing.assert_allclose(d2.meand3, d1.meand3)
np.testing.assert_allclose(d2.meanlogd1, d1.meanlogd1)
np.testing.assert_allclose(d2.meanlogd2, d1.meanlogd2)
np.testing.assert_allclose(d2.meanlogd3, d1.meanlogd3)
np.testing.assert_allclose(d2.meanu, d1.meanu)
np.testing.assert_allclose(d2.meanv, d1.meanv)
try:
import h5py
except ImportError:
print('Skipping hdf5 output file, since h5py not installed.')
return
hdf5_name = 'output/nnnc_hdf5.hdf5'
dddc.write(hdf5_name)
dddc5 = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
dddc5.read(hdf5_name)
for perm in ['n1n2n3', 'n1n3n2', 'n2n1n3', 'n2n3n1', 'n3n1n2', 'n3n2n1']:
d2 = getattr(dddc5, perm)
d1 = getattr(dddc, perm)
np.testing.assert_allclose(d2.ntri, d1.ntri)
np.testing.assert_allclose(d2.meand1, d1.meand1)
np.testing.assert_allclose(d2.meand2, d1.meand2)
np.testing.assert_allclose(d2.meand3, d1.meand3)
np.testing.assert_allclose(d2.meanlogd1, d1.meanlogd1)
np.testing.assert_allclose(d2.meanlogd2, d1.meanlogd2)
np.testing.assert_allclose(d2.meanlogd3, d1.meanlogd3)
np.testing.assert_allclose(d2.meanu, d1.meanu)
np.testing.assert_allclose(d2.meanv, d1.meanv)
@timer
def test_direct_count_cross12():
# Check the 1-2 cross correlation
ngal = 50
s = 10.
rng = np.random.RandomState(8675309)
x1 = rng.normal(0,s, (ngal,) )
y1 = rng.normal(0,s, (ngal,) )
cat1 = treecorr.Catalog(x=x1, y=y1)
x2 = rng.normal(0,s, (ngal,) )
y2 = rng.normal(0,s, (ngal,) )
cat2 = treecorr.Catalog(x=x2, y=y2)
min_sep = 1.
max_sep = 50.
nbins = 50
min_u = 0.13
max_u = 0.89
nubins = 10
min_v = 0.13
max_v = 0.59
nvbins = 10
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=1)
ddd.process(cat1, cat2)
log_min_sep = np.log(min_sep)
log_max_sep = np.log(max_sep)
true_ntri_122 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_212 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_221 = np.zeros( (nbins, nubins, 2*nvbins) )
bin_size = (log_max_sep - log_min_sep) / nbins
ubin_size = (max_u-min_u) / nubins
vbin_size = (max_v-min_v) / nvbins
for i in range(ngal):
for j in range(ngal):
for k in range(j+1,ngal):
dij = np.sqrt((x1[i]-x2[j])**2 + (y1[i]-y2[j])**2)
dik = np.sqrt((x1[i]-x2[k])**2 + (y1[i]-y2[k])**2)
djk = np.sqrt((x2[j]-x2[k])**2 + (y2[j]-y2[k])**2)
if dij == 0.: continue
if dik == 0.: continue
if djk == 0.: continue
if dij < dik:
if dik < djk:
d3 = dij; d2 = dik; d1 = djk
ccw = is_ccw(x1[i],y1[i],x2[j],y2[j],x2[k],y2[k])
true_ntri = true_ntri_122
elif dij < djk:
d3 = dij; d2 = djk; d1 = dik
ccw = is_ccw(x2[j],y2[j],x1[i],y1[i],x2[k],y2[k])
true_ntri = true_ntri_212
else:
d3 = djk; d2 = dij; d1 = dik
ccw = is_ccw(x2[j],y2[j],x2[k],y2[k],x1[i],y1[i])
true_ntri = true_ntri_221
else:
if dij < djk:
d3 = dik; d2 = dij; d1 = djk
ccw = is_ccw(x1[i],y1[i],x2[k],y2[k],x2[j],y2[j])
true_ntri = true_ntri_122
elif dik < djk:
d3 = dik; d2 = djk; d1 = dij
ccw = is_ccw(x2[k],y2[k],x1[i],y1[i],x2[j],y2[j])
true_ntri = true_ntri_212
else:
d3 = djk; d2 = dik; d1 = dij
ccw = is_ccw(x2[k],y2[k],x2[j],y2[j],x1[i],y1[i])
true_ntri = true_ntri_221
r = d2
u = d3/d2
v = (d1-d2)/d3
if r < min_sep or r >= max_sep: continue
if u < min_u or u >= max_u: continue
if v < min_v or v >= max_v: continue
if not ccw:
v = -v
kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size ))
ku = int(np.floor( (u-min_u) / ubin_size ))
if v > 0:
kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins
else:
kv = int(np.floor( (v-(-max_v)) / vbin_size ))
assert 0 <= kr < nbins
assert 0 <= ku < nubins
assert 0 <= kv < 2*nvbins
true_ntri[kr,ku,kv] += 1
# With the regular NNNCorrelation class, we end up with the sum of all permutations.
true_ntri_sum = true_ntri_122 + true_ntri_212 + true_ntri_221
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri = ',true_ntri_sum)
#print('diff = ',ddd.ntri - true_ntri_sum)
np.testing.assert_array_equal(ddd.ntri, true_ntri_sum)
# Now repeat with the full CrossCorrelation class, which distinguishes the permutations.
dddc = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=1)
dddc.process(cat1, cat2)
#print('true_ntri_122 = ',true_ntri_122)
#print('diff = ',dddc.n1n2n3.ntri - true_ntri_122)
np.testing.assert_array_equal(dddc.n1n2n3.ntri, true_ntri_122)
np.testing.assert_array_equal(dddc.n1n3n2.ntri, true_ntri_122)
np.testing.assert_array_equal(dddc.n2n1n3.ntri, true_ntri_212)
np.testing.assert_array_equal(dddc.n2n3n1.ntri, true_ntri_221)
np.testing.assert_array_equal(dddc.n3n1n2.ntri, true_ntri_212)
np.testing.assert_array_equal(dddc.n3n2n1.ntri, true_ntri_221)
# Repeat with bin_slop = 0
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1)
ddd.process(cat1, cat2)
#print('bin_slop = 0: ddd.ntri = ',ddd.ntri)
#print('diff = ',ddd.ntri - true_ntri_sum)
np.testing.assert_array_equal(ddd.ntri, true_ntri_sum)
# And again with no top-level recursion
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1, max_top=0)
ddd.process(cat1, cat2)
#print('max_top = 0: ddd.ntri = ',ddd.ntri)
#print('true_ntri = ',true_ntri_sum)
#print('diff = ',ddd.ntri - true_ntri_sum)
np.testing.assert_array_equal(ddd.ntri, true_ntri_sum)
# Split into patches to test the list-based version of the code.
cat1 = treecorr.Catalog(x=x1, y=y1, npatch=10)
cat2 = treecorr.Catalog(x=x2, y=y2, npatch=10)
ddd.process(cat1, cat2)
np.testing.assert_array_equal(ddd.ntri, true_ntri_sum)
dddc.process(cat1, cat2)
np.testing.assert_array_equal(dddc.n1n2n3.ntri, true_ntri_122)
np.testing.assert_array_equal(dddc.n1n3n2.ntri, true_ntri_122)
np.testing.assert_array_equal(dddc.n2n1n3.ntri, true_ntri_212)
np.testing.assert_array_equal(dddc.n2n3n1.ntri, true_ntri_221)
np.testing.assert_array_equal(dddc.n3n1n2.ntri, true_ntri_212)
np.testing.assert_array_equal(dddc.n3n2n1.ntri, true_ntri_221)
@timer
def test_direct_spherical():
# Repeat in spherical coords
ngal = 50
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) ) + 200 # Put everything at large y, so small angle on sky
z = rng.normal(0,s, (ngal,) )
w = rng.random_sample(ngal)
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w)
min_sep = 1.
bin_size = 0.2
nrbins = 10
nubins = 5
nvbins = 5
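# nubins and nvbins are not passed to NNNCorrelation below; the direct
# calculation assumes the default full ranges u in [0,1] and v in [-1,1] with
# bin width equal to bin_size, giving 5 u bins and 2*5 signed v bins of 0.2.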
ddd = treecorr.NNNCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
sep_units='deg', brute=True)
ddd.process(cat, num_threads=2)
r = np.sqrt(x**2 + y**2 + z**2)
x /= r; y /= r; z /= r
true_ntri = np.zeros((nrbins, nubins, 2*nvbins), dtype=int)
true_weight = np.zeros((nrbins, nubins, 2*nvbins), dtype=float)
rad_min_sep = min_sep * coord.degrees / coord.radians
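# min_sep is in degrees, but the loop below bins chord lengths on the unit
# sphere, which approximate the opening angle in radians at these small
# separations, hence the conversion.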
for i in range(ngal):
for j in range(i+1,ngal):
for k in range(j+1,ngal):
d12 = np.sqrt((x[i]-x[j])**2 + (y[i]-y[j])**2 + (z[i]-z[j])**2)
d23 = np.sqrt((x[j]-x[k])**2 + (y[j]-y[k])**2 + (z[j]-z[k])**2)
d31 = np.sqrt((x[k]-x[i])**2 + (y[k]-y[i])**2 + (z[k]-z[i])**2)
d3, d2, d1 = sorted([d12, d23, d31])
rindex = np.floor(np.log(d2/rad_min_sep) / bin_size).astype(int)
if rindex < 0 or rindex >= nrbins: continue
if [d1, d2, d3] == [d23, d31, d12]: ii,jj,kk = i,j,k
elif [d1, d2, d3] == [d23, d12, d31]: ii,jj,kk = i,k,j
elif [d1, d2, d3] == [d31, d12, d23]: ii,jj,kk = j,k,i
elif [d1, d2, d3] == [d31, d23, d12]: ii,jj,kk = j,i,k
elif [d1, d2, d3] == [d12, d23, d31]: ii,jj,kk = k,i,j
elif [d1, d2, d3] == [d12, d31, d23]: ii,jj,kk = k,j,i
else: assert False
# Now use ii, jj, kk rather than i,j,k, to get the indices
# that correspond to the points in the right order.
u = d3/d2
v = (d1-d2)/d3
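# The sign test below is the scalar triple product
# (p_jj - p_ii) x (p_kk - p_ii) . p_ii, whose sign gives the winding direction
# of the triangle on the sphere.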
if ( ((x[jj]-x[ii])*(y[kk]-y[ii]) - (x[kk]-x[ii])*(y[jj]-y[ii])) * z[ii] +
((y[jj]-y[ii])*(z[kk]-z[ii]) - (y[kk]-y[ii])*(z[jj]-z[ii])) * x[ii] +
((z[jj]-z[ii])*(x[kk]-x[ii]) - (z[kk]-z[ii])*(x[jj]-x[ii])) * y[ii] ) > 0:
v = -v
uindex = np.floor(u / bin_size).astype(int)
assert 0 <= uindex < nubins
vindex = np.floor((v+1) / bin_size).astype(int)
assert 0 <= vindex < 2*nvbins
www = w[i] * w[j] * w[k]
true_ntri[rindex,uindex,vindex] += 1
true_weight[rindex,uindex,vindex] += www
np.testing.assert_array_equal(ddd.ntri, true_ntri)
np.testing.assert_allclose(ddd.weight, true_weight, rtol=1.e-5, atol=1.e-8)
# Check that running via the corr3 script works correctly.
config = treecorr.config.read_config('configs/nnn_direct_spherical.yaml')
cat.write(config['file_name'])
treecorr.corr3(config)
data = fitsio.read(config['nnn_file_name'])
np.testing.assert_allclose(data['r_nom'], ddd.rnom.flatten())
np.testing.assert_allclose(data['u_nom'], ddd.u.flatten())
np.testing.assert_allclose(data['v_nom'], ddd.v.flatten())
np.testing.assert_allclose(data['ntri'], ddd.ntri.flatten())
np.testing.assert_allclose(data['DDD'], ddd.weight.flatten())
# Repeat with bin_slop = 0
# And don't do any top-level recursion so we actually test not going to the leaves.
ddd = treecorr.NNNCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
sep_units='deg', bin_slop=0, max_top=0)
ddd.process(cat)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
np.testing.assert_allclose(ddd.weight, true_weight, rtol=1.e-5, atol=1.e-8)
@timer
def test_direct_arc():
# Repeat the spherical test with metric='Arc'
ngal = 5
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) ) + 200 # Large angles this time.
z = rng.normal(0,s, (ngal,) )
w = rng.random_sample(ngal)
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w)
min_sep = 1.
max_sep = 180.
nrbins = 50
nubins = 5
nvbins = 5
bin_size = np.log((max_sep / min_sep)) / nrbins
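# The radial bins are logarithmic, so this is the log-space bin width implied
# by min_sep, max_sep, and nrbins.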
ubin_size = 0.2
vbin_size = 0.2
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins,
nubins=nubins, ubin_size=ubin_size,
nvbins=nvbins, vbin_size=vbin_size,
sep_units='deg', brute=True)
ddd.process(cat, metric='Arc')
r = np.sqrt(x**2 + y**2 + z**2)
x /= r; y /= r; z /= r
true_ntri = np.zeros((nrbins, nubins, 2*nvbins), dtype=int)
true_weight = np.zeros((nrbins, nubins, 2*nvbins), dtype=float)
c = [coord.CelestialCoord(r*coord.radians, d*coord.radians) for (r,d) in zip(ra, dec)]
for i in range(ngal):
for j in range(i+1,ngal):
for k in range(j+1,ngal):
d12 = c[i].distanceTo(c[j]) / coord.degrees
d23 = c[j].distanceTo(c[k]) / coord.degrees
d31 = c[k].distanceTo(c[i]) / coord.degrees
d3, d2, d1 = sorted([d12, d23, d31])
rindex = np.floor(np.log(d2/min_sep) / bin_size).astype(int)
if rindex < 0 or rindex >= nrbins: continue
if [d1, d2, d3] == [d23, d31, d12]: ii,jj,kk = i,j,k
elif [d1, d2, d3] == [d23, d12, d31]: ii,jj,kk = i,k,j
elif [d1, d2, d3] == [d31, d12, d23]: ii,jj,kk = j,k,i
elif [d1, d2, d3] == [d31, d23, d12]: ii,jj,kk = j,i,k
elif [d1, d2, d3] == [d12, d23, d31]: ii,jj,kk = k,i,j
elif [d1, d2, d3] == [d12, d31, d23]: ii,jj,kk = k,j,i
else: assert False
# Now use ii, jj, kk rather than i,j,k, to get the indices
# that correspond to the points in the right order.
u = d3/d2
v = (d1-d2)/d3
if ( ((x[jj]-x[ii])*(y[kk]-y[ii]) - (x[kk]-x[ii])*(y[jj]-y[ii])) * z[ii] +
((y[jj]-y[ii])*(z[kk]-z[ii]) - (y[kk]-y[ii])*(z[jj]-z[ii])) * x[ii] +
((z[jj]-z[ii])*(x[kk]-x[ii]) - (z[kk]-z[ii])*(x[jj]-x[ii])) * y[ii] ) > 0:
v = -v
uindex = np.floor(u / ubin_size).astype(int)
assert 0 <= uindex < nubins
vindex = np.floor((v+1) / vbin_size).astype(int)
assert 0 <= vindex < 2*nvbins
www = w[i] * w[j] * w[k]
true_ntri[rindex,uindex,vindex] += 1
true_weight[rindex,uindex,vindex] += www
np.testing.assert_array_equal(ddd.ntri, true_ntri)
np.testing.assert_allclose(ddd.weight, true_weight, rtol=1.e-5, atol=1.e-8)
# Check that running via the corr3 script works correctly.
config = treecorr.config.read_config('configs/nnn_direct_arc.yaml')
cat.write(config['file_name'])
treecorr.corr3(config)
data = fitsio.read(config['nnn_file_name'])
np.testing.assert_allclose(data['r_nom'], ddd.rnom.flatten())
np.testing.assert_allclose(data['u_nom'], ddd.u.flatten())
np.testing.assert_allclose(data['v_nom'], ddd.v.flatten())
np.testing.assert_allclose(data['ntri'], ddd.ntri.flatten())
np.testing.assert_allclose(data['DDD'], ddd.weight.flatten())
# Repeat with bin_slop = 0
# And don't do any top-level recursion so we actually test not going to the leaves.
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins,
nubins=nubins, ubin_size=ubin_size,
nvbins=nvbins, vbin_size=vbin_size,
sep_units='deg', bin_slop=0, max_top=0)
ddd.process(cat)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
np.testing.assert_allclose(ddd.weight, true_weight, rtol=1.e-5, atol=1.e-8)
@timer
def test_direct_partial():
# Test the two ways to only use parts of a catalog:
ngal = 100
s = 10.
rng = np.random.RandomState(8675309)
x1 = rng.normal(0,s, (ngal,) )
y1 = rng.normal(0,s, (ngal,) )
cat1a = treecorr.Catalog(x=x1, y=y1, first_row=28, last_row=84)
x2 = rng.normal(0,s, (ngal,) )
y2 = rng.normal(0,s, (ngal,) )
cat2a = treecorr.Catalog(x=x2, y=y2, first_row=48, last_row=99)
x3 = rng.normal(0,s, (ngal,) )
y3 = rng.normal(0,s, (ngal,) )
cat3a = treecorr.Catalog(x=x3, y=y3, first_row=22, last_row=67)
min_sep = 1.
max_sep = 50.
nbins = 50
min_u = 0.13
max_u = 0.89
nubins = 10
min_v = 0.13
max_v = 0.59
nvbins = 10
ddda = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True)
ddda.process(cat1a, cat2a, cat3a)
#print('ddda.ntri = ',ddda.ntri)
log_min_sep = np.log(min_sep)
log_max_sep = np.log(max_sep)
true_ntri_123 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_132 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_213 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_231 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_312 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_321 = np.zeros( (nbins, nubins, 2*nvbins) )
bin_size = (log_max_sep - log_min_sep) / nbins
ubin_size = (max_u-min_u) / nubins
vbin_size = (max_v-min_v) / nvbins
for i in range(27,84):
for j in range(47,99):
for k in range(21,67):
dij = np.sqrt((x1[i]-x2[j])**2 + (y1[i]-y2[j])**2)
dik = np.sqrt((x1[i]-x3[k])**2 + (y1[i]-y3[k])**2)
djk = np.sqrt((x2[j]-x3[k])**2 + (y2[j]-y3[k])**2)
if dij == 0.: continue
if dik == 0.: continue
if djk == 0.: continue
if dij < dik:
if dik < djk:
d3 = dij; d2 = dik; d1 = djk
ccw = is_ccw(x1[i],y1[i],x2[j],y2[j],x3[k],y3[k])
true_ntri = true_ntri_123
elif dij < djk:
d3 = dij; d2 = djk; d1 = dik
ccw = is_ccw(x2[j],y2[j],x1[i],y1[i],x3[k],y3[k])
true_ntri = true_ntri_213
else:
d3 = djk; d2 = dij; d1 = dik
ccw = is_ccw(x2[j],y2[j],x3[k],y3[k],x1[i],y1[i])
true_ntri = true_ntri_231
else:
if dij < djk:
d3 = dik; d2 = dij; d1 = djk
ccw = is_ccw(x1[i],y1[i],x3[k],y3[k],x2[j],y2[j])
true_ntri = true_ntri_132
elif dik < djk:
d3 = dik; d2 = djk; d1 = dij
ccw = is_ccw(x3[k],y3[k],x1[i],y1[i],x2[j],y2[j])
true_ntri = true_ntri_312
else:
d3 = djk; d2 = dik; d1 = dij
ccw = is_ccw(x3[k],y3[k],x2[j],y2[j],x1[i],y1[i])
true_ntri = true_ntri_321
assert d1 >= d2 >= d3
r = d2
u = d3/d2
v = (d1-d2)/d3
if r < min_sep or r >= max_sep: continue
if u < min_u or u >= max_u: continue
if v < min_v or v >= max_v: continue
if not ccw:
v = -v
kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size ))
ku = int(np.floor( (u-min_u) / ubin_size ))
if v > 0:
kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins
else:
kv = int(np.floor( (v-(-max_v)) / vbin_size ))
assert 0 <= kr < nbins
assert 0 <= ku < nubins
assert 0 <= kv < 2*nvbins
true_ntri[kr,ku,kv] += 1
true_ntri_sum = true_ntri_123 + true_ntri_132 + true_ntri_213 + true_ntri_231 +\
true_ntri_312 + true_ntri_321
print('true_ntri = ',true_ntri_sum)
print('diff = ',ddda.ntri - true_ntri_sum)
np.testing.assert_array_equal(ddda.ntri, true_ntri_sum)
# Now with the real CrossCorrelation class
ddda = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True)
ddda.process(cat1a, cat2a, cat3a)
#print('132 = ',ddda.n1n3n2.ntri)
#print('true 132 = ',true_ntri_132)
#print('213 = ',ddda.n2n1n3.ntri)
#print('true 213 = ',true_ntri_213)
#print('231 = ',ddda.n2n3n1.ntri)
#print('true 231 = ',true_ntri_231)
#print('312 = ',ddda.n3n1n2.ntri)
#print('true 312 = ',true_ntri_312)
#print('321 = ',ddda.n3n2n1.ntri)
#print('true 321 = ',true_ntri_321)
np.testing.assert_array_equal(ddda.n1n2n3.ntri, true_ntri_123)
np.testing.assert_array_equal(ddda.n1n3n2.ntri, true_ntri_132)
np.testing.assert_array_equal(ddda.n2n1n3.ntri, true_ntri_213)
np.testing.assert_array_equal(ddda.n2n3n1.ntri, true_ntri_231)
np.testing.assert_array_equal(ddda.n3n1n2.ntri, true_ntri_312)
np.testing.assert_array_equal(ddda.n3n2n1.ntri, true_ntri_321)
# Now check that we get the same thing with all the points, but with w=0 for the ones
# we don't want.
w1 = np.zeros(ngal)
w1[27:84] = 1.
w2 = np.zeros(ngal)
w2[47:99] = 1.
w3 = np.zeros(ngal)
w3[21:67] = 1.
cat1b = treecorr.Catalog(x=x1, y=y1, w=w1)
cat2b = treecorr.Catalog(x=x2, y=y2, w=w2)
cat3b = treecorr.Catalog(x=x3, y=y3, w=w3)
dddb = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True)
dddb.process(cat1b, cat2b, cat3b)
#print('dddb.ntri = ',dddb.ntri)
#print('diff = ',dddb.ntri - true_ntri_sum)
np.testing.assert_array_equal(dddb.ntri, true_ntri_sum)
dddb = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True)
dddb.process(cat1b, cat2b, cat3b)
#print('dddb.n1n2n3.ntri = ',dddb.n1n2n3.ntri)
#print('diff = ',dddb.n1n2n3.ntri - true_ntri_123)
np.testing.assert_array_equal(dddb.n1n2n3.ntri, true_ntri_123)
np.testing.assert_array_equal(dddb.n1n3n2.ntri, true_ntri_132)
np.testing.assert_array_equal(dddb.n2n1n3.ntri, true_ntri_213)
np.testing.assert_array_equal(dddb.n2n3n1.ntri, true_ntri_231)
np.testing.assert_array_equal(dddb.n3n1n2.ntri, true_ntri_312)
np.testing.assert_array_equal(dddb.n3n2n1.ntri, true_ntri_321)
@timer
def test_direct_3d_auto():
# This is the same as test_direct_count_auto, but using the 3d correlations
ngal = 50
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(312, s, (ngal,) )
y = rng.normal(728, s, (ngal,) )
z = rng.normal(-932, s, (ngal,) )
r = np.sqrt( x*x + y*y + z*z )
dec = np.arcsin(z/r)
ra = np.arctan2(y,x)
cat = treecorr.Catalog(ra=ra, dec=dec, r=r, ra_units='rad', dec_units='rad')
min_sep = 1.
max_sep = 50.
nbins = 50
min_u = 0.13
max_u = 0.89
nubins = 10
min_v = 0.13
max_v = 0.59
nvbins = 10
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=1)
ddd.process(cat)
#print('ddd.ntri = ',ddd.ntri)
log_min_sep = np.log(min_sep)
log_max_sep = np.log(max_sep)
true_ntri = np.zeros( (nbins, nubins, 2*nvbins) )
bin_size = (log_max_sep - log_min_sep) / nbins
ubin_size = (max_u-min_u) / nubins
vbin_size = (max_v-min_v) / nvbins
for i in range(ngal):
for j in range(i+1,ngal):
for k in range(j+1,ngal):
dij = np.sqrt((x[i]-x[j])**2 + (y[i]-y[j])**2 + (z[i]-z[j])**2)
dik = np.sqrt((x[i]-x[k])**2 + (y[i]-y[k])**2 + (z[i]-z[k])**2)
djk = np.sqrt((x[j]-x[k])**2 + (y[j]-y[k])**2 + (z[j]-z[k])**2)
if dij == 0.: continue
if dik == 0.: continue
if djk == 0.: continue
if dij < dik:
if dik < djk:
d3 = dij; d2 = dik; d1 = djk
ccw = is_ccw_3d(x[i],y[i],z[i],x[j],y[j],z[j],x[k],y[k],z[k])
elif dij < djk:
d3 = dij; d2 = djk; d1 = dik
ccw = is_ccw_3d(x[j],y[j],z[j],x[i],y[i],z[i],x[k],y[k],z[k])
else:
d3 = djk; d2 = dij; d1 = dik
ccw = is_ccw_3d(x[j],y[j],z[j],x[k],y[k],z[k],x[i],y[i],z[i])
else:
if dij < djk:
d3 = dik; d2 = dij; d1 = djk
ccw = is_ccw_3d(x[i],y[i],z[i],x[k],y[k],z[k],x[j],y[j],z[j])
elif dik < djk:
d3 = dik; d2 = djk; d1 = dij
ccw = is_ccw_3d(x[k],y[k],z[k],x[i],y[i],z[i],x[j],y[j],z[j])
else:
d3 = djk; d2 = dik; d1 = dij
ccw = is_ccw_3d(x[k],y[k],z[k],x[j],y[j],z[j],x[i],y[i],z[i])
r = d2
u = d3/d2
v = (d1-d2)/d3
if r < min_sep or r >= max_sep: continue
if u < min_u or u >= max_u: continue
if v < min_v or v >= max_v: continue
if not ccw:
v = -v
kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size ))
ku = int(np.floor( (u-min_u) / ubin_size ))
if v > 0:
kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins
else:
kv = int(np.floor( (v-(-max_v)) / vbin_size ))
assert 0 <= kr < nbins
assert 0 <= ku < nubins
assert 0 <= kv < 2*nvbins
true_ntri[kr,ku,kv] += 1
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# Repeat with bin_slop = 0
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1)
ddd.process(cat)
#print('ddd.ntri = ',ddd.ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# And again with no top-level recursion
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1, max_top=0)
ddd.process(cat)
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# And compare to the cross correlation
# Here, we get 6x as much, since each triangle is discovered 6 times.
ddd.clear()
ddd.process(cat,cat,cat)
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, 6*true_ntri)
dddc = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1, max_top=0)
dddc.process(cat,cat,cat)
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(dddc.n1n2n3.ntri, true_ntri)
np.testing.assert_array_equal(dddc.n1n3n2.ntri, true_ntri)
np.testing.assert_array_equal(dddc.n2n1n3.ntri, true_ntri)
np.testing.assert_array_equal(dddc.n2n3n1.ntri, true_ntri)
np.testing.assert_array_equal(dddc.n3n1n2.ntri, true_ntri)
np.testing.assert_array_equal(dddc.n3n2n1.ntri, true_ntri)
# Also compare to using x,y,z rather than ra,dec,r
cat = treecorr.Catalog(x=x, y=y, z=z)
ddd.process(cat)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
@timer
def test_direct_3d_cross():
# This is the same as test_direct_count_cross, but using the 3d correlations
ngal = 50
s = 10.
rng = np.random.RandomState(8675309)
x1 = rng.normal(312, s, (ngal,) )
y1 = rng.normal(728, s, (ngal,) )
z1 = rng.normal(-932, s, (ngal,) )
r1 = np.sqrt( x1*x1 + y1*y1 + z1*z1 )
dec1 = np.arcsin(z1/r1)
ra1 = np.arctan2(y1,x1)
cat1 = treecorr.Catalog(ra=ra1, dec=dec1, r=r1, ra_units='rad', dec_units='rad')
x2 = rng.normal(312, s, (ngal,) )
y2 = rng.normal(728, s, (ngal,) )
z2 = rng.normal(-932, s, (ngal,) )
r2 = np.sqrt( x2*x2 + y2*y2 + z2*z2 )
dec2 = np.arcsin(z2/r2)
ra2 = np.arctan2(y2,x2)
cat2 = treecorr.Catalog(ra=ra2, dec=dec2, r=r2, ra_units='rad', dec_units='rad')
x3 = rng.normal(312, s, (ngal,) )
y3 = rng.normal(728, s, (ngal,) )
z3 = rng.normal(-932, s, (ngal,) )
r3 = np.sqrt( x3*x3 + y3*y3 + z3*z3 )
dec3 = np.arcsin(z3/r3)
ra3 = np.arctan2(y3,x3)
cat3 = treecorr.Catalog(ra=ra3, dec=dec3, r=r3, ra_units='rad', dec_units='rad')
min_sep = 1.
max_sep = 50.
nbins = 50
min_u = 0.13
max_u = 0.89
nubins = 10
min_v = 0.13
max_v = 0.59
nvbins = 10
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=1)
ddd.process(cat1, cat2, cat3)
#print('ddd.ntri = ',ddd.ntri)
log_min_sep = np.log(min_sep)
log_max_sep = np.log(max_sep)
true_ntri_123 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_132 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_213 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_231 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_312 = np.zeros( (nbins, nubins, 2*nvbins) )
true_ntri_321 = np.zeros( (nbins, nubins, 2*nvbins) )
bin_size = (log_max_sep - log_min_sep) / nbins
ubin_size = (max_u-min_u) / nubins
vbin_size = (max_v-min_v) / nvbins
for i in range(ngal):
for j in range(ngal):
for k in range(ngal):
djk = np.sqrt((x2[j]-x3[k])**2 + (y2[j]-y3[k])**2 + (z2[j]-z3[k])**2)
dik = np.sqrt((x1[i]-x3[k])**2 + (y1[i]-y3[k])**2 + (z1[i]-z3[k])**2)
dij = np.sqrt((x1[i]-x2[j])**2 + (y1[i]-y2[j])**2 + (z1[i]-z2[j])**2)
if dij == 0.: continue
if dik == 0.: continue
if djk == 0.: continue
if dij < dik:
if dik < djk:
d3 = dij; d2 = dik; d1 = djk
ccw = is_ccw_3d(x1[i],y1[i],z1[i],x2[j],y2[j],z2[j],x3[k],y3[k],z3[k])
true_ntri = true_ntri_123
elif dij < djk:
d3 = dij; d2 = djk; d1 = dik
ccw = is_ccw_3d(x2[j],y2[j],z2[j],x1[i],y1[i],z1[i],x3[k],y3[k],z3[k])
true_ntri = true_ntri_213
else:
d3 = djk; d2 = dij; d1 = dik
ccw = is_ccw_3d(x2[j],y2[j],z2[j],x3[k],y3[k],z3[k],x1[i],y1[i],z1[i])
true_ntri = true_ntri_231
else:
if dij < djk:
d3 = dik; d2 = dij; d1 = djk
ccw = is_ccw_3d(x1[i],y1[i],z1[i],x3[k],y3[k],z3[k],x2[j],y2[j],z2[j])
true_ntri = true_ntri_132
elif dik < djk:
d3 = dik; d2 = djk; d1 = dij
ccw = is_ccw_3d(x3[k],y3[k],z3[k],x1[i],y1[i],z1[i],x2[j],y2[j],z2[j])
true_ntri = true_ntri_312
else:
d3 = djk; d2 = dik; d1 = dij
ccw = is_ccw_3d(x3[k],y3[k],z3[k],x2[j],y2[j],z2[j],x1[i],y1[i],z1[i])
true_ntri = true_ntri_321
r = d2
u = d3/d2
v = (d1-d2)/d3
if r < min_sep or r >= max_sep: continue
if u < min_u or u >= max_u: continue
if v < min_v or v >= max_v: continue
if not ccw:
v = -v
kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size ))
ku = int(np.floor( (u-min_u) / ubin_size ))
if v > 0:
kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins
else:
kv = int(np.floor( (v-(-max_v)) / vbin_size ))
assert 0 <= kr < nbins
assert 0 <= ku < nubins
assert 0 <= kv < 2*nvbins
true_ntri[kr,ku,kv] += 1
# With the regular NNNCorrelation class, we end up with the sum of all permutations.
true_ntri_sum = true_ntri_123 + true_ntri_132 + true_ntri_213 + true_ntri_231 +\
true_ntri_312 + true_ntri_321
#print('true_ntri = ',true_ntri_sum)
#print('diff = ',ddd.ntri - true_ntri_sum)
np.testing.assert_array_equal(ddd.ntri, true_ntri_sum)
# Now repeat with the full CrossCorrelation class, which distinguishes the permutations.
ddd = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=1)
ddd.process(cat1, cat2, cat3)
#print('true_ntri = ',true_ntri_123)
#print('diff = ',ddd.n1n2n3.ntri - true_ntri_123)
np.testing.assert_array_equal(ddd.n1n2n3.ntri, true_ntri_123)
np.testing.assert_array_equal(ddd.n1n3n2.ntri, true_ntri_132)
np.testing.assert_array_equal(ddd.n2n1n3.ntri, true_ntri_213)
np.testing.assert_array_equal(ddd.n2n3n1.ntri, true_ntri_231)
np.testing.assert_array_equal(ddd.n3n1n2.ntri, true_ntri_312)
np.testing.assert_array_equal(ddd.n3n2n1.ntri, true_ntri_321)
# Repeat with bin_slop = 0
ddd = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1)
ddd.process(cat1, cat2, cat3)
#print('bin_slop = 0: ddd.n1n2n3.ntri = ',ddd.n1n2n3.ntri)
#print('diff = ',ddd.n1n2n3.ntri - true_ntri_123)
np.testing.assert_array_equal(ddd.n1n2n3.ntri, true_ntri_123)
np.testing.assert_array_equal(ddd.n1n3n2.ntri, true_ntri_132)
np.testing.assert_array_equal(ddd.n2n1n3.ntri, true_ntri_213)
np.testing.assert_array_equal(ddd.n2n3n1.ntri, true_ntri_231)
np.testing.assert_array_equal(ddd.n3n1n2.ntri, true_ntri_312)
np.testing.assert_array_equal(ddd.n3n2n1.ntri, true_ntri_321)
# And again with no top-level recursion
ddd = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1, max_top=0)
ddd.process(cat1, cat2, cat3)
#print('max_top = 0: ddd.n1n2n3.ntri = ',ddd.n1n2n3.ntri)
#print('true_ntri = ',true_ntri_123)
#print('diff = ',ddd.n1n2n3.ntri - true_ntri_123)
np.testing.assert_array_equal(ddd.n1n2n3.ntri, true_ntri_123)
np.testing.assert_array_equal(ddd.n1n3n2.ntri, true_ntri_132)
np.testing.assert_array_equal(ddd.n2n1n3.ntri, true_ntri_213)
np.testing.assert_array_equal(ddd.n2n3n1.ntri, true_ntri_231)
np.testing.assert_array_equal(ddd.n3n1n2.ntri, true_ntri_312)
np.testing.assert_array_equal(ddd.n3n2n1.ntri, true_ntri_321)
# Also compare to using x,y,z rather than ra,dec,r
cat1 = treecorr.Catalog(x=x1, y=y1, z=z1)
cat2 = treecorr.Catalog(x=x2, y=y2, z=z2)
cat3 = treecorr.Catalog(x=x3, y=y3, z=z3)
ddd.process(cat1, cat2, cat3)
np.testing.assert_array_equal(ddd.n1n2n3.ntri, true_ntri_123)
np.testing.assert_array_equal(ddd.n1n3n2.ntri, true_ntri_132)
np.testing.assert_array_equal(ddd.n2n1n3.ntri, true_ntri_213)
np.testing.assert_array_equal(ddd.n2n3n1.ntri, true_ntri_231)
np.testing.assert_array_equal(ddd.n3n1n2.ntri, true_ntri_312)
np.testing.assert_array_equal(ddd.n3n2n1.ntri, true_ntri_321)
@timer
def test_nnn():
# Use a simple probability distribution for the galaxies:
#
# n(r) = (2pi s^2)^-1 exp(-r^2/2s^2)
#
# The Fourier transform is: n~(k) = exp(-s^2 k^2/2)
# B(k1,k2) = <n~(k1) n~(k2) n~(-k1-k2)>
# = exp(-s^2 (|k1|^2 + |k2|^2 - k1.k2))
# = exp(-s^2 (|k1|^2 + |k2|^2 + |k3|^2)/2)
#
# zeta(r1,r2) = (1/2pi)^4 int(d^2k1 int(d^2k2 exp(ik1.x1) exp(ik2.x2) B(k1,k2) ))
# = exp(-(x1^2 + y1^2 + x2^2 + y2^2 - x1x2 - y1y2)/3s^2) / 12 pi^2 s^4
# = exp(-(d1^2 + d2^2 + d3^2)/6s^2) / 12 pi^2 s^4
#
# This is also derivable as:
# zeta(r1,r2) = int(dx int(dy n(x,y) n(x+x1,y+y1) n(x+x2,y+y2)))
# which is also analytically integrable and gives the same answer.
#
# However, we need to correct for the uniform density background, so the real result
# is this minus 1/L^4 divided by 1/L^4. So:
#
# zeta(r1,r2) = 1/(12 pi^2) (L/s)^4 exp(-(d1^2+d2^2+d3^2)/6s^2) - 1
# Doing the full correlation function takes a long time. Here, we just test a small range
# of separations and a moderate range for u, v, which gives us a variety of triangle lengths.
s = 10.
if __name__ == "__main__":
ngal = 20000
nrand = 2 * ngal
L = 50. * s # Not infinity, so this introduces some error. Our integrals were to infinity.
tol_factor = 1
else:
ngal = 2000
nrand = ngal
L = 20. * s
tol_factor = 5
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) )
min_sep = 11.
max_sep = 13.
nbins = 2
min_u = 0.6
max_u = 0.9
nubins = 3
min_v = 0.5
max_v = 0.9
nvbins = 5
cat = treecorr.Catalog(x=x, y=y, x_units='arcmin', y_units='arcmin')
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v,
nubins=nubins, nvbins=nvbins,
sep_units='arcmin', verbose=1)
ddd.process(cat)
#print('ddd.ntri = ',ddd.ntri)
# log(<d>) != <logd>, but it should be close:
print('meanlogd1 - log(meand1) = ',ddd.meanlogd1 - np.log(ddd.meand1))
print('meanlogd2 - log(meand2) = ',ddd.meanlogd2 - np.log(ddd.meand2))
print('meanlogd3 - log(meand3) = ',ddd.meanlogd3 - np.log(ddd.meand3))
print('meand3 / meand2 = ',ddd.meand3 / ddd.meand2)
print('meanu = ',ddd.meanu)
print('max diff = ',np.max(np.abs(ddd.meand3/ddd.meand2 -ddd.meanu)))
print('max rel diff = ',np.max(np.abs((ddd.meand3/ddd.meand2 -ddd.meanu)/ddd.meanu)))
print('(meand1 - meand2)/meand3 = ',(ddd.meand1-ddd.meand2) / ddd.meand3)
print('meanv = ',ddd.meanv)
print('max diff = ',np.max(np.abs((ddd.meand1-ddd.meand2)/ddd.meand3 -np.abs(ddd.meanv))))
print('max rel diff = ',
np.max(np.abs(((ddd.meand1-ddd.meand2)/ddd.meand3-np.abs(ddd.meanv))/ddd.meanv)))
np.testing.assert_allclose(ddd.meanlogd1, np.log(ddd.meand1), rtol=1.e-3)
np.testing.assert_allclose(ddd.meanlogd2, np.log(ddd.meand2), rtol=1.e-3)
np.testing.assert_allclose(ddd.meanlogd3, np.log(ddd.meand3), rtol=1.e-3)
np.testing.assert_allclose(ddd.meand3/ddd.meand2, ddd.meanu, rtol=1.e-5 * tol_factor)
np.testing.assert_allclose((ddd.meand1-ddd.meand2)/ddd.meand3, np.abs(ddd.meanv),
rtol=1.e-5 * tol_factor, atol=1.e-5 * tol_factor)
np.testing.assert_allclose(ddd.meanlogd3-ddd.meanlogd2, np.log(ddd.meanu),
atol=1.e-3 * tol_factor)
np.testing.assert_allclose(np.log(ddd.meand1-ddd.meand2)-ddd.meanlogd3,
np.log(np.abs(ddd.meanv)), atol=2.e-3 * tol_factor)
rx = (rng.random_sample(nrand)-0.5) * L
ry = (rng.random_sample(nrand)-0.5) * L
rand = treecorr.Catalog(x=rx,y=ry, x_units='arcmin', y_units='arcmin')
rrr = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v,
nubins=nubins, nvbins=nvbins,
sep_units='arcmin', verbose=1)
rrr.process(rand)
#print('rrr.ntri = ',rrr.ntri)
d1 = ddd.meand1
d2 = ddd.meand2
d3 = ddd.meand3
#print('rnom = ',np.exp(ddd.logr))
#print('unom = ',ddd.u)
#print('vnom = ',ddd.v)
#print('d1 = ',d1)
#print('d2 = ',d2)
#print('d3 = ',d3)
true_zeta = (1./(12.*np.pi**2)) * (L/s)**4 * np.exp(-(d1**2+d2**2+d3**2)/(6.*s**2)) - 1.
zeta, varzeta = ddd.calculateZeta(rrr)
print('zeta = ',zeta)
print('true_zeta = ',true_zeta)
print('ratio = ',zeta / true_zeta)
print('diff = ',zeta - true_zeta)
print('max rel diff = ',np.max(np.abs((zeta - true_zeta)/true_zeta)))
np.testing.assert_allclose(zeta, true_zeta, rtol=0.1*tol_factor)
np.testing.assert_allclose(np.log(np.abs(zeta)), np.log(np.abs(true_zeta)),
atol=0.1*tol_factor)
# Check that we get the same result using the corr3 function
cat.write(os.path.join('data','nnn_data.dat'))
rand.write(os.path.join('data','nnn_rand.dat'))
config = treecorr.config.read_config('configs/nnn.yaml')
config['verbose'] = 0
treecorr.corr3(config)
corr3_output = np.genfromtxt(os.path.join('output','nnn.out'), names=True, skip_header=1)
print('zeta = ',zeta)
print('from corr3 output = ',corr3_output['zeta'])
print('ratio = ',corr3_output['zeta']/zeta.flatten())
print('diff = ',corr3_output['zeta']-zeta.flatten())
np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=1.e-3)
# Check the fits write option
out_file_name1 = os.path.join('output','nnn_out1.fits')
ddd.write(out_file_name1)
data = fitsio.read(out_file_name1)
np.testing.assert_almost_equal(data['r_nom'], np.exp(ddd.logr).flatten())
np.testing.assert_almost_equal(data['u_nom'], ddd.u.flatten())
np.testing.assert_almost_equal(data['v_nom'], ddd.v.flatten())
np.testing.assert_almost_equal(data['meand1'], ddd.meand1.flatten())
np.testing.assert_almost_equal(data['meanlogd1'], ddd.meanlogd1.flatten())
np.testing.assert_almost_equal(data['meand2'], ddd.meand2.flatten())
np.testing.assert_almost_equal(data['meanlogd2'], ddd.meanlogd2.flatten())
np.testing.assert_almost_equal(data['meand3'], ddd.meand3.flatten())
np.testing.assert_almost_equal(data['meanlogd3'], ddd.meanlogd3.flatten())
np.testing.assert_almost_equal(data['meanu'], ddd.meanu.flatten())
np.testing.assert_almost_equal(data['meanv'], ddd.meanv.flatten())
np.testing.assert_almost_equal(data['ntri'], ddd.ntri.flatten())
header = fitsio.read_header(out_file_name1, 1)
np.testing.assert_almost_equal(header['tot']/ddd.tot, 1.)
out_file_name2 = os.path.join('output','nnn_out2.fits')
ddd.write(out_file_name2, rrr)
data = fitsio.read(out_file_name2)
np.testing.assert_almost_equal(data['r_nom'], np.exp(ddd.logr).flatten())
np.testing.assert_almost_equal(data['u_nom'], ddd.u.flatten())
np.testing.assert_almost_equal(data['v_nom'], ddd.v.flatten())
np.testing.assert_almost_equal(data['meand1'], ddd.meand1.flatten())
np.testing.assert_almost_equal(data['meanlogd1'], ddd.meanlogd1.flatten())
np.testing.assert_almost_equal(data['meand2'], ddd.meand2.flatten())
np.testing.assert_almost_equal(data['meanlogd2'], ddd.meanlogd2.flatten())
np.testing.assert_almost_equal(data['meand3'], ddd.meand3.flatten())
np.testing.assert_almost_equal(data['meanlogd3'], ddd.meanlogd3.flatten())
np.testing.assert_almost_equal(data['meanu'], ddd.meanu.flatten())
np.testing.assert_almost_equal(data['meanv'], ddd.meanv.flatten())
np.testing.assert_almost_equal(data['zeta'], zeta.flatten())
np.testing.assert_almost_equal(data['sigma_zeta'], np.sqrt(varzeta).flatten())
np.testing.assert_almost_equal(data['DDD'], ddd.ntri.flatten())
np.testing.assert_almost_equal(data['RRR'], rrr.ntri.flatten() * (ddd.tot / rrr.tot))
header = fitsio.read_header(out_file_name2, 1)
np.testing.assert_almost_equal(header['tot']/ddd.tot, 1.)
# Check the read function
# Note: These don't need the flatten. The read function should reshape them to the right shape.
ddd2 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
                               min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v,
                               nubins=nubins, nvbins=nvbins,
                               sep_units='arcmin', verbose=1)
ddd2.read(out_file_name1)
np.testing.assert_almost_equal(ddd2.logr, ddd.logr)
np.testing.assert_almost_equal(ddd2.u, ddd.u)
np.testing.assert_almost_equal(ddd2.v, ddd.v)
np.testing.assert_almost_equal(ddd2.meand1, ddd.meand1)
np.testing.assert_almost_equal(ddd2.meanlogd1, ddd.meanlogd1)
np.testing.assert_almost_equal(ddd2.meand2, ddd.meand2)
np.testing.assert_almost_equal(ddd2.meanlogd2, ddd.meanlogd2)
np.testing.assert_almost_equal(ddd2.meand3, ddd.meand3)
np.testing.assert_almost_equal(ddd2.meanlogd3, ddd.meanlogd3)
np.testing.assert_almost_equal(ddd2.meanu, ddd.meanu)
np.testing.assert_almost_equal(ddd2.meanv, ddd.meanv)
np.testing.assert_almost_equal(ddd2.ntri, ddd.ntri)
np.testing.assert_almost_equal(ddd2.tot/ddd.tot, 1.)
assert ddd2.coords == ddd.coords
assert ddd2.metric == ddd.metric
assert ddd2.sep_units == ddd.sep_units
assert ddd2.bin_type == ddd.bin_type
ddd2.read(out_file_name2)
np.testing.assert_almost_equal(ddd2.logr, ddd.logr)
np.testing.assert_almost_equal(ddd2.u, ddd.u)
np.testing.assert_almost_equal(ddd2.v, ddd.v)
np.testing.assert_almost_equal(ddd2.meand1, ddd.meand1)
np.testing.assert_almost_equal(ddd2.meanlogd1, ddd.meanlogd1)
np.testing.assert_almost_equal(ddd2.meand2, ddd.meand2)
np.testing.assert_almost_equal(ddd2.meanlogd2, ddd.meanlogd2)
np.testing.assert_almost_equal(ddd2.meand3, ddd.meand3)
np.testing.assert_almost_equal(ddd2.meanlogd3, ddd.meanlogd3)
np.testing.assert_almost_equal(ddd2.meanu, ddd.meanu)
np.testing.assert_almost_equal(ddd2.meanv, ddd.meanv)
np.testing.assert_almost_equal(ddd2.ntri, ddd.ntri)
np.testing.assert_almost_equal(ddd2.tot/ddd.tot, 1.)
assert ddd2.coords == ddd.coords
assert ddd2.metric == ddd.metric
assert ddd2.sep_units == ddd.sep_units
assert ddd2.bin_type == ddd.bin_type
# Check the hdf5 write option
try:
    import h5py  # noqa: F401
except ImportError:
    print('Skipping hdf5 output file, since h5py not installed.')
else:
    out_file_name3 = os.path.join('output','nnn_out3.hdf5')
    ddd.write(out_file_name3, rrr)
    with h5py.File(out_file_name3, 'r') as hdf:
        data = hdf['/']
        np.testing.assert_almost_equal(data['r_nom'], np.exp(ddd.logr).flatten())
        np.testing.assert_almost_equal(data['u_nom'], ddd.u.flatten())
        np.testing.assert_almost_equal(data['v_nom'], ddd.v.flatten())
        np.testing.assert_almost_equal(data['meand1'], ddd.meand1.flatten())
        np.testing.assert_almost_equal(data['meanlogd1'], ddd.meanlogd1.flatten())
        np.testing.assert_almost_equal(data['meand2'], ddd.meand2.flatten())
        np.testing.assert_almost_equal(data['meanlogd2'], ddd.meanlogd2.flatten())
        np.testing.assert_almost_equal(data['meand3'], ddd.meand3.flatten())
        np.testing.assert_almost_equal(data['meanlogd3'], ddd.meanlogd3.flatten())
        np.testing.assert_almost_equal(data['meanu'], ddd.meanu.flatten())
        np.testing.assert_almost_equal(data['meanv'], ddd.meanv.flatten())
        np.testing.assert_almost_equal(data['ntri'], ddd.ntri.flatten())
        np.testing.assert_almost_equal(data['zeta'], zeta.flatten())
        np.testing.assert_almost_equal(data['sigma_zeta'], np.sqrt(varzeta).flatten())
        np.testing.assert_almost_equal(data['DDD'], ddd.ntri.flatten())
        np.testing.assert_almost_equal(data['RRR'], rrr.ntri.flatten() * (ddd.tot / rrr.tot))
        attrs = data.attrs
        np.testing.assert_almost_equal(attrs['tot']/ddd.tot, 1.)
    ddd3 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
                                   min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v,
                                   nubins=nubins, nvbins=nvbins,
                                   sep_units='arcmin', verbose=1)
    ddd3.read(out_file_name3)
    np.testing.assert_almost_equal(ddd3.logr, ddd.logr)
    np.testing.assert_almost_equal(ddd3.u, ddd.u)
    np.testing.assert_almost_equal(ddd3.v, ddd.v)
    np.testing.assert_almost_equal(ddd3.meand1, ddd.meand1)
    np.testing.assert_almost_equal(ddd3.meanlogd1, ddd.meanlogd1)
    np.testing.assert_almost_equal(ddd3.meand2, ddd.meand2)
    np.testing.assert_almost_equal(ddd3.meanlogd2, ddd.meanlogd2)
    np.testing.assert_almost_equal(ddd3.meand3, ddd.meand3)
    np.testing.assert_almost_equal(ddd3.meanlogd3, ddd.meanlogd3)
    np.testing.assert_almost_equal(ddd3.meanu, ddd.meanu)
    np.testing.assert_almost_equal(ddd3.meanv, ddd.meanv)
    np.testing.assert_almost_equal(ddd3.ntri, ddd.ntri)
    np.testing.assert_almost_equal(ddd3.tot/ddd.tot, 1.)
    assert ddd3.coords == ddd.coords
    assert ddd3.metric == ddd.metric
    assert ddd3.sep_units == ddd.sep_units
    assert ddd3.bin_type == ddd.bin_type
# Test compensated zeta
# First just check the mechanics.
# If we don't actually do all the cross terms, then compensated is the same as simple.
zeta2, varzeta2 = ddd.calculateZeta(rrr,drr=rrr,rdd=rrr)
print('fake compensated zeta = ',zeta2)
np.testing.assert_allclose(zeta2, zeta)
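# Why the fake-compensated result equals the simple one (explanatory note added
# here): drr and rdd enter the compensated estimator with opposite signs -- e.g.
# a Landy-Szalay-style form like (DDD - RDD + DRR - RRR)/RRR, which is stated
# here as an assumption about the internals -- so setting drr = rdd = rrr makes
# those terms cancel, leaving the simple estimator (DDD - RRR)/RRR.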
# Error to not have one of rrr, drr, rdd.
with assert_raises(TypeError):
    ddd.calculateZeta(drr=rrr, rdd=rrr)
with assert_raises(TypeError):
    ddd.calculateZeta(rrr, rdd=rrr)
with assert_raises(TypeError):
    ddd.calculateZeta(rrr, drr=rrr)
rrr2 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
                               min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v,
                               nubins=nubins, nvbins=nvbins, sep_units='arcmin')
# Error if any of them haven't been run yet.
with assert_raises(ValueError):
    ddd.calculateZeta(rrr2, drr=rrr, rdd=rrr)
with assert_raises(ValueError):
    ddd.calculateZeta(rrr, drr=rrr2, rdd=rrr)
with assert_raises(ValueError):
    ddd.calculateZeta(rrr, drr=rrr, rdd=rrr2)
out_file_name3 = os.path.join('output','nnn_out3.fits')
with assert_raises(TypeError):
    ddd.write(out_file_name3, drr=rrr, rdd=rrr)
with assert_raises(TypeError):
    ddd.write(out_file_name3, rrr=rrr, rdd=rrr)
with assert_raises(TypeError):
    ddd.write(out_file_name3, rrr=rrr, drr=rrr)
# It's too slow to test the real calculation in nosetests runs, so we stop here if not main.
if __name__ != '__main__':
    return
# This version computes the three-point function after subtracting off the appropriate
# two-point functions xi(d1) + xi(d2) + xi(d3), where [cf. test_nn() in test_nn.py]
# xi(r) = 1/4pi (L/s)^2 exp(-r^2/4s^2) - 1
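# Quick numeric feel for the xi(r) profile above (an added sketch with
# hypothetical stand-in values, not part of the original test):
r_demo, s_demo, L_demo = 20., 10., 500.
xi_demo = (1./(4.*np.pi)) * (L_demo/s_demo)**2 * np.exp(-r_demo**2/(4.*s_demo**2)) - 1.
assert 70. < xi_demo < 75.  # ~72.2: still strongly clustered at r = 2s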
drr = ddd.copy()
rdd = ddd.copy()
drr.process(cat,rand)
rdd.process(rand,cat)
zeta, varzeta = ddd.calculateZeta(rrr,drr,rdd)
print('compensated zeta = ',zeta)
xi1 = (1./(4.*np.pi)) * (L/s)**2 * np.exp(-d1**2/(4.*s**2)) - 1.
xi2 = (1./(4.*np.pi)) * (L/s)**2 * np.exp(-d2**2/(4.*s**2)) - 1.
xi3 = (1./(4.*np.pi)) * (L/s)**2 * np.exp(-d3**2/(4.*s**2)) - 1.
print('xi1 = ',xi1)
print('xi2 = ',xi2)
print('xi3 = ',xi3)
print('true_zeta + xi1 + xi2 + xi3 = ',true_zeta)
true_zeta -= xi1 + xi2 + xi3
print('true_zeta => ',true_zeta)
print('ratio = ',zeta / true_zeta)
print('diff = ',zeta - true_zeta)
print('max rel diff = ',np.max(np.abs((zeta - true_zeta)/true_zeta)))
np.testing.assert_allclose(zeta, true_zeta, rtol=0.1*tol_factor)
np.testing.assert_allclose(np.log(np.abs(zeta)), np.log(np.abs(true_zeta)), atol=0.1*tol_factor)
out_file_name3 = os.path.join('output','nnn_out3.fits')
ddd.write(out_file_name3, rrr,drr,rdd)
data = fitsio.read(out_file_name3)
np.testing.assert_almost_equal(data['r_nom'], np.exp(ddd.logr).flatten())
np.testing.assert_almost_equal(data['u_nom'], ddd.u.flatten())
np.testing.assert_almost_equal(data['v_nom'], ddd.v.flatten())
np.testing.assert_almost_equal(data['meand1'], ddd.meand1.flatten())
np.testing.assert_almost_equal(data['meanlogd1'], ddd.meanlogd1.flatten())
np.testing.assert_almost_equal(data['meand2'], ddd.meand2.flatten())
np.testing.assert_almost_equal(data['meanlogd2'], ddd.meanlogd2.flatten())
np.testing.assert_almost_equal(data['meand3'], ddd.meand3.flatten())
np.testing.assert_almost_equal(data['meanlogd3'], ddd.meanlogd3.flatten())
np.testing.assert_almost_equal(data['meanu'], ddd.meanu.flatten())
np.testing.assert_almost_equal(data['meanv'], ddd.meanv.flatten())
np.testing.assert_almost_equal(data['zeta'], zeta.flatten())
np.testing.assert_almost_equal(data['sigma_zeta'], np.sqrt(varzeta).flatten())
np.testing.assert_almost_equal(data['DDD'], ddd.ntri.flatten())
np.testing.assert_almost_equal(data['RRR'], rrr.ntri.flatten() * (ddd.tot / rrr.tot))
np.testing.assert_almost_equal(data['DRR'], drr.ntri.flatten() * (ddd.tot / drr.tot))
np.testing.assert_almost_equal(data['RDD'], rdd.ntri.flatten() * (ddd.tot / rdd.tot))
header = fitsio.read_header(out_file_name3, 1)
np.testing.assert_almost_equal(header['tot']/ddd.tot, 1.)
ddd2.read(out_file_name3)
np.testing.assert_almost_equal(ddd2.logr, ddd.logr)
np.testing.assert_almost_equal(ddd2.u, ddd.u)
np.testing.assert_almost_equal(ddd2.v, ddd.v)
np.testing.assert_almost_equal(ddd2.meand1, ddd.meand1)
np.testing.assert_almost_equal(ddd2.meanlogd1, ddd.meanlogd1)
np.testing.assert_almost_equal(ddd2.meand2, ddd.meand2)
np.testing.assert_almost_equal(ddd2.meanlogd2, ddd.meanlogd2)
np.testing.assert_almost_equal(ddd2.meand3, ddd.meand3)
np.testing.assert_almost_equal(ddd2.meanlogd3, ddd.meanlogd3)
np.testing.assert_almost_equal(ddd2.meanu, ddd.meanu)
np.testing.assert_almost_equal(ddd2.meanv, ddd.meanv)
np.testing.assert_almost_equal(ddd2.ntri, ddd.ntri)
np.testing.assert_almost_equal(ddd2.tot/ddd.tot, 1.)
assert ddd2.coords == ddd.coords
assert ddd2.metric == ddd.metric
assert ddd2.sep_units == ddd.sep_units
assert ddd2.bin_type == ddd.bin_type
config = treecorr.config.read_config('configs/nnn_compensated.yaml')
config['verbose'] = 0
treecorr.corr3(config)
corr3_outfile = os.path.join('output','nnn_compensated.fits')
corr3_output = fitsio.read(corr3_outfile)
print('zeta = ',zeta)
print('from corr3 output = ',corr3_output['zeta'])
print('ratio = ',corr3_output['zeta']/zeta.flatten())
print('diff = ',corr3_output['zeta']-zeta.flatten())
np.testing.assert_almost_equal(corr3_output['r_nom'], np.exp(ddd.logr).flatten())
np.testing.assert_almost_equal(corr3_output['u_nom'], ddd.u.flatten())
np.testing.assert_almost_equal(corr3_output['v_nom'], ddd.v.flatten())
np.testing.assert_almost_equal(corr3_output['meand1'], ddd.meand1.flatten())
np.testing.assert_almost_equal(corr3_output['meanlogd1'], ddd.meanlogd1.flatten())
np.testing.assert_almost_equal(corr3_output['meand2'], ddd.meand2.flatten())
np.testing.assert_almost_equal(corr3_output['meanlogd2'], ddd.meanlogd2.flatten())
np.testing.assert_almost_equal(corr3_output['meand3'], ddd.meand3.flatten())
np.testing.assert_almost_equal(corr3_output['meanlogd3'], ddd.meanlogd3.flatten())
np.testing.assert_almost_equal(corr3_output['meanu'], ddd.meanu.flatten())
np.testing.assert_almost_equal(corr3_output['meanv'], ddd.meanv.flatten())
np.testing.assert_almost_equal(corr3_output['zeta'], zeta.flatten())
np.testing.assert_almost_equal(corr3_output['sigma_zeta'], np.sqrt(varzeta).flatten())
np.testing.assert_almost_equal(corr3_output['DDD'], ddd.ntri.flatten())
np.testing.assert_almost_equal(corr3_output['RRR'], rrr.ntri.flatten() * (ddd.tot / rrr.tot))
np.testing.assert_almost_equal(corr3_output['DRR'], drr.ntri.flatten() * (ddd.tot / drr.tot))
np.testing.assert_almost_equal(corr3_output['RDD'], rdd.ntri.flatten() * (ddd.tot / rdd.tot))
header = fitsio.read_header(corr3_outfile, 1)
np.testing.assert_almost_equal(header['tot']/ddd.tot, 1.)

@timer
def test_3d():
    # For this one, build a Gaussian cloud around some random point in 3D space and do the
    # correlation function in 3D.
    #
    # The 3D Fourier transform is: n~(k) = exp(-s^2 k^2/2)
    # B(k1,k2) = <n~(k1) n~(k2) n~(-k1-k2)>
    #          = exp(-s^2 (|k1|^2 + |k2|^2 + k1.k2))
    #          = exp(-s^2 (|k1|^2 + |k2|^2 + |k3|^2)/2)
    # as before, except now k1,k2 are 3d vectors, not 2d.
    #
    # zeta(r1,r2) = (1/2pi)^6 int(d^3k1 int(d^3k2 exp(ik1.x1) exp(ik2.x2) B(k1,k2) ))
    #             = exp(-(x1^2+y1^2+z1^2 + x2^2+y2^2+z2^2 - x1x2 - y1y2 - z1z2)/3s^2)
    #                   / (24 sqrt(3) pi^3 s^6)
    #             = exp(-(d1^2 + d2^2 + d3^2)/6s^2) / 24 sqrt(3) pi^3 s^6
    #
    # And again, this is also derivable as:
    # zeta(r1,r2) = int(dx int(dy int(dz n(x,y,z) n(x+x1,y+y1,z+z1) n(x+x2,y+y2,z+z2)))
    # which is also analytically integrable and gives the same answer.
    #
    # However, we need to correct for the uniform density background, so the real result
    # is this minus 1/L^6 divided by 1/L^6. So:
    #
    # zeta(r1,r2) = 1/(24 sqrt(3) pi^3) (L/s)^6 exp(-(d1^2+d2^2+d3^2)/6s^2) - 1
    # Doing the full correlation function takes a long time. Here, we just test a small range
    # of separations and a moderate range for u, v, which gives us a variety of triangle lengths.
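    # Scale check of the formula above (an illustrative aside, not part of the
    # original test): with the __main__ parameters below (L = 50s), the predicted
    # peak value of zeta at d1 = d2 = d3 -> 0 is (L/s)^6 / (24 sqrt(3) pi^3) - 1,
    # i.e. about 1.2e7, so the signal being tested is very far from zero.
    amp3d_demo = 50.**6 / (24. * np.sqrt(3) * np.pi**3) - 1.
    assert 1.2e7 < amp3d_demo < 1.3e7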
    xcen = 823  # Mpc maybe?
    ycen = 342
    zcen = -672
    s = 10.
    if __name__ == "__main__":
        ngal = 5000
        nrand = 20 * ngal
        L = 50. * s
        tol_factor = 1
    else:
        ngal = 1000
        nrand = 5 * ngal
        L = 20. * s
        tol_factor = 5
    rng = np.random.RandomState(8675309)
    x = rng.normal(xcen, s, (ngal,) )
    y = rng.normal(ycen, s, (ngal,) )
    z = rng.normal(zcen, s, (ngal,) )
    r = np.sqrt(x*x+y*y+z*z)
    dec = np.arcsin(z/r) * (coord.radians / coord.degrees)
    ra = np.arctan2(y,x) * (coord.radians / coord.degrees)
    min_sep = 10.
    max_sep = 20.
    nbins = 8
    min_u = 0.9
    max_u = 1.0
    nubins = 1
    min_v = 0.
    max_v = 0.05
    nvbins = 1
    cat = treecorr.Catalog(ra=ra, dec=dec, r=r, ra_units='deg', dec_units='deg')
    ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
                                  min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v,
                                  nubins=nubins, nvbins=nvbins, verbose=1)
    ddd.process(cat)
    print('ddd.ntri = ',ddd.ntri.flatten())
    rx = (rng.random_sample(nrand)-0.5) * L + xcen
    ry = (rng.random_sample(nrand)-0.5) * L + ycen
    rz = (rng.random_sample(nrand)-0.5) * L + zcen
    rr = np.sqrt(rx*rx+ry*ry+rz*rz)
    rdec = np.arcsin(rz/rr) * (coord.radians / coord.degrees)
    rra = np.arctan2(ry,rx) * (coord.radians / coord.degrees)
    rand = treecorr.Catalog(ra=rra, dec=rdec, r=rr, ra_units='deg', dec_units='deg')
    rrr = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
                                  min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v,
                                  nubins=nubins, nvbins=nvbins, verbose=1)
    rrr.process(rand)
    print('rrr.ntri = ',rrr.ntri.flatten())
    d1 = ddd.meand1
    d2 = ddd.meand2
    d3 = ddd.meand3
    print('rnom = ',np.exp(ddd.logr).flatten())
    print('unom = ',ddd.u.flatten())
    print('vnom = ',ddd.v.flatten())
    print('d1 = ',d1.flatten())
    print('d2 = ',d2.flatten())
    print('d3 = ',d3.flatten())
    true_zeta = ((1./(24.*np.sqrt(3)*np.pi**3)) * (L/s)**6 *
                 np.exp(-(d1**2+d2**2+d3**2)/(6.*s**2)) - 1.)
    zeta, varzeta = ddd.calculateZeta(rrr)
    print('zeta = ',zeta.flatten())
    print('true_zeta = ',true_zeta.flatten())
    print('ratio = ',(zeta / true_zeta).flatten())
    print('diff = ',(zeta - true_zeta).flatten())
    print('max rel diff = ',np.max(np.abs((zeta - true_zeta)/true_zeta)))
    np.testing.assert_allclose(zeta, true_zeta, rtol=0.1*tol_factor)
    np.testing.assert_allclose(np.log(np.abs(zeta)), np.log(np.abs(true_zeta)),
                               atol=0.1*tol_factor)
    # Check that we get the same result using the corr3 function:
    cat.write(os.path.join('data','nnn_3d_data.dat'))
    rand.write(os.path.join('data','nnn_3d_rand.dat'))
    config = treecorr.config.read_config('configs/nnn_3d.yaml')
    config['verbose'] = 0
    treecorr.corr3(config)
    corr3_output = np.genfromtxt(os.path.join('output','nnn_3d.out'), names=True, skip_header=1)
    print('zeta = ',zeta.flatten())
    print('from corr3 output = ',corr3_output['zeta'])
    print('ratio = ',corr3_output['zeta']/zeta.flatten())
    print('diff = ',corr3_output['zeta']-zeta.flatten())
    np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=1.e-3)
    # Check that we get the same thing when using x,y,z rather than ra,dec,r
    cat = treecorr.Catalog(x=x, y=y, z=z)
    rand = treecorr.Catalog(x=rx, y=ry, z=rz)
    ddd.process(cat)
    rrr.process(rand)
    zeta, varzeta = ddd.calculateZeta(rrr)
    np.testing.assert_allclose(zeta, true_zeta, rtol=0.1*tol_factor)
    np.testing.assert_allclose(np.log(np.abs(zeta)), np.log(np.abs(true_zeta)),
                               atol=0.1*tol_factor)

@timer
def test_list():
    # Test that we can use a list of files for either data or rand or both.
    data_cats = []
    rand_cats = []
    ncats = 3
    ngal = 100
    nrand = 2 * ngal
    s = 10.
    L = 50. * s
    rng = np.random.RandomState(8675309)
    min_sep = 30.
    max_sep = 50.
    nbins = 3
    min_u = 0
    max_u = 0.2
    nubins = 2
    min_v = 0.5
    max_v = 0.9
    nvbins = 2
    x = rng.normal(0,s, (ngal,ncats) )
    y = rng.normal(0,s, (ngal,ncats) )
    data_cats = [ treecorr.Catalog(x=x[:,k], y=y[:,k]) for k in range(ncats) ]
    rx = (rng.random_sample((nrand,ncats))-0.5) * L
    ry = (rng.random_sample((nrand,ncats))-0.5) * L
    rand_cats = [ treecorr.Catalog(x=rx[:,k], y=ry[:,k]) for k in range(ncats) ]
    ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
                                  min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v,
                                  nubins=nubins, nvbins=nvbins, bin_slop=0.1, verbose=1)
    ddd.process(data_cats)
    print('From multiple catalogs: ddd.ntri = ',ddd.ntri)
    print('tot = ',ddd.tot)
    # Now do the same thing with one big catalog
    dddx = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
                                   min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v,
                                   nubins=nubins, nvbins=nvbins, bin_slop=0.1, verbose=1)
    data_catx = treecorr.Catalog(x=x.reshape( (ngal*ncats,) ), y=y.reshape( (ngal*ncats,) ))
    dddx.process(data_catx)
    print('From single catalog: dddx.ntri = ',dddx.ntri)
    print('tot = ',dddx.tot)
    # Only test to rtol=0.1, since there are now differences between the auto and cross
    # calculations related to how they characterize triangles, especially when d1 ~= d2
    # or d2 ~= d3.
    np.testing.assert_allclose(ddd.ntri, dddx.ntri, rtol=0.1)
    np.testing.assert_allclose(ddd.tot, dddx.tot)
    rrr = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
                                  min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v,
                                  nubins=nubins, nvbins=nvbins, bin_slop=0.1, verbose=1)
    rrr.process(rand_cats)
    print('rrr.ntri = ',rrr.ntri)
    rrrx = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
                                   min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v,
                                   nubins=nubins, nvbins=nvbins, bin_slop=0.1, verbose=1)
    rand_catx = treecorr.Catalog(x=rx.reshape( (nrand*ncats,) ), y=ry.reshape( (nrand*ncats,) ))
    rrrx.process(rand_catx)
    print('rrrx.ntri = ',rrrx.ntri)
    np.testing.assert_allclose(rrr.ntri, rrrx.ntri, rtol=0.1)
    np.testing.assert_allclose(rrr.tot, rrrx.tot)
    zeta, varzeta = ddd.calculateZeta(rrr)
    zetax, varzetax = dddx.calculateZeta(rrrx)
    print('zeta = ',zeta)
    print('zetax = ',zetax)
    #print('ratio = ',zeta/zetax)
    #print('diff = ',zeta-zetax)
    np.testing.assert_allclose(zeta, zetax, rtol=0.1)
    # Check that we get the same result using the corr3 function:
    file_list = []
    rand_file_list = []
    for k in range(ncats):
        file_name = os.path.join('data','nnn_list_data%d.dat'%k)
        data_cats[k].write(file_name)
        file_list.append(file_name)
        rand_file_name = os.path.join('data','nnn_list_rand%d.dat'%k)
        rand_cats[k].write(rand_file_name)
        rand_file_list.append(rand_file_name)
    list_name = os.path.join('data','nnn_list_data_files.txt')
    with open(list_name, 'w') as fid:
        for file_name in file_list:
            fid.write('%s\n'%file_name)
    rand_list_name = os.path.join('data','nnn_list_rand_files.txt')
    with open(rand_list_name, 'w') as fid:
        for file_name in rand_file_list:
            fid.write('%s\n'%file_name)
    file_namex = os.path.join('data','nnn_list_datax.dat')
    data_catx.write(file_namex)
    rand_file_namex = os.path.join('data','nnn_list_randx.dat')
    rand_catx.write(rand_file_namex)
    config = treecorr.config.read_config('configs/nnn_list1.yaml')
    config['verbose'] = 0
    config['bin_slop'] = 0.1
    treecorr.corr3(config)
    corr3_output = np.genfromtxt(os.path.join('output','nnn_list1.out'), names=True, skip_header=1)
    print('zeta = ',zeta)
    print('from corr3 output = ',corr3_output['zeta'])
    print('ratio = ',corr3_output['zeta']/zeta.flatten())
    print('diff = ',corr3_output['zeta']-zeta.flatten())
    np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=1.e-3)
    config = treecorr.config.read_config('configs/nnn_list2.json')
    config['verbose'] = 0
    config['bin_slop'] = 0.1
    treecorr.corr3(config)
    corr3_output = np.genfromtxt(os.path.join('output','nnn_list2.out'), names=True, skip_header=1)
    print('zeta = ',zeta)
    print('from corr3 output = ',corr3_output['zeta'])
    print('ratio = ',corr3_output['zeta']/zeta.flatten())
    print('diff = ',corr3_output['zeta']-zeta.flatten())
    np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=0.05)
    config = treecorr.config.read_config('configs/nnn_list3.params')
    config['verbose'] = 0
    config['bin_slop'] = 0.1
    treecorr.corr3(config)
    corr3_output = np.genfromtxt(os.path.join('output','nnn_list3.out'), names=True, skip_header=1)
    print('zeta = ',zeta)
    print('from corr3 output = ',corr3_output['zeta'])
    print('ratio = ',corr3_output['zeta']/zeta.flatten())
    print('diff = ',corr3_output['zeta']-zeta.flatten())
    np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=0.05)
    config = treecorr.config.read_config('configs/nnn_list4.config', file_type='params')
    config['verbose'] = 0
    config['bin_slop'] = 0.1
    treecorr.corr3(config)
    corr3_output = np.genfromtxt(os.path.join('output','nnn_list4.out'), names=True, skip_header=1)
    print('zeta = ',zeta)
    print('from corr3 output = ',corr3_output['zeta'])
    print('ratio = ',corr3_output['zeta']/zeta.flatten())
    print('diff = ',corr3_output['zeta']-zeta.flatten())
    np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=1.e-3)

if __name__ == '__main__':
    test_log_binning()
    test_direct_count_auto()
    test_direct_count_cross()
    test_direct_count_cross12()
    test_direct_spherical()
    test_direct_arc()
    test_direct_partial()
    test_direct_3d_auto()
    test_direct_3d_cross()
    test_nnn()
    test_3d()
    test_list()
| 46.516188
| 100
| 0.601854
| 19,531
| 126,431
| 3.715632
| 0.038298
| 0.058537
| 0.097561
| 0.06048
| 0.869547
| 0.83817
| 0.817762
| 0.790699
| 0.73806
| 0.709453
| 0
| 0.049049
| 0.256124
| 126,431
| 2,717
| 101
| 46.533309
| 0.72257
| 0.120817
| 0
| 0.647032
| 0
| 0
| 0.033915
| 0.00324
| 0
| 0
| 0
| 0
| 0.333952
| 1
| 0.006494
| false
| 0
| 0.006494
| 0
| 0.013915
| 0.05334
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f7dfd6651ddb4baefa5df432a99d00a337572179
| 111
|
py
|
Python
|
app.py
|
vinicius-cardoso/crm-gestao-interna
|
f57d6f83e2fabe4cb1d3185f73acf21c9e885a10
|
[
"MIT"
] | null | null | null |
app.py
|
vinicius-cardoso/crm-gestao-interna
|
f57d6f83e2fabe4cb1d3185f73acf21c9e885a10
|
[
"MIT"
] | null | null | null |
app.py
|
vinicius-cardoso/crm-gestao-interna
|
f57d6f83e2fabe4cb1d3185f73acf21c9e885a10
|
[
"MIT"
] | null | null | null |
from crm import app
from crm import db
if __name__ == "__main__":
    db.create_all()  # create any missing tables before starting the dev server
    app.run(debug=True)
| 18.5
| 27
| 0.684685
| 18
| 111
| 3.722222
| 0.722222
| 0.208955
| 0.38806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.189189
| 111
| 6
| 28
| 18.5
| 0.744444
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
79046d31ed91366118a39dcf7f70c3237ed795ac
| 39
|
py
|
Python
|
a1d05eba1/utils/__init__.py
|
dorey/a1d05eba1
|
eb6f66a946f3c417ab6bf9047ba9715be071967c
|
[
"0BSD"
] | null | null | null |
a1d05eba1/utils/__init__.py
|
dorey/a1d05eba1
|
eb6f66a946f3c417ab6bf9047ba9715be071967c
|
[
"0BSD"
] | 28
|
2020-06-23T19:00:58.000Z
|
2021-03-26T22:13:07.000Z
|
a1d05eba1/utils/__init__.py
|
dorey/a1d05eba1
|
eb6f66a946f3c417ab6bf9047ba9715be071967c
|
[
"0BSD"
] | null | null | null |
from .kfrozendict import kassertfrozen
| 19.5
| 38
| 0.871795
| 4
| 39
| 8.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 1
| 39
| 39
| 0.971429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
f702b5e51d59cc678d28c85bdace0ba9bb5040f9
| 120
|
py
|
Python
|
hydromt/workflows/__init__.py
|
couasnonanais/hydromt
|
6ff3bb6e76cea8247be171f1fe781c0cbb7e9c9e
|
[
"MIT"
] | null | null | null |
hydromt/workflows/__init__.py
|
couasnonanais/hydromt
|
6ff3bb6e76cea8247be171f1fe781c0cbb7e9c9e
|
[
"MIT"
] | null | null | null |
hydromt/workflows/__init__.py
|
couasnonanais/hydromt
|
6ff3bb6e76cea8247be171f1fe781c0cbb7e9c9e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""HydroMT workflows"""
from .basin_mask import *
from .forcing import *
from .rivers import *
| 17.142857
| 25
| 0.658333
| 15
| 120
| 5.2
| 0.733333
| 0.25641
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01
| 0.166667
| 120
| 6
| 26
| 20
| 0.77
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f74dab577042e68b31ea2e93553c8adf6ffc5042
| 74
|
py
|
Python
|
medgpc/visualization/__init__.py
|
bee-hive/MedGP
|
596a24ca519900507cce42cb4e2061319cef801e
|
[
"BSD-3-Clause"
] | 25
|
2018-03-18T18:09:03.000Z
|
2022-02-24T07:47:33.000Z
|
medgpc/visualization/__init__.py
|
bee-hive/MedGP
|
596a24ca519900507cce42cb4e2061319cef801e
|
[
"BSD-3-Clause"
] | 3
|
2021-04-12T16:11:00.000Z
|
2021-04-12T16:26:17.000Z
|
medgpc/visualization/__init__.py
|
bee-hive/MedGP
|
596a24ca519900507cce42cb4e2061319cef801e
|
[
"BSD-3-Clause"
] | 4
|
2019-04-27T23:18:26.000Z
|
2021-12-03T20:19:09.000Z
|
from . import fastkernel
from . import vizkernel
from . import printkernel
| 24.666667
| 25
| 0.810811
| 9
| 74
| 6.666667
| 0.555556
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148649
| 74
| 3
| 25
| 24.666667
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.333333
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f78915246c99b94dc26c5a60ad0e5e6b320a7dbc
| 42
|
py
|
Python
|
gym_nats/envs/__init__.py
|
austrian-code-wizard/gym-nats
|
df5f6efb34fefaba3186ad225c45ca296a1f095a
|
[
"MIT"
] | 1
|
2020-09-29T17:56:21.000Z
|
2020-09-29T17:56:21.000Z
|
gym_nats/envs/__init__.py
|
austrian-code-wizard/gym-nats
|
df5f6efb34fefaba3186ad225c45ca296a1f095a
|
[
"MIT"
] | 4
|
2020-09-29T10:17:06.000Z
|
2020-09-29T10:19:37.000Z
|
gym_nats/envs/__init__.py
|
austrian-code-wizard/gym-nats
|
df5f6efb34fefaba3186ad225c45ca296a1f095a
|
[
"MIT"
] | null | null | null |
from gym_nats.envs.nats_env import NatsEnv
| 42
| 42
| 0.880952
| 8
| 42
| 4.375
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 42
| 1
| 42
| 42
| 0.897436
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e3c59c62075152a0eacfeb67165a805310979f4f
| 9,572
|
py
|
Python
|
src/pyfme/utils/tests/test_coordinates.py
|
jdebecdelievre/PyFME
|
45a46c9dccfaf4961dc9a7320ff43a24e28eb4e4
|
[
"MIT"
] | 1
|
2021-01-24T19:34:46.000Z
|
2021-01-24T19:34:46.000Z
|
src/pyfme/utils/tests/test_coordinates.py
|
jdebecdelievre/PyFME
|
45a46c9dccfaf4961dc9a7320ff43a24e28eb4e4
|
[
"MIT"
] | null | null | null |
src/pyfme/utils/tests/test_coordinates.py
|
jdebecdelievre/PyFME
|
45a46c9dccfaf4961dc9a7320ff43a24e28eb4e4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Python Flight Mechanics Engine (PyFME).
Copyright (c) AeroPython Development Team.
Distributed under the terms of the MIT License.
Frames of Reference orientation test functions
----------------------------------------------
"""
import pytest
import numpy as np
from numpy.testing import (assert_array_almost_equal)
from pyfme.utils.coordinates import (body2hor, hor2body,
                                     check_theta_phi_psi_range,
                                     hor2wind, wind2hor,
                                     check_gamma_mu_chi_range,
                                     body2wind, wind2body,
                                     check_alpha_beta_range)

def test_check_theta_range():
    wrong_values = (3 * np.pi, - 3 * np.pi)
    for value in wrong_values:
        # 0 is always a correct value
        with pytest.raises(ValueError) as excinfo:
            check_theta_phi_psi_range(value, 0, 0)
        assert ("ValueError: Theta value is not inside correct range"
                in excinfo.exconly())


def test_check_phi_range():
    wrong_values = (3 * np.pi, - 3 * np.pi)
    for value in wrong_values:
        # 0 is always a correct value
        with pytest.raises(ValueError) as excinfo:
            check_theta_phi_psi_range(0, value, 0)
        assert ("ValueError: Phi value is not inside correct range"
                in excinfo.exconly())


def test_check_psi_range():
    wrong_values = (3 * np.pi, - 3 * np.pi)
    for value in wrong_values:
        # 0 is always a correct value
        with pytest.raises(ValueError) as excinfo:
            check_theta_phi_psi_range(0, 0, value)
        assert ("ValueError: Psi value is not inside correct range"
                in excinfo.exconly())


def test_body2hor():
    # Test with a pitch rotation
    vector_body = np.array([1, 1, 1])
    theta, phi, psi = np.deg2rad(45), 0, 0
    vector_hor = body2hor(vector_body, theta, phi, psi)
    vector_hor_expected = np.array([2 * 0.70710678118654757, 1, 0])
    assert_array_almost_equal(vector_hor, vector_hor_expected)
    # Test with a roll rotation
    vector_body = np.array([1, 1, 1])
    theta, phi, psi = 0, np.deg2rad(45), 0
    vector_hor = body2hor(vector_body, theta, phi, psi)
    vector_hor_expected = np.array([1, 0, 2 * 0.70710678118654757])
    assert_array_almost_equal(vector_hor, vector_hor_expected)
    # Test with a yaw rotation
    vector_body = np.array([1, 1, 1])
    theta, phi, psi = 0, 0, np.deg2rad(45)
    vector_hor = body2hor(vector_body, theta, phi, psi)
    vector_hor_expected = np.array([0, 2 * 0.70710678118654757, 1])
    assert_array_almost_equal(vector_hor, vector_hor_expected)
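
# Numeric illustration (an added sketch, not part of the original suite) of why a
# 45 deg pitch maps the body vector [1, 1, 1] to [sqrt(2), 1, 0]: with
# phi = psi = 0 the body-to-horizon transform reduces to a rotation about y.
_theta_demo = np.deg2rad(45)
_Ry_demo = np.array([[np.cos(_theta_demo), 0, np.sin(_theta_demo)],
                     [0, 1, 0],
                     [-np.sin(_theta_demo), 0, np.cos(_theta_demo)]])
assert_array_almost_equal(np.dot(_Ry_demo, np.array([1, 1, 1])),
                          [2 * 0.70710678118654757, 1, 0])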

def test_hor2body():
    # Test with a pitch rotation
    vector_hor = np.array([2 * 0.70710678118654757, 1, 0])
    theta, phi, psi = np.deg2rad(45), 0, 0
    vector_body_expected = np.array([1, 1, 1])
    vector_body = hor2body(vector_hor, theta, phi, psi)
    assert_array_almost_equal(vector_body, vector_body_expected)
    # Test with a roll rotation
    vector_hor = np.array([1, 0, 2 * 0.70710678118654757])
    theta, phi, psi = 0, np.deg2rad(45), 0
    vector_body_expected = np.array([1, 1, 1])
    vector_body = hor2body(vector_hor, theta, phi, psi)
    assert_array_almost_equal(vector_body, vector_body_expected)
    # Test with a yaw rotation
    vector_hor = np.array([0, 2 * 0.70710678118654757, 1])
    theta, phi, psi = 0, 0, np.deg2rad(45)
    vector_body_expected = np.array([1, 1, 1])
    vector_body = hor2body(vector_hor, theta, phi, psi)
    assert_array_almost_equal(vector_body, vector_body_expected)


def test_check_gamma_mu_chi_range():
    wrong_values = (3 * np.pi, - 3 * np.pi)
    for value in wrong_values:
        # 0 is always a correct value
        angles = [0, 0, 0]
        for ii in range(3):
            angles[ii] = value
            with pytest.raises(ValueError):
                check_gamma_mu_chi_range(*angles)


def test_check_gamma_range():
    wrong_values = (3 * np.pi, - 3 * np.pi)
    for value in wrong_values:
        # 0 is always a correct value
        with pytest.raises(ValueError) as excinfo:
            check_gamma_mu_chi_range(value, 0, 0)
        assert ("ValueError: Gamma value is not inside correct range"
                in excinfo.exconly())


def test_check_mu_range():
    wrong_values = (3 * np.pi, - 3 * np.pi)
    for value in wrong_values:
        # 0 is always a correct value
        with pytest.raises(ValueError) as excinfo:
            check_gamma_mu_chi_range(0, value, 0)
        assert ("ValueError: Mu value is not inside correct range"
                in excinfo.exconly())


def test_check_chi_range():
    wrong_values = (3 * np.pi, - 3 * np.pi)
    for value in wrong_values:
        # 0 is always a correct value
        with pytest.raises(ValueError) as excinfo:
            check_gamma_mu_chi_range(0, 0, value)
        assert ("ValueError: Chi value is not inside correct range"
                in excinfo.exconly())


def test_wind2hor():
    # Test with a pitch rotation
    vector_wind = np.array([1, 1, 1])
    gamma, mu, chi = np.deg2rad(45), 0, 0
    vector_hor = wind2hor(vector_wind, gamma, mu, chi)
    vector_hor_expected = np.array([2 * 0.70710678118654757, 1, 0])
    assert_array_almost_equal(vector_hor, vector_hor_expected)
    # Test with a roll rotation
    vector_wind = np.array([1, 1, 1])
    gamma, mu, chi = 0, np.deg2rad(45), 0
    vector_hor = wind2hor(vector_wind, gamma, mu, chi)
    vector_hor_expected = np.array([1, 0, 2 * 0.70710678118654757])
    assert_array_almost_equal(vector_hor, vector_hor_expected)
    # Test with a yaw rotation
    vector_wind = np.array([1, 1, 1])
    gamma, mu, chi = 0, 0, np.deg2rad(45)
    vector_hor = wind2hor(vector_wind, gamma, mu, chi)
    vector_hor_expected = np.array([0, 2 * 0.70710678118654757, 1])
    assert_array_almost_equal(vector_hor, vector_hor_expected)


def test_hor2wind():
    # Test with a pitch rotation
    vector_hor = np.array([2 * 0.70710678118654757, 1, 0])
    gamma, mu, chi = np.deg2rad(45), 0, 0
    vector_wind_expected = np.array([1, 1, 1])
    vector_wind = hor2wind(vector_hor, gamma, mu, chi)
    assert_array_almost_equal(vector_wind, vector_wind_expected)
    # Test with a roll rotation
    vector_hor = np.array([1, 0, 2 * 0.70710678118654757])
    gamma, mu, chi = 0, np.deg2rad(45), 0
    vector_wind_expected = np.array([1, 1, 1])
    vector_wind = hor2wind(vector_hor, gamma, mu, chi)
    assert_array_almost_equal(vector_wind, vector_wind_expected)
    # Test with a yaw rotation
    vector_hor = np.array([0, 2 * 0.70710678118654757, 1])
    gamma, mu, chi = 0, 0, np.deg2rad(45)
    vector_wind_expected = np.array([1, 1, 1])
    vector_wind = hor2wind(vector_hor, gamma, mu, chi)
    assert_array_almost_equal(vector_wind, vector_wind_expected)


def test_check_alpha_beta_range():
    wrong_values = (3 * np.pi, - 3 * np.pi)
    for value in wrong_values:
        # 0 is always a correct value
        angles = [0, 0]
        for ii in range(2):
            angles[ii] = value
            with pytest.raises(ValueError):
                check_alpha_beta_range(*angles)


def test_check_alpha_range():
    wrong_values = (3 * np.pi, - 3 * np.pi)
    for value in wrong_values:
        # 0 is always a correct value
        with pytest.raises(ValueError) as excinfo:
            check_alpha_beta_range(value, 0)
        assert ("ValueError: Alpha value is not inside correct range"
                in excinfo.exconly())


def test_check_beta_range():
    wrong_values = (3 * np.pi, - 3 * np.pi)
    for value in wrong_values:
        # 0 is always a correct value
        with pytest.raises(ValueError) as excinfo:
            check_alpha_beta_range(0, value)
        assert ("ValueError: Beta value is not inside correct range"
                in excinfo.exconly())


def test_wind2body():
    # Test with an increment of the angle of attack
    vector_wind = np.array([1, 1, 1])
    alpha, beta = np.deg2rad(45), 0
    vector_body = wind2body(vector_wind, alpha, beta)
    vector_body_expected = np.array([0, 1, 2 * 0.70710678118654757])
    assert_array_almost_equal(vector_body, vector_body_expected)
    # Test with an increment of the sideslip angle
    vector_wind = np.array([1, 1, 1])
    alpha, beta = 0, np.deg2rad(45)
    vector_body = wind2body(vector_wind, alpha, beta)
    vector_body_expected = np.array([0, 2 * 0.70710678118654757, 1])
    assert_array_almost_equal(vector_body, vector_body_expected)


def test_body2wind():
    # Test with an increment of the angle of attack
    vector_body = np.array([0, 1, 2 * 0.70710678118654757])
    alpha, beta = np.deg2rad(45), 0
    vector_wind = body2wind(vector_body, alpha, beta)
    vector_wind_expected = np.array([1, 1, 1])
    assert_array_almost_equal(vector_wind, vector_wind_expected)
    # Test with an increment of the sideslip angle
    vector_body = np.array([0, 2 * 0.70710678118654757, 1])
    alpha, beta = 0, np.deg2rad(45)
    vector_wind = body2wind(vector_body, alpha, beta)
    vector_wind_expected = np.array([1, 1, 1])
    assert_array_almost_equal(vector_wind, vector_wind_expected)
| 29.726708
| 70
| 0.625052
| 1,327
| 9,572
| 4.293142
| 0.07159
| 0.056872
| 0.017553
| 0.065649
| 0.909602
| 0.889064
| 0.85624
| 0.840618
| 0.815868
| 0.783921
| 0
| 0.077709
| 0.274028
| 9,572
| 321
| 71
| 29.819315
| 0.742121
| 0.10677
| 0
| 0.694611
| 0
| 0
| 0.048566
| 0
| 0
| 0
| 0
| 0
| 0.149701
| 1
| 0.095808
| false
| 0
| 0.023952
| 0
| 0.11976
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e3c956558e71f4a8d25a306993eabe2518780e56
| 40
|
py
|
Python
|
urwid_readline/__init__.py
|
zee-bit/urwid_readline
|
cdb8e62ce3c94f99e9a70ebde69625840583fa5c
|
[
"MIT"
] | 21
|
2017-11-05T17:26:04.000Z
|
2021-10-05T00:50:45.000Z
|
urwid_readline/__init__.py
|
zee-bit/urwid_readline
|
cdb8e62ce3c94f99e9a70ebde69625840583fa5c
|
[
"MIT"
] | 18
|
2017-11-05T17:26:00.000Z
|
2021-12-31T07:52:20.000Z
|
urwid_readline/__init__.py
|
zee-bit/urwid_readline
|
cdb8e62ce3c94f99e9a70ebde69625840583fa5c
|
[
"MIT"
] | 10
|
2018-05-22T09:10:47.000Z
|
2022-02-14T20:27:41.000Z
|
from .readline_edit import ReadlineEdit
| 20
| 39
| 0.875
| 5
| 40
| 6.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 40
| 1
| 40
| 40
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e3d136d86f590b0995d80fea1bd3f131b5155428
| 38
|
py
|
Python
|
lib/bindings/vs/__init__.py
|
tlalexander/stitchEm
|
cdff821ad2c500703e6cb237ec61139fce7bf11c
|
[
"MIT"
] | 182
|
2019-04-19T12:38:30.000Z
|
2022-03-20T16:48:20.000Z
|
lib/bindings/vs/__init__.py
|
doymcc/stitchEm
|
20693a55fa522d7a196b92635e7a82df9917c2e2
|
[
"MIT"
] | 107
|
2019-04-23T10:49:35.000Z
|
2022-03-02T18:12:28.000Z
|
lib/bindings/vs/__init__.py
|
doymcc/stitchEm
|
20693a55fa522d7a196b92635e7a82df9917c2e2
|
[
"MIT"
] | 59
|
2019-06-04T11:27:25.000Z
|
2022-03-17T23:49:49.000Z
|
from vs import *
from camera import *
| 12.666667
| 20
| 0.736842
| 6
| 38
| 4.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 38
| 2
| 21
| 19
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e3f329cd05977df6d7d0354eb1b592bd776b5a0b
| 108
|
py
|
Python
|
tests/conftest.py
|
SADevs/barbacoa
|
769b8122fe52be298b086a7fcab9745732c43c06
|
[
"Apache-2.0"
] | 4
|
2018-12-06T23:43:53.000Z
|
2019-03-17T23:48:19.000Z
|
tests/conftest.py
|
SADevs/barbacoa
|
769b8122fe52be298b086a7fcab9745732c43c06
|
[
"Apache-2.0"
] | 7
|
2018-12-07T00:36:46.000Z
|
2019-04-28T19:41:36.000Z
|
tests/conftest.py
|
SADevs/barbacoa
|
769b8122fe52be298b086a7fcab9745732c43c06
|
[
"Apache-2.0"
] | 2
|
2018-12-07T04:35:59.000Z
|
2018-12-07T23:47:56.000Z
|
# -*- coding: utf-8 -*-
import pytest
import barbacoa
@pytest.fixture
def hub():
    return barbacoa.hub
| 10.8
| 23
| 0.666667
| 14
| 108
| 5.142857
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011494
| 0.194444
| 108
| 9
| 24
| 12
| 0.816092
| 0.194444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
540186823cb5ad44e68b7c4ca2e6f5cd75bc080d
| 244
|
py
|
Python
|
npc_engine/exporters/__init__.py
|
npc-engine/npc-engine
|
0047794e96369c23515f794a1e77009c516a382c
|
[
"MIT"
] | 12
|
2021-11-10T21:03:19.000Z
|
2022-03-21T21:55:34.000Z
|
npc_engine/exporters/__init__.py
|
npc-engine/npc-engine
|
0047794e96369c23515f794a1e77009c516a382c
|
[
"MIT"
] | 1
|
2021-12-05T14:51:44.000Z
|
2021-12-05T14:51:44.000Z
|
npc_engine/exporters/__init__.py
|
npc-engine/npc-engine
|
0047794e96369c23515f794a1e77009c516a382c
|
[
"MIT"
] | null | null | null |
# flake8: noqa
from npc_engine.exporters.hf_chatbot_exporter import HfChatbotExporter
from npc_engine.exporters.hf_classifier_exporter import HfClassifierExporter
from npc_engine.exporters.hf_similarity_exporter import HfSimilarityExporter
| 48.8
| 77
| 0.889344
| 29
| 244
| 7.172414
| 0.517241
| 0.100962
| 0.1875
| 0.317308
| 0.346154
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004444
| 0.077869
| 244
| 4
| 78
| 61
| 0.92
| 0.04918
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
541fe2385f644e16a7221af362a935ef88010c3c
| 217
|
py
|
Python
|
runtime/bamboo-pipeline/test/conftest.py
|
DomineCore/bamboo-engine
|
fb4583e70f9e1e87d9d48c2393db8d8104306f37
|
[
"MIT"
] | 55
|
2021-09-07T11:50:35.000Z
|
2022-03-23T13:19:38.000Z
|
runtime/bamboo-pipeline/test/conftest.py
|
DomineCore/bamboo-engine
|
fb4583e70f9e1e87d9d48c2393db8d8104306f37
|
[
"MIT"
] | 64
|
2021-09-07T12:04:12.000Z
|
2022-03-29T03:47:18.000Z
|
runtime/bamboo-pipeline/test/conftest.py
|
DomineCore/bamboo-engine
|
fb4583e70f9e1e87d9d48c2393db8d8104306f37
|
[
"MIT"
] | 20
|
2021-09-07T11:52:08.000Z
|
2022-03-28T08:05:22.000Z
|
from django.db.backends.base.base import BaseDatabaseWrapper
from pytest_django.plugin import _blocking_manager

# Disable pytest-django's database-access guard for the whole test session and
# restore the normal connection behavior, so tests can hit the DB without the
# django_db marker.
_blocking_manager.unblock()
_blocking_manager._blocking_wrapper = BaseDatabaseWrapper.ensure_connection
| 36.166667
| 75
| 0.889401
| 25
| 217
| 7.32
| 0.6
| 0.245902
| 0.251366
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059908
| 217
| 5
| 76
| 43.4
| 0.897059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
542b9629e9d6bb252357d8c98764a275340b901e
| 5,935
|
py
|
Python
|
VGG19Model.py
|
PashaIanko/Covid19Classifier
|
ee75a2b17babb8c9701351dfaa6052afa083168f
|
[
"MIT"
] | null | null | null |
VGG19Model.py
|
PashaIanko/Covid19Classifier
|
ee75a2b17babb8c9701351dfaa6052afa083168f
|
[
"MIT"
] | 1
|
2022-01-27T13:30:38.000Z
|
2022-01-27T13:30:38.000Z
|
VGG19Model.py
|
PashaIanko/Covid19Classifier
|
ee75a2b17babb8c9701351dfaa6052afa083168f
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.models import Model as tf_Model
from Model import Model
from PreprocessingParameters import PreprocessingParameters
from DataProperties import DataProperties
class VGG19Model(Model):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def construct_model(self):
        # inputs = layers.Input(
        #     shape = PreprocessingParameters.target_shape + \
        #         PreprocessingParameters.n_color_channels
        # )
        # resize = tf.keras.layers.Lambda(
        #     lambda image: tf.image.resize(
        #         image,
        #         (224, 224),
        #         preserve_aspect_ratio = True
        #     )
        # )(inputs)
        # x = Conv2D(input_shape = (224, 224, 3), filters = 64, kernel_size = (3, 3), activation = 'relu', padding = 'same')(resize)
        # x = Conv2D(filters = 64, kernel_size = (3, 3), activation = 'relu', padding = 'same')(x)
        # x = MaxPooling2D(pool_size = (2, 2), strides = (2, 2))(x)
        # x = Conv2D(filters = 128, kernel_size = (3, 3), activation = 'relu', padding = 'same')(x)
        # x = Conv2D(filters = 128, kernel_size = (3, 3), activation = 'relu', padding = 'same')(x)
        # x = MaxPooling2D(pool_size = (2, 2), strides = (2, 2))(x)
        # x = Conv2D(filters = 256, kernel_size = (3, 3), activation = 'relu', padding = 'same')(x)
        # x = Conv2D(filters = 256, kernel_size = (3, 3), activation = 'relu', padding = 'same')(x)
        # x = Conv2D(filters = 256, kernel_size = (3, 3), activation = 'relu', padding = 'same')(x)
        # x = Conv2D(filters = 256, kernel_size = (3, 3), activation = 'relu', padding = 'same')(x)
        # x = MaxPooling2D(pool_size = (2, 2), strides = (2, 2))(x)
        # x = Conv2D(filters = 512, kernel_size = (3, 3), activation = 'relu', padding = 'same')(x)
        # x = Conv2D(filters = 512, kernel_size = (3, 3), activation = 'relu', padding = 'same')(x)
        # x = Conv2D(filters = 512, kernel_size = (3, 3), activation = 'relu', padding = 'same')(x)
        # x = Conv2D(filters = 512, kernel_size = (3, 3), activation = 'relu', padding = 'same')(x)
        # x = MaxPooling2D(pool_size = (2, 2), strides = (2, 2))(x)
        # x = Conv2D(filters = 512, kernel_size = (3, 3), activation = 'relu', padding = 'same')(x)
        # x = Conv2D(filters = 512, kernel_size = (3, 3), activation = 'relu', padding = 'same')(x)
        # x = Conv2D(filters = 512, kernel_size = (3, 3), activation = 'relu', padding = 'same')(x)
        # x = Conv2D(filters = 512, kernel_size = (3, 3), activation = 'relu', padding = 'same')(x)
        # x = MaxPooling2D(pool_size = (2, 2), strides = (2, 2))(x)
        # x = Flatten()(x)
        # x = Dense(units = 4096, activation = 'relu')(x)
        # x = Dense(units = 4096, activation = 'relu')(x)
        # predictions = Dense(units = DataProperties.n_classes, activation = 'softmax')(x)
        # self.model = tf_Model(inputs = inputs, outputs = predictions)
        model = tf.keras.models.Sequential()
        model.add(layers.Conv2D(64, kernel_size = (3,3), padding = 'same', activation = 'relu', input_shape = (224, 224, 3), kernel_initializer = 'he_normal'))
        model.add(layers.Conv2D(64, kernel_size = (3,3), padding = 'same', activation = 'relu', kernel_initializer = 'he_normal'))
        model.add(layers.MaxPooling2D(pool_size = (2,2), strides = (2,2)))
        model.add(layers.Conv2D(128, kernel_size = (3,3), padding = 'same', activation = 'relu', kernel_initializer = 'he_normal'))
        model.add(layers.Conv2D(128, kernel_size = (3,3), padding = 'same', activation = 'relu', kernel_initializer = 'he_normal'))
        model.add(layers.MaxPooling2D(pool_size = (2,2), strides = (2,2)))
        model.add(layers.Conv2D(256, kernel_size = (3,3), padding = 'same', activation = 'relu', kernel_initializer = 'he_normal'))
        model.add(layers.Conv2D(256, kernel_size = (3,3), padding = 'same', activation = 'relu', kernel_initializer = 'he_normal'))
        model.add(layers.Conv2D(256, kernel_size = (3,3), padding = 'same', activation = 'relu', kernel_initializer = 'he_normal'))
        model.add(layers.Conv2D(256, kernel_size = (3,3), padding = 'same', activation = 'relu', kernel_initializer = 'he_normal'))
        model.add(layers.MaxPooling2D(pool_size = (2,2), strides = (2,2)))
        model.add(layers.Conv2D(512, kernel_size = (3,3), padding = 'same', activation = 'relu', kernel_initializer = 'he_normal'))
        model.add(layers.Conv2D(512, kernel_size = (3,3), padding = 'same', activation = 'relu', kernel_initializer = 'he_normal'))
        model.add(layers.Conv2D(512, kernel_size = (3,3), padding = 'same', activation = 'relu', kernel_initializer = 'he_normal'))
        model.add(layers.Conv2D(512, kernel_size = (3,3), padding = 'same', activation = 'relu', kernel_initializer = 'he_normal'))
        model.add(layers.MaxPooling2D(pool_size = (2,2), strides = (2,2)))
        model.add(layers.Conv2D(512, kernel_size = (3,3), padding = 'same', activation = 'relu', kernel_initializer = 'he_normal'))
        model.add(layers.Conv2D(512, kernel_size = (3,3), padding = 'same', activation = 'relu', kernel_initializer = 'he_normal'))
        model.add(layers.Conv2D(512, kernel_size = (3,3), padding = 'same', activation = 'relu', kernel_initializer = 'he_normal'))
        model.add(layers.Conv2D(512, kernel_size = (3,3), padding = 'same', activation = 'relu', kernel_initializer = 'he_normal'))
        model.add(layers.MaxPooling2D(pool_size = (2,2), strides = (2,2)))
        model.add(layers.Flatten())
        model.add(layers.Dense(4096, activation = 'relu'))
        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(4096, activation = 'relu'))
        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(3, activation = 'softmax'))
        self.model = model
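
# Hypothetical usage sketch (added for illustration; the Model base class's
# constructor arguments are not shown in this file, so the bare call below is
# an assumption):
#     vgg = VGG19Model()
#     vgg.construct_model()
#     vgg.model.summary()  # stacked VGG-19 conv/pool blocks + two 4096-unit Dense layers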
| 59.949495
| 159
| 0.609773
| 747
| 5,935
| 4.720214
| 0.091031
| 0.142938
| 0.09983
| 0.108905
| 0.811117
| 0.801475
| 0.801475
| 0.801475
| 0.777085
| 0.777085
| 0
| 0.060613
| 0.218871
| 5,935
| 98
| 160
| 60.561224
| 0.699957
| 0.387532
| 0
| 0.589744
| 0
| 0
| 0.079811
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051282
| false
| 0
| 0.153846
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
583d1bbefbaffec19a503ef7755d347261131277
| 978
|
py
|
Python
|
python/src/chirpstack_api/as_pb/external/api/__init__.py
|
sophiekovalevsky/chirpstack-api
|
c9f3cb3e2a006d42286f046ba7cfcfa716512da3
|
[
"MIT"
] | 55
|
2019-11-05T15:46:49.000Z
|
2022-03-23T14:31:33.000Z
|
python/src/chirpstack_api/as_pb/external/api/__init__.py
|
sophiekovalevsky/chirpstack-api
|
c9f3cb3e2a006d42286f046ba7cfcfa716512da3
|
[
"MIT"
] | 39
|
2019-11-08T21:03:45.000Z
|
2022-03-01T12:40:36.000Z
|
python/src/chirpstack_api/as_pb/external/api/__init__.py
|
sophiekovalevsky/chirpstack-api
|
c9f3cb3e2a006d42286f046ba7cfcfa716512da3
|
[
"MIT"
] | 101
|
2019-11-22T13:59:59.000Z
|
2022-03-14T09:52:46.000Z
|
from .application_pb2 import *
from .application_pb2_grpc import *
from .device_pb2 import *
from .device_pb2_grpc import *
from .deviceProfile_pb2_grpc import *
from .deviceProfile_pb2 import *
from .deviceQueue_pb2_grpc import *
from .deviceQueue_pb2 import *
from .frameLog_pb2_grpc import *
from .frameLog_pb2 import *
from .fuotaDeployment_pb2_grpc import *
from .fuotaDeployment_pb2 import *
from .gateway_pb2_grpc import *
from .gateway_pb2 import *
from .gatewayProfile_pb2_grpc import *
from .gatewayProfile_pb2 import *
from .internal_pb2_grpc import *
from .internal_pb2 import *
from .multicastGroup_pb2_grpc import *
from .multicastGroup_pb2 import *
from .networkServer_pb2_grpc import *
from .networkServer_pb2 import *
from .organization_pb2_grpc import *
from .organization_pb2 import *
from .profiles_pb2_grpc import *
from .profiles_pb2 import *
from .serviceProfile_pb2_grpc import *
from .serviceProfile_pb2 import *
from .user_pb2_grpc import *
from .user_pb2 import *
| 31.548387
| 39
| 0.829243
| 135
| 978
| 5.674074
| 0.140741
| 0.37859
| 0.254569
| 0.332898
| 0.082245
| 0.082245
| 0
| 0
| 0
| 0
| 0
| 0.034443
| 0.109407
| 978
| 30
| 40
| 32.6
| 0.845006
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
58536d40f031311864fed7df51a7a93a0229f817
| 1,075
|
py
|
Python
|
examples/rotate/rotate.py
|
dbrainio/wrappa
|
e7ecc65ce9025d89e2abae98de07902e517079df
|
[
"MIT"
] | 11
|
2019-01-21T17:37:42.000Z
|
2021-11-01T14:57:31.000Z
|
examples/rotate/rotate.py
|
dbrainio/wrappa
|
e7ecc65ce9025d89e2abae98de07902e517079df
|
[
"MIT"
] | null | null | null |
examples/rotate/rotate.py
|
dbrainio/wrappa
|
e7ecc65ce9025d89e2abae98de07902e517079df
|
[
"MIT"
] | null | null | null |
import numpy as np
from wrappa import WrappaObject, WrappaImage
class DSModel:
    def __init__(self, **kwargs):
        pass

    def predict(self, data, **kwargs):
        _ = kwargs
        # Data is always an array of WrappaObjects
        responses = []
        for obj in data:
            img = obj.image.as_ndarray
            rotated_img = np.rot90(img)
            resp = WrappaObject(WrappaImage.init_from_ndarray(
                payload=rotated_img,
                ext=obj.image.ext,
            ))
            responses.append(resp)
        return responses

    def predict_180(self, data, **kwargs):
        _ = kwargs
        # Data is always an array of WrappaObjects
        responses = []
        for obj in data:
            img = obj.image.as_ndarray
            rotated_img = np.rot90(img)
            rotated_img = np.rot90(rotated_img)
            resp = WrappaObject(WrappaImage.init_from_ndarray(
                payload=rotated_img,
                ext=obj.image.ext,
            ))
            responses.append(resp)
        return responses
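
# Minimal usage sketch (added for illustration; the 'png' extension and the
# all-zero image are hypothetical stand-ins, not taken from this file):
#     img = WrappaImage.init_from_ndarray(payload=np.zeros((4, 4, 3), dtype=np.uint8), ext='png')
#     model = DSModel()
#     rotated = model.predict([WrappaObject(img)])  # list in, list out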
| 28.289474
| 62
| 0.55814
| 115
| 1,075
| 5.052174
| 0.330435
| 0.10327
| 0.061962
| 0.08778
| 0.769363
| 0.769363
| 0.769363
| 0.769363
| 0.769363
| 0.769363
| 0
| 0.013196
| 0.365581
| 1,075
| 38
| 63
| 28.289474
| 0.83871
| 0.075349
| 0
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0.033333
| 0.066667
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
585c4944367fde099ad0575ef59dc2f83d1d1d18
| 118
|
py
|
Python
|
pytasking/__init__.py
|
TokenChingy/multitasking
|
5aabf03c89294c6430d74533bbcc8bd8cba02b1c
|
[
"MIT"
] | 45
|
2019-12-03T02:47:11.000Z
|
2022-02-02T14:33:51.000Z
|
pytasking/__init__.py
|
TokenChingy/multitasking
|
5aabf03c89294c6430d74533bbcc8bd8cba02b1c
|
[
"MIT"
] | null | null | null |
pytasking/__init__.py
|
TokenChingy/multitasking
|
5aabf03c89294c6430d74533bbcc8bd8cba02b1c
|
[
"MIT"
] | 5
|
2019-12-03T08:46:02.000Z
|
2020-01-03T13:27:44.000Z
|
from pytasking.wrappers import *
from pytasking.utilities import *
from pytasking.manager import *
name = "pytasking"
| 23.6
| 33
| 0.79661
| 14
| 118
| 6.714286
| 0.5
| 0.414894
| 0.404255
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127119
| 118
| 4
| 34
| 29.5
| 0.912621
| 0
| 0
| 0
| 0
| 0
| 0.076271
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5869b53d4c6fdac2d5925abc47e0311623489a3b
| 11
|
py
|
Python
|
src/dsalgo/range_dp.py
|
kagemeka/python-algorithms
|
dface89b8c618845cf524429aa8e97c4b2b10ceb
|
[
"MIT"
] | 1
|
2022-02-10T02:13:07.000Z
|
2022-02-10T02:13:07.000Z
|
src/dsalgo/range_dp.py
|
kagemeka/python-algorithms
|
dface89b8c618845cf524429aa8e97c4b2b10ceb
|
[
"MIT"
] | 6
|
2022-01-05T09:15:54.000Z
|
2022-01-09T05:48:43.000Z
|
src/dsalgo/range_dp.py
|
kagemeka/python-algorithms
|
dface89b8c618845cf524429aa8e97c4b2b10ceb
|
[
"MIT"
] | null | null | null |
"""
DP
"""
| 2.75
| 3
| 0.181818
| 1
| 11
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.272727
| 11
| 3
| 4
| 3.666667
| 0.25
| 0.181818
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
586ace71b3e4dd5e3733cc39221e570ffb023e73
| 27,636
|
py
|
Python
|
test/unit/object/test_item.py
|
jcleblanc/box-python-sdk
|
88d2a2daa129d76538fe0b5f90478dd4f7c4b8ad
|
[
"Apache-2.0"
] | null | null | null |
test/unit/object/test_item.py
|
jcleblanc/box-python-sdk
|
88d2a2daa129d76538fe0b5f90478dd4f7c4b8ad
|
[
"Apache-2.0"
] | null | null | null |
test/unit/object/test_item.py
|
jcleblanc/box-python-sdk
|
88d2a2daa129d76538fe0b5f90478dd4f7c4b8ad
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
from __future__ import unicode_literals
import json
import pytest
from boxsdk.exception import BoxAPIException
from boxsdk.config import API
from boxsdk.object.watermark import Watermark
from boxsdk.object.collaboration import Collaboration
from boxsdk.util.default_arg_value import SDK_VALUE_NOT_SET

@pytest.fixture(params=('file', 'folder'))
def test_item_and_response(test_file, test_folder, mock_file_response, mock_folder_response, request):
    if request.param == 'file':
        return test_file, mock_file_response
    return test_folder, mock_folder_response


@pytest.fixture(params=('empty', 'same', 'other'))
def test_collections_for_addition(mock_collection_id, request):
    """Fixture returning a tuple of the expected collections values before and after addition"""
    other_collection_id = mock_collection_id + '2'
    if request.param == 'empty':
        return [], [{'id': mock_collection_id}]
    elif request.param == 'same':
        # Adding a second instance of the same collection is handled correctly by the API,
        # so for simplicity we do not check for an existing copy of the collection and just append
        return [{'id': mock_collection_id}], [{'id': mock_collection_id}, {'id': mock_collection_id}]
    elif request.param == 'other':
        return [{'id': other_collection_id}], [{'id': other_collection_id}, {'id': mock_collection_id}]
    raise NotImplementedError("Forgot to implement {}".format(request.param))


@pytest.fixture(params=('empty', 'only_removed', 'only_other', 'other_and_removed'))
def test_collections_for_removal(mock_collection_id, request):
    """Fixture returning a tuple of the expected collections values before and after removal"""
    other_collection_id = mock_collection_id + '2'
    if request.param == 'empty':
        return [], []
    elif request.param == 'only_removed':
        return [{'id': mock_collection_id}], []
    elif request.param == 'only_other':
        return [{'id': other_collection_id}], [{'id': other_collection_id}]
    elif request.param == 'other_and_removed':
        return [{'id': mock_collection_id}, {'id': other_collection_id}], [{'id': other_collection_id}]
    raise NotImplementedError("Forgot to implement {}".format(request.param))
def test_update_info(test_item_and_response, mock_box_session, etag, if_match_header):
# pylint:disable=redefined-outer-name, protected-access
test_item, mock_item_response = test_item_and_response
expected_url = test_item.get_url()
mock_box_session.put.return_value = mock_item_response
data = {'foo': 'bar', 'baz': {'foo': 'bar'}, 'num': 4}
update_response = test_item.update_info(data, etag=etag)
mock_box_session.put.assert_called_once_with(expected_url, data=json.dumps(data), headers=if_match_header, params=None)
assert isinstance(update_response, test_item.__class__)
assert update_response.object_id == test_item.object_id
def test_update_info_with_default_request_kwargs(test_item_and_response, mock_box_session, mock_box_session_2):
# pylint:disable=redefined-outer-name, protected-access
test_item, mock_item_response = test_item_and_response
expected_url = test_item.get_url()
mock_box_session.with_default_network_request_kwargs.return_value = mock_box_session_2
mock_box_session_2.put.return_value = mock_item_response
data = {'foo': 'bar', 'baz': {'foo': 'bar'}, 'num': 4}
extra_network_parameters = {'timeout': 1}
update_response = test_item.update_info(data, extra_network_parameters=extra_network_parameters)
mock_box_session.with_default_network_request_kwargs.assert_called_once_with({'timeout': 1})
mock_box_session_2.put.assert_called_once_with(expected_url, data=json.dumps(data), headers=None, params=None)
assert isinstance(update_response, test_item.__class__)
assert update_response.object_id == test_item.object_id
def test_rename_item(test_item_and_response, mock_box_session):
# pylint:disable=redefined-outer-name, protected-access
test_item, mock_item_response = test_item_and_response
expected_url = test_item.get_url()
mock_box_session.put.return_value = mock_item_response
rename_response = test_item.rename('new name')
mock_box_session.put.assert_called_once_with(expected_url, data=json.dumps({'name': 'new name'}), params=None, headers=None)
assert isinstance(rename_response, test_item.__class__)
@pytest.mark.parametrize('params, expected_data', [
({}, {}),
({'name': 'New name.pdf'}, {'name': 'New name.pdf'})
])
def test_copy_item(test_item_and_response, mock_box_session, test_folder, mock_object_id, params, expected_data):
# pylint:disable=redefined-outer-name, protected-access
test_item, mock_item_response = test_item_and_response
expected_url = test_item.get_url('copy')
expected_body = {
'parent': {'id': mock_object_id},
}
expected_body.update(expected_data)
mock_box_session.post.return_value = mock_item_response
copy_response = test_item.copy(test_folder, **params)
mock_box_session.post.assert_called_once_with(expected_url, data=json.dumps(expected_body))
assert isinstance(copy_response, test_item.__class__)
@pytest.mark.parametrize('params, expected_data', [
({}, {}),
({'name': 'New name.pdf'}, {'name': 'New name.pdf'})
])
def test_move_item(test_item_and_response, mock_box_session, test_folder, mock_object_id, params, expected_data):
# pylint:disable=redefined-outer-name, protected-access
test_item, mock_item_response = test_item_and_response
expected_url = test_item.get_url()
expected_body = {
'parent': {'id': mock_object_id},
}
expected_body.update(expected_data)
mock_box_session.put.return_value = mock_item_response
move_response = test_item.move(test_folder, **params)
mock_box_session.put.assert_called_once_with(expected_url, data=json.dumps(expected_body), params=None, headers=None)
assert isinstance(move_response, test_item.__class__)
def test_get_shared_link(
test_item_and_response,
mock_box_session,
shared_link_access,
shared_link_unshared_at,
shared_link_password,
shared_link_can_download,
shared_link_can_preview,
test_url,
etag,
if_match_header,
):
# pylint:disable=redefined-outer-name, protected-access
test_item, _ = test_item_and_response
expected_url = test_item.get_url()
mock_box_session.put.return_value.json.return_value = {
'type': test_item.object_type,
'id': test_item.object_id,
'shared_link': {
'url': test_url,
},
}
expected_data = {'shared_link': {}}
if shared_link_access is not None:
expected_data['shared_link']['access'] = shared_link_access
if shared_link_unshared_at is not SDK_VALUE_NOT_SET:
expected_data['shared_link']['unshared_at'] = shared_link_unshared_at
if shared_link_can_download is not None or shared_link_can_preview is not None:
expected_data['shared_link']['permissions'] = permissions = {}
if shared_link_can_download is not None:
permissions['can_download'] = shared_link_can_download
if shared_link_can_preview is not None:
permissions['can_preview'] = shared_link_can_preview
if shared_link_password is not None:
expected_data['shared_link']['password'] = shared_link_password
url = test_item.get_shared_link(
etag=etag,
access=shared_link_access,
unshared_at=shared_link_unshared_at,
password=shared_link_password,
allow_download=shared_link_can_download,
allow_preview=shared_link_can_preview,
)
mock_box_session.put.assert_called_once_with(
expected_url,
data=json.dumps(expected_data),
headers=if_match_header,
params=None,
)
assert url == test_url
def test_clear_unshared_at_for_shared_link(
test_item_and_response,
mock_box_session,
test_url,
):
test_item, _ = test_item_and_response
expected_url = test_item.get_url()
mock_box_session.put.return_value.json.return_value = {
'type': test_item.object_type,
'id': test_item.object_id,
'shared_link': {
'url': test_url,
'unshared_at': None,
},
}
expected_data = {'shared_link': {'unshared_at': None, }, }
shared_link = test_item.get_shared_link(unshared_at=None)
mock_box_session.put.assert_called_once_with(
expected_url,
data=json.dumps(expected_data),
headers=None,
params=None,
)
assert shared_link == test_url
def test_remove_shared_link(test_item_and_response, mock_box_session, etag, if_match_header):
# pylint:disable=redefined-outer-name, protected-access
test_item, _ = test_item_and_response
expected_url = test_item.get_url()
mock_box_session.put.return_value.json.return_value = {
'type': test_item.object_type,
'id': test_item.object_id,
'shared_link': None,
}
removed = test_item.remove_shared_link(etag=etag)
mock_box_session.put.assert_called_once_with(
expected_url,
data=json.dumps({'shared_link': None}),
headers=if_match_header,
params=None,
)
assert removed is True
@pytest.mark.parametrize('fields', (None, ['name', 'created_at']))
def test_get(test_item_and_response, mock_box_session, fields, mock_object_id, etag, if_none_match_header):
# pylint:disable=redefined-outer-name, protected-access
test_item, mock_item_response = test_item_and_response
expected_url = test_item.get_url()
mock_box_session.get.return_value = mock_item_response
expected_params = {'fields': ','.join(fields)} if fields else None
info = test_item.get(fields, etag=etag)
mock_box_session.get.assert_called_once_with(expected_url, params=expected_params, headers=if_none_match_header)
assert isinstance(info, test_item.__class__)
assert info.id == mock_object_id
def test_add_to_collection(test_item_and_response, mock_box_session, mock_collection, test_collections_for_addition):
# pylint:disable=redefined-outer-name, protected-access
test_item, mock_item_response = test_item_and_response
current_collections, expected_collections = test_collections_for_addition
expected_url = test_item.get_url()
expected_params = {'fields': 'collections'}
expected_data = {
'collections': expected_collections
}
mock_response = {
'type': test_item.object_type,
'id': test_item.object_id,
'collections': current_collections,
}
mock_box_session.get.return_value.json.return_value = mock_response
mock_box_session.put.return_value = mock_item_response
test_item.add_to_collection(mock_collection)
mock_box_session.get.assert_called_once_with(expected_url, headers=None, params=expected_params)
mock_box_session.put.assert_called_once_with(expected_url, data=json.dumps(expected_data), headers=None, params=None)
def test_remove_from_collection(test_item_and_response, mock_box_session, mock_collection, test_collections_for_removal):
# pylint:disable=redefined-outer-name, protected-access
test_item, mock_item_response = test_item_and_response
current_collections, expected_collections = test_collections_for_removal
expected_url = test_item.get_url()
expected_params = {'fields': 'collections'}
expected_data = {
'collections': expected_collections
}
mock_response = {
'type': test_item.object_type,
'id': test_item.object_id,
'collections': current_collections,
}
mock_box_session.get.return_value.json.return_value = mock_response
mock_box_session.put.return_value = mock_item_response
test_item.remove_from_collection(mock_collection)
mock_box_session.get.assert_called_once_with(expected_url, headers=None, params=expected_params)
mock_box_session.put.assert_called_once_with(expected_url, data=json.dumps(expected_data), headers=None, params=None)
def test_get_watermark(test_item_and_response, mock_box_session):
test_item, _ = test_item_and_response
created_at = '2016-10-31T15:33:33-07:00'
modified_at = '2016-10-31T15:33:33-07:00'
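# the watermark endpoint pluralizes the object type, e.g. /files/{id}/watermark or /folders/{id}/watermark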
expected_url = '{0}/{1}s/{2}/watermark'.format(API.BASE_API_URL, test_item.object_type, test_item.object_id)
mock_box_session.get.return_value.json.return_value = {
'watermark': {
'created_at': created_at,
'modified_at': modified_at,
},
}
watermark = test_item.get_watermark()
mock_box_session.get.assert_called_once_with(expected_url)
assert isinstance(watermark, Watermark)
assert watermark['created_at'] == created_at
assert watermark['modified_at'] == modified_at
def test_apply_watermark(test_item_and_response, mock_box_session):
test_item, _ = test_item_and_response
created_at = '2016-10-31T15:33:33-07:00'
modified_at = '2016-10-31T15:33:33-07:00'
expected_url = '{0}/{1}s/{2}/watermark'.format(API.BASE_API_URL, test_item.object_type, test_item.object_id)
mock_box_session.put.return_value.json.return_value = {
'watermark': {
'created_at': created_at,
'modified_at': modified_at,
},
}
watermark = test_item.apply_watermark()
mock_box_session.put.assert_called_once_with(expected_url, data='{"watermark": {"imprint": "default"}}')
assert isinstance(watermark, Watermark)
assert watermark['created_at'] == created_at
assert watermark['modified_at'] == modified_at
def test_delete_watermark(test_item_and_response, mock_box_session):
test_item, _ = test_item_and_response
expected_url = '{0}/{1}s/{2}/watermark'.format(API.BASE_API_URL, test_item.object_type, test_item.object_id)
mock_box_session.delete.return_value.ok = True
is_watermark_deleted = test_item.delete_watermark()
mock_box_session.delete.assert_called_once_with(expected_url, expect_json_response=False)
assert is_watermark_deleted is True
def test_collaborate_with_group(test_item_and_response, test_group, mock_box_session):
# pylint:disable=redefined-outer-name, protected-access
test_item, _ = test_item_and_response
expected_url = '{0}/collaborations'.format(API.BASE_API_URL)
expected_data = {
'item': {
'type': test_item.object_type,
'id': test_item.object_id,
},
'accessible_by': {
'type': test_group.object_type,
'id': test_group.object_id,
},
'role': 'editor',
}
mock_collaboration = {
'type': 'collaboration',
'id': '1234',
'created_by': {
'type': 'user',
'id': '1111',
}
}
mock_box_session.post.return_value.json.return_value = mock_collaboration
collaboration = test_item.collaborate(test_group, 'editor')
mock_box_session.post.assert_called_once_with(expected_url, data=json.dumps(expected_data), params={})
assert collaboration.id == mock_collaboration['id']
assert collaboration['type'] == mock_collaboration['type']
assert collaboration['created_by']['id'] == mock_collaboration['created_by']['id']
@pytest.mark.parametrize('can_view_path,fields,notify,data,params', [
(None, None, None, {}, {}),
(True, None, None, {'can_view_path': True}, {}),
(False, None, None, {'can_view_path': False}, {}),
(None, ['type', 'id', 'created_by'], None, {}, {'fields': 'type,id,created_by'}),
(None, None, True, {}, {'notify': True}),
(None, None, False, {}, {'notify': False}),
(True, ['type', 'id', 'created_by'], False, {'can_view_path': True}, {'fields': 'type,id,created_by', 'notify': False})
])
def test_collaborate_with_user(test_item_and_response, mock_user, mock_box_session, can_view_path, fields, notify, data, params):
# pylint:disable=redefined-outer-name, protected-access
test_item, _ = test_item_and_response
expected_url = '{0}/collaborations'.format(API.BASE_API_URL)
expected_data = {
'item': {
'type': test_item.object_type,
'id': test_item.object_id,
},
'accessible_by': {
'type': mock_user.object_type,
'id': mock_user.object_id,
},
'role': 'editor',
}
expected_data.update(data)
mock_collaboration = {
'type': 'collaboration',
'id': '1234',
'created_by': {
'type': 'user',
'id': '1111',
}
}
expected_params = params
mock_box_session.post.return_value.json.return_value = mock_collaboration
collaboration = test_item.collaborate(mock_user, 'editor', can_view_path=can_view_path, fields=fields, notify=notify)
mock_box_session.post.assert_called_once_with(expected_url, data=json.dumps(expected_data), params=expected_params)
assert collaboration.id == mock_collaboration['id']
assert collaboration['type'] == mock_collaboration['type']
assert collaboration['created_by']['id'] == mock_collaboration['created_by']['id']
@pytest.mark.parametrize('can_view_path,fields,notify,data,params', [
(None, None, None, {}, {}),
(True, None, None, {'can_view_path': True}, {}),
(False, None, None, {'can_view_path': False}, {}),
(None, ['type', 'id', 'created_by'], None, {}, {'fields': 'type,id,created_by'}),
(None, None, True, {}, {'notify': True}),
(None, None, False, {}, {'notify': False}),
(True, ['type', 'id', 'created_by'], False, {'can_view_path': True}, {'fields': 'type,id,created_by', 'notify': False})
])
def test_collaborate_with_login(test_item_and_response, mock_box_session, can_view_path, fields, notify, data, params):
# pylint:disable=redefined-outer-name, protected-access
test_item, _ = test_item_and_response
expected_url = '{0}/collaborations'.format(API.BASE_API_URL)
expected_data = {
'item': {
'type': test_item.object_type,
'id': test_item.object_id,
},
'accessible_by': {
'type': 'user',
'login': 'test@example.com',
},
'role': 'editor',
}
expected_data.update(data)
mock_collaboration = {
'type': 'collaboration',
'id': '1234',
'created_by': {
'type': 'user',
'id': '1111',
}
}
expected_params = params
mock_box_session.post.return_value.json.return_value = mock_collaboration
collaboration = test_item.collaborate_with_login('test@example.com', 'editor', can_view_path=can_view_path, fields=fields, notify=notify)
mock_box_session.post.assert_called_once_with(expected_url, data=json.dumps(expected_data), params=expected_params)
assert collaboration.id == mock_collaboration['id']
assert collaboration['type'] == mock_collaboration['type']
assert collaboration['created_by']['id'] == mock_collaboration['created_by']['id']
def test_collaborations(test_item_and_response, mock_box_session):
# pylint:disable=redefined-outer-name, protected-access
test_item, _ = test_item_and_response
expected_url = '{0}/{1}s/{2}/collaborations'.format(API.BASE_API_URL, test_item.object_type, test_item.object_id)
mock_collaboration = {
'type': 'collaboration',
'id': '12345',
'created_by': {
'type': 'user',
'id': '33333',
},
}
mock_box_session.get.return_value.json.return_value = {
'limit': 500,
'entries': [mock_collaboration]
}
collaborations = test_item.get_collaborations(limit=500)
collaboration = collaborations.next()
mock_box_session.get.assert_called_once_with(expected_url, params={'limit': 500})
assert isinstance(collaboration, Collaboration)
assert collaboration.id == mock_collaboration['id']
assert collaboration.type == mock_collaboration['type']
assert collaboration['created_by']['type'] == 'user'
assert collaboration['created_by']['id'] == '33333'
def test_get_all_metadata(test_item_and_response, mock_box_session):
test_item, _ = test_item_and_response
expected_url = '{0}/{1}s/{2}/metadata'.format(API.BASE_API_URL, test_item.object_type, test_item.object_id)
mock_metadata = {
'currentDocumentStage': 'prioritization',
'needsApprovalFrom': 'planning team',
'$type': 'documentFlow-452b4c9d-c3ad-4ac7-b1ad-9d5192f2fc5f',
'$parent': 'folder_998951261',
'$id': 'e57f90ff-0044-48c2-807d-06b908765baf',
'$version': 1,
'$typeVersion': 2,
'maximumDaysAllowedInCurrentStage': 5,
'$template': 'documentFlow',
'$scope': 'enterprise_12345',
}
mock_box_session.get.return_value.json.return_value = {
'limit': 100,
'entries': [mock_metadata]
}
all_metadata = test_item.get_all_metadata()
metadata = all_metadata.next()
mock_box_session.get.assert_called_once_with(expected_url, params={})
assert isinstance(metadata, dict)
for key in metadata:
assert metadata[key] == mock_metadata[key]
def test_add_classification(test_item_and_response, mock_box_session):
# pylint:disable=redefined-outer-name
test_item, _ = test_item_and_response
expected_url = '{0}/{1}s/{2}/metadata/enterprise/securityClassification-6VMVochwUWo'.format(
API.BASE_API_URL,
test_item.object_type,
test_item.object_id,
)
metadata_response = {
'Box__Security__Classification__Key': 'Public',
}
metadata_response = mock_box_session.post.return_value.json.return_value = metadata_response
data = {
'Box__Security__Classification__Key': 'Public'
}
headers = {
b'Content-Type': b'application/json'
}
metadata = test_item.add_classification('Public')
mock_box_session.post.assert_called_once_with(expected_url, headers=headers, data=json.dumps(data))
assert metadata == metadata_response['Box__Security__Classification__Key']
def test_update_classification(test_item_and_response, mock_box_session):
# pylint:disable=redefined-outer-name
test_item, _ = test_item_and_response
expected_url = '{0}/{1}s/{2}/metadata/enterprise/securityClassification-6VMVochwUWo'.format(
API.BASE_API_URL,
test_item.object_type,
test_item.object_id,
)
metadata_response = {
'Box__Security__Classification__Key': 'Internal',
}
metadata_response = mock_box_session.put.return_value.json.return_value = metadata_response
data = [{
'op': 'add',
'path': '/Box__Security__Classification__Key',
'value': 'Internal',
}]
headers = {
b'Content-Type': b'application/json-patch+json'
}
metadata = test_item.update_classification('Internal')
mock_box_session.put.assert_called_once_with(expected_url, headers=headers, data=json.dumps(data))
assert metadata == metadata_response['Box__Security__Classification__Key']
def test_set_classification_succeeds(test_item_and_response, mock_box_session):
# pylint:disable=redefined-outer-name
test_item, _ = test_item_and_response
metadata_response = {
'Box__Security__Classification__Key': 'Public',
}
expected_url = '{0}/{1}s/{2}/metadata/enterprise/securityClassification-6VMVochwUWo'.format(
API.BASE_API_URL,
test_item.object_type,
test_item.object_id,
)
post_data = {
'Box__Security__Classification__Key': 'Public',
}
put_data = [{
'op': 'add',
'path': '/Box__Security__Classification__Key',
'value': 'Public',
}]
post_headers = {
b'Content-Type': b'application/json'
}
put_headers = {
b'Content-Type': b'application/json-patch+json'
}
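# a 409 Conflict from the POST means the classification already exists, so set_classification falls back to updating it via PUT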
mock_box_session.post.side_effect = [BoxAPIException(status=409)]
mock_box_session.put.return_value.json.return_value = metadata_response
metadata = test_item.set_classification('Public')
mock_box_session.post.assert_called_once_with(expected_url, data=json.dumps(post_data), headers=post_headers)
mock_box_session.put.assert_called_once_with(expected_url, data=json.dumps(put_data), headers=put_headers)
assert metadata == metadata_response['Box__Security__Classification__Key']
def test_set_classification_fails(test_item_and_response, mock_box_session):
# pylint:disable=redefined-outer-name
test_item, _ = test_item_and_response
expected_url = '{0}/{1}s/{2}/metadata/enterprise/securityClassification-6VMVochwUWo'.format(
API.BASE_API_URL,
test_item.object_type,
test_item.object_id,
)
post_data = {
'Box__Security__Classification__Key': 'Public',
}
post_headers = {
b'Content-Type': b'application/json'
}
mock_box_session.post.side_effect = [BoxAPIException(status=500)]
with pytest.raises(BoxAPIException):
test_item.set_classification('Public')
mock_box_session.post.assert_called_once_with(expected_url, data=json.dumps(post_data), headers=post_headers)
def test_get_classification_succeeds(test_item_and_response, mock_box_session):
# pylint:disable=redefined-outer-name
test_item, _ = test_item_and_response
expected_url = '{0}/{1}s/{2}/metadata/enterprise/securityClassification-6VMVochwUWo'.format(
API.BASE_API_URL,
test_item.object_type,
test_item.object_id,
)
metadata_response = {
'Box__Security__Classification__Key': 'Public'
}
mock_box_session.get.return_value.json.return_value = metadata_response
metadata = test_item.get_classification()
assert metadata == metadata_response['Box__Security__Classification__Key']
mock_box_session.get.assert_called_once_with(expected_url)
def test_get_classification_not_found(test_item_and_response, mock_box_session):
# pylint:disable=redefined-outer-name
test_item, _ = test_item_and_response
expected_url = '{0}/{1}s/{2}/metadata/enterprise/securityClassification-6VMVochwUWo'.format(
API.BASE_API_URL,
test_item.object_type,
test_item.object_id,
)
mock_box_session.get.side_effect = [BoxAPIException(status=404, code="instance_not_found")]
metadata = test_item.get_classification()
assert metadata is None
mock_box_session.get.assert_called_once_with(expected_url)
def test_get_classification_raises_exception(test_item_and_response, mock_box_session):
# pylint:disable=redefined-outer-name
test_item, _ = test_item_and_response
expected_url = '{0}/{1}s/{2}/metadata/enterprise/securityClassification-6VMVochwUWo'.format(
API.BASE_API_URL,
test_item.object_type,
test_item.object_id,
)
mock_box_session.get.side_effect = [BoxAPIException(status=500)]
with pytest.raises(BoxAPIException):
test_item.get_classification()
mock_box_session.get.assert_called_once_with(expected_url)
def test_remove_classification(test_item_and_response, mock_box_session, make_mock_box_request):
# pylint:disable=redefined-outer-name
test_item, _ = test_item_and_response
expected_url = '{0}/{1}s/{2}/metadata/enterprise/securityClassification-6VMVochwUWo'.format(
API.BASE_API_URL,
test_item.object_type,
test_item.object_id,
)
mock_box_session.delete.return_value, _ = make_mock_box_request(response_ok='success')
is_removed = test_item.remove_classification()
mock_box_session.delete.assert_called_once_with(expected_url)
assert is_removed == 'success'
| 42.128049
| 141
| 0.713417
| 3,535
| 27,636
| 5.165771
| 0.068741
| 0.074476
| 0.069766
| 0.057226
| 0.828104
| 0.798532
| 0.777285
| 0.753683
| 0.725535
| 0.693719
| 0
| 0.010972
| 0.172239
| 27,636
| 655
| 142
| 42.192366
| 0.787288
| 0.050695
| 0
| 0.536804
| 0
| 0
| 0.13687
| 0.055076
| 0
| 0
| 0
| 0
| 0.129264
| 1
| 0.05386
| false
| 0.007181
| 0.014363
| 0
| 0.084381
| 0.001795
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
587eeed9e036f9e097db59f1a6092d837aa67957
| 580
|
py
|
Python
|
micrograph_cleaner_em/__init__.py
|
rsanchezgarc/carbon_cleaner_em
|
8b3041f8b5049bc76414ffd38c30e8bdce19beea
|
[
"Apache-2.0"
] | 16
|
2019-06-24T08:52:28.000Z
|
2022-03-23T11:51:18.000Z
|
micrograph_cleaner_em/__init__.py
|
rsanchezgarc/carbonCleaner
|
8b3041f8b5049bc76414ffd38c30e8bdce19beea
|
[
"Apache-2.0"
] | 4
|
2019-10-15T14:48:48.000Z
|
2021-10-14T18:35:27.000Z
|
micrograph_cleaner_em/__init__.py
|
rsanchezgarc/carbonCleaner
|
8b3041f8b5049bc76414ffd38c30e8bdce19beea
|
[
"Apache-2.0"
] | null | null | null |
import os
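# silence TensorFlow's C++ logging before TensorFlow is imported (level 3 filters INFO, WARNING and ERROR messages)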
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import warnings
warnings.filterwarnings("ignore", "Cannot provide views on a non-contiguous")
warnings.filterwarnings("ignore", "Unrecognised machine stamp")
warnings.filterwarnings("ignore", "Map ID string not found")
warnings.filterwarnings("ignore", ".*", category=ImportWarning)
warnings.filterwarnings("ignore", ".*", category=DeprecationWarning)
try:
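# ResourceWarning does not exist on Python 2, hence the NameError guard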
warnings.filterwarnings("ignore", ".*", category=ResourceWarning)
except NameError:
pass
from .cleanOneMic import cleanOneMic
from .predictMask import MaskPredictor
| 32.222222
| 77
| 0.784483
| 63
| 580
| 7.15873
| 0.619048
| 0.292683
| 0.372506
| 0.239468
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001894
| 0.089655
| 580
| 18
| 78
| 32.222222
| 0.852273
| 0
| 0
| 0
| 0
| 0
| 0.261618
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.071429
| 0.357143
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
589aae547a3d8d1563829b93a59388c91ad6cbef
| 103
|
py
|
Python
|
mockerinho/__init__.py
|
callmecoolmanx/webapisimulator
|
2be175ffc4028793f5fca90db0d52f70d411eab0
|
[
"MIT"
] | null | null | null |
mockerinho/__init__.py
|
callmecoolmanx/webapisimulator
|
2be175ffc4028793f5fca90db0d52f70d411eab0
|
[
"MIT"
] | 2
|
2022-03-26T20:30:42.000Z
|
2022-03-28T19:22:42.000Z
|
mockerinho/__init__.py
|
callmecoolmanx/webapisimulator
|
2be175ffc4028793f5fca90db0d52f70d411eab0
|
[
"MIT"
] | null | null | null |
from .utils import get_version_number
VERSION = (0, 2, 1)
__version__ = get_version_number(VERSION)
| 14.714286
| 41
| 0.76699
| 15
| 103
| 4.733333
| 0.6
| 0.28169
| 0.450704
| 0.647887
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034091
| 0.145631
| 103
| 6
| 42
| 17.166667
| 0.772727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
543251d2d253462a54313091caed96314930b761
| 5,337
|
py
|
Python
|
somnium/tests/test_lattice.py
|
ivallesp/somnium
|
dc628cf18d7b4b4475106cf2a390df4ab5d2ff19
|
[
"MIT"
] | 2
|
2019-09-04T10:26:03.000Z
|
2019-10-28T15:34:18.000Z
|
somnium/tests/test_lattice.py
|
ivallesp/somnium
|
dc628cf18d7b4b4475106cf2a390df4ab5d2ff19
|
[
"MIT"
] | null | null | null |
somnium/tests/test_lattice.py
|
ivallesp/somnium
|
dc628cf18d7b4b4475106cf2a390df4ab5d2ff19
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
import numpy as np
import math
from somnium.lattice import LatticeFactory
from scipy.spatial.distance import pdist, squareform
from itertools import combinations, product, compress
from somnium.tests.util import euclidean_distance
class TestRectLattice(TestCase):
def test_dimension(self):
lat = LatticeFactory.build("rect")(n_rows=2, n_cols=3, distance_metric="euclidean")
self.assertEqual(6, len(lat.coordinates))
self.assertEqual(2, lat.n_rows)
self.assertEqual(3, lat.n_cols)
def test_distances(self):
lat = LatticeFactory.build("rect")(n_rows=2, n_cols=3, distance_metric="euclidean")
pairs = list(product(lat.coordinates, lat.coordinates))
dist = np.array([euclidean_distance(x=u1, y=u2) for (u1, u2) in pairs])
dist = dist.reshape(6, 2, 3)
self.assertTrue(np.allclose(dist, lat.distances))
def test_ordering(self):
lat = LatticeFactory.build("rect")(n_rows=2, n_cols=3, distance_metric="euclidean")
self.assertTrue(lat.distances[0, 0, 0] == 0)
self.assertTrue(lat.distances[1, 0, 1] == 0)
self.assertTrue(lat.distances[2, 0, 2] == 0)
self.assertTrue(lat.distances[5, 1, 2] == 0)
def test_n_neighbors(self):
lat = LatticeFactory.build("rect")(n_rows=4, n_cols=3, distance_metric="euclidean")
dist_matrix = squareform(pdist(lat.coordinates))
n_neighbors = set(np.sum(np.isclose(dist_matrix, 1), axis=0))
self.assertEqual({2, 3, 4}, n_neighbors)
def test_neighborhood_method(self):
lat = LatticeFactory.build("rect")(n_rows=4, n_cols=7, distance_metric="euclidean")
pairs = list(combinations(lat.coordinates, 2))
neighbors = [euclidean_distance(x=u1, y=u2)==1 for (u1, u2) in pairs]
neighbor_pairs = list(compress(pairs, neighbors))
not_neighbor_pairs = list(compress(pairs, [not(n) for n in neighbors]))
self.assertTrue(all([lat.are_neighbors(*x) for x in neighbor_pairs]))
self.assertTrue(not(any([lat.are_neighbors(*x) for x in not_neighbor_pairs])))
def test_neighborhood_method_cherrypick(self):
lat = LatticeFactory.build("rect")(n_rows=7, n_cols=8, distance_metric="euclidean")
center = 14
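# in the 7x8 row-major grid, index 14 is row 1, column 6; its lattice neighbours are 6 (above), 13 (left), 15 (right) and 22 (below)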
neighbors = [6, 13, 15, 22]
self.assertTrue(all([lat.are_neighbor_indices(center, n) for n in neighbors]))
lat = LatticeFactory.build("rect")(n_rows=6, n_cols=7, distance_metric="euclidean")
center = 8
neighbors = [1, 7, 9, 15]
self.assertTrue(all([lat.are_neighbor_indices(center, n) for n in neighbors]))
center = 15
neighbors = [8, 14, 16, 22]
self.assertTrue(all([lat.are_neighbor_indices(center, n) for n in neighbors]))
class TestHexaLattice(TestCase):
def test_dimension(self):
lat = LatticeFactory.build("hexa")(n_rows=2, n_cols=3, distance_metric="euclidean")
self.assertEqual(6, len(lat.coordinates))
self.assertEqual(2, lat.n_rows)
self.assertEqual(3, lat.n_cols)
def test_distances(self):
lat = LatticeFactory.build("hexa")(n_rows=2, n_cols=3, distance_metric="euclidean")
pairs = list(product(lat.coordinates, lat.coordinates))
dist = np.array([euclidean_distance(x=u1, y=u2) for (u1, u2) in pairs])
dist = dist.reshape(6, 2, 3)
self.assertTrue(np.allclose(dist, lat.distances))
def test_ordering(self):
lat = LatticeFactory.build("hexa")(n_rows=2, n_cols=3, distance_metric="euclidean")
self.assertTrue(lat.distances[0, 0, 0] == 0)
self.assertTrue(lat.distances[1, 0, 1] == 0)
self.assertTrue(lat.distances[2, 0, 2] == 0)
self.assertTrue(lat.distances[5, 1, 2] == 0)
def test_n_neighbors(self):
lat = LatticeFactory.build("hexa")(n_rows=4, n_cols=3, distance_metric="euclidean")
dist_matrix = squareform(pdist(lat.coordinates))
n_neighbors = set(np.sum(np.isclose(dist_matrix, 1), axis=0))
self.assertEqual({2, 3, 4, 5, 6}, n_neighbors)
def test_neighborhood_method_in_batch(self):
lat = LatticeFactory.build("hexa")(n_rows=4, n_cols=7, distance_metric="euclidean")
pairs = list(combinations(lat.coordinates, 2))
neighbors = [math.isclose(a=euclidean_distance(x=u1, y=u2), b=1) for (u1, u2) in pairs]
neighbor_pairs = list(compress(pairs, neighbors))
not_neighbor_pairs = list(compress(pairs, [not(n) for n in neighbors]))
self.assertTrue(all([lat.are_neighbors(*x) for x in neighbor_pairs]))
self.assertTrue(not(any([lat.are_neighbors(*x) for x in not_neighbor_pairs])))
def test_neighborhood_method_cherrypick(self):
lat = LatticeFactory.build("hexa")(n_rows=7, n_cols=8, distance_metric="euclidean")
center = 14
neighbors = [5, 6, 13, 15, 21, 22]
self.assertTrue(all([lat.are_neighbor_indices(center, n) for n in neighbors]))
lat = LatticeFactory.build("hexa")(n_rows=6, n_cols=7, distance_metric="euclidean")
center = 8
neighbors = [0, 1, 7, 9, 14, 15]
self.assertTrue(all([lat.are_neighbor_indices(center, n) for n in neighbors]))
center = 15
neighbors = [8, 9, 14, 16, 22, 23]
self.assertTrue(all([lat.are_neighbor_indices(center, n) for n in neighbors]))
| 49.416667
| 95
| 0.666479
| 757
| 5,337
| 4.554822
| 0.121532
| 0.081207
| 0.089327
| 0.090487
| 0.907193
| 0.907193
| 0.87123
| 0.86891
| 0.854988
| 0.854988
| 0
| 0.037485
| 0.195241
| 5,337
| 108
| 96
| 49.416667
| 0.765309
| 0
| 0
| 0.688172
| 0
| 0
| 0.034095
| 0
| 0
| 0
| 0
| 0
| 0.301075
| 1
| 0.129032
| false
| 0
| 0.075269
| 0
| 0.225806
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
544f0c54e77ee812e70a6f8a2bc50fb63280b5d2
| 66
|
py
|
Python
|
ufdl-image-segmentation-app/src/ufdl/image_segmentation_app/views/mixins/__init__.py
|
waikato-ufdl/ufdl-backend
|
776fc906c61eba6c2f2e6324758e7b8a323e30d7
|
[
"Apache-2.0"
] | null | null | null |
ufdl-image-segmentation-app/src/ufdl/image_segmentation_app/views/mixins/__init__.py
|
waikato-ufdl/ufdl-backend
|
776fc906c61eba6c2f2e6324758e7b8a323e30d7
|
[
"Apache-2.0"
] | 85
|
2020-07-24T00:04:28.000Z
|
2022-02-10T10:35:15.000Z
|
ufdl-image-segmentation-app/src/ufdl/image_segmentation_app/views/mixins/__init__.py
|
waikato-ufdl/ufdl-backend
|
776fc906c61eba6c2f2e6324758e7b8a323e30d7
|
[
"Apache-2.0"
] | null | null | null |
from ._SegmentationLayersViewSet import SegmentationLayersViewSet
| 33
| 65
| 0.924242
| 4
| 66
| 15
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060606
| 66
| 1
| 66
| 66
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5471a93654c2ca91902e75be759d7daabb4a732a
| 409
|
py
|
Python
|
librus_tricks/tools.py
|
barpec12/Librus-Tricks
|
b52a29cd0bed7ba251a1e0b2d82d4c365ef5e01c
|
[
"MIT"
] | 5
|
2020-09-11T07:34:31.000Z
|
2022-01-13T12:03:24.000Z
|
librus_tricks/tools.py
|
barpec12/Librus-Tricks
|
b52a29cd0bed7ba251a1e0b2d82d4c365ef5e01c
|
[
"MIT"
] | null | null | null |
librus_tricks/tools.py
|
barpec12/Librus-Tricks
|
b52a29cd0bed7ba251a1e0b2d82d4c365ef5e01c
|
[
"MIT"
] | 3
|
2020-09-01T19:22:15.000Z
|
2020-11-10T09:34:20.000Z
|
from datetime import datetime, timedelta
def get_next_monday(now=None):
# a default of datetime.now() is evaluated once at import time,
# so resolve the current time at call time instead
if now is None:
now = datetime.now()
for _ in range(8):
if now.weekday() == 0:
return now.date()
else:
now = now + timedelta(days=1)
def get_actual_monday(now=None):
if now is None:
now = datetime.now()
for _ in range(8):
if now.weekday() == 0:
return now.date()
else:
now = now - timedelta(days=1)
| 22.722222
| 42
| 0.550122
| 53
| 409
| 4.132075
| 0.415094
| 0.054795
| 0.155251
| 0.182648
| 0.739726
| 0.739726
| 0.739726
| 0.739726
| 0.739726
| 0.739726
| 0
| 0.021739
| 0.325183
| 409
| 17
| 43
| 24.058824
| 0.771739
| 0
| 0
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.076923
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
547ab443ca9a007ae306877863ef1ac60acd94d5
| 42
|
py
|
Python
|
aiomal/__init__.py
|
thewallacems/aiomal
|
9920ca11ea2c84978b2df149c5bc727e33cd1b63
|
[
"MIT"
] | null | null | null |
aiomal/__init__.py
|
thewallacems/aiomal
|
9920ca11ea2c84978b2df149c5bc727e33cd1b63
|
[
"MIT"
] | null | null | null |
aiomal/__init__.py
|
thewallacems/aiomal
|
9920ca11ea2c84978b2df149c5bc727e33cd1b63
|
[
"MIT"
] | null | null | null |
from .errors import *
from .http import *
| 14
| 21
| 0.714286
| 6
| 42
| 5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 42
| 2
| 22
| 21
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
54b3e7dc79a3905565e39ee73fe3d4f6ace5c541
| 27
|
py
|
Python
|
__init__.py
|
enisimsar/watchtower-news
|
222d2e52e76ef32ebb78eb325f4c32b64c0ba1a6
|
[
"MIT"
] | 2
|
2019-02-21T18:29:09.000Z
|
2021-01-27T14:52:46.000Z
|
__init__.py
|
enisimsar/watchtower-news
|
222d2e52e76ef32ebb78eb325f4c32b64c0ba1a6
|
[
"MIT"
] | 3
|
2018-11-22T08:34:04.000Z
|
2021-06-01T22:47:19.000Z
|
__init__.py
|
enisimsar/watchtower-news
|
222d2e52e76ef32ebb78eb325f4c32b64c0ba1a6
|
[
"MIT"
] | 1
|
2019-06-13T10:45:46.000Z
|
2019-06-13T10:45:46.000Z
|
from listen_module import *
| 27
| 27
| 0.851852
| 4
| 27
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 27
| 1
| 27
| 27
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
54c025628c7abb52014c089a62f66b550f1ff5c0
| 8,141
|
py
|
Python
|
src/asm/translation/tests/test_translation.py
|
ctheune/assembly-cms
|
20e000373fc30d9a14cb5dc882499b5eed1d86ee
|
[
"ZPL-2.1"
] | null | null | null |
src/asm/translation/tests/test_translation.py
|
ctheune/assembly-cms
|
20e000373fc30d9a14cb5dc882499b5eed1d86ee
|
[
"ZPL-2.1"
] | null | null | null |
src/asm/translation/tests/test_translation.py
|
ctheune/assembly-cms
|
20e000373fc30d9a14cb5dc882499b5eed1d86ee
|
[
"ZPL-2.1"
] | null | null | null |
# XXX this import fixes a circular cmsui import bug. Remove it
# when cmsui is removed from core. #7345
import asm.cms.edition
import asm.cms.page
import asm.cms.htmlpage
import asm.cms.interfaces
import asm.translation.interfaces
import asm.translation.translation
import unittest
import zope.component
import zope.publisher.browser
import zope.app.testing.placelesssetup
class TranslationTests(unittest.TestCase):
def setUp(self):
zope.app.testing.placelesssetup.setUp()
sm = zope.component.getGlobalSiteManager()
sm.registerUtility(
['fi', 'en'], asm.translation.interfaces.ILanguageProfile)
sm.registerUtility(
asm.cms.htmlpage.HTMLPage,
asm.cms.interfaces.IEditionFactory,
name='htmlpage')
self.page = asm.cms.page.Page('htmlpage')
self.request = zope.publisher.browser.TestRequest()
def tearDown(self):
zope.app.testing.placelesssetup.tearDown()
def _select(self):
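# helper: run the retail edition selector against the current request and page, returning (preferred, acceptable) edition lists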
selector = asm.translation.translation.RetailEditionSelector(
self.request)
return selector.select(self.page)
def test_select_no_preference_no_editions(self):
preferred, acceptable = self._select()
self.assertEquals([], preferred)
self.assertEquals([], acceptable)
def test_select_no_preference_no_fallback(self):
self.page.addEdition(['lang:en'])
preferred, acceptable = self._select()
self.assertEquals([], preferred)
self.assertEquals([], acceptable)
def test_select_no_preference_with_fallback(self):
edition_fi = self.page.addEdition(['lang:fi'])
preferred, acceptable = self._select()
self.assertEquals([], preferred)
self.assertEquals([edition_fi], acceptable)
def test_select_no_preference_with_fallback_and_other(self):
edition_fi = self.page.addEdition(['lang:fi'])
preferred, acceptable = self._select()
self.assertEquals([], preferred)
self.assertEquals([edition_fi], acceptable)
def test_select_with_cookie_no_edition(self):
self.request._cookies['asm.translation.lang'] = 'fi'
preferred, acceptable = self._select()
self.assertEquals([], preferred)
self.assertEquals([], acceptable)
def test_select_with_cookie_and_matching_edition(self):
self.request._cookies['asm.translation.lang'] = 'en'
edition_en = self.page.addEdition(['lang:en'])
preferred, acceptable = self._select()
self.assertEquals([edition_en], preferred)
self.assertEquals([], acceptable)
def test_select_with_cookie_and_fallback_edition(self):
self.request._cookies['asm.translation.lang'] = 'en'
edition_fi = self.page.addEdition(['lang:fi'])
preferred, acceptable = self._select()
self.assertEquals([], preferred)
self.assertEquals([edition_fi], acceptable)
def test_select_with_cookie_and_fallback_and_matching_editions(self):
self.request._cookies['asm.translation.lang'] = 'en'
edition_en = self.page.addEdition(['lang:en'])
edition_fi = self.page.addEdition(['lang:fi'])
preferred, acceptable = self._select()
self.assertEquals([edition_en], preferred)
self.assertEquals([edition_fi], acceptable)
def test_select_with_cookie_fallback_preferred_and_nonmatching_edition(self): # NOQA
self.request._cookies['asm.translation.lang'] = 'fi'
preferred, acceptable = self._select()
self.assertEquals([], preferred)
self.assertEquals([], acceptable)
def test_select_with_cookie_preferred_and_matching_edition(self):
self.request._cookies['asm.translation.lang'] = 'fi'
edition_fi = self.page.addEdition(['lang:fi'])
preferred, acceptable = self._select()
self.assertEquals([edition_fi], preferred)
self.assertEquals([], acceptable)
def test_select_with_cookie_fallback_preferred_and_matching_editions(self):
self.request._cookies['asm.translation.lang'] = 'fi'
edition_fi = self.page.addEdition(['lang:fi'])
preferred, acceptable = self._select()
self.assertEquals([edition_fi], preferred)
self.assertEquals([], acceptable)
def test_select_cookie_overrides_accept_language(self):
self.request._cookies['asm.translation.lang'] = 'fi'
self.request._environ['ACCEPT_LANGUAGE'] = 'en'
edition_fi = self.page.addEdition(['lang:fi'])
preferred, acceptable = self._select()
self.assertEquals([edition_fi], preferred)
self.assertEquals([], acceptable)
def test_select_unknown_accept_language_with_fallback(self):
self.request._environ['ACCEPT_LANGUAGE'] = 'none'
edition_fi = self.page.addEdition(['lang:fi'])
preferred, acceptable = self._select()
self.assertEquals([], preferred)
self.assertEquals([edition_fi], acceptable)
def test_select_unknown_accept_language_without_fallback(self):
self.request._environ['ACCEPT_LANGUAGE'] = 'none'
preferred, acceptable = self._select()
self.assertEquals([], preferred)
self.assertEquals([], acceptable)
def test_select_unknown_accept_language_with_fallback_and_nonmatching_editions(self): # NOQA
self.request._environ['ACCEPT_LANGUAGE'] = 'none'
edition_fi = self.page.addEdition(['lang:fi'])
preferred, acceptable = self._select()
self.assertEquals([], preferred)
self.assertEquals([edition_fi], acceptable)
def test_select_fi_higher_priority_than_en_with_fi_edition(self):
self.request._environ['ACCEPT_LANGUAGE'] = 'fi,en;q=0.8'
edition_fi = self.page.addEdition(['lang:fi'])
preferred, acceptable = self._select()
self.assertEquals([edition_fi], preferred)
self.assertEquals([], acceptable)
def test_select_fi_higher_priority_than_en_with_en_edition(self):
self.request._environ['ACCEPT_LANGUAGE'] = 'fi,en;q=0.8'
edition = self.page.addEdition(['lang:en'])
preferred, acceptable = self._select()
self.assertEquals([edition], preferred)
self.assertEquals([], acceptable)
def test_select_fi_higher_priority_than_en_with_en_and_fi_editions(self):
self.request._environ['ACCEPT_LANGUAGE'] = 'fi,en;q=0.8'
edition_en = self.page.addEdition(['lang:en'])
edition_fi = self.page.addEdition(['lang:fi'])
preferred, acceptable = self._select()
self.assertEquals([edition_fi], preferred)
self.assertEquals([edition_en], acceptable)
def test_select_en_higher_priority_than_fi_with_fi_edition(self):
self.request._environ['ACCEPT_LANGUAGE'] = 'en,fi;q=0.8'
edition_fi = self.page.addEdition(['lang:fi'])
preferred, acceptable = self._select()
self.assertEquals([edition_fi], preferred)
self.assertEquals([], acceptable)
def test_select_en_higher_priority_than_fi_with_en_edition(self):
self.request._environ['ACCEPT_LANGUAGE'] = 'en,fi;q=0.8'
edition = self.page.addEdition(['lang:en'])
preferred, acceptable = self._select()
self.assertEquals([edition], preferred)
self.assertEquals([], acceptable)
def test_select_en_higher_priority_than_fi_with_en_and_fi_editions(self):
self.request._environ['ACCEPT_LANGUAGE'] = 'en,fi;q=0.8'
edition_en = self.page.addEdition(['lang:en'])
edition_fi = self.page.addEdition(['lang:fi'])
preferred, acceptable = self._select()
self.assertEquals([edition_en], preferred)
self.assertEquals([edition_fi], acceptable)
def test_select_en_with_multiple_en_editions(self):
self.request._environ['ACCEPT_LANGUAGE'] = 'en'
edition_en_draft = self.page.addEdition(['lang:en', 'draft'])
edition_en_published = self.page.addEdition(['lang:en', 'published'])
preferred, acceptable = self._select()
self.assertEquals(
[edition_en_draft, edition_en_published], preferred)
self.assertEquals([], acceptable)
| 42.847368
| 97
| 0.687876
| 919
| 8,141
| 5.816104
| 0.09358
| 0.131712
| 0.053508
| 0.119364
| 0.822451
| 0.792516
| 0.787091
| 0.775865
| 0.72928
| 0.71768
| 0
| 0.002429
| 0.190886
| 8,141
| 189
| 98
| 43.074074
| 0.809018
| 0.014003
| 0
| 0.658228
| 0
| 0
| 0.076175
| 0
| 0
| 0
| 0
| 0
| 0.278481
| 1
| 0.158228
| false
| 0
| 0.063291
| 0
| 0.234177
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
49b2289f66b44c3b32a08a8e0523ea59c70cea3c
| 238
|
py
|
Python
|
openiec/__init__.py
|
niamorelreillet/openiec_with_OC
|
9e027c7052ca98398bf09758bc05b3daf1aba151
|
[
"MIT"
] | 25
|
2019-04-26T16:33:45.000Z
|
2021-11-15T01:34:13.000Z
|
openiec/__init__.py
|
PengWei97/openiec
|
ed423706c124de7a914fa9319c14d2cab531f266
|
[
"MIT"
] | 1
|
2019-07-10T17:56:52.000Z
|
2019-07-10T18:00:10.000Z
|
openiec/__init__.py
|
PengWei97/openiec
|
ed423706c124de7a914fa9319c14d2cab531f266
|
[
"MIT"
] | 15
|
2019-05-01T16:06:10.000Z
|
2021-11-11T02:28:04.000Z
|
from openiec.calculate.calcsigma import SigmaPure, SigmaSolLiq, SigmaCoherent
from openiec.property.molarvolume import MolarVolume, InterficialMolarVolume
from openiec.property.meltingenthalpy import MeltingEnthalpy
# binary
# ternary
| 26.444444
| 77
| 0.861345
| 23
| 238
| 8.913043
| 0.608696
| 0.160976
| 0.185366
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092437
| 238
| 8
| 78
| 29.75
| 0.949074
| 0.058824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
49c8e5e6e8c82a26935e156b9dbf51881947316c
| 527
|
py
|
Python
|
kaybee/plugins/__init__.py
|
pauleveritt/kaybee
|
a00a718aaaa23b2d12db30dfacb6b2b6ec84459c
|
[
"Apache-2.0"
] | 2
|
2017-11-08T19:55:57.000Z
|
2018-12-21T12:41:41.000Z
|
kaybee/plugins/__init__.py
|
pauleveritt/kaybee
|
a00a718aaaa23b2d12db30dfacb6b2b6ec84459c
|
[
"Apache-2.0"
] | null | null | null |
kaybee/plugins/__init__.py
|
pauleveritt/kaybee
|
a00a718aaaa23b2d12db30dfacb6b2b6ec84459c
|
[
"Apache-2.0"
] | 1
|
2018-10-13T08:59:29.000Z
|
2018-10-13T08:59:29.000Z
|
import kaybee.plugins.articles
import kaybee.plugins.debugdumper.handlers
import kaybee.plugins.genericpage.handlers
import kaybee.plugins.localtemplates.handlers
import kaybee.plugins.queries.handlers
import kaybee.plugins.references.handlers
import kaybee.plugins.references.reference
import kaybee.plugins.resources.handlers
import kaybee.plugins.resources.resource
import kaybee.plugins.settings.handlers
import kaybee.plugins.sphinx_app.handlers
import kaybee.plugins.widgets.handlers
import kaybee.plugins.widgets.widget
| 37.642857
| 45
| 0.878558
| 65
| 527
| 7.107692
| 0.276923
| 0.337662
| 0.534632
| 0.525974
| 0.307359
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.049336
| 527
| 13
| 46
| 40.538462
| 0.922156
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
49d0961be695e7867b915fe620f58b12989366b0
| 297
|
py
|
Python
|
src/colusa/logs.py
|
huuhoa/symphony
|
f8a364649634b4d864771b2c8a3103b714b6b9e2
|
[
"MIT"
] | 6
|
2020-08-29T04:14:15.000Z
|
2020-09-18T10:53:59.000Z
|
src/colusa/logs.py
|
huuhoa/colusa
|
07a0a60680c8085c5dca522e0237f7b5a5181dcb
|
[
"MIT"
] | 34
|
2021-09-07T15:17:38.000Z
|
2022-03-25T15:16:40.000Z
|
src/colusa/logs.py
|
huuhoa/colusa
|
07a0a60680c8085c5dca522e0237f7b5a5181dcb
|
[
"MIT"
] | 2
|
2020-08-29T04:21:35.000Z
|
2020-09-13T17:36:06.000Z
|
from colusa import colors
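# thin wrappers around print() that prefix each message with a colour-coded severity tag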
def error(msg, *args, **kwargs):
print(colors.red("[ERROR]"), msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
print(colors.yellow("[WARN]"), msg, *args, **kwargs)
def info(msg, *args, **kwargs):
print(colors.green("[INFO]"), msg, *args, **kwargs)
| 21.214286
| 56
| 0.612795
| 40
| 297
| 4.55
| 0.375
| 0.230769
| 0.428571
| 0.296703
| 0.395604
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154882
| 297
| 13
| 57
| 22.846154
| 0.7251
| 0
| 0
| 0
| 0
| 0
| 0.063973
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0.142857
| 0
| 0.571429
| 0.428571
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
|
0
| 6
|
49fcc5798ee8566b8c71e79b74e552fe198bbb14
| 179
|
py
|
Python
|
features/examples/modules/invalid_required_and_default.py
|
jetavator/wysdom
|
4c67c82a9df66370da5cf5347abd7450a52d3d03
|
[
"Apache-2.0"
] | 1
|
2021-04-20T07:40:28.000Z
|
2021-04-20T07:40:28.000Z
|
features/examples/modules/invalid_required_and_default.py
|
jetavator/wysdom
|
4c67c82a9df66370da5cf5347abd7450a52d3d03
|
[
"Apache-2.0"
] | 69
|
2020-05-13T07:13:49.000Z
|
2021-05-06T18:26:21.000Z
|
features/examples/modules/invalid_required_and_default.py
|
jetavator/wysdom
|
4c67c82a9df66370da5cf5347abd7450a52d3d03
|
[
"Apache-2.0"
] | null | null | null |
from wysdom import UserObject, UserProperty
class Person(UserObject):
first_name: str = UserProperty(str)
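# deliberately invalid (see the module name): a default value is supplied while the property is also marked required via optional=False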
last_name: str = UserProperty(str, default="", optional=False)
| 25.571429
| 66
| 0.748603
| 21
| 179
| 6.285714
| 0.666667
| 0.106061
| 0.287879
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150838
| 179
| 6
| 67
| 29.833333
| 0.868421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
3f85c408cbd335e10ce13c61306a321b16e1fd86
| 422
|
py
|
Python
|
slack/web/classes/attachments.py
|
timgates42/python-slack-sdk
|
6339fbe81031c9aec3f95927ac03706fd31f3544
|
[
"MIT"
] | 2,486
|
2016-11-03T14:31:43.000Z
|
2020-10-26T23:07:44.000Z
|
slack/web/classes/attachments.py
|
timgates42/python-slack-sdk
|
6339fbe81031c9aec3f95927ac03706fd31f3544
|
[
"MIT"
] | 721
|
2016-11-03T21:26:56.000Z
|
2020-10-26T12:41:29.000Z
|
slack/web/classes/attachments.py
|
timgates42/python-slack-sdk
|
6339fbe81031c9aec3f95927ac03706fd31f3544
|
[
"MIT"
] | 627
|
2016-11-02T19:04:19.000Z
|
2020-10-25T19:21:13.000Z
|
from slack_sdk.models.attachments import Attachment # noqa
from slack_sdk.models.attachments import AttachmentField # noqa
from slack_sdk.models.attachments import BlockAttachment # noqa
from slack_sdk.models.attachments import InteractiveAttachment # noqa
from slack_sdk.models.attachments import SeededColors # noqa
from slack import deprecation
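# this module is a deprecated alias; importing it warns users to switch to slack_sdk.models.attachments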
deprecation.show_message(__name__, "slack_sdk.models.attachments")
| 42.2
| 70
| 0.843602
| 52
| 422
| 6.634615
| 0.307692
| 0.156522
| 0.243478
| 0.434783
| 0.553623
| 0.553623
| 0.452174
| 0
| 0
| 0
| 0
| 0
| 0.101896
| 422
| 9
| 71
| 46.888889
| 0.91029
| 0.056872
| 0
| 0
| 0
| 0
| 0.071429
| 0.071429
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.857143
| 0
| 0.857143
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3f91cbb3e51a526aab162637b1f786feeba38b09
| 19,335
|
py
|
Python
|
objectives.py
|
ShuaiW/kaggle-heart
|
022997f27add953c74af2b371c67d9d86cbdccc3
|
[
"MIT"
] | 182
|
2016-03-15T01:51:29.000Z
|
2021-04-21T09:49:05.000Z
|
objectives.py
|
weidezhang/kaggle-heart
|
022997f27add953c74af2b371c67d9d86cbdccc3
|
[
"MIT"
] | 1
|
2018-06-22T16:46:12.000Z
|
2018-06-22T21:08:09.000Z
|
objectives.py
|
weidezhang/kaggle-heart
|
022997f27add953c74af2b371c67d9d86cbdccc3
|
[
"MIT"
] | 61
|
2016-03-15T00:58:28.000Z
|
2020-03-06T22:00:41.000Z
|
"""Library implementing different objective functions.
"""
import numpy as np
import lasagne
import theano
import theano.tensor as T
import theano_printer
import utils
class TargetVarDictObjective(object):
def __init__(self, input_layers, penalty=0):
try:
self.target_vars
except AttributeError:
self.target_vars = dict()
self.penalty = penalty
def get_loss(self, average=True, *args, **kwargs):
"""Compute the loss in Theano.
Args:
average: Indicates whether the loss should already be averaged over the batch.
If not, call the compute_average method on the aggregated losses.
"""
raise NotImplementedError
def compute_average(self, losses, loss_name=""):
"""Averages the aggregated losses in Numpy."""
return losses.mean(axis=0)
def get_kaggle_loss(self, average=True, *args, **kwargs):
"""Computes the CRPS score in Theano."""
return theano.shared([-1])
def get_segmentation_loss(self, average=True, *args, **kwargs):
return theano.shared([-1])
class KaggleObjective(TargetVarDictObjective):
"""
This is the objective as defined by Kaggle: https://www.kaggle.com/c/second-annual-data-science-bowl/details/evaluation
"""
def __init__(self, input_layers, *args, **kwargs):
super(KaggleObjective, self).__init__(input_layers, *args, **kwargs)
self.input_systole = input_layers["systole"]
self.input_diastole = input_layers["diastole"]
self.target_vars["systole"] = T.fmatrix("systole_target")
self.target_vars["diastole"] = T.fmatrix("diastole_target")
def get_loss(self, average=True, other_losses=None, *args, **kwargs):
# avoid sharing one mutable default dict between calls
if other_losses is None:
other_losses = {}
network_systole = lasagne.layers.helper.get_output(self.input_systole, *args, **kwargs)
network_diastole = lasagne.layers.helper.get_output(self.input_diastole, *args, **kwargs)
systole_target = self.target_vars["systole"]
diastole_target = self.target_vars["diastole"]
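# per-sample CRPS: mean squared difference between the predicted and target cumulative distributions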
CRPS_systole = T.mean((network_systole - systole_target)**2, axis=(1,))
CRPS_diastole = T.mean((network_diastole - diastole_target)**2, axis=(1,))
loss = 0.5*CRPS_systole + 0.5*CRPS_diastole
if average:
loss = T.mean(loss, axis=(0,))
CRPS_systole = T.mean(CRPS_systole, axis=(0,))
CRPS_diastole = T.mean(CRPS_diastole, axis=(0,))
other_losses['CRPS_systole'] = CRPS_systole
other_losses['CRPS_diastole'] = CRPS_diastole
return loss + self.penalty
#def get_kaggle_loss(self, *args, **kwargs):
# return self.get_loss(*args, **kwargs)
class MeanKaggleObjective(TargetVarDictObjective):
"""
This is the objective as defined by Kaggle: https://www.kaggle.com/c/second-annual-data-science-bowl/details/evaluation
"""
def __init__(self, input_layers, *args, **kwargs):
super(MeanKaggleObjective, self).__init__(input_layers, *args, **kwargs)
self.input_average = input_layers["average"]
self.target_vars["average"] = T.fmatrix("average_target")
self.input_systole = input_layers["systole"]
self.input_diastole = input_layers["diastole"]
self.target_vars["systole"] = T.fmatrix("systole_target")
self.target_vars["diastole"] = T.fmatrix("diastole_target")
def get_loss(self, average=True, other_losses=None, *args, **kwargs):
# avoid sharing one mutable default dict between calls
if other_losses is None:
other_losses = {}
network_average = lasagne.layers.helper.get_output(self.input_average, *args, **kwargs)
network_systole = lasagne.layers.helper.get_output(self.input_systole, *args, **kwargs)
network_diastole = lasagne.layers.helper.get_output(self.input_diastole, *args, **kwargs)
average_target = self.target_vars["average"]
systole_target = self.target_vars["systole"]
diastole_target = self.target_vars["diastole"]
CRPS_average = T.mean((network_average - average_target)**2, axis=(1,))
CRPS_systole = T.mean((network_systole - systole_target)**2, axis=(1,))
CRPS_diastole = T.mean((network_diastole - diastole_target)**2, axis=(1,))
loss = 0.2*CRPS_average + 0.4*CRPS_systole + 0.4*CRPS_diastole
if average:
loss = T.mean(loss, axis=(0,))
CRPS_average = T.mean(CRPS_average, axis=(0,))
CRPS_systole = T.mean(CRPS_systole, axis=(0,))
CRPS_diastole = T.mean(CRPS_diastole, axis=(0,))
other_losses['CRPS_average'] = CRPS_average
other_losses['CRPS_systole'] = CRPS_systole
other_losses['CRPS_diastole'] = CRPS_diastole
return loss + self.penalty
#def get_kaggle_loss(self, *args, **kwargs):
# return self.get_loss(*args, **kwargs)
class MSEObjective(TargetVarDictObjective):
def __init__(self, input_layers, *args, **kwargs):
super(MSEObjective, self).__init__(input_layers, *args, **kwargs)
self.input_systole = input_layers["systole:value"]
self.input_diastole = input_layers["diastole:value"]
self.target_vars["systole:value"] = T.fvector("systole_target_value")
self.target_vars["diastole:value"] = T.fvector("diastole_target_value")
def get_loss(self, average=True, *args, **kwargs):
network_systole = lasagne.layers.helper.get_output(self.input_systole, *args, **kwargs)[:,0]
network_diastole = lasagne.layers.helper.get_output(self.input_diastole, *args, **kwargs)[:,0]
systole_target = self.target_vars["systole:value"]
diastole_target = self.target_vars["diastole:value"]
loss = 0.5 * (network_systole - systole_target)**2 + 0.5 * (network_diastole - diastole_target)**2
if average:
loss = T.mean(loss, axis=(0,))
return loss + self.penalty
class RMSEObjective(TargetVarDictObjective):
def __init__(self, input_layers, *args, **kwargs):
super(RMSEObjective, self).__init__(input_layers, *args, **kwargs)
self.input_systole = input_layers["systole:value"]
self.input_diastole = input_layers["diastole:value"]
self.target_vars["systole:value"] = T.fvector("systole_target_value")
self.target_vars["diastole:value"] = T.fvector("diastole_target_value")
def get_loss(self, average=True, *args, **kwargs):
network_systole = lasagne.layers.helper.get_output(self.input_systole, *args, **kwargs)[:,0]
network_diastole = lasagne.layers.helper.get_output(self.input_diastole, *args, **kwargs)[:,0]
systole_target = self.target_vars["systole:value"]
diastole_target = self.target_vars["diastole:value"]
loss = 0.5 * (network_systole - systole_target) ** 2 + 0.5 * (network_diastole - diastole_target)**2
if average:
loss = T.sqrt(T.mean(loss, axis=(0,)))
return loss
def compute_average(self, aggregate):
return np.sqrt(np.mean(aggregate, axis=0))
def get_kaggle_loss(self, validation=False, average=True, *args, **kwargs):
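# NOTE: this method assumes `self.input_systole_sigma` / `self.input_diastole_sigma`
# and the "systole"/"diastole" target matrices are attached elsewhere (e.g. by a
# subclass); RMSEObjective.__init__ above does not define them.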
if not validation: # only evaluate this one in the validation step
return theano.shared(np.array([-1], dtype='float32'))  # wrapped in an ndarray: theano.shared has no constructor for plain Python lists
network_systole = utils.theano_mu_sigma_erf(lasagne.layers.helper.get_output(self.input_systole, *args, **kwargs)[:,0],
lasagne.layers.helper.get_output(self.input_systole_sigma, *args, **kwargs)[:,0])
network_diastole = utils.theano_mu_sigma_erf(lasagne.layers.helper.get_output(self.input_diastole, *args, **kwargs)[:,0],
lasagne.layers.helper.get_output(self.input_diastole_sigma, *args, **kwargs)[:,0])
systole_target = self.target_vars["systole"]
diastole_target = self.target_vars["diastole"]
if not average:
CRPS = (T.mean((network_systole - systole_target)**2, axis = (1,)) +
T.mean((network_diastole - diastole_target)**2, axis = (1,)) )/2
return CRPS
else:
CRPS = (T.mean((network_systole - systole_target)**2, axis = (0,1)) +
T.mean((network_diastole - diastole_target)**2, axis = (0,1)) )/2
return CRPS
class KaggleValidationMSEObjective(MSEObjective):
"""
This is the objective as defined by Kaggle: https://www.kaggle.com/c/second-annual-data-science-bowl/details/evaluation
"""
def __init__(self, input_layers, *args, **kwargs):
super(KaggleValidationMSEObjective, self).__init__(input_layers, *args, **kwargs)
self.target_vars["systole"] = T.fmatrix("systole_target_kaggle")
self.target_vars["diastole"] = T.fmatrix("diastole_target_kaggle")
def get_kaggle_loss(self, validation=False, average=True, *args, **kwargs):
if not validation: # only evaluate this one in the validation step
return theano.shared(np.array([-1], dtype='float32'))  # wrapped in an ndarray: theano.shared has no constructor for plain Python lists
sigma = T.sqrt(self.get_loss() - self.penalty)
network_systole = utils.theano_mu_sigma_erf(lasagne.layers.helper.get_output(self.input_systole, *args, **kwargs)[:,0],
sigma)
network_diastole = utils.theano_mu_sigma_erf(lasagne.layers.helper.get_output(self.input_diastole, *args, **kwargs)[:,0],
sigma)
systole_target = self.target_vars["systole"]
diastole_target = self.target_vars["diastole"]
if not average:
CRPS = (T.mean((network_systole - systole_target)**2, axis = (1,)) +
T.mean((network_diastole - diastole_target)**2, axis = (1,)) )/2
return CRPS
else:
CRPS = (T.mean((network_systole - systole_target)**2, axis = (0,1)) +
T.mean((network_diastole - diastole_target)**2, axis = (0,1)) )/2
return CRPS
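# Sketch (not from the original file) of the mu/sigma -> CDF conversion performed
# by utils.theano_mu_sigma_erf above, written in NumPy. The behaviour is inferred
# from the name and call sites: a Gaussian CDF evaluated on the 600-value volume
# axis the competition scores against; treat the details as assumptions.
def _mu_sigma_cdf_sketch(mu, sigma, n_bins=600, eps=1e-7):
    # mu, sigma: 1-D arrays of per-example predictions; requires SciPy
    import numpy as np
    from scipy.special import erf
    x = np.arange(n_bins, dtype='float32')    # volume thresholds 0 .. n_bins-1
    sigma = np.maximum(sigma, eps)            # guard against a zero predicted std
    return (erf((x[None, :] - mu[:, None]) / (sigma[:, None] * np.sqrt(2.0))) + 1) / 2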
def _theano_pdf_to_cdf(pdfs):
    # discrete PDF over the volume bins -> CDF, via a cumulative sum along the bin axis
    return T.extra_ops.cumsum(pdfs, axis=1)
def _crps(cdfs1, cdfs2):
    # CRPS between two discretised CDFs: mean squared difference over the bins
    return T.mean((cdfs1 - cdfs2)**2, axis=(1,))
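# Minimal NumPy illustration of the two helpers above (the bin count and the
# probabilities are made up): cumsum turns the per-bin PDF into a CDF, and CRPS
# is the mean squared gap to the step-function target CDF.
def _crps_sketch():
    import numpy as np
    pdf = np.array([[0.1, 0.2, 0.4, 0.2, 0.1]])           # predicted probs, 5 bins
    cdf = np.cumsum(pdf, axis=1)                          # [[0.1, 0.3, 0.7, 0.9, 1.0]]
    target_cdf = (np.arange(5) >= 2).astype(float)[None]  # true value lands in bin 2
    return np.mean((cdf - target_cdf) ** 2, axis=1)       # array([0.04])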
class LogLossObjective(TargetVarDictObjective):
def __init__(self, input_layers, *args, **kwargs):
super(LogLossObjective, self).__init__(input_layers, *args, **kwargs)
self.input_systole = input_layers["systole:onehot"]
self.input_diastole = input_layers["diastole:onehot"]
self.target_vars["systole:onehot"] = T.fmatrix("systole_target_onehot")
self.target_vars["diastole:onehot"] = T.fmatrix("diastole_target_onehot")
def get_loss(self, average=True, other_losses={}, *args, **kwargs):
network_systole = lasagne.layers.helper.get_output(self.input_systole, *args, **kwargs)
network_diastole = lasagne.layers.helper.get_output(self.input_diastole, *args, **kwargs)
systole_target = self.target_vars["systole:onehot"]
diastole_target = self.target_vars["diastole:onehot"]
ll_sys = log_loss(network_systole, systole_target)
ll_dia = log_loss(network_diastole, diastole_target)
ll = 0.5 * ll_sys + 0.5 * ll_dia
# CRPS scores
cdf = _theano_pdf_to_cdf
CRPS_systole = _crps(cdf(network_systole), cdf(systole_target))
CRPS_diastole = _crps(cdf(network_diastole), cdf(diastole_target))
if average:
ll = T.mean(ll, axis=(0,))
CRPS_systole = T.mean(CRPS_systole, axis=(0,))
CRPS_diastole = T.mean(CRPS_diastole, axis=(0,))
other_losses['CRPS_systole'] = CRPS_systole
other_losses['CRPS_diastole'] = CRPS_diastole
return ll + self.penalty
class KaggleValidationLogLossObjective(LogLossObjective):
"""
This is the objective as defined by Kaggle: https://www.kaggle.com/c/second-annual-data-science-bowl/details/evaluation
"""
def __init__(self, input_layers, *args, **kwargs):
super(KaggleValidationLogLossObjective, self).__init__(input_layers, *args, **kwargs)
self.target_vars["systole"] = T.fmatrix("systole_target_kaggle")
self.target_vars["diastole"] = T.fmatrix("diastole_target_kaggle")
def get_kaggle_loss(self, validation=False, average=True, *args, **kwargs):
if not validation:
return theano.shared(np.array([-1], dtype='float32'))  # wrapped in an ndarray: theano.shared has no constructor for plain Python lists
network_systole = T.clip(T.extra_ops.cumsum(lasagne.layers.helper.get_output(self.input_systole, *args, **kwargs), axis=1), 0.0, 1.0)
network_diastole = T.clip(T.extra_ops.cumsum(lasagne.layers.helper.get_output(self.input_diastole, *args, **kwargs), axis=1), 0.0, 1.0)
systole_target = self.target_vars["systole"]
diastole_target = self.target_vars["diastole"]
if not average:
CRPS = (T.mean((network_systole - systole_target)**2, axis = (1,)) +
T.mean((network_diastole - diastole_target)**2, axis = (1,)) )/2
return CRPS
else:
CRPS = (T.mean((network_systole - systole_target)**2, axis = (0,1)) +
T.mean((network_diastole - diastole_target)**2, axis = (0,1)) )/2
return CRPS
def log_loss(y, t, eps=1e-7):
    """
    binary cross-entropy, averaged over classes; returns one value per batch row
    """
    y = T.clip(y, eps, 1 - eps)
    # T.log, not np.log: y and t are symbolic Theano tensors
    loss = -T.mean(t * T.log(y) + (1 - t) * T.log(1 - y), axis=(1,))
    return loss
class WeightedLogLossObjective(TargetVarDictObjective):
def __init__(self, input_layers, *args, **kwargs):
super(WeightedLogLossObjective, self).__init__(input_layers, *args, **kwargs)
self.input_systole = input_layers["systole:onehot"]
self.input_diastole = input_layers["diastole:onehot"]
self.target_vars["systole"] = T.fmatrix("systole_target")
self.target_vars["diastole"] = T.fmatrix("diastole_target")
self.target_vars["systole:onehot"] = T.fmatrix("systole_target_onehot")
self.target_vars["diastole:onehot"] = T.fmatrix("diastole_target_onehot")
self.target_vars["systole:class_weight"] = T.fmatrix("systole_target_weights")
self.target_vars["diastole:class_weight"] = T.fmatrix("diastole_target_weights")
def get_loss(self, *args, **kwargs):
network_systole = lasagne.layers.helper.get_output(self.input_systole, *args, **kwargs)
network_diastole = lasagne.layers.helper.get_output(self.input_diastole, *args, **kwargs)
systole_target = self.target_vars["systole:onehot"]
diastole_target = self.target_vars["diastole:onehot"]
systole_weights = self.target_vars["systole:class_weight"]
diastole_weights = self.target_vars["diastole:class_weight"]
if "average" in kwargs and not kwargs["average"]:
ll = 0.5 * weighted_log_loss(network_systole, systole_target, weights=systole_weights) + \
0.5 * weighted_log_loss(network_diastole, diastole_target, weights=diastole_weights)
return ll
ll = 0.5 * T.mean(weighted_log_loss(network_systole, systole_target, weights=systole_weights), axis = (0,)) + \
0.5 * T.mean(weighted_log_loss(network_diastole, diastole_target, weights=diastole_weights), axis = (0,))
return ll + self.penalty
def get_kaggle_loss(self, validation=False, average=True, *args, **kwargs):
if not validation:
return theano.shared(np.array([-1], dtype='float32'))  # wrapped in an ndarray: theano.shared has no constructor for plain Python lists
network_systole = T.clip(T.extra_ops.cumsum(lasagne.layers.helper.get_output(self.input_systole, *args, **kwargs), axis=1), 0.0, 1.0).astype('float32')
network_diastole = T.clip(T.extra_ops.cumsum(lasagne.layers.helper.get_output(self.input_diastole, *args, **kwargs), axis=1), 0.0, 1.0).astype('float32')
systole_target = self.target_vars["systole"].astype('float32')
diastole_target = self.target_vars["diastole"].astype('float32')
if not average:
CRPS = T.mean((network_systole - systole_target)**2 + (network_diastole - diastole_target)**2, axis = 1)/2
return CRPS
else:
CRPS = (T.mean((network_systole - systole_target)**2, axis = (0,1)) +
T.mean((network_diastole - diastole_target)**2, axis = (0,1)) )/2
theano_printer.print_me_this("CRPS", CRPS)
return CRPS
def weighted_log_loss(y, t, weights, eps=1e-7):
    """
    class-weighted binary cross-entropy, averaged over classes; returns one value per batch row
    """
    y = T.clip(y, eps, 1 - eps)
    # T.log, not np.log: the inputs are symbolic Theano tensors
    loss = -T.mean(weights * (t * T.log(y) + (1 - t) * T.log(1 - y)), axis=(1,))
    return loss
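# NumPy mirror of weighted_log_loss (illustrative shapes: one batch row, three
# bins) showing how the per-bin weights rescale each class's contribution.
def _weighted_log_loss_sketch():
    import numpy as np
    y = np.array([[0.7, 0.2, 0.1]])   # predicted per-bin probabilities
    t = np.array([[1.0, 0.0, 0.0]])   # one-hot target
    w = np.array([[2.0, 1.0, 1.0]])   # up-weight the first bin
    eps = 1e-7
    y = np.clip(y, eps, 1 - eps)
    return -np.mean(w * (t * np.log(y) + (1 - t) * np.log(1 - y)), axis=1)  # ~array([0.347])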
class BinaryCrossentropyImageObjective(TargetVarDictObjective):
def __init__(self, input_layers, *args, **kwargs):
super(BinaryCrossentropyImageObjective, self).__init__(input_layers, *args, **kwargs)
self.input_layer = input_layers["segmentation"]
self.target_vars = dict()
self.target_vars["segmentation"] = T.ftensor3("segmentation_target")
def get_loss(self, *args, **kwargs):
network_output = lasagne.layers.helper.get_output(self.input_layer, *args, **kwargs)
segmentation_target = self.target_vars["segmentation"]
if "average" in kwargs and not kwargs["average"]:
loss = log_loss( network_output.flatten(ndim=2), segmentation_target.flatten(ndim=2) )
return loss
return T.mean(log_loss(network_output.flatten(ndim=2), segmentation_target.flatten(ndim=2))) + self.penalty
class MixedKaggleSegmentationObjective(KaggleObjective, BinaryCrossentropyImageObjective):
def __init__(self, input_layers, segmentation_weight=1.0, *args, **kwargs):
super(MixedKaggleSegmentationObjective, self).__init__(input_layers, *args, **kwargs)
self.segmentation_weight = segmentation_weight
def get_loss(self, *args, **kwargs):
return self.get_kaggle_loss(*args, **kwargs) + self.segmentation_weight * self.get_segmentation_loss(*args, **kwargs)
def get_kaggle_loss(self, *args, **kwargs):
return KaggleObjective.get_loss(self, *args, **kwargs)
def get_segmentation_loss(self, *args, **kwargs):
return BinaryCrossentropyImageObjective.get_loss(self, *args, **kwargs)
class UpscaledImageObjective(BinaryCrossentropyImageObjective):
def get_loss(self, *args, **kwargs):
network_output = lasagne.layers.helper.get_output(self.input_layer, *args, **kwargs)
segmentation_target = self.target_vars["segmentation"]
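# the target is subsampled at the centre of every 8x8 block (offset 4, stride 8),
# matching a network output that is 8x smaller than the full-resolution target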
return log_loss(network_output.flatten(ndim=2), segmentation_target[:,4::8,4::8].flatten(ndim=2)) + self.penalty
class R2Objective(TargetVarDictObjective):
def __init__(self, input_layers, *args, **kwargs):
super(R2Objective, self).__init__(input_layers, *args, **kwargs)
self.input_systole = input_layers["systole"]
self.input_diastole = input_layers["diastole"]
self.target_vars["systole"] = T.fvector("systole_target")
self.target_vars["diastole"] = T.fvector("diastole_target")
def get_loss(self, *args, **kwargs):
network_systole = lasagne.layers.helper.get_output(self.input_systole, *args, **kwargs)
network_diastole = lasagne.layers.helper.get_output(self.input_diastole, *args, **kwargs)
systole_target = self.target_vars["systole"]
diastole_target = self.target_vars["diastole"]
return T.sum((network_diastole-diastole_target)**2) + T.sum((network_systole-systole_target)**2) + self.penalty
| 45.175234
| 161
| 0.659685
| 2,367
| 19,335
| 5.143219
| 0.067174
| 0.062428
| 0.0621
| 0.049285
| 0.811812
| 0.785444
| 0.759487
| 0.735502
| 0.713077
| 0.67907
| 0
| 0.012285
| 0.208534
| 19,335
| 427
| 162
| 45.28103
| 0.783245
| 0.061546
| 0
| 0.640845
| 0
| 0
| 0.073473
| 0.016729
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130282
| false
| 0
| 0.021127
| 0.024648
| 0.316901
| 0.007042
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3fd434c80ea8c5daf17ce5095daf9104b981ca79
| 128
|
py
|
Python
|
src/ploomber/executors/__init__.py
|
MarcoJHB/ploomber
|
4849ef6915572f7934392443b4faf138172b9596
|
[
"Apache-2.0"
] | 2,141
|
2020-02-14T02:34:34.000Z
|
2022-03-31T22:43:20.000Z
|
src/ploomber/executors/__init__.py
|
MarcoJHB/ploomber
|
4849ef6915572f7934392443b4faf138172b9596
|
[
"Apache-2.0"
] | 660
|
2020-02-06T16:15:57.000Z
|
2022-03-31T22:55:01.000Z
|
src/ploomber/executors/__init__.py
|
MarcoJHB/ploomber
|
4849ef6915572f7934392443b4faf138172b9596
|
[
"Apache-2.0"
] | 122
|
2020-02-14T18:53:05.000Z
|
2022-03-27T22:33:24.000Z
|
from ploomber.executors.serial import Serial
from ploomber.executors.parallel import Parallel
__all__ = ['Serial', 'Parallel']
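# Usage sketch (kept commented; the DAG import path and executor kwarg are
# assumptions, the two executors are the ones re-exported above):
# from ploomber import DAG
# from ploomber.executors import Serial, Parallel
# dag = DAG(executor=Serial())      # run tasks one after another
# dag = DAG(executor=Parallel())    # run independent tasks concurrently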
| 25.6
| 48
| 0.804688
| 15
| 128
| 6.6
| 0.466667
| 0.242424
| 0.424242
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101563
| 128
| 4
| 49
| 32
| 0.86087
| 0
| 0
| 0
| 0
| 0
| 0.109375
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3ff759a897a5bdd75c59bc5c66b289ffa1009a51
| 36,981
|
py
|
Python
|
sdk/python/pulumi_azure_native/insights/v20140401/outputs.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/insights/v20140401/outputs.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/insights/v20140401/outputs.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = [
'LocationThresholdRuleConditionResponse',
'ManagementEventAggregationConditionResponse',
'ManagementEventRuleConditionResponse',
'RuleEmailActionResponse',
'RuleManagementEventClaimsDataSourceResponse',
'RuleManagementEventDataSourceResponse',
'RuleMetricDataSourceResponse',
'RuleWebhookActionResponse',
'ThresholdRuleConditionResponse',
]
@pulumi.output_type
class LocationThresholdRuleConditionResponse(dict):
"""
A rule condition based on a certain number of locations failing.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "failedLocationCount":
suggest = "failed_location_count"
elif key == "odataType":
suggest = "odata_type"
elif key == "dataSource":
suggest = "data_source"
elif key == "windowSize":
suggest = "window_size"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in LocationThresholdRuleConditionResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
LocationThresholdRuleConditionResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
LocationThresholdRuleConditionResponse.__key_warning(key)
return super().get(key, default)
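# Behaviour sketch for the __key_warning pattern above (resp is a hypothetical
# LocationThresholdRuleConditionResponse instance):
# resp["failedLocationCount"]    # logs a warning suggesting the snake_case getter
# resp.failed_location_count     # the preferred, warning-free access path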
def __init__(__self__, *,
failed_location_count: int,
odata_type: str,
data_source: Optional[Any] = None,
window_size: Optional[str] = None):
"""
A rule condition based on a certain number of locations failing.
:param int failed_location_count: the number of locations that must fail to activate the alert.
:param str odata_type: specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
Expected value is 'Microsoft.Azure.Management.Insights.Models.LocationThresholdRuleCondition'.
:param Union['RuleManagementEventDataSourceResponse', 'RuleMetricDataSourceResponse'] data_source: the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
:param str window_size: the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
pulumi.set(__self__, "failed_location_count", failed_location_count)
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.LocationThresholdRuleCondition')
if data_source is not None:
pulumi.set(__self__, "data_source", data_source)
if window_size is not None:
pulumi.set(__self__, "window_size", window_size)
@property
@pulumi.getter(name="failedLocationCount")
def failed_location_count(self) -> int:
"""
the number of locations that must fail to activate the alert.
"""
return pulumi.get(self, "failed_location_count")
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> str:
"""
specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
Expected value is 'Microsoft.Azure.Management.Insights.Models.LocationThresholdRuleCondition'.
"""
return pulumi.get(self, "odata_type")
@property
@pulumi.getter(name="dataSource")
def data_source(self) -> Optional[Any]:
"""
the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
"""
return pulumi.get(self, "data_source")
@property
@pulumi.getter(name="windowSize")
def window_size(self) -> Optional[str]:
"""
the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
return pulumi.get(self, "window_size")
@pulumi.output_type
class ManagementEventAggregationConditionResponse(dict):
"""
How the data that is collected should be combined over time.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "windowSize":
suggest = "window_size"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ManagementEventAggregationConditionResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ManagementEventAggregationConditionResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ManagementEventAggregationConditionResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
operator: Optional[str] = None,
threshold: Optional[float] = None,
window_size: Optional[str] = None):
"""
How the data that is collected should be combined over time.
:param str operator: the condition operator.
:param float threshold: The threshold value that activates the alert.
:param str window_size: the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
if operator is not None:
pulumi.set(__self__, "operator", operator)
if threshold is not None:
pulumi.set(__self__, "threshold", threshold)
if window_size is not None:
pulumi.set(__self__, "window_size", window_size)
@property
@pulumi.getter
def operator(self) -> Optional[str]:
"""
the condition operator.
"""
return pulumi.get(self, "operator")
@property
@pulumi.getter
def threshold(self) -> Optional[float]:
"""
The threshold value that activates the alert.
"""
return pulumi.get(self, "threshold")
@property
@pulumi.getter(name="windowSize")
def window_size(self) -> Optional[str]:
"""
the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
return pulumi.get(self, "window_size")
@pulumi.output_type
class ManagementEventRuleConditionResponse(dict):
"""
A management event rule condition.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "odataType":
suggest = "odata_type"
elif key == "dataSource":
suggest = "data_source"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ManagementEventRuleConditionResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ManagementEventRuleConditionResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ManagementEventRuleConditionResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
odata_type: str,
aggregation: Optional['outputs.ManagementEventAggregationConditionResponse'] = None,
data_source: Optional[Any] = None):
"""
A management event rule condition.
:param str odata_type: specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
Expected value is 'Microsoft.Azure.Management.Insights.Models.ManagementEventRuleCondition'.
:param 'ManagementEventAggregationConditionResponse' aggregation: How the data that is collected should be combined over time and when the alert is activated. Note that for management event alerts aggregation is optional – if it is not provided then any event will cause the alert to activate.
:param Union['RuleManagementEventDataSourceResponse', 'RuleMetricDataSourceResponse'] data_source: the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.ManagementEventRuleCondition')
if aggregation is not None:
pulumi.set(__self__, "aggregation", aggregation)
if data_source is not None:
pulumi.set(__self__, "data_source", data_source)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> str:
"""
specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
Expected value is 'Microsoft.Azure.Management.Insights.Models.ManagementEventRuleCondition'.
"""
return pulumi.get(self, "odata_type")
@property
@pulumi.getter
def aggregation(self) -> Optional['outputs.ManagementEventAggregationConditionResponse']:
"""
How the data that is collected should be combined over time and when the alert is activated. Note that for management event alerts aggregation is optional – if it is not provided then any event will cause the alert to activate.
"""
return pulumi.get(self, "aggregation")
@property
@pulumi.getter(name="dataSource")
def data_source(self) -> Optional[Any]:
"""
the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
"""
return pulumi.get(self, "data_source")
@pulumi.output_type
class RuleEmailActionResponse(dict):
"""
Specifies the action to send email when the rule condition is evaluated. The discriminator is always RuleEmailAction in this case.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "odataType":
suggest = "odata_type"
elif key == "customEmails":
suggest = "custom_emails"
elif key == "sendToServiceOwners":
suggest = "send_to_service_owners"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RuleEmailActionResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RuleEmailActionResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RuleEmailActionResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
odata_type: str,
custom_emails: Optional[Sequence[str]] = None,
send_to_service_owners: Optional[bool] = None):
"""
Specifies the action to send email when the rule condition is evaluated. The discriminator is always RuleEmailAction in this case.
:param str odata_type: specifies the type of the action. There are two types of actions: RuleEmailAction and RuleWebhookAction.
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleEmailAction'.
:param Sequence[str] custom_emails: the list of administrator's custom email addresses to notify of the activation of the alert.
:param bool send_to_service_owners: Whether the administrators (service and co-administrators) of the service should be notified when the alert is activated.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.RuleEmailAction')
if custom_emails is not None:
pulumi.set(__self__, "custom_emails", custom_emails)
if send_to_service_owners is not None:
pulumi.set(__self__, "send_to_service_owners", send_to_service_owners)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> str:
"""
specifies the type of the action. There are two types of actions: RuleEmailAction and RuleWebhookAction.
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleEmailAction'.
"""
return pulumi.get(self, "odata_type")
@property
@pulumi.getter(name="customEmails")
def custom_emails(self) -> Optional[Sequence[str]]:
"""
the list of administrator's custom email addresses to notify of the activation of the alert.
"""
return pulumi.get(self, "custom_emails")
@property
@pulumi.getter(name="sendToServiceOwners")
def send_to_service_owners(self) -> Optional[bool]:
"""
Whether the administrators (service and co-administrators) of the service should be notified when the alert is activated.
"""
return pulumi.get(self, "send_to_service_owners")
@pulumi.output_type
class RuleManagementEventClaimsDataSourceResponse(dict):
"""
The claims for a rule management event data source.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "emailAddress":
suggest = "email_address"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RuleManagementEventClaimsDataSourceResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RuleManagementEventClaimsDataSourceResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RuleManagementEventClaimsDataSourceResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
email_address: Optional[str] = None):
"""
The claims for a rule management event data source.
:param str email_address: the email address.
"""
if email_address is not None:
pulumi.set(__self__, "email_address", email_address)
@property
@pulumi.getter(name="emailAddress")
def email_address(self) -> Optional[str]:
"""
the email address.
"""
return pulumi.get(self, "email_address")
@pulumi.output_type
class RuleManagementEventDataSourceResponse(dict):
"""
A rule management event data source. The discriminator field is always RuleManagementEventDataSource in this case.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "odataType":
suggest = "odata_type"
elif key == "eventName":
suggest = "event_name"
elif key == "eventSource":
suggest = "event_source"
elif key == "legacyResourceId":
suggest = "legacy_resource_id"
elif key == "metricNamespace":
suggest = "metric_namespace"
elif key == "operationName":
suggest = "operation_name"
elif key == "resourceGroupName":
suggest = "resource_group_name"
elif key == "resourceLocation":
suggest = "resource_location"
elif key == "resourceProviderName":
suggest = "resource_provider_name"
elif key == "resourceUri":
suggest = "resource_uri"
elif key == "subStatus":
suggest = "sub_status"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RuleManagementEventDataSourceResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RuleManagementEventDataSourceResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RuleManagementEventDataSourceResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
odata_type: str,
claims: Optional['outputs.RuleManagementEventClaimsDataSourceResponse'] = None,
event_name: Optional[str] = None,
event_source: Optional[str] = None,
legacy_resource_id: Optional[str] = None,
level: Optional[str] = None,
metric_namespace: Optional[str] = None,
operation_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
resource_location: Optional[str] = None,
resource_provider_name: Optional[str] = None,
resource_uri: Optional[str] = None,
status: Optional[str] = None,
sub_status: Optional[str] = None):
"""
A rule management event data source. The discriminator field is always RuleManagementEventDataSource in this case.
:param str odata_type: specifies the type of data source. There are two types of rule data sources: RuleMetricDataSource and RuleManagementEventDataSource
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleManagementEventDataSource'.
:param 'RuleManagementEventClaimsDataSourceResponse' claims: the claims.
:param str event_name: the event name.
:param str event_source: the event source.
:param str legacy_resource_id: the legacy resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
:param str level: the level.
:param str metric_namespace: the namespace of the metric.
:param str operation_name: The name of the operation that should be checked for. If no name is provided, any operation will match.
:param str resource_group_name: the resource group name.
:param str resource_location: the location of the resource.
:param str resource_provider_name: the resource provider name.
:param str resource_uri: the resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
:param str status: The status of the operation that should be checked for. If no status is provided, any status will match.
:param str sub_status: the substatus.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.RuleManagementEventDataSource')
if claims is not None:
pulumi.set(__self__, "claims", claims)
if event_name is not None:
pulumi.set(__self__, "event_name", event_name)
if event_source is not None:
pulumi.set(__self__, "event_source", event_source)
if legacy_resource_id is not None:
pulumi.set(__self__, "legacy_resource_id", legacy_resource_id)
if level is not None:
pulumi.set(__self__, "level", level)
if metric_namespace is not None:
pulumi.set(__self__, "metric_namespace", metric_namespace)
if operation_name is not None:
pulumi.set(__self__, "operation_name", operation_name)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if resource_location is not None:
pulumi.set(__self__, "resource_location", resource_location)
if resource_provider_name is not None:
pulumi.set(__self__, "resource_provider_name", resource_provider_name)
if resource_uri is not None:
pulumi.set(__self__, "resource_uri", resource_uri)
if status is not None:
pulumi.set(__self__, "status", status)
if sub_status is not None:
pulumi.set(__self__, "sub_status", sub_status)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> str:
"""
specifies the type of data source. There are two types of rule data sources: RuleMetricDataSource and RuleManagementEventDataSource
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleManagementEventDataSource'.
"""
return pulumi.get(self, "odata_type")
@property
@pulumi.getter
def claims(self) -> Optional['outputs.RuleManagementEventClaimsDataSourceResponse']:
"""
the claims.
"""
return pulumi.get(self, "claims")
@property
@pulumi.getter(name="eventName")
def event_name(self) -> Optional[str]:
"""
the event name.
"""
return pulumi.get(self, "event_name")
@property
@pulumi.getter(name="eventSource")
def event_source(self) -> Optional[str]:
"""
the event source.
"""
return pulumi.get(self, "event_source")
@property
@pulumi.getter(name="legacyResourceId")
def legacy_resource_id(self) -> Optional[str]:
"""
the legacy resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
"""
return pulumi.get(self, "legacy_resource_id")
@property
@pulumi.getter
def level(self) -> Optional[str]:
"""
the level.
"""
return pulumi.get(self, "level")
@property
@pulumi.getter(name="metricNamespace")
def metric_namespace(self) -> Optional[str]:
"""
the namespace of the metric.
"""
return pulumi.get(self, "metric_namespace")
@property
@pulumi.getter(name="operationName")
def operation_name(self) -> Optional[str]:
"""
The name of the operation that should be checked for. If no name is provided, any operation will match.
"""
return pulumi.get(self, "operation_name")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[str]:
"""
the resource group name.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter(name="resourceLocation")
def resource_location(self) -> Optional[str]:
"""
the location of the resource.
"""
return pulumi.get(self, "resource_location")
@property
@pulumi.getter(name="resourceProviderName")
def resource_provider_name(self) -> Optional[str]:
"""
the resource provider name.
"""
return pulumi.get(self, "resource_provider_name")
@property
@pulumi.getter(name="resourceUri")
def resource_uri(self) -> Optional[str]:
"""
the resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
"""
return pulumi.get(self, "resource_uri")
@property
@pulumi.getter
def status(self) -> Optional[str]:
"""
The status of the operation that should be checked for. If no status is provided, any status will match.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="subStatus")
def sub_status(self) -> Optional[str]:
"""
the substatus.
"""
return pulumi.get(self, "sub_status")
@pulumi.output_type
class RuleMetricDataSourceResponse(dict):
"""
A rule metric data source. The discriminator value is always RuleMetricDataSource in this case.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "odataType":
suggest = "odata_type"
elif key == "legacyResourceId":
suggest = "legacy_resource_id"
elif key == "metricName":
suggest = "metric_name"
elif key == "metricNamespace":
suggest = "metric_namespace"
elif key == "resourceLocation":
suggest = "resource_location"
elif key == "resourceUri":
suggest = "resource_uri"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RuleMetricDataSourceResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RuleMetricDataSourceResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RuleMetricDataSourceResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
odata_type: str,
legacy_resource_id: Optional[str] = None,
metric_name: Optional[str] = None,
metric_namespace: Optional[str] = None,
resource_location: Optional[str] = None,
resource_uri: Optional[str] = None):
"""
A rule metric data source. The discriminator value is always RuleMetricDataSource in this case.
:param str odata_type: specifies the type of data source. There are two types of rule data sources: RuleMetricDataSource and RuleManagementEventDataSource
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleMetricDataSource'.
:param str legacy_resource_id: the legacy resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
:param str metric_name: the name of the metric that defines what the rule monitors.
:param str metric_namespace: the namespace of the metric.
:param str resource_location: the location of the resource.
:param str resource_uri: the resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.RuleMetricDataSource')
if legacy_resource_id is not None:
pulumi.set(__self__, "legacy_resource_id", legacy_resource_id)
if metric_name is not None:
pulumi.set(__self__, "metric_name", metric_name)
if metric_namespace is not None:
pulumi.set(__self__, "metric_namespace", metric_namespace)
if resource_location is not None:
pulumi.set(__self__, "resource_location", resource_location)
if resource_uri is not None:
pulumi.set(__self__, "resource_uri", resource_uri)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> str:
"""
specifies the type of data source. There are two types of rule data sources: RuleMetricDataSource and RuleManagementEventDataSource
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleMetricDataSource'.
"""
return pulumi.get(self, "odata_type")
@property
@pulumi.getter(name="legacyResourceId")
def legacy_resource_id(self) -> Optional[str]:
"""
the legacy resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
"""
return pulumi.get(self, "legacy_resource_id")
@property
@pulumi.getter(name="metricName")
def metric_name(self) -> Optional[str]:
"""
the name of the metric that defines what the rule monitors.
"""
return pulumi.get(self, "metric_name")
@property
@pulumi.getter(name="metricNamespace")
def metric_namespace(self) -> Optional[str]:
"""
the namespace of the metric.
"""
return pulumi.get(self, "metric_namespace")
@property
@pulumi.getter(name="resourceLocation")
def resource_location(self) -> Optional[str]:
"""
the location of the resource.
"""
return pulumi.get(self, "resource_location")
@property
@pulumi.getter(name="resourceUri")
def resource_uri(self) -> Optional[str]:
"""
the resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
"""
return pulumi.get(self, "resource_uri")
@pulumi.output_type
class RuleWebhookActionResponse(dict):
"""
Specifies the action to post to service when the rule condition is evaluated. The discriminator is always RuleWebhookAction in this case.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "odataType":
suggest = "odata_type"
elif key == "serviceUri":
suggest = "service_uri"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RuleWebhookActionResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RuleWebhookActionResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RuleWebhookActionResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
odata_type: str,
properties: Optional[Mapping[str, str]] = None,
service_uri: Optional[str] = None):
"""
Specifies the action to post to service when the rule condition is evaluated. The discriminator is always RuleWebhookAction in this case.
:param str odata_type: specifies the type of the action. There are two types of actions: RuleEmailAction and RuleWebhookAction.
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleWebhookAction'.
:param Mapping[str, str] properties: the dictionary of custom properties to include with the post operation. These data are appended to the webhook payload.
:param str service_uri: the service uri to Post the notification when the alert activates or resolves.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.RuleWebhookAction')
if properties is not None:
pulumi.set(__self__, "properties", properties)
if service_uri is not None:
pulumi.set(__self__, "service_uri", service_uri)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> str:
"""
specifies the type of the action. There are two types of actions: RuleEmailAction and RuleWebhookAction.
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleWebhookAction'.
"""
return pulumi.get(self, "odata_type")
@property
@pulumi.getter
def properties(self) -> Optional[Mapping[str, str]]:
"""
the dictionary of custom properties to include with the post operation. These data are appended to the webhook payload.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter(name="serviceUri")
def service_uri(self) -> Optional[str]:
"""
the service uri to Post the notification when the alert activates or resolves.
"""
return pulumi.get(self, "service_uri")
@pulumi.output_type
class ThresholdRuleConditionResponse(dict):
"""
A rule condition based on a metric crossing a threshold.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "odataType":
suggest = "odata_type"
elif key == "dataSource":
suggest = "data_source"
elif key == "timeAggregation":
suggest = "time_aggregation"
elif key == "windowSize":
suggest = "window_size"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ThresholdRuleConditionResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ThresholdRuleConditionResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ThresholdRuleConditionResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
odata_type: str,
operator: str,
threshold: float,
data_source: Optional[Any] = None,
time_aggregation: Optional[str] = None,
window_size: Optional[str] = None):
"""
A rule condition based on a metric crossing a threshold.
:param str odata_type: specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
Expected value is 'Microsoft.Azure.Management.Insights.Models.ThresholdRuleCondition'.
:param str operator: the operator used to compare the data and the threshold.
:param float threshold: the threshold value that activates the alert.
:param Union['RuleManagementEventDataSourceResponse', 'RuleMetricDataSourceResponse'] data_source: the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
:param str time_aggregation: the time aggregation operator. How the data that are collected should be combined over time. The default value is the PrimaryAggregationType of the Metric.
:param str window_size: the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.ThresholdRuleCondition')
pulumi.set(__self__, "operator", operator)
pulumi.set(__self__, "threshold", threshold)
if data_source is not None:
pulumi.set(__self__, "data_source", data_source)
if time_aggregation is not None:
pulumi.set(__self__, "time_aggregation", time_aggregation)
if window_size is not None:
pulumi.set(__self__, "window_size", window_size)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> str:
"""
specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
Expected value is 'Microsoft.Azure.Management.Insights.Models.ThresholdRuleCondition'.
"""
return pulumi.get(self, "odata_type")
@property
@pulumi.getter
def operator(self) -> str:
"""
the operator used to compare the data and the threshold.
"""
return pulumi.get(self, "operator")
@property
@pulumi.getter
def threshold(self) -> float:
"""
the threshold value that activates the alert.
"""
return pulumi.get(self, "threshold")
@property
@pulumi.getter(name="dataSource")
def data_source(self) -> Optional[Any]:
"""
the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
"""
return pulumi.get(self, "data_source")
@property
@pulumi.getter(name="timeAggregation")
def time_aggregation(self) -> Optional[str]:
"""
the time aggregation operator. How the data that are collected should be combined over time. The default value is the PrimaryAggregationType of the Metric.
"""
return pulumi.get(self, "time_aggregation")
@property
@pulumi.getter(name="windowSize")
def window_size(self) -> Optional[str]:
"""
the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
return pulumi.get(self, "window_size")
| 43.661157
| 305
| 0.663422
| 4,190
| 36,981
| 5.683055
| 0.061575
| 0.015286
| 0.023476
| 0.03431
| 0.810894
| 0.782463
| 0.750378
| 0.712498
| 0.694272
| 0.682051
| 0
| 0.001333
| 0.249155
| 36,981
| 846
| 306
| 43.712766
| 0.85616
| 0.347746
| 0
| 0.630219
| 1
| 0.017893
| 0.197322
| 0.066102
| 0
| 0
| 0
| 0
| 0
| 1
| 0.157058
| false
| 0
| 0.013917
| 0
| 0.310139
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3ffd353d756db1ba676a102bd66ba916ec9aafda
| 109
|
py
|
Python
|
017/017.py
|
brianchiang-tw/Python_practice
|
9e5f8d554fbf02d47164f62ffa416e966f823ddd
|
[
"MIT"
] | null | null | null |
017/017.py
|
brianchiang-tw/Python_practice
|
9e5f8d554fbf02d47164f62ffa416e966f823ddd
|
[
"MIT"
] | null | null | null |
017/017.py
|
brianchiang-tw/Python_practice
|
9e5f8d554fbf02d47164f62ffa416e966f823ddd
|
[
"MIT"
] | null | null | null |
test_list = [-2, 1, 3, -6]
print('before', test_list)
test_list.sort(key=abs)   # sort in place by absolute value
print('after ', test_list)
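# expected output -- sorting by absolute value keeps each element's sign:
# before [-2, 1, 3, -6]
# after  [1, -2, 3, -6]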
| 15.571429
| 27
| 0.678899
| 21
| 109
| 3.333333
| 0.571429
| 0.457143
| 0.342857
| 0.457143
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041667
| 0.119266
| 109
| 7
| 28
| 15.571429
| 0.6875
| 0
| 0
| 0.5
| 0
| 0
| 0.109091
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
b76233f7addfab35e4020f8bfc960f97c5ac9e12
| 4,509
|
py
|
Python
|
cifar10_attacks/models.py
|
maloletnik/exploring-blackbox-attacks
|
410864554adbd8a03eca5e2216d91e8ce1dd9312
|
[
"MIT"
] | 55
|
2018-04-06T01:08:46.000Z
|
2021-12-02T13:00:07.000Z
|
cifar10_attacks/models.py
|
maloletnik/exploring-blackbox-attacks
|
410864554adbd8a03eca5e2216d91e8ce1dd9312
|
[
"MIT"
] | 3
|
2018-10-21T07:28:20.000Z
|
2020-10-20T17:31:42.000Z
|
cifar10_attacks/models.py
|
maloletnik/exploring-blackbox-attacks
|
410864554adbd8a03eca5e2216d91e8ce1dd9312
|
[
"MIT"
] | 13
|
2018-02-26T04:21:04.000Z
|
2021-11-30T12:13:02.000Z
|
import tensorflow as tf
reuse_variables = None
def load_model(ckpt_dir, batch_size, input_node, labels_node=None, first_var=0):
print(ckpt_dir)
global reuse_variables
if any(x in ckpt_dir for x in ['thin_32_pgd']):
import madry_thin_model
print('Using Madry thin model')
input_scaled = tf.map_fn(lambda image: tf.image.per_image_standardization(image), input_node)
m = madry_thin_model.Model('eval', input_scaled, labels_node)
# m._build_model()
my_vars = tf.global_variables()[first_var:]
reuse_variables = True
class Net(object):
def get_logits(self):
return m.pre_softmax
def get_loss(self):
return m.mean_xent
def get_accuracy(self):
return m.accuracy
def load(self, session):
saver = tf.train.Saver(my_vars)
ckpt_state = tf.train.get_checkpoint_state(ckpt_dir)
saver.restore(session, ckpt_state.model_checkpoint_path)
return Net()
if any(x in ckpt_dir for x in ['thin_32', 'thin_32_adv', 'thin_32_ensadv']):
import resnet_model_reusable
print('using thin model')
hps = resnet_model_reusable.HParams(
batch_size=batch_size,
num_classes=10,
min_lrn_rate=None,
lrn_rate=None,
num_residual_units=5,
use_bottleneck=False,
weight_decay_rate=0.,
relu_leakiness=0.1,
optimizer=None,
)
input_scaled = tf.map_fn(lambda image: tf.image.per_image_standardization(image), input_node)
m = resnet_model_reusable.ResNet(hps, input_scaled, labels_node, 'eval', reuse_variables=reuse_variables)
m._build_model()
my_vars = tf.global_variables()[first_var:]
if labels_node is not None:
m._build_cost()
reuse_variables = True
class Net(object):
def get_logits(self):
return m.logits
def get_loss(self):
return m.cost
def load(self, session):
saver = tf.train.Saver(my_vars)
ckpt_state = tf.train.get_checkpoint_state(ckpt_dir)
saver.restore(session, ckpt_state.model_checkpoint_path)
return Net()
if any(x in ckpt_dir for x in ['wide_28_10', 'wide_28_10_adv', 'wide_28_10_ensadv']):
import resnet_model_reusable_wide
hps = resnet_model_reusable_wide.HParams(
batch_size=batch_size,
num_classes=10,
min_lrn_rate=None,
lrn_rate=None,
num_residual_units=4,
use_bottleneck=False,
weight_decay_rate=0.,
relu_leakiness=0.1,
optimizer=None,
)
input_scaled = tf.map_fn(lambda image: tf.image.per_image_standardization(image), input_node)
m = resnet_model_reusable_wide.ResNet(hps, input_scaled, labels_node, 'eval', reuse_variables=reuse_variables)
m._build_model()
if labels_node is not None:
m._build_cost()
my_vars = tf.global_variables()[first_var:]
reuse_variables = True
class Net(object):
def get_logits(self):
return m.logits
def get_loss(self):
return m.cost
def load(self, session):
saver = tf.train.Saver(my_vars)
ckpt_state = tf.train.get_checkpoint_state(ckpt_dir)
saver.restore(session, ckpt_state.model_checkpoint_path)
return Net()
if any(x in ckpt_dir for x in ['tutorial', 'tutorial_adv', 'tutorial_ensadv']):
import cifar10_reusable
cifar10_reusable.FLAGS.batch_size = batch_size
logits = cifar10_reusable.inference(input_node)
if labels_node is not None:
labels_sparse = tf.argmax(labels_node, axis=1)
loss = cifar10_reusable.loss(logits, labels_sparse)
my_vars = tf.global_variables()[first_var:]
reuse_variables = True
class Net(object):
def get_logits(self):
return logits
def get_loss(self):
return loss
def load(self, session):
saver = tf.train.Saver(my_vars)
ckpt_state = tf.train.get_checkpoint_state(ckpt_dir)
saver.restore(session, ckpt_state.model_checkpoint_path)
return Net()
else:
    # a bare `raise` outside an except block has no exception to re-raise
    raise ValueError('unrecognised checkpoint directory: %s' % ckpt_dir)
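# Usage sketch (TF1-style; paths, shapes and the feed batch are illustrative):
# import tensorflow as tf
# images = tf.placeholder(tf.float32, [128, 32, 32, 3])
# labels = tf.placeholder(tf.float32, [128, 10])
# net = load_model('ckpts/thin_32', 128, images, labels_node=labels)
# with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     net.load(sess)                                  # restore checkpoint weights
#     logits = sess.run(net.get_logits(), feed_dict={images: image_batch})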
| 40.990909
| 118
| 0.606121
| 569
| 4,509
| 4.486819
| 0.182777
| 0.054837
| 0.030161
| 0.012534
| 0.760674
| 0.736389
| 0.709753
| 0.709753
| 0.709753
| 0.685468
| 0
| 0.013527
| 0.311377
| 4,509
| 109
| 119
| 41.366972
| 0.808696
| 0.003548
| 0
| 0.660377
| 0
| 0
| 0.037631
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.04717
| null | null | 0.028302
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b766c6352f2f7827e7264b4080ea1294630a3912
| 127
|
py
|
Python
|
autosearch/__init__.py
|
ktnyt/autosearch
|
6944c1956bc2e168afda0ef244f48a8a080f4a92
|
[
"MIT"
] | null | null | null |
autosearch/__init__.py
|
ktnyt/autosearch
|
6944c1956bc2e168afda0ef244f48a8a080f4a92
|
[
"MIT"
] | null | null | null |
autosearch/__init__.py
|
ktnyt/autosearch
|
6944c1956bc2e168afda0ef244f48a8a080f4a92
|
[
"MIT"
] | null | null | null |
from autosearch.searcher import Searcher
from autosearch.parser import Parser
from autosearch.autosearcher import Autosearcher
| 31.75
| 48
| 0.88189
| 15
| 127
| 7.466667
| 0.4
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094488
| 127
| 3
| 49
| 42.333333
| 0.973913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b7e847e0ce6b9106d65812730e44e377d90a5708
| 31
|
py
|
Python
|
hello.py
|
xamevou/someJupyterNotebooks
|
f8975023b22eba22740a52c92c4b76a72757ee7b
|
[
"MIT"
] | null | null | null |
hello.py
|
xamevou/someJupyterNotebooks
|
f8975023b22eba22740a52c92c4b76a72757ee7b
|
[
"MIT"
] | null | null | null |
hello.py
|
xamevou/someJupyterNotebooks
|
f8975023b22eba22740a52c92c4b76a72757ee7b
|
[
"MIT"
] | null | null | null |
print("Saludos desde Binder!")
| 15.5
| 30
| 0.741935
| 4
| 31
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 31
| 1
| 31
| 31
| 0.821429
| 0
| 0
| 0
| 0
| 0
| 0.677419
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
4d11e4624922c4cb2719ca4e2912b13da65f6d00
| 266
|
py
|
Python
|
jewish/__init__.py
|
meni181818/jewish
|
f8ae37defbcca476f6d38186fdc4075c52618015
|
[
"MIT"
] | 5
|
2016-11-03T17:35:40.000Z
|
2021-02-28T16:05:59.000Z
|
jewish/__init__.py
|
meni181818/jewish
|
f8ae37defbcca476f6d38186fdc4075c52618015
|
[
"MIT"
] | 2
|
2016-01-13T17:16:00.000Z
|
2017-04-18T13:25:41.000Z
|
jewish/__init__.py
|
meni181818/jewish
|
f8ae37defbcca476f6d38186fdc4075c52618015
|
[
"MIT"
] | 6
|
2015-12-09T08:35:40.000Z
|
2022-01-30T22:20:29.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from jewish.date import InvalidDateError
from jewish.date import JewishDate
from jewish.date import JewishDateError
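# Usage sketch (the constructor arguments are an assumption, not verified
# against the package's API):
# from jewish import JewishDate, InvalidDateError
# try:
#     d = JewishDate(5776, 1, 15)   # Hebrew year, month, day
# except InvalidDateError:
#     pass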
| 29.555556
| 40
| 0.879699
| 34
| 266
| 6.323529
| 0.411765
| 0.186047
| 0.297674
| 0.27907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109023
| 266
| 8
| 41
| 33.25
| 0.907173
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.142857
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4d2219ad4dcf4f64491aa491b93b4815948a9d9d
| 200
|
py
|
Python
|
instagram/admin.py
|
israelwangila/insta
|
48653270edd60aabe7d4a42c24032709c2d86c10
|
[
"MIT"
] | 4
|
2020-01-29T04:43:58.000Z
|
2022-03-06T02:50:37.000Z
|
instagram/admin.py
|
israelwangila/insta
|
48653270edd60aabe7d4a42c24032709c2d86c10
|
[
"MIT"
] | 4
|
2021-03-19T00:43:44.000Z
|
2021-09-08T01:00:15.000Z
|
instagram/admin.py
|
israelwangila/insta
|
48653270edd60aabe7d4a42c24032709c2d86c10
|
[
"MIT"
] | 7
|
2020-02-20T06:03:03.000Z
|
2022-03-11T02:57:41.000Z
|
from django.contrib import admin
from .models import Profile, Post, Following, Comment
admin.site.register(Profile)
admin.site.register(Post)
admin.site.register(Following)
admin.site.register(Comment)
| 25
| 50
| 0.83
| 28
| 200
| 5.928571
| 0.428571
| 0.216867
| 0.409639
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065
| 200
| 7
| 51
| 28.571429
| 0.887701
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
4d371ce515d7077e40fa52d86fc1c7a88c942194
| 53,812
|
py
|
Python
|
botstory/integrations/fb/messenger_test.py
|
botstory/bot-story
|
9c5b2fc7f7a14dbd467d70f60d5ba855ef89dac3
|
[
"MIT"
] | 5
|
2017-01-14T13:42:13.000Z
|
2021-07-27T21:52:04.000Z
|
botstory/integrations/fb/messenger_test.py
|
botstory/bot-story
|
9c5b2fc7f7a14dbd467d70f60d5ba855ef89dac3
|
[
"MIT"
] | 235
|
2016-11-07T23:33:28.000Z
|
2018-03-13T11:27:33.000Z
|
botstory/integrations/fb/messenger_test.py
|
hyzhak/bot-story
|
9c5b2fc7f7a14dbd467d70f60d5ba855ef89dac3
|
[
"MIT"
] | 5
|
2017-01-14T13:42:14.000Z
|
2020-11-06T08:33:20.000Z
|
import aiohttp
import asyncio
from botstory.ast import story_context
from botstory.integrations.commonhttp import errors as commonhttp_errors
from botstory.utils import answer
import logging
import unittest
from unittest import mock
import pytest
from . import messenger
from .. import commonhttp, mockdb, mockhttp
from ... import di, Story, utils
from ...middlewares import any, option, sticker
logger = logging.getLogger(__name__)
story = None
def teardown_function(function):
logger.debug('tear down!')
story.clear()
@pytest.mark.asyncio
async def test_send_text_message():
user = utils.build_fake_user()
global story
story = Story()
interface = story.use(messenger.FBInterface(page_access_token='qwerty1'))
mock_http = story.use(mockhttp.MockHttpInterface())
await story.start()
await interface.send_text_message(
recipient=user, text='hi!', quick_replies=None
)
mock_http.post.assert_called_with(
'https://graph.facebook.com/v2.6/me/messages/',
params={
'access_token': 'qwerty1',
},
json={
'message': {
'text': 'hi!',
},
'recipient': {
'id': user['facebook_user_id'],
},
}
)
@pytest.mark.asyncio
async def test_truncate_long_message():
user = utils.build_fake_user()
global story
story = Story()
interface = story.use(messenger.FBInterface(page_access_token='qwerty1'))
mock_http = story.use(mockhttp.MockHttpInterface())
await story.start()
very_long_message = 'very_long_message' * 100
await interface.send_text_message(
recipient=user,
text=very_long_message,
quick_replies=None,
options={
'overflow': 'cut'
}
)
mock_http.post.assert_called_with(
'https://graph.facebook.com/v2.6/me/messages/',
params={
'access_token': 'qwerty1',
},
json={
'message': {
'text': very_long_message[:640],
},
'recipient': {
'id': user['facebook_user_id'],
},
}
)
@pytest.mark.asyncio
async def test_truncate_with_ellipsis_long_message_by_default():
user = utils.build_fake_user()
global story
story = Story()
interface = story.use(messenger.FBInterface(page_access_token='qwerty1'))
mock_http = story.use(mockhttp.MockHttpInterface())
await story.start()
very_long_message = 'very_long_message' * 100
await interface.send_text_message(
recipient=user,
text=very_long_message,
quick_replies=None,
)
mock_http.post.assert_called_with(
'https://graph.facebook.com/v2.6/me/messages/',
params={
'access_token': 'qwerty1',
},
json={
'message': {
'text': very_long_message[:638] + '\u2026',
},
'recipient': {
'id': user['facebook_user_id'],
},
}
)
@pytest.mark.asyncio
async def test_send_list():
with answer.Talk() as talk:
story = talk.story
fb_interface = story.use(messenger.FBInterface(page_access_token='qwerty1'))
mock_http = story.use(mockhttp.MockHttpInterface())
await story.start()
await fb_interface.send_list(
recipient=talk.user,
elements=[{
'title': 'Classic T-Shirt Collection', # (*) required
'image_url': 'https://peterssendreceiveapp.ngrok.io/img/collection.png',
'subtitle': 'See all our colors',
'default_action': {
'type': 'web_url',
'url': 'https://peterssendreceiveapp.ngrok.io/shop_collection',
'messenger_extensions': True,
'webview_height_ratio': 'tall',
'fallback_url': 'https://peterssendreceiveapp.ngrok.io/'
},
'buttons': [{
'title': 'View',
'type': 'web_url',
'url': 'https://peterssendreceiveapp.ngrok.io/collection',
'messenger_extensions': True,
'webview_height_ratio': 'tall',
'fallback_url': 'https://peterssendreceiveapp.ngrok.io/'
}]
}, {
'title': 'Classic White T-Shirt',
'image_url': 'https://peterssendreceiveapp.ngrok.io/img/white-t-shirt.png',
'subtitle': '100% Cotton, 200% Comfortable',
'default_action': {
'type': 'web_url',
'url': 'https://peterssendreceiveapp.ngrok.io/view?item=100',
'messenger_extensions': True,
'webview_height_ratio': 'tall',
'fallback_url': 'https://peterssendreceiveapp.ngrok.io/'
},
'buttons': [{
'title': 'Shop Now',
'type': 'web_url',
'url': 'https://peterssendreceiveapp.ngrok.io/shop?item=100',
'messenger_extensions': True,
'webview_height_ratio': 'tall',
'fallback_url': 'https://peterssendreceiveapp.ngrok.io/'
}]
}, {
'title': 'Classic Blue T-Shirt',
'image_url': 'https://peterssendreceiveapp.ngrok.io/img/blue-t-shirt.png',
'subtitle': '100% Cotton, 200% Comfortable',
'default_action': {
'type': 'web_url',
'url': 'https://peterssendreceiveapp.ngrok.io/view?item=101',
'messenger_extensions': True,
'webview_height_ratio': 'tall',
'fallback_url': 'https://peterssendreceiveapp.ngrok.io/'
},
'buttons': [{
'title': 'Shop Now',
'type': 'web_url',
'url': 'https://peterssendreceiveapp.ngrok.io/shop?item=101',
'messenger_extensions': True,
'webview_height_ratio': 'tall',
'fallback_url': 'https://peterssendreceiveapp.ngrok.io/'
}]
}, {
'title': 'Classic Black T-Shirt',
'image_url': 'https://peterssendreceiveapp.ngrok.io/img/black-t-shirt.png',
'subtitle': '100% Cotton, 200% Comfortable',
'default_action': {
'type': 'web_url',
'url': 'https://peterssendreceiveapp.ngrok.io/view?item=102',
'messenger_extensions': True,
'webview_height_ratio': 'tall',
'fallback_url': 'https://peterssendreceiveapp.ngrok.io/'
},
'buttons': [{
'title': 'Shop Now',
'type': 'web_url',
'url': 'https://peterssendreceiveapp.ngrok.io/shop?item=102',
'messenger_extensions': True,
'webview_height_ratio': 'tall',
'fallback_url': 'https://peterssendreceiveapp.ngrok.io/'
}]
}], buttons=[{
'title': 'View More',
'payload': 'payload',
}])
mock_http.post.assert_called_with(
'https://graph.facebook.com/v2.6/me/messages/',
params={
'access_token': 'qwerty1',
},
json={
'message': {
'attachment': {
'type': 'template',
'payload': {
'template_type': 'list',
'top_element_style': 'large',
'elements': [{
'title': 'Classic T-Shirt Collection', # (*) required
'image_url': 'https://peterssendreceiveapp.ngrok.io/img/collection.png',
'subtitle': 'See all our colors',
'default_action': {
'type': 'web_url',
'url': 'https://peterssendreceiveapp.ngrok.io/shop_collection',
'messenger_extensions': True,
'webview_height_ratio': 'tall',
'fallback_url': 'https://peterssendreceiveapp.ngrok.io/'
},
'buttons': [{
'title': 'View',
'type': 'web_url',
'url': 'https://peterssendreceiveapp.ngrok.io/collection',
'messenger_extensions': True,
'webview_height_ratio': 'tall',
'fallback_url': 'https://peterssendreceiveapp.ngrok.io/'
}]
}, {
'title': 'Classic White T-Shirt',
'image_url': 'https://peterssendreceiveapp.ngrok.io/img/white-t-shirt.png',
'subtitle': '100% Cotton, 200% Comfortable',
'default_action': {
'type': 'web_url',
'url': 'https://peterssendreceiveapp.ngrok.io/view?item=100',
'messenger_extensions': True,
'webview_height_ratio': 'tall',
'fallback_url': 'https://peterssendreceiveapp.ngrok.io/'
},
'buttons': [{
'title': 'Shop Now',
'type': 'web_url',
'url': 'https://peterssendreceiveapp.ngrok.io/shop?item=100',
'messenger_extensions': True,
'webview_height_ratio': 'tall',
'fallback_url': 'https://peterssendreceiveapp.ngrok.io/'
}]
}, {
'title': 'Classic Blue T-Shirt',
'image_url': 'https://peterssendreceiveapp.ngrok.io/img/blue-t-shirt.png',
'subtitle': '100% Cotton, 200% Comfortable',
'default_action': {
'type': 'web_url',
'url': 'https://peterssendreceiveapp.ngrok.io/view?item=101',
'messenger_extensions': True,
'webview_height_ratio': 'tall',
'fallback_url': 'https://peterssendreceiveapp.ngrok.io/'
},
'buttons': [{
'title': 'Shop Now',
'type': 'web_url',
'url': 'https://peterssendreceiveapp.ngrok.io/shop?item=101',
'messenger_extensions': True,
'webview_height_ratio': 'tall',
'fallback_url': 'https://peterssendreceiveapp.ngrok.io/'
}]
}, {
'title': 'Classic Black T-Shirt',
'image_url': 'https://peterssendreceiveapp.ngrok.io/img/black-t-shirt.png',
'subtitle': '100% Cotton, 200% Comfortable',
'default_action': {
'type': 'web_url',
'url': 'https://peterssendreceiveapp.ngrok.io/view?item=102',
'messenger_extensions': True,
'webview_height_ratio': 'tall',
'fallback_url': 'https://peterssendreceiveapp.ngrok.io/'
},
'buttons': [{
'title': 'Shop Now',
'type': 'web_url',
'url': 'https://peterssendreceiveapp.ngrok.io/shop?item=102',
'messenger_extensions': True,
'webview_height_ratio': 'tall',
'fallback_url': 'https://peterssendreceiveapp.ngrok.io/'
}]
}],
'buttons': [
{
'title': 'View More',
'type': 'postback',
'payload': 'payload'
}
]
}
}
},
'recipient': {
'id': talk.user['facebook_user_id'],
},
}
)
@pytest.mark.asyncio
async def test_should_send_template_based_message():
with answer.Talk() as talk:
story = talk.story
fb_interface = story.use(messenger.FBInterface(page_access_token='qwerty1'))
mock_http = story.use(mockhttp.MockHttpInterface())
await story.start()
payload = {
'template_type': 'receipt',
'recipient_name': 'Stephane Crozatier',
'order_number': '12345678902',
'currency': 'USD',
'payment_method': 'Visa 2345',
'order_url': 'http://petersapparel.parseapp.com/order?order_id=123456',
'timestamp': '1428444852',
'elements': [{
'title': 'Classic White T-Shirt',
'subtitle': '100% Soft and Luxurious Cotton',
'quantity': 2,
'price': 50,
'currency': 'USD',
'image_url': 'http://petersapparel.parseapp.com/img/whiteshirt.png'
}, {
'title': 'Classic Gray T-Shirt',
'subtitle': '100% Soft and Luxurious Cotton',
'quantity': 1,
'price': 25,
'currency': 'USD',
'image_url': 'http://petersapparel.parseapp.com/img/grayshirt.png'
}],
'address': {
'street_1': '1 Hacker Way',
'street_2': '',
'city': 'Menlo Park',
'postal_code': '94025',
'state': 'CA',
'country': 'US'
},
'summary': {
'subtotal': 75.00,
'shipping_cost': 4.95,
'total_tax': 6.19,
'total_cost': 56.14
},
'adjustments': [{
'name': 'New Customer Discount',
'amount': 20
}, {
'name': '$10 Off Coupon',
'amount': 10
}]
}
await fb_interface.send_template(talk.user, payload)
mock_http.post.assert_called_with(
'https://graph.facebook.com/v2.6/me/messages/',
params={
'access_token': 'qwerty1',
},
json={
'message': {
'attachment': {
'type': 'template',
'payload': payload,
}
},
'recipient': {
'id': talk.user['facebook_user_id'],
},
}
)
@pytest.mark.asyncio
async def test_send_audio():
with answer.Talk() as talk:
story = talk.story
fb_interface = story.use(messenger.FBInterface(page_access_token='qwerty1'))
mock_http = story.use(mockhttp.MockHttpInterface())
await story.start()
await fb_interface.send_audio(talk.user, 'http://shevchenko.ua/speach.mp3')
mock_http.post.assert_called_with(
'https://graph.facebook.com/v2.6/me/messages/',
params={
'access_token': 'qwerty1',
},
json={
'message': {
'attachment': {
'type': 'audio',
'payload': {
'url': 'http://shevchenko.ua/speach.mp3',
},
}
},
'recipient': {
'id': talk.user['facebook_user_id'],
},
}
)
@pytest.mark.asyncio
async def test_send_image():
with answer.Talk() as talk:
story = talk.story
fb_interface = story.use(messenger.FBInterface(page_access_token='qwerty1'))
mock_http = story.use(mockhttp.MockHttpInterface())
await story.start()
await fb_interface.send_image(talk.user, 'http://shevchenko.ua/image.gif')
mock_http.post.assert_called_with(
'https://graph.facebook.com/v2.6/me/messages/',
params={
'access_token': 'qwerty1',
},
json={
'message': {
'attachment': {
'type': 'image',
'payload': {
'url': 'http://shevchenko.ua/image.gif',
},
}
},
'recipient': {
'id': talk.user['facebook_user_id'],
},
}
)
def should_post_attachment(mock_http, talk):
mock_http.post.assert_called_with(
'https://graph.facebook.com/v2.6/me/messages/',
params={
'access_token': 'qwerty1',
},
json={
'message': {
'attachment': {
'type': 'image',
'payload': {
'url': 'http://shevchenko.ua/image.gif',
},
}
},
'recipient': {
'id': talk.user['facebook_user_id'],
},
}
)
@pytest.mark.asyncio
async def test_retry_send_image():
with answer.Talk() as talk:
story = talk.story
fb_interface = story.use(messenger.FBInterface(page_access_token='qwerty1'))
mock_http = story.use(mockhttp.MockHttpInterface(
post_raise=commonhttp_errors.HttpRequestError(),
))
await story.start()
send_task = fb_interface.send_image(talk.user, 'http://shevchenko.ua/image.gif', options={
'retry_times': 3,
'retry_delay': 0.1,
})
async def lazy_fix_http():
# during this sleep the first 2 retries should fire (and fail)
await asyncio.sleep(0.15)
# then swap in a mock http that does not raise on post
# so the 3rd try should succeed
story.use(mockhttp.MockHttpInterface())
await asyncio.gather(
lazy_fix_http(),
send_task,
)
should_post_attachment(mock_http, talk)
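# Aside (illustrative, not part of the original tests): the retry behaviour
# exercised above boils down to a retry-with-delay loop, roughly:
#     for attempt in range(retry_times):
#         try:
#             return await do_post()              # hypothetical request coroutine
#         except commonhttp_errors.HttpRequestError:
#             if attempt == retry_times - 1:
#                 raise
#             await asyncio.sleep(retry_delay)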
@pytest.mark.asyncio
async def test_retry_send_image_should_fail_on_tries_exceed():
with answer.Talk() as talk:
story = talk.story
fb_interface = story.use(messenger.FBInterface(page_access_token='qwerty1'))
mock_http = story.use(mockhttp.MockHttpInterface(
post_raise=commonhttp_errors.HttpRequestError(),
))
await story.start()
with pytest.raises(commonhttp_errors.HttpRequestError):
await fb_interface.send_image(talk.user, 'http://shevchenko.ua/image.gif', options={
'retry_times': 3,
'retry_delay': 0.1,
})
should_post_attachment(mock_http, talk)
@pytest.mark.asyncio
async def test_integration():
user = utils.build_fake_user()
global story
story = Story()
story.use(messenger.FBInterface(page_access_token='qwerty2'))
story.use(mockdb.MockDB())
mock_http = story.use(mockhttp.MockHttpInterface())
await story.say('hi there!', user=user)
mock_http.post.assert_called_with(
'https://graph.facebook.com/v2.6/me/messages/',
params={
'access_token': 'qwerty2',
},
json={
'message': {
'text': 'hi there!',
},
'recipient': {
'id': user['facebook_user_id'],
},
}
)
@pytest.mark.asyncio
async def test_quick_replies():
user = utils.build_fake_user()
global story
story = Story()
story.use(messenger.FBInterface(page_access_token='qwerty3'))
story.use(mockdb.MockDB())
mock_http = story.use(mockhttp.MockHttpInterface())
await story.ask(
'Which color do you like?',
quick_replies=[{
'title': 'Red',
'payload': 0xff0000,
}, {
'title': 'Green',
'payload': 0x00ff00,
}, {
'title': 'Blue',
'payload': 0x0000ff,
}],
user=user,
)
mock_http.post.assert_called_with(
'https://graph.facebook.com/v2.6/me/messages/',
params={
'access_token': 'qwerty3',
},
json={
'message': {
'text': 'Which color do you like?',
'quick_replies': [
{
'content_type': 'text',
'title': 'Red',
'payload': 0xff0000,
},
{
'content_type': 'text',
'title': 'Green',
'payload': 0x00ff00,
},
{
'content_type': 'text',
'title': 'Blue',
'payload': 0x0000ff,
},
],
},
'recipient': {
'id': user['facebook_user_id'],
},
}
)
@pytest.mark.asyncio
async def test_quick_replies_with_location():
user = utils.build_fake_user()
global story
story = Story()
story.use(messenger.FBInterface(page_access_token='qwerty3'))
story.use(mockdb.MockDB())
mock_http = story.use(mockhttp.MockHttpInterface())
await story.ask(
'Where do you live?',
quick_replies=[{
'content_type': 'location',
}, {
'title': 'Europe',
'payload': 'SET_LOCATION_EU',
}, {
'title': 'US :',
'payload': 'SET_LOCATION_US',
}, {
'title': 'Ukraine',
'payload': 'SET_LOCATION_UA',
}, ],
user=user,
)
mock_http.post.assert_called_with(
'https://graph.facebook.com/v2.6/me/messages/',
params={
'access_token': 'qwerty3',
},
json={
'message': {
'text': 'Where do you live?',
'quick_replies': [
{
'content_type': 'location',
}, {
'content_type': 'text',
'title': 'Europe',
'payload': 'SET_LOCATION_EU',
}, {
'content_type': 'text',
'title': 'US :',
'payload': 'SET_LOCATION_US',
}, {
'content_type': 'text',
'title': 'Ukraine',
'payload': 'SET_LOCATION_UA',
},
],
},
'recipient': {
'id': user['facebook_user_id'],
},
}
)
@pytest.mark.asyncio
async def test_setup_webhook():
global story
story = Story()
fb_interface = story.use(messenger.FBInterface(
webhook_url='/webhook',
webhook_token='some-token',
))
mock_http = story.use(mockhttp.MockHttpInterface())
await story.start()
mock_http.webhook.assert_called_with(
'/webhook',
fb_interface.handle,
'some-token',
)
@pytest.mark.asyncio
async def test_should_request_user_data_once_we_do_not_know_current_user():
global story
story = Story()
fb_interface = story.use(messenger.FBInterface(
page_access_token='qwerty4',
webhook_url='/webhook',
webhook_token='some-token',
))
http = story.use(mockhttp.MockHttpInterface(get={
'first_name': 'Peter',
'last_name': 'Chang',
'profile_pic': 'https://fbcdn-profile-a.akamaihd.net/hprofile-ak-xpf1/v/t1.0-1/p200x200/13055603_10105219398495383_8237637584159975445_n.jpg?oh=1d241d4b6d4dac50eaf9bb73288ea192&oe=57AF5C03&__gda__=1470213755_ab17c8c8e3a0a447fed3f272fa2179ce',
'locale': 'en_US',
'timezone': -7,
'gender': 'male'
}))
story.use(mockdb.MockDB())
await fb_interface.process({
'object': 'page',
'entry': [{
'id': 'PAGE_ID',
'time': 1473204787206,
'messaging': [
{
'sender': {
'id': 'USER_ID'
},
'recipient': {
'id': 'PAGE_ID'
},
'timestamp': 1458692752478,
'message': {
'mid': 'mid.1457764197618:41d102a3e1ae206a38',
'seq': 73,
'text': 'hello, world!'
}
}
]
}]
})
http.get.assert_called_with(
'https://graph.facebook.com/v2.6/USER_ID',
params={
'access_token': 'qwerty4',
},
)
@pytest.mark.asyncio
async def test_should_request_user_data_and_fail():
global story
story = Story()
fb_interface = story.use(messenger.FBInterface(
page_access_token='qwerty5',
webhook_url='/webhook',
webhook_token='some-token',
))
story.use(mockhttp.MockHttpInterface(
get_raise=commonhttp.errors.HttpRequestError()))
db = story.use(mockdb.MockDB())
await fb_interface.process({
'object': 'page',
'entry': [{
'id': 'PAGE_ID',
'time': 1473204787206,
'messaging': [
{
'sender': {
'id': 'USER_ID'
},
'recipient': {
'id': 'PAGE_ID'
},
'timestamp': 1458692752478,
'message': {
'mid': 'mid.1457764197618:41d102a3e1ae206a38',
'seq': 73,
'text': 'hello, world!'
}
}
]
}]
})
assert (await db.get_user(facebook_user_id='USER_ID')).no_fb_profile is True
@pytest.mark.asyncio
async def test_webhook_handler_should_return_ok_status_if_http_fail():
global story
story = Story()
fb_interface = story.use(messenger.FBInterface(
page_access_token='qwerty6',
webhook_url='/webhook',
webhook_token='some-token',
))
story.use(mockhttp.MockHttpInterface(get_raise=commonhttp.errors.HttpRequestError()))
story.use(mockdb.MockDB())
res = await fb_interface.process({
'object': 'page',
'entry': [{
'id': 'PAGE_ID',
'time': 1473204787206,
'messaging': [
{
'sender': {
'id': 'USER_ID'
},
'recipient': {
'id': 'PAGE_ID'
},
'timestamp': 1458692752478,
'message': {
'mid': 'mid.1457764197618:41d102a3e1ae206a38',
'seq': 73,
'text': 'hello, world!'
}
}
]
}]
})
assert res['status'] == 200
@pytest.mark.asyncio
async def test_webhook_handler_should_return_ok_status_in_any_case():
global story
story = Story()
fb_interface = messenger.FBInterface()
with mock.patch('botstory.integrations.fb.messenger.logger') as mock_logger:
res = await fb_interface.process({
'object': 'page',
'entry': [{
'id': 'PAGE_ID',
'time': 1473204787206,
'messaging': [
{
'sender': {
'id': 'USER_ID'
},
'recipient': {
'id': 'PAGE_ID'
},
'timestamp': 1458692752478,
'message': {
'mid': 'mid.1457764197618:41d102a3e1ae206a38',
'seq': 73,
'text': 'hello, world!'
}
}
]
}]
})
assert mock_logger.debug.called
assert res['status'] == 200
# integration
@pytest.fixture
def build_fb_interface():
async def builder():
user = utils.build_fake_user()
session = utils.build_fake_session()
global story
story = Story()
storage = story.use(mockdb.MockDB())
fb = story.use(messenger.FBInterface(page_access_token='qwerty'))
await story.start()
await storage.set_session(session)
await storage.set_user(user)
return fb, story
return builder
@pytest.mark.asyncio
async def test_handler_raw_text(build_fb_interface):
fb_interface, story = await build_fb_interface()
correct_trigger = utils.SimpleTrigger()
incorrect_trigger = utils.SimpleTrigger()
@story.on('hello, world!')
def correct_story():
@story.part()
def store_result(ctx):
correct_trigger.receive(story_context.get_message_data(ctx))
@story.on('Goodbye, world!')
def incorrect_story():
@story.part()
def store_result(ctx):
incorrect_trigger.receive(story_context.get_message_data(ctx))
await fb_interface.process({
'object': 'page',
'entry': [{
'id': 'PAGE_ID',
'time': 1473204787206,
'messaging': [
{
'sender': {
'id': 'USER_ID'
},
'recipient': {
'id': 'PAGE_ID'
},
'timestamp': 1458692752478,
'message': {
'mid': 'mid.1457764197618:41d102a3e1ae206a38',
'seq': 73,
'text': 'hello, world!'
}
}
]
}]
})
assert incorrect_trigger.value is None
assert correct_trigger.value == {
'text': {
'raw': 'hello, world!'
}
}
@pytest.mark.asyncio
async def test_handler_selected_option(build_fb_interface):
fb_interface, story = await build_fb_interface()
correct_trigger = utils.SimpleTrigger()
incorrect_trigger = utils.SimpleTrigger()
@story.on(receive=option.Equal('GREEN'))
def correct_story():
@story.part()
def store_result(ctx):
correct_trigger.receive(story_context.get_message_data(ctx))
@story.on(receive=option.Equal('BLUE'))
def incorrect_story():
@story.part()
def store_result(ctx):
incorrect_trigger.receive(story_context.get_message_data(ctx))
await fb_interface.process({
'object': 'page',
'entry': [{
'id': 'PAGE_ID',
'time': 1473204787206,
'messaging': [{
'sender': {
'id': 'USER_ID'
},
'recipient': {
'id': 'PAGE_ID'
},
'timestamp': 1458692752478,
'message': {
'mid': 'mid.1457764197618:41d102a3e1ae206a38',
'seq': 73,
'text': 'Green!',
'quick_reply': {
'payload': 'GREEN'
}
}
}]
}]
})
assert incorrect_trigger.value is None
assert correct_trigger.value == {
'option': {
'value': 'GREEN',
},
'text': {
'raw': 'Green!'
}
}
@pytest.mark.asyncio
async def test_handler_postback(build_fb_interface):
fb_interface, story = await build_fb_interface()
correct_trigger = utils.SimpleTrigger()
incorrect_trigger = utils.SimpleTrigger()
@story.on(receive=option.Equal('GREEN'))
def correct_story():
@story.part()
def store_result(ctx):
correct_trigger.receive(story_context.get_message_data(ctx))
@story.on(receive=option.Equal('BLUE'))
def incorrect_story():
@story.part()
def store_result(ctx):
incorrect_trigger.receive(story_context.get_message_data(ctx))
await fb_interface.process({
'object': 'page',
'entry': [{
'id': 'PAGE_ID',
'time': 1473204787206,
'messaging': [{
'sender': {
'id': 'USER_ID'
},
'recipient': {
'id': 'PAGE_ID'
},
'timestamp': 1458692752478,
'postback': {
'payload': 'GREEN'
},
}]
}]
})
assert incorrect_trigger.value is None
assert correct_trigger.value == {
'option': {'value': 'GREEN'},
}
@pytest.mark.asyncio
async def test_handler_thumbsup(build_fb_interface):
fb_interface, story = await build_fb_interface()
like_is_here_trigger = utils.SimpleTrigger()
@story.on(receive=sticker.Like())
def like_story():
@story.part()
def store_result(ctx):
like_is_here_trigger.passed()
await fb_interface.process({
'object': 'page',
'entry': [{
'id': 'PAGE_ID',
'time': 1473204787206,
'messaging': [{
'sender': {
'id': 'USER_ID'
},
'recipient': {
'id': 'PAGE_ID'
},
'timestamp': 1458692752478,
'message': {
'sticker_id': sticker.SMALL_LIKE,
}
}]
}]
})
assert like_is_here_trigger.is_passed()
@pytest.mark.asyncio
async def test_should_not_process_echo_delivery_and_read_messages_as_regular(build_fb_interface):
fb_interface, story = await build_fb_interface()
echo_trigger = utils.SimpleTrigger()
@story.on(receive=any.Any())
def one_story():
@story.part()
def sync_part(message):
echo_trigger.passed()
await fb_interface.process({
'entry': [
{
'id': '329188380752158',
'messaging': [{
'message': {
'app_id': 345865645763384,
'is_echo': 'True',
'mid': 'mid.1477350590023:38b1efd593',
'seq': 323,
'text': 'Hm I dont know what is it'
},
'recipient': {
'id': '1034692249977067'
},
'sender': {
'id': '329188380752158'
},
'timestamp': 1477350590023
}, {
'read': {
'seq': 2697,
'watermark': 1477354670744
},
'recipient': {
'id': '329188380752158'
},
'sender': {
'id': '1034692249977067'
},
'timestamp': 1477354672037
}, {
'delivery': {
'mids': [
'mid.1477354667117:8fedc43d37'
],
'seq': 2679,
'watermark': 1477354668538
},
'recipient': {
'id': '329188380752158'
},
'sender': {
'id': '1034692249977067'
},
'timestamp': 0
}],
'time': 1477350590772
}
],
'object': 'page'
})
assert not echo_trigger.is_triggered
@pytest.mark.asyncio
async def test_set_greeting_text():
global story
story = Story()
fb_interface = story.use(messenger.FBInterface(page_access_token='qwerty7'))
mock_http = story.use(mockhttp.MockHttpInterface())
await fb_interface.set_greeting_text('Hi there {{user_first_name}}!')
mock_http.post.assert_called_with(
'https://graph.facebook.com/v2.6/me/messenger_profile',
params={
'access_token': 'qwerty7',
},
json={
'greeting': [{
'locale': 'default',
'text': 'Hi there {{user_first_name}}!',
}],
}
)
@pytest.mark.asyncio
async def test_can_set_greeting_text_before_inject_http():
global story
story = Story()
fb_interface = story.use(messenger.FBInterface(page_access_token='qwerty8'))
await fb_interface.set_greeting_text('Hi there {{user_first_name}}!')
mock_http = story.use(mockhttp.MockHttpInterface())
await story.setup()
# give the lazy initialization of the greeting text a moment to complete
await asyncio.sleep(0.1)
mock_http.post.assert_has_calls([unittest.mock.call(
'https://graph.facebook.com/v2.6/me/messenger_profile',
params={
'access_token': 'qwerty8',
},
json={
'greeting': [{
'locale': 'default',
'text': 'Hi there {{user_first_name}}!',
}],
}
)])
@pytest.mark.asyncio
async def test_can_set_greeting_text_in_constructor():
global story
story = Story()
fb = story.use(messenger.FBInterface(
greeting_text='Hi there {{user_first_name}}!',
page_access_token='qwerty9',
))
mock_http = story.use(mockhttp.MockHttpInterface())
await story.setup()
# give the lazy initialization of the greeting text a moment to complete
await asyncio.sleep(0.1)
mock_http.delete.assert_called_with(
'https://graph.facebook.com/v2.6/me/messenger_profile',
params={
'access_token': 'qwerty9',
},
json={
'fields': [
'greeting',
]
},
)
mock_http.post.assert_has_calls([unittest.mock.call(
'https://graph.facebook.com/v2.6/me/messenger_profile',
params={
'access_token': 'qwerty9',
},
json={
'greeting': [{
'locale': 'default',
'text': 'Hi there {{user_first_name}}!',
}],
}
)])
@pytest.mark.asyncio
async def test_remove_greeting_text():
global story
story = Story()
fb_interface = story.use(messenger.FBInterface(page_access_token='qwerty10'))
mock_http = story.use(mockhttp.MockHttpInterface())
await fb_interface.remove_greeting_text()
mock_http.delete.assert_called_with(
'https://graph.facebook.com/v2.6/me/messenger_profile',
params={
'access_token': 'qwerty10',
},
json={
'fields': [
'greeting',
]
}
)
@pytest.mark.asyncio
async def test_set_greeting_call_to_action_payload():
global story
story = Story()
fb_interface = story.use(messenger.FBInterface(page_access_token='qwerty11'))
mock_http = story.use(mockhttp.MockHttpInterface())
await fb_interface.set_greeting_call_to_action_payload('SOME_PAYLOAD')
mock_http.post.assert_called_with(
'https://graph.facebook.com/v2.6/me/messenger_profile',
params={
'access_token': 'qwerty11',
},
json={
'get_started': {'payload': 'SOME_PAYLOAD'}
}
)
@pytest.mark.asyncio
async def test_remove_greeting_call_to_action_payload():
global story
story = Story()
fb_interface = story.use(messenger.FBInterface(page_access_token='qwerty12'))
mock_http = story.use(mockhttp.MockHttpInterface())
await fb_interface.remove_greeting_call_to_action_payload()
mock_http.delete.assert_called_with(
'https://graph.facebook.com/v2.6/me/messenger_profile',
params={
'access_token': 'qwerty12',
},
json={
'fields': [
'get_started',
]
}
)
@pytest.mark.asyncio
async def test_set_persistent_menu():
global story
story = Story()
fb_interface = story.use(messenger.FBInterface(page_access_token='qwerty13'))
mock_http = story.use(mockhttp.MockHttpInterface())
await fb_interface.set_persistent_menu([{
'type': 'postback',
'title': 'Help',
'payload': 'DEVELOPER_DEFINED_PAYLOAD_FOR_HELP'
}, {
'type': 'web_url',
'title': 'View Website',
'url': 'http://petersapparel.parseapp.com/'
}])
mock_http.post.assert_called_with(
'https://graph.facebook.com/v2.6/me/messenger_profile',
params={
'access_token': 'qwerty13',
},
json={
'persistent_menu': [
{
'locale': 'default',
'call_to_actions': [{
'type': 'postback',
'title': 'Help',
'payload': 'DEVELOPER_DEFINED_PAYLOAD_FOR_HELP',
}, {
'type': 'web_url',
'title': 'View Website',
'url': 'http://petersapparel.parseapp.com/',
}],
},
],
}
)
@pytest.mark.asyncio
async def test_set_persistent_menu_with_locales():
global story
story = Story()
fb_interface = story.use(messenger.FBInterface(page_access_token='qwerty13'))
mock_http = story.use(mockhttp.MockHttpInterface())
await fb_interface.set_persistent_menu([
{
'locale': 'default',
"composer_input_disabled": True,
'call_to_actions': [{
'type': 'postback',
'title': 'Help',
'payload': 'DEVELOPER_DEFINED_PAYLOAD_FOR_HELP',
}, {
'type': 'web_url',
'title': 'View Website',
'url': 'http://petersapparel.parseapp.com/',
}],
},
{
'locale': 'uk_UA',
"composer_input_disabled": True,
'call_to_actions': [{
'type': 'postback',
'title': 'Допомога',
'payload': 'DEVELOPER_DEFINED_PAYLOAD_FOR_HELP',
}, {
'type': 'web_url',
'title': 'Переглянути сторінку',
'url': 'http://petersapparel.parseapp.com/',
}],
},
])
mock_http.post.assert_called_with(
'https://graph.facebook.com/v2.6/me/messenger_profile',
params={
'access_token': 'qwerty13',
},
json={
'persistent_menu': [
{
'locale': 'default',
"composer_input_disabled": True,
'call_to_actions': [{
'type': 'postback',
'title': 'Help',
'payload': 'DEVELOPER_DEFINED_PAYLOAD_FOR_HELP',
}, {
'type': 'web_url',
'title': 'View Website',
'url': 'http://petersapparel.parseapp.com/',
}],
},
{
'locale': 'uk_UA',
"composer_input_disabled": True,
'call_to_actions': [{
'type': 'postback',
'title': 'Допомога',
'payload': 'DEVELOPER_DEFINED_PAYLOAD_FOR_HELP',
}, {
'type': 'web_url',
'title': 'Переглянути сторінку',
'url': 'http://petersapparel.parseapp.com/',
}],
},
],
}
)
@pytest.mark.asyncio
async def test_can_set_persistent_menu_before_http():
global story
story = Story()
fb_interface = story.use(messenger.FBInterface(page_access_token='qwerty14'))
await fb_interface.set_persistent_menu([{
'type': 'postback',
'title': 'Help',
'payload': 'DEVELOPER_DEFINED_PAYLOAD_FOR_HELP'
}, {
'type': 'web_url',
'title': 'View Website',
'url': 'http://petersapparel.parseapp.com/'
}])
mock_http = story.use(mockhttp.MockHttpInterface())
await story.setup()
# give the lazy initialization of the persistent menu a moment to complete
await asyncio.sleep(0.1)
mock_http.post.assert_has_calls([unittest.mock.call(
'https://graph.facebook.com/v2.6/me/messenger_profile',
params={
'access_token': 'qwerty14',
},
json={
'persistent_menu': [
{
'locale': 'default',
'call_to_actions': [{
'type': 'postback',
'title': 'Help',
'payload': 'DEVELOPER_DEFINED_PAYLOAD_FOR_HELP'
}, {
'type': 'web_url',
'title': 'View Website',
'url': 'http://petersapparel.parseapp.com/'
}],
},
],
}
)])
@pytest.mark.asyncio
async def test_can_set_persistent_menu_inside_of_constructor():
global story
story = Story()
story.use(messenger.FBInterface(
page_access_token='qwerty15',
persistent_menu=[{
'type': 'postback',
'title': 'Help',
'payload': 'DEVELOPER_DEFINED_PAYLOAD_FOR_HELP'
}, {
'type': 'web_url',
'title': 'View Website',
'url': 'http://petersapparel.parseapp.com/'
}]
))
mock_http = story.use(mockhttp.MockHttpInterface())
await story.setup()
# give the lazy initialization of the persistent menu a moment to complete
await asyncio.sleep(0.1)
mock_http.delete.assert_called_with(
'https://graph.facebook.com/v2.6/me/messenger_profile',
params={
'access_token': 'qwerty15',
},
json={'fields': [
'persistent_menu',
]}
)
mock_http.post.assert_has_calls([unittest.mock.call(
'https://graph.facebook.com/v2.6/me/messenger_profile',
params={
'access_token': 'qwerty15',
},
json={
'persistent_menu': [
{
'locale': 'default',
'call_to_actions': [{
'type': 'postback',
'title': 'Help',
'payload': 'DEVELOPER_DEFINED_PAYLOAD_FOR_HELP'
}, {
'type': 'web_url',
'title': 'View Website',
'url': 'http://petersapparel.parseapp.com/'
}],
},
],
}
)], any_order=True)
@pytest.mark.asyncio
async def test_subscribe_to_page_on_setup():
with answer.Talk() as talk:
story = talk.story
fb_interface = messenger.FBInterface(
page_access_token='one-token',
)
fb_interface.subscribe = aiohttp.test_utils.make_mocked_coro()
http_interface = mockhttp.MockHttpInterface()
story.use(fb_interface)
story.use(http_interface)
await story.setup()
fb_interface.subscribe.assert_called_with()
@pytest.mark.asyncio
async def test_remove_persistent_menu():
global story
story = Story()
fb_interface = story.use(messenger.FBInterface(page_access_token='qwerty16'))
mock_http = story.use(mockhttp.MockHttpInterface())
await fb_interface.remove_persistent_menu()
mock_http.delete.assert_called_with(
'https://graph.facebook.com/v2.6/me/messenger_profile',
params={
'access_token': 'qwerty16',
},
json={'fields': [
'persistent_menu',
]}
)
def test_get_fb_as_deps():
global story
story = Story()
story.use(messenger.FBInterface())
with di.child_scope():
@di.desc()
class OneClass:
@di.inject()
def deps(self, fb):
self.fb = fb
assert isinstance(di.injector.get('one_class').fb, messenger.FBInterface)
def test_bind_fb_deps():
global story
story = Story()
story.use(messenger.FBInterface())
story.use(mockdb.MockDB())
story.use(mockhttp.MockHttpInterface())
with di.child_scope():
@di.desc()
class OneClass:
@di.inject()
def deps(self, fb):
self.fb = fb
assert isinstance(di.injector.get('one_class').fb.http, mockhttp.MockHttpInterface)
assert isinstance(di.injector.get('one_class').fb.storage, mockdb.MockDB)
def one_message(talk):
return {
'object': 'page',
'entry': [{
'id': 'PAGE_ID',
'time': 1473204787206,
'messaging': [
{
'sender': {
'id': talk.user['facebook_user_id'],
},
'recipient': {
'id': 'PAGE_ID'
},
'timestamp': 1458692752478,
'message': {
'mid': 'mid.1457764197618:41d102a3e1ae206a38',
'seq': 73,
'text': 'hello, world!'
}
}
]
}]
}
@pytest.mark.asyncio
async def test_quickly_returns_200ok():
trigger = utils.SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
fb_interface = story.use(messenger.FBInterface(page_access_token='qwerty1'))
story.use(mockdb.MockDB())
story.use(mockhttp.MockHttpInterface())
@story.on('hello, world!')
def one_story():
@story.part()
def store_result(ctx):
trigger.passed()
await story.start()
res = await fb_interface.handle(one_message(talk))
assert res == {
'status': 200,
'text': 'Ok!',
}
assert not trigger.is_passed()
await asyncio.sleep(0)
assert trigger.is_passed()
@pytest.mark.asyncio
async def test_subscribe():
fb_interface = messenger.FBInterface(
page_access_token='one-token',
)
http_interface = mockhttp.MockHttpInterface()
fb_interface.add_http(http_interface)
await fb_interface.subscribe()
http_interface.post.assert_called_with(
'https://graph.facebook.com/v2.6/me/subscribed_apps',
params={
'access_token': 'one-token',
},
)
@pytest.mark.asyncio
async def test_start_typing():
fake_user = utils.build_fake_user()
fb_interface = messenger.FBInterface(
page_access_token='one-token',
)
http_interface = mockhttp.MockHttpInterface()
fb_interface.add_http(http_interface)
await fb_interface.start_typing(fake_user)
http_interface.post.assert_called_with(
'https://graph.facebook.com/v2.6/me/messages',
params={
'access_token': 'one-token',
},
json={
'recipient': {
'id': fake_user['facebook_user_id'],
},
'sender_action': 'typing_on',
}
)
@pytest.mark.asyncio
async def test_stop_typing():
fake_user = utils.build_fake_user()
fb_interface = messenger.FBInterface(
page_access_token='one-token',
)
http_interface = mockhttp.MockHttpInterface()
fb_interface.add_http(http_interface)
await fb_interface.stop_typing(fake_user)
http_interface.post.assert_called_with(
'https://graph.facebook.com/v2.6/me/messages',
params={
'access_token': 'one-token',
},
json={
'recipient': {
'id': fake_user['facebook_user_id'],
},
'sender_action': 'typing_off',
}
)
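The tests above rely throughout on mock call assertions; here is a minimal self-contained illustration of that pattern with unittest.mock (the client object and URL are hypothetical):

# Minimal illustration of the assert_called_with pattern used above.
from unittest import mock

http = mock.Mock()
http.post('https://example.org/send', json={'text': 'hi!'})
http.post.assert_called_with('https://example.org/send', json={'text': 'hi!'})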
| 31.304247
| 250
| 0.478648
| 4,556
| 53,812
| 5.429982
| 0.101624
| 0.033793
| 0.045273
| 0.053357
| 0.840697
| 0.826064
| 0.80654
| 0.781883
| 0.764259
| 0.73657
| 0
| 0.034394
| 0.398647
| 53,812
| 1,718
| 251
| 31.322468
| 0.730099
| 0.007266
| 0
| 0.674983
| 0
| 0.000674
| 0.234432
| 0.014623
| 0
| 0
| 0.000899
| 0
| 0.032367
| 1
| 0.017532
| false
| 0.004046
| 0.008766
| 0.000674
| 0.02967
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4d8ab5aeaed5c5b25fd1716bda5cc78bc0bac213
| 114
|
py
|
Python
|
tests/__init__.py
|
sobolevn/python-typeclasses
|
5052a4ecc729a43ae010689575c147dd91b4d397
|
[
"ISC"
] | 51
|
2019-07-03T17:17:49.000Z
|
2022-01-09T16:24:29.000Z
|
tests/__init__.py
|
sobolevn/python-typeclasses
|
5052a4ecc729a43ae010689575c147dd91b4d397
|
[
"ISC"
] | 8
|
2019-06-17T02:20:29.000Z
|
2021-08-07T22:15:41.000Z
|
tests/__init__.py
|
sobolevn/python-typeclasses
|
5052a4ecc729a43ae010689575c147dd91b4d397
|
[
"ISC"
] | 3
|
2019-09-25T01:05:20.000Z
|
2020-01-28T13:48:59.000Z
|
"""Do not import this package.
This file is required by pylint and this docstring is required by pydocstyle.
"""
| 22.8
| 77
| 0.754386
| 18
| 114
| 4.777778
| 0.722222
| 0.232558
| 0.27907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175439
| 114
| 4
| 78
| 28.5
| 0.914894
| 0.929825
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4d8d5ed2cc9447da9bda6c56127c203212531037
| 13,374
|
py
|
Python
|
esn_models.py
|
slawrie/covariance-reservoir
|
1cebe39f2e657d0946d723f430ade27efb63d8e9
|
[
"MIT"
] | null | null | null |
esn_models.py
|
slawrie/covariance-reservoir
|
1cebe39f2e657d0946d723f430ade27efb63d8e9
|
[
"MIT"
] | null | null | null |
esn_models.py
|
slawrie/covariance-reservoir
|
1cebe39f2e657d0946d723f430ade27efb63d8e9
|
[
"MIT"
] | null | null | null |
'''
This file contains all the elements needed to define a reservoir, run it, and collect states.
For training, create a reservoir, run it on the data, collect the states, and then use
a linear readout to create the mapping (a usage sketch follows this module).
'''
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import scipy.linalg
from sklearn.metrics import accuracy_score
from sklearn.linear_model import Ridge
# auxiliary function to compute covariance for tensors
def my_covariance(x):
N = x.shape[2]
m1 = x - x.sum(2, keepdims=True) / N
out = np.einsum('ijk,ilk->ijl', m1, m1) / (N - 1)
return out
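# Sanity check (illustrative, not part of the original module): for a single
# example, my_covariance matches np.cov over the time axis, e.g.
#     x = np.random.rand(1, 3, 100)   # (numExamples, numUnits, timeLen)
#     assert np.allclose(my_covariance(x)[0], np.cov(x[0]))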
class SequentialReservoir:
def __init__(self, inSize, resSize, outSize, style='random', leak=1.0, in_density=1.0, density=1.0, radius=0.9,
random_state=42):
self.random_state = random_state
np.random.seed(self.random_state) # fix seed to fix parameters
self.inSize = inSize # number of inputs
self.resSize = resSize # neurons in reservoir
self.outSize = outSize # number of outputs, must match number of classes
self.density = density # connection density within reservoir
self.radius = radius # spectral radius
self.in_density = in_density # connection density from inputs to reservoir
self.Win = (np.random.rand(self.resSize, self.inSize + 1)) - 0.5
self.Win[np.random.rand(self.resSize, self.inSize + 1) > self.in_density] = 0
self.style = style
# now, get the adjacency matrices
# feedforward
if self.density == 0 or self.radius == 0:
self.W = np.zeros((self.resSize, self.resSize))
self.rhoW = 0
else:
if style == 'random':
self.W = np.random.rand(self.resSize, self.resSize) - 0.5 # non sparse
self.W[np.random.rand(resSize, resSize) > self.density] = 0
elif style == 'sym':
self.W = np.zeros([self.resSize, self.resSize])
for i in range(self.resSize):
for j in range(i):
if np.random.rand() < self.density:
self.W[i, j] = np.random.rand() - 0.5
self.W[j, i] = self.W[i, j]
elif style == 'skewsym':
self.W = np.zeros([self.resSize, self.resSize])
for i in range(self.resSize):
for j in range(i):
if np.random.rand() < self.density:
self.W[i, j] = np.random.rand() - 0.5
self.W[j, i] = -self.W[i, j]
elif style == 'self_rec':  # only self-connections (self-recurrent)
self.W = np.zeros([self.resSize, self.resSize]) # weights between 0 and 1
for i in range(self.resSize):
self.W[i, i] = np.random.rand() - 0.5
self.rhoW = np.max(abs(scipy.linalg.eig(self.W)[0]))
self.W *= self.radius / self.rhoW # scale with spectral radius
self.Wout = np.random.rand(self.outSize, self.resSize + 1) - 0.5
self.leak = leak
self.resStates = None
self.resCovariance = None
self.outStates = None
self.outCovariance = None
self.resMean = None
self.outMean = None
return
def run(self, data, initLen, trainLen, covariance=False, mean=False):
'''Data is an array. Dimension is (numExamples, numInputs, timeLen)'''
self.resStates = np.zeros((data.shape[0], self.resSize, trainLen)) # collected states
self.outStates = np.zeros((data.shape[0], self.outSize, trainLen)) # output units states
# run the reservoir with the data and collect X
x = np.zeros((data.shape[0], self.resSize)) # current state of reservoir
y = np.zeros((data.shape[0], self.outSize)) # current state of outputs
# add bias unit to input data
ones = np.ones((data.shape[0], 1, data.shape[2]))
inputs = np.concatenate((ones, data), axis=1)
for t in range(trainLen + initLen):
u = inputs[:, :, t] # this has shape batch, inputs
x = (1 - self.leak) * x + self.leak * np.tanh(np.einsum('ij, kj ->ik', u, self.Win) \
+ np.einsum('kj, ij -> ik', self.W, x)) # batch, res
# add bias to reservoir
ones = np.ones((data.shape[0], 1))
u_ = np.concatenate((ones, x), axis=1)
y = np.einsum('ij,kj -> ki', self.Wout, u_)
if t >= initLen:
self.resStates[:, :, t - initLen] = x
self.outStates[:, :, t - initLen] = y
if covariance: # update covariances
self.resCovariance = np.zeros((data.shape[0], self.resSize + 1, self.resSize + 1))
self.outCovariance = np.zeros((data.shape[0], self.outSize, self.outSize))
ones = np.ones((self.resStates.shape[0], 1, self.resStates.shape[2]))
states = np.concatenate((ones, self.resStates), axis=1)
self.resCovariance = my_covariance(states)
self.outCovariance = my_covariance(self.outStates)
# update mean states
if mean: # update mean states
self.resMean = np.mean(self.resStates, axis=2)
self.outMean = np.mean(self.outStates, axis=2)
return
def update_outputs(self, trainLen, initLen, mean=False, covariance=False):
'''Use this function to only update output states and covariances during training'''
# run the reservoir with the data and collect X
y = np.zeros((self.resStates.shape[0], self.outSize)) # current state of outputs
# add bias unit to input data
# add bias to reservoir
ones = np.ones((self.resStates.shape[0], 1, initLen + trainLen))
u_ = np.concatenate((ones, self.resStates), axis=1) # examples, units, time
self.outStates = np.einsum('ij,kjt -> kit', self.Wout, u_)
if covariance:
self.outCovariance = my_covariance(self.outStates)
if mean:
self.outMean = np.mean(self.outStates, axis=2)
return
def predict(self, mode='mean'):
# Run the data through the reservoir and read the output units: the class whose output has the largest mean (or variance, in 'covariance' mode) wins, e.g. if var0 > var1 the class is 0.
Y = []
if mode == 'mean':
for ex in range(self.resStates.shape[0]):
max_out = np.max(self.outMean[ex, :])
pred = np.where(self.outMean[ex, :] == max_out)[0][0]
Y.append(pred)
if mode == 'covariance':
for ex in range(self.resStates.shape[0]):
diagonals = np.diag(self.outCovariance[ex, :, :])
max_out = np.max(diagonals)
pred = np.where(diagonals == max_out)[0][0]
Y.append(pred)
return Y
def score(self, Y_true, Y_pred):
return accuracy_score(Y_true, Y_pred)
class SegregatedReservoir:
# create a reservoir with segregated inputs and outputs
def __init__(self, inSize, resSize, outSize, style='random', leak=1.0, in_density=1.0, density=1.0, radius=0.9,
random_state=42, Nin = 50, Nout = 50):
self.random_state = random_state
np.random.seed(self.random_state)
self.inSize = inSize
self.resSize = resSize
self.outSize = outSize
self.density = density
self.radius = radius
self.in_density = in_density
self.Nin = Nin
self.Nout = Nout
self.style = style
# segregate input nodes
self.Win = np.zeros([self.resSize, self.inSize + 1])
for i in range(int(Nin/2)):
self.Win[i, :] = np.random.rand(self.inSize + 1) - 0.5
self.Win[self.resSize-1-i, :] = np.random.rand(self.inSize + 1) - 0.5
# now, get the connectivity matrices
if style == 'random':
self.W = np.random.rand(self.resSize, self.resSize) # non sparse
self.W = self.W - 0.5 # weights between -0.5 and 0.5
self.W[np.random.rand(resSize, resSize) > self.density] = 0
elif style == 'sym':
self.W = np.zeros([self.resSize, self.resSize])
for i in range(self.resSize):
for j in range(i):
if np.random.rand() < self.density:
self.W[i, j] = np.random.rand() - 0.5
self.W[j, i] = self.W[i, j]
elif style == 'skewsym':
self.W = np.zeros([self.resSize, self.resSize])
for i in range(self.resSize):
for j in range(i):
if np.random.rand() < self.density:
self.W[i, j] = np.random.rand() - 0.5
self.W[j, i] = -self.W[i, j]
self.rhoW = np.max(abs(scipy.linalg.eig(self.W)[0]))
self.W *= self.radius / self.rhoW
# segregate outputs when running reservoir, resStates will now only contain information about Nout nodes
self.Wout = np.random.rand(self.outSize, self.Nout + 1) - 0.5
self.leak = leak
self.resStates = None
self.resCovariance = None
self.outStates = None
self.outCovariance = None
self.resMean = None
self.outMean = None
return
def run(self, data, initLen, trainLen, covariance=False, mean=False):
'''Data is an array. Dimension is (numExamples, numInputs, timeLen)'''
self.resStates = np.zeros((data.shape[0], self.Nout, trainLen)) # collected states
self.outStates = np.zeros((data.shape[0], self.outSize, trainLen)) # output units states
# run the reservoir with the data and collect X
x = np.zeros((data.shape[0], self.resSize)) # current state of reservoir
y = np.zeros((data.shape[0], self.outSize)) # current state of outputs
# add bias unit to input data
ones = np.ones((data.shape[0], 1, data.shape[2]))
inputs = np.concatenate((ones, data), axis=1)
for t in range(trainLen + initLen):
u = inputs[:, :, t] # this has shape batch, inputs
x = (1 - self.leak) * x + self.leak * np.tanh(np.einsum('ij, kj ->ik', u, self.Win) \
+ np.einsum('kj, ij -> ik', self.W, x)) # batch, res
# to update outputs only use Nout nodes
ones = np.ones((data.shape[0], 1))
u_ = np.concatenate((ones, x[:, int(self.resSize/2 - self.Nout/2):int(self.resSize/2 + self.Nout/2)]), axis=1)
y = np.einsum('ij,kj -> ki', self.Wout, u_)
if t >= initLen:
self.resStates[:, :, t - initLen] = x[:, int(self.resSize/2 - self.Nout/2):int(self.resSize/2 + self.Nout/2)]
self.outStates[:, :, t - initLen] = y
if covariance: # update covariances
self.resCovariance = np.zeros((data.shape[0], self.Nout + 1, self.Nout + 1))
self.outCovariance = np.zeros((data.shape[0], self.outSize, self.outSize))
ones = np.ones((self.resStates.shape[0], 1, self.resStates.shape[2]))
states = np.concatenate((ones, self.resStates), axis=1)
self.resCovariance = my_covariance(states)
self.outCovariance = my_covariance(self.outStates)
# update mean states
if mean: # update mean states
self.resMean = np.mean(self.resStates, axis=2)
self.outMean = np.mean(self.outStates, axis=2)
return
def update_outputs(self, trainLen, initLen, mean=False, covariance=False):
'''Use this function to only update output states and covariances during training'''
# run the reservoir with the data and collect X
y = np.zeros((self.resStates.shape[0], self.outSize)) # current state of outputs
# add bias unit to input data
# add bias to reservoir
ones = np.ones((self.resStates.shape[0], 1, initLen + trainLen))
u_ = np.concatenate((ones, self.resStates), axis=1) # examples, units, time
self.outStates = np.einsum('ij,kjt -> kit', self.Wout, u_)
if covariance:
self.outCovariance = my_covariance(self.outStates)
if mean:
self.outMean = np.mean(self.outStates, axis=2)
return
def predict(self, mode='mean'):
Y = []
if mode == 'mean':
for ex in range(self.resStates.shape[0]):
max_out = np.max(self.outMean[ex, :])
pred = np.where(self.outMean[ex, :] == max_out)[0][0]
Y.append(pred)
if mode == 'covariance':
for ex in range(self.resStates.shape[0]):
diagonals = np.diag(self.outCovariance[ex, :, :])
max_out = np.max(diagonals)
pred = np.where(diagonals == max_out)[0][0]
Y.append(pred)
return Y
def score(self, Y_true, Y_pred):
return accuracy_score(Y_true, Y_pred)
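A minimal usage sketch following the workflow in the module docstring; the shapes and labels below are assumed for illustration, and in practice Wout would be fitted (e.g. with the imported Ridge regression) on the collected states rather than left random:

# Illustrative usage only; data, labels and all sizes are assumed.
import numpy as np

data = np.random.rand(20, 3, 120)            # (numExamples, numInputs, timeLen)
labels = np.random.randint(0, 2, size=20)    # assumed binary class labels

res = SequentialReservoir(inSize=3, resSize=100, outSize=2)
res.run(data, initLen=20, trainLen=100, mean=True)   # discard 20 warm-up steps
preds = res.predict(mode='mean')
print(res.score(labels, preds))              # accuracy with the (untrained) readout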
| 45.489796
| 126
| 0.552864
| 1,731
| 13,374
| 4.23628
| 0.118429
| 0.055503
| 0.031092
| 0.026183
| 0.7979
| 0.792309
| 0.779763
| 0.773081
| 0.754262
| 0.74717
| 0
| 0.017394
| 0.325108
| 13,374
| 293
| 127
| 45.645051
| 0.795037
| 0.159713
| 0
| 0.805556
| 0
| 0
| 0.01786
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050926
| false
| 0
| 0.027778
| 0.009259
| 0.138889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4dbbb6b08751524fcfb74c5d7db116f3110369c2
| 27
|
py
|
Python
|
rmon/processes/__init__.py
|
gnkr8/rmon
|
7a2438a90baf3ed28faceacd8806d7ca1b32ec90
|
[
"MIT"
] | 1
|
2015-09-08T06:52:44.000Z
|
2015-09-08T06:52:44.000Z
|
rmon/processes/__init__.py
|
gnkr8/rmon
|
7a2438a90baf3ed28faceacd8806d7ca1b32ec90
|
[
"MIT"
] | null | null | null |
rmon/processes/__init__.py
|
gnkr8/rmon
|
7a2438a90baf3ed28faceacd8806d7ca1b32ec90
|
[
"MIT"
] | null | null | null |
from .base import Process
| 9
| 25
| 0.777778
| 4
| 27
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185185
| 27
| 2
| 26
| 13.5
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
127fdf96df5d11f7e78a1cb420617db9c2b9ce35
| 427
|
py
|
Python
|
romp/lib/evaluation/__init__.py
|
iory/ROMP
|
d50bab681b5a60d15526fbeec1ed98cb020864b2
|
[
"MIT"
] | null | null | null |
romp/lib/evaluation/__init__.py
|
iory/ROMP
|
d50bab681b5a60d15526fbeec1ed98cb020864b2
|
[
"MIT"
] | null | null | null |
romp/lib/evaluation/__init__.py
|
iory/ROMP
|
d50bab681b5a60d15526fbeec1ed98cb020864b2
|
[
"MIT"
] | null | null | null |
from .evaluation_matrix import compute_error_verts, compute_similarity_transform, compute_similarity_transform_torch, \
    batch_compute_similarity_transform_torch, compute_mpjpe
#from evaluation.eval_pckh import eval_pck, eval_pckh
#from evaluation.pw3d_eval import *
from .eval_ds_utils import h36m_evaluation_act_wise, cmup_evaluation_act_wise, pp_evaluation_cam_wise, determ_worst_best, reorganize_vis_info
| 85.4
| 141
| 0.854801
| 58
| 427
| 5.741379
| 0.517241
| 0.126126
| 0.234234
| 0.186186
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007853
| 0.105386
| 427
| 5
| 141
| 85.4
| 0.863874
| 0.201405
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
12a8ae0efe4df6f28a2c98c9efd097e8175bc2fc
| 190
|
py
|
Python
|
ispapi/providers/__init__.py
|
mohamed-zezo/ispapi
|
ec2dc8cde0b742d64a6a0df907ff6572cc279957
|
[
"MIT"
] | 2
|
2019-07-08T06:23:41.000Z
|
2020-07-07T20:16:44.000Z
|
ispapi/providers/__init__.py
|
mohamed-zezo/ispapi
|
ec2dc8cde0b742d64a6a0df907ff6572cc279957
|
[
"MIT"
] | 2
|
2020-07-08T21:28:49.000Z
|
2021-06-02T00:17:14.000Z
|
ispapi/providers/__init__.py
|
mohamed-zezo/ispapi
|
ec2dc8cde0b742d64a6a0df907ff6572cc279957
|
[
"MIT"
] | 1
|
2020-07-12T12:32:37.000Z
|
2020-07-12T12:32:37.000Z
|
class Providers:
    from .telecomegypt import TelecomEgypt
    from .vodafoneegypt import VodafoneEgypt
    try:
        from .lebaranl import LebaraNL
    except ImportError:
        pass
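The try/except around the LebaraNL import is the standard guard for an optional dependency; a generic hedged sketch of the same pattern (module name hypothetical):

# Generic optional-import guard (hypothetical module name).
try:
    import optional_backend
except ImportError:
    optional_backend = None  # degrade gracefully when the backend is absent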
| 23.75
| 44
| 0.705263
| 18
| 190
| 7.444444
| 0.611111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.263158
| 190
| 7
| 45
| 27.142857
| 0.957143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.142857
| 0.571429
| 0
| 0.714286
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
12c974728923b40593e41632b4fc0ca7f7c13d74
| 95
|
py
|
Python
|
Sea/adapter/excitations/ViewProviderExcitation.py
|
FRidh/Sea
|
b474e93a449570a9ba3b915c4d80f814feee2545
|
[
"BSD-3-Clause"
] | 2
|
2015-07-02T13:34:09.000Z
|
2015-09-28T09:07:52.000Z
|
Sea/adapter/excitations/ViewProviderExcitation.py
|
FRidh/Sea
|
b474e93a449570a9ba3b915c4d80f814feee2545
|
[
"BSD-3-Clause"
] | null | null | null |
Sea/adapter/excitations/ViewProviderExcitation.py
|
FRidh/Sea
|
b474e93a449570a9ba3b915c4d80f814feee2545
|
[
"BSD-3-Clause"
] | 1
|
2022-01-22T03:01:54.000Z
|
2022-01-22T03:01:54.000Z
|
from ..base import ViewProviderBase
class ViewProviderExcitation(ViewProviderBase):
    pass
| 15.833333
| 47
| 0.810526
| 8
| 95
| 9.625
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136842
| 95
| 5
| 48
| 19
| 0.939024
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
42100f2a6c0094ae25c375cacf15639c48964e64
| 2,554
|
py
|
Python
|
isic/ingest/migrations/0031_auto_20210507_2101.py
|
ImageMarkup/isic
|
607b2b103d0d2a67adb61f8ea88f1461c85ec8f3
|
[
"Apache-2.0"
] | null | null | null |
isic/ingest/migrations/0031_auto_20210507_2101.py
|
ImageMarkup/isic
|
607b2b103d0d2a67adb61f8ea88f1461c85ec8f3
|
[
"Apache-2.0"
] | 18
|
2021-06-10T05:14:34.000Z
|
2022-03-22T02:15:59.000Z
|
isic/ingest/migrations/0031_auto_20210507_2101.py
|
ImageMarkup/isic
|
607b2b103d0d2a67adb61f8ea88f1461c85ec8f3
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.2 on 2021-05-07 21:01
from django.db import migrations
import django_extensions.db.fields


class Migration(migrations.Migration):

    dependencies = [
        ('ingest', '0030_auto_20210507_1620'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='accession',
            options={},
        ),
        migrations.AlterModelOptions(
            name='checklog',
            options={'get_latest_by': 'created', 'ordering': ['created']},
        ),
        migrations.AlterModelOptions(
            name='cohort',
            options={'get_latest_by': 'created', 'ordering': ['created']},
        ),
        migrations.AlterModelOptions(
            name='contributor',
            options={'get_latest_by': 'created', 'ordering': ['created']},
        ),
        migrations.AlterModelOptions(
            name='metadatafile',
            options={'get_latest_by': 'created', 'ordering': ['created']},
        ),
        migrations.AlterModelOptions(
            name='zip',
            options={'get_latest_by': 'created', 'ordering': ['created']},
        ),
        migrations.AlterField(
            model_name='accession',
            name='created',
            field=django_extensions.db.fields.CreationDateTimeField(
                auto_now_add=True, db_index=True
            ),
        ),
        migrations.AlterField(
            model_name='checklog',
            name='created',
            field=django_extensions.db.fields.CreationDateTimeField(
                auto_now_add=True, db_index=True
            ),
        ),
        migrations.AlterField(
            model_name='cohort',
            name='created',
            field=django_extensions.db.fields.CreationDateTimeField(
                auto_now_add=True, db_index=True
            ),
        ),
        migrations.AlterField(
            model_name='contributor',
            name='created',
            field=django_extensions.db.fields.CreationDateTimeField(
                auto_now_add=True, db_index=True
            ),
        ),
        migrations.AlterField(
            model_name='metadatafile',
            name='created',
            field=django_extensions.db.fields.CreationDateTimeField(
                auto_now_add=True, db_index=True
            ),
        ),
        migrations.AlterField(
            model_name='zip',
            name='created',
            field=django_extensions.db.fields.CreationDateTimeField(
                auto_now_add=True, db_index=True
            ),
        ),
    ]
| 31.530864
| 74
| 0.546985
| 212
| 2,554
| 6.382075
| 0.221698
| 0.082779
| 0.093126
| 0.124169
| 0.735403
| 0.735403
| 0.735403
| 0.735403
| 0.698448
| 0.698448
| 0
| 0.017731
| 0.33751
| 2,554
| 80
| 75
| 31.925
| 0.781915
| 0.016836
| 0
| 0.716216
| 1
| 0
| 0.137106
| 0.009167
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.027027
| 0
| 0.067568
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4216b4249195b6a064c674203992c689bed93e53
| 61,595
|
py
|
Python
|
060_hair_segmentation/01_float32/01_hair_segmentation_tflite2h5_weight_int_fullint_float16_quant.py
|
IgiArdiyanto/PINTO_model_zoo
|
9247b56a7dff37f28a8a7822a7ef4dd9adf7234d
|
[
"MIT"
] | 1,529
|
2019-12-11T13:36:23.000Z
|
2022-03-31T18:38:27.000Z
|
060_hair_segmentation/01_float32/01_hair_segmentation_tflite2h5_weight_int_fullint_float16_quant.py
|
IgiArdiyanto/PINTO_model_zoo
|
9247b56a7dff37f28a8a7822a7ef4dd9adf7234d
|
[
"MIT"
] | 200
|
2020-01-06T09:24:42.000Z
|
2022-03-31T17:29:08.000Z
|
060_hair_segmentation/01_float32/01_hair_segmentation_tflite2h5_weight_int_fullint_float16_quant.py
|
IgiArdiyanto/PINTO_model_zoo
|
9247b56a7dff37f28a8a7822a7ef4dd9adf7234d
|
[
"MIT"
] | 288
|
2020-02-21T14:56:02.000Z
|
2022-03-30T03:00:35.000Z
|
### tensorflow==2.3.0
### https://ai.googleblog.com/2020/08/on-device-real-time-body-pose-tracking.html
### https://google.github.io/mediapipe/solutions/pose
### https://www.tensorflow.org/api_docs/python/tf/keras/Model
### https://www.tensorflow.org/lite/guide/ops_compatibility
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/DepthwiseConv2D
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/Add
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/ReLU
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/MaxPool2D
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/Reshape
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/Concatenate
### https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer
### https://github.com/google/mediapipe/issues/245
### https://github.com/mvoelk/keras_layers
### How to initialize a convolution layer with an arbitrary kernel in Keras? https://stackoverrun.com/ja/q/12269118
### saved_model_cli show --dir saved_model/ --tag_set serve --signature_def serving_default
import tensorflow as tf
from tensorflow.python.keras import backend as K
from tensorflow.keras import Model, Input
from tensorflow.keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Add, ReLU, PReLU, MaxPool2D, Reshape, Concatenate, Layer
from tensorflow.keras.initializers import Constant
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.ops import nn_ops
import numpy as np
import sys
import cv2
# tmp = np.load('weights/depthwise_conv2d_Kernel')
# print(tmp.shape)
# print(tmp)
# def init_f(shape, dtype=None):
# ker = np.load('weights/depthwise_conv2d_Kernel')
# print(shape)
# return ker
# sys.exit(0)
# class MaxPoolingWithArgmax2D(Layer):
# def __init__(self, pool_size=(2, 2), strides=(2, 2), padding='same', **kwargs):
# super(MaxPoolingWithArgmax2D, self).__init__(**kwargs)
# self.pool_size = conv_utils.normalize_tuple(pool_size, 2, 'pool_size')
# self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
# self.padding = conv_utils.normalize_padding(padding)
# def call(self, inputs, **kwargs):
# ksize = [1, self.pool_size[0], self.pool_size[1], 1]
# strides = [1, self.strides[0], self.strides[1], 1]
# padding = self.padding.upper()
# output, argmax = nn_ops.max_pool_with_argmax(inputs, ksize, strides, padding)
# # output, argmax = tf.raw_ops.MaxPoolWithArgmax(inputs, ksize, strides, padding)
# argmax = tf.cast(argmax, K.floatx())
# return [output, argmax]
# def compute_output_shape(self, input_shape):
# ratio = (1, 2, 2, 1)
# output_shape = [dim // ratio[idx] if dim is not None else None for idx, dim in enumerate(input_shape)]
# output_shape = tuple(output_shape)
# return [output_shape, output_shape]
# def compute_mask(self, inputs, mask=None):
# return 2 * [None]
# def get_config(self):
# config = super(MaxPoolingWithArgmax2D, self).get_config()
# config.update({
# 'pool_size': self.pool_size,
# 'strides': self.strides,
# 'padding': self.padding,
# })
# return config
def max_pooling_with_argmax2d(input):
net_main = tf.nn.max_pool(input,
ksize=[1,2,2,1],
strides=[1,2,2,1],
padding='SAME')
input_shape = input.get_shape().as_list()
    mask_shape = [input_shape[0], input_shape[1]//2, input_shape[2]//2, input_shape[3]]
    pooling_indices = tf.zeros(mask_shape, dtype=tf.int64)
    for n in range(mask_shape[0]):
        for i in range(mask_shape[1]):
            for j in range(mask_shape[2]):
                in_indices = [[n, w, h] for w in range(i*2, i*2+2) for h in range(j*2, j*2+2)]
                window = tf.gather_nd(input, in_indices)
                argmax = tf.argmax(window, axis=0)
indices_location = [[n, i, j, d] for d in range(input_shape[3])]
sparse_indices = tf.SparseTensor(indices=indices_location, values=argmax, dense_shape=mask_shape)
pooling_indices = tf.compat.v1.sparse_add(pooling_indices, sparse_indices)
return [net_main, pooling_indices]
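# NOTE: max_pooling_with_argmax2d above is a pure-TF reference implementation kept for
# comparison only; its per-pixel Python loops are far too slow for real use and it is not
# called below. The graph instead uses tf.raw_ops.MaxPoolWithArgmax, which returns
# (pooled_values, argmax_indices) in one fused op; the indices feed MaxUnpooling2D later.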
class MaxUnpooling2D(Layer):
def __init__(self, size=(2, 2), **kwargs):
super(MaxUnpooling2D, self).__init__(**kwargs)
self.size = conv_utils.normalize_tuple(size, 2, 'size')
def call(self, inputs, output_shape=None):
updates, mask = inputs[0], inputs[1]
mask = tf.cast(mask, 'int32')
input_shape = tf.shape(updates, out_type='int32')
        # calculate the new output shape
if output_shape is None:
output_shape = (input_shape[0], input_shape[1] * self.size[0], input_shape[2] * self.size[1], input_shape[3])
        # calculate indices for batch, height, width and feature maps
one_like_mask = K.ones_like(mask, dtype='int32')
batch_shape = K.concatenate([[input_shape[0]], [1], [1], [1]], axis=0)
batch_range = K.reshape(tf.range(output_shape[0], dtype='int32'), shape=batch_shape)
b = one_like_mask * batch_range
y = mask // (output_shape[2] * output_shape[3])
x = (mask // output_shape[3]) % output_shape[2]
feature_range = tf.range(output_shape[3], dtype='int32')
f = one_like_mask * feature_range
# transpose indices & reshape update values to one dimension
updates_size = tf.size(updates)
indices = K.transpose(K.reshape(K.stack([b, y, x, f]), [4, updates_size]))
values = K.reshape(updates, [updates_size])
ret = tf.scatter_nd(indices, values, output_shape)
return ret
def compute_output_shape(self, input_shape):
mask_shape = input_shape[1]
output_shape = [mask_shape[0], mask_shape[1] * self.size[0], mask_shape[2] * self.size[1], mask_shape[3]]
return tuple(output_shape)
def get_config(self):
config = super(MaxUnpooling2D, self).get_config()
config.update({
'size': self.size,
})
return config
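# A minimal sketch of how MaxUnpooling2D decodes the flattened argmax indices, assuming
# the default include_batch_in_index=False layout of MaxPoolWithArgmax: each index encodes
# (y * width + x) * channels + c within its batch element, where width/channels are those
# of the pre-pooling tensor (which equals the unpooled output here). Hence:
#   y = idx // (width * channels)
#   x = (idx // channels) % width
#   c = idx % channels
# scatter_nd then writes each pooled value back to (b, y, x, c) in the upsampled tensor.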
height = 512
width = 512
inputs = Input(shape=(height, width, 4), batch_size=1, name='input')
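# The 4-channel input presumably follows the MediaPipe hair segmentation convention:
# RGB plus the previous frame's mask as a 4th channel (zeros on the first frame).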
# Block_01
conv1_1 = Conv2D(filters=8, kernel_size=[2, 2], strides=[2, 2], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_Bias')))(inputs)
prelu1_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_Alpha')), shared_axes=[1, 2])(conv1_1)
conv1_2 = Conv2D(filters=32, kernel_size=[2, 2], strides=[2, 2], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_1_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_1_Bias')))(prelu1_1)
prelu1_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_1_Alpha')), shared_axes=[1, 2])(conv1_2)
# Block_02
conv2_1 = Conv2D(filters=16, kernel_size=[2, 2], strides=[2, 2], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_2_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_2_Bias')))(prelu1_2)
prelu2_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_2_Alpha')), shared_axes=[1, 2])(conv2_1)
depthconv2_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_Bias')))(prelu2_1)
conv2_2 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_3_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_3_Bias')))(depthconv2_1)
prelu2_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_3_Alpha')), shared_axes=[1, 2])(conv2_2)
depthconv2_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_1_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_1_Bias')))(prelu2_2)
prelu2_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_4_Alpha')), shared_axes=[1, 2])(depthconv2_2)
conv2_3 = Conv2D(filters=64, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_4_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_4_Bias')))(prelu2_3)
maxpoolarg2_1 = tf.raw_ops.MaxPoolWithArgmax(input=prelu1_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# maxpoolarg2_1 = max_pooling_with_argmax2d(prelu1_2)
conv2_4 = Conv2D(filters=64, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_5_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_5_Bias')))(maxpoolarg2_1[0])
add2_1 = Add()([conv2_3, conv2_4])
prelu2_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_5_Alpha')), shared_axes=[1, 2])(add2_1)
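# Blocks 02-10 resemble ENet-style bottlenecks: a 1x1 projection, a depthwise 3x3 and a
# 1x1 expansion on the main branch, added to a shortcut. The downsampling blocks (02, 05
# and 10) pool the shortcut with MaxPoolWithArgmax and keep the argmax indices so the
# decoder can later unpool to the exact max locations.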
# Block_03
conv3_1 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_6_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_6_Bias')))(prelu2_4)
prelu3_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_6_Alpha')), shared_axes=[1, 2])(conv3_1)
depthconv3_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_2_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_2_Bias')))(prelu3_1)
conv3_2 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_7_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_7_Bias')))(depthconv3_1)
prelu3_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_7_Alpha')), shared_axes=[1, 2])(conv3_2)
depthconv3_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_3_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_3_Bias')))(prelu3_2)
prelu3_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_8_Alpha')), shared_axes=[1, 2])(depthconv3_2)
conv3_3 = Conv2D(filters=64, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_8_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_8_Bias')))(prelu3_3)
add3_1 = Add()([conv3_3, prelu2_4])
prelu3_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_9_Alpha')), shared_axes=[1, 2])(add3_1)
# Block_04
conv4_1 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_9_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_9_Bias')))(prelu3_4)
prelu4_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_10_Alpha')), shared_axes=[1, 2])(conv4_1)
depthconv4_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_4_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_4_Bias')))(prelu4_1)
conv4_2 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_10_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_10_Bias')))(depthconv4_1)
prelu4_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_11_Alpha')), shared_axes=[1, 2])(conv4_2)
depthconv4_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_5_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_5_Bias')))(prelu4_2)
prelu4_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_12_Alpha')), shared_axes=[1, 2])(depthconv4_2)
conv4_3 = Conv2D(filters=64, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_11_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_11_Bias')))(prelu4_3)
add4_1 = Add()([conv4_3, prelu3_4])
prelu4_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_13_Alpha')), shared_axes=[1, 2])(add4_1)
# Block_05
conv5_1 = Conv2D(filters=32, kernel_size=[2, 2], strides=[2, 2], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_12_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_12_Bias')))(prelu4_4)
prelu5_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_14_Alpha')), shared_axes=[1, 2])(conv5_1)
depthconv5_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_6_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_6_Bias')))(prelu5_1)
conv5_2 = Conv2D(filters=32, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_13_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_13_Bias')))(depthconv5_1)
prelu5_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_15_Alpha')), shared_axes=[1, 2])(conv5_2)
depthconv5_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_7_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_7_Bias')))(prelu5_2)
prelu5_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_16_Alpha')), shared_axes=[1, 2])(depthconv5_2)
conv5_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_14_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_14_Bias')))(prelu5_3)
maxpoolarg5_1 = tf.raw_ops.MaxPoolWithArgmax(input=prelu4_4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# maxpoolarg5_1 = max_pooling_with_argmax2d(prelu4_4)
conv5_4 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_15_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_15_Bias')))(maxpoolarg5_1[0])
add5_1 = Add()([conv5_3, conv5_4])
prelu5_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_17_Alpha')), shared_axes=[1, 2])(add5_1)
# Block_06
conv6_1 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_16_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_16_Bias')))(prelu5_4)
prelu6_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_18_Alpha')), shared_axes=[1, 2])(conv6_1)
depthconv6_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_8_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_8_Bias')))(prelu6_1)
conv6_2 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_17_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_17_Bias')))(depthconv6_1)
prelu6_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_19_Alpha')), shared_axes=[1, 2])(conv6_2)
depthconv6_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_9_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_9_Bias')))(prelu6_2)
prelu6_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_20_Alpha')), shared_axes=[1, 2])(depthconv6_2)
conv6_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_18_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_18_Bias')))(prelu6_3)
add6_1 = Add()([conv6_3, prelu5_4])
prelu6_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_21_Alpha')), shared_axes=[1, 2])(add6_1)
# Block_07
conv7_1 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_19_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_19_Bias')))(prelu6_4)
prelu7_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_22_Alpha')), shared_axes=[1, 2])(conv7_1)
depthconv7_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_10_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_10_Bias')))(prelu7_1)
conv7_2 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_20_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_20_Bias')))(depthconv7_1)
prelu7_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_23_Alpha')), shared_axes=[1, 2])(conv7_2)
depthconv7_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_11_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_11_Bias')))(prelu7_2)
prelu7_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_24_Alpha')), shared_axes=[1, 2])(depthconv7_2)
conv7_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_21_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_21_Bias')))(prelu7_3)
add7_1 = Add()([conv7_3, prelu6_4])
prelu7_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_25_Alpha')), shared_axes=[1, 2])(add7_1)
# Block_08
conv8_1 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_22_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_22_Bias')))(prelu7_4)
prelu8_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_26_Alpha')), shared_axes=[1, 2])(conv8_1)
depthconv8_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_12_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_12_Bias')))(prelu8_1)
conv8_2 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_23_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_23_Bias')))(depthconv8_1)
prelu8_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_27_Alpha')), shared_axes=[1, 2])(conv8_2)
depthconv8_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_13_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_13_Bias')))(prelu8_2)
prelu8_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_28_Alpha')), shared_axes=[1, 2])(depthconv8_2)
conv8_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_24_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_24_Bias')))(prelu8_3)
add8_1 = Add()([conv8_3, prelu7_4])
prelu8_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_29_Alpha')), shared_axes=[1, 2])(add8_1)
# Block_09
conv9_1 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_25_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_25_Bias')))(prelu8_4)
prelu9_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_30_Alpha')), shared_axes=[1, 2])(conv9_1)
depthconv9_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_14_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_14_Bias')))(prelu9_1)
conv9_2 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_26_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_26_Bias')))(depthconv9_1)
prelu9_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_31_Alpha')), shared_axes=[1, 2])(conv9_2)
depthconv9_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_15_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_15_Bias')))(prelu9_2)
prelu9_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_32_Alpha')), shared_axes=[1, 2])(depthconv9_2)
conv9_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_27_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_27_Bias')))(prelu9_3)
add9_1 = Add()([conv9_3, prelu8_4])
prelu9_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_33_Alpha')), shared_axes=[1, 2])(add9_1)
# Block_10
conv10_1 = Conv2D(filters=16, kernel_size=[2, 2], strides=[2, 2], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_28_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_28_Bias')))(prelu9_4)
prelu10_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_34_Alpha')), shared_axes=[1, 2])(conv10_1)
depthconv10_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_16_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_16_Bias')))(prelu10_1)
conv10_2 = Conv2D(filters=16, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_29_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_29_Bias')))(depthconv10_1)
prelu10_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_35_Alpha')), shared_axes=[1, 2])(conv10_2)
depthconv10_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_17_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_17_Bias')))(prelu10_2)
prelu10_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_36_Alpha')), shared_axes=[1, 2])(depthconv10_2)
conv10_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_30_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_30_Bias')))(prelu10_3)
maxpoolarg10_1 = tf.raw_ops.MaxPoolWithArgmax(input=prelu9_4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# maxpoolarg10_1 = max_pooling_with_argmax2d(prelu9_4)
add10_1 = Add()([conv10_3, maxpoolarg10_1[0]])
prelu10_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_37_Alpha')), shared_axes=[1, 2])(add10_1)
# Block_11
conv11_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_31_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_31_Bias')))(prelu10_4)
prelu11_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_38_Alpha')), shared_axes=[1, 2])(conv11_1)
depthconv11_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_18_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_18_Bias')))(prelu11_1)
conv11_2 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_32_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_32_Bias')))(depthconv11_1)
prelu11_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_39_Alpha')), shared_axes=[1, 2])(conv11_2)
depthconv11_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_19_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_19_Bias')))(prelu11_2)
prelu11_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_40_Alpha')), shared_axes=[1, 2])(depthconv11_2)
conv11_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_33_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_33_Bias')))(prelu11_3)
add11_1 = Add()([conv11_3, prelu10_4])
prelu11_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_41_Alpha')), shared_axes=[1, 2])(add11_1)
# Block_12
conv12_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_34_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_34_Bias')))(prelu11_4)
prelu12_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_42_Alpha')), shared_axes=[1, 2])(conv12_1)
conv12_2 = Conv2D(filters=8, kernel_size=[3, 3], strides=[1, 1], padding='same', dilation_rate=[2, 2],
kernel_initializer=Constant(np.load('weights/conv2d_35_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_35_Bias')))(prelu12_1)
prelu12_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_43_Alpha')), shared_axes=[1, 2])(conv12_2)
conv12_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_36_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_36_Bias')))(prelu12_2)
add12_1 = Add()([conv12_3, prelu11_4])
prelu12_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_44_Alpha')), shared_axes=[1, 2])(add12_1)
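# Blocks 12, 14 and 16 (and 18, 20, 22 below) interleave dilated 3x3 convolutions with
# rates 2, 4 and 8, growing the receptive field at constant resolution instead of pooling
# further, a common pattern in segmentation encoders.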
# Block_13
conv13_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_37_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_37_Bias')))(prelu12_3)
prelu13_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_45_Alpha')), shared_axes=[1, 2])(conv13_1)
depthconv13_1 = DepthwiseConv2D(kernel_size=[5, 5], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_20_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_20_Bias')))(prelu13_1)
conv13_2 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_38_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_38_Bias')))(depthconv13_1)
prelu13_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_46_Alpha')), shared_axes=[1, 2])(conv13_2)
conv13_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_39_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_39_Bias')))(prelu13_2)
add13_1 = Add()([conv13_3, prelu12_3])
prelu13_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_47_Alpha')), shared_axes=[1, 2])(add13_1)
# Block_14
conv14_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_40_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_40_Bias')))(prelu13_4)
prelu14_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_48_Alpha')), shared_axes=[1, 2])(conv14_1)
conv14_2 = Conv2D(filters=8, kernel_size=[3, 3], strides=[1, 1], padding='same', dilation_rate=[4, 4],
kernel_initializer=Constant(np.load('weights/conv2d_41_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_41_Bias')))(prelu14_1)
prelu14_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_49_Alpha')), shared_axes=[1, 2])(conv14_2)
conv14_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_42_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_42_Bias')))(prelu14_2)
add14_1 = Add()([conv14_3, prelu13_4])
prelu14_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_50_Alpha')), shared_axes=[1, 2])(add14_1)
# Block_15
conv15_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_43_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_43_Bias')))(prelu14_3)
prelu15_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_51_Alpha')), shared_axes=[1, 2])(conv15_1)
depthconv15_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_21_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_21_Bias')))(prelu15_1)
conv15_2 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_44_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_44_Bias')))(depthconv15_1)
prelu15_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_52_Alpha')), shared_axes=[1, 2])(conv15_2)
depthconv15_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_22_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_22_Bias')))(prelu15_2)
prelu15_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_53_Alpha')), shared_axes=[1, 2])(depthconv15_2)
conv15_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_45_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_45_Bias')))(prelu15_3)
add15_1 = Add()([conv15_3, prelu14_3])
prelu15_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_54_Alpha')), shared_axes=[1, 2])(add15_1)
# Block_16
conv16_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_46_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_46_Bias')))(prelu15_4)
prelu16_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_55_Alpha')), shared_axes=[1, 2])(conv16_1)
conv16_2 = Conv2D(filters=8, kernel_size=[3, 3], strides=[1, 1], padding='same', dilation_rate=[8, 8],
kernel_initializer=Constant(np.load('weights/conv2d_47_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_47_Bias')))(prelu16_1)
prelu16_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_56_Alpha')), shared_axes=[1, 2])(conv16_2)
conv16_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_48_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_48_Bias')))(prelu16_2)
add16_1 = Add()([conv16_3, prelu15_4])
prelu16_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_57_Alpha')), shared_axes=[1, 2])(add16_1)
# Block_17
conv17_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_49_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_49_Bias')))(prelu16_3)
prelu17_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_58_Alpha')), shared_axes=[1, 2])(conv17_1)
depthconv17_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_23_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_23_Bias')))(prelu17_1)
conv17_2 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_50_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_50_Bias')))(depthconv17_1)
prelu17_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_59_Alpha')), shared_axes=[1, 2])(conv17_2)
depthconv17_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_24_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_24_Bias')))(prelu17_2)
prelu17_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_60_Alpha')), shared_axes=[1, 2])(depthconv17_2)
conv17_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_51_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_51_Bias')))(prelu17_3)
add17_1 = Add()([conv17_3, prelu16_3])
prelu17_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_61_Alpha')), shared_axes=[1, 2])(add17_1)
# Block_18
conv18_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_46_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_46_Bias')))(prelu17_4)
prelu18_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_55_Alpha')), shared_axes=[1, 2])(conv18_1)
conv18_2 = Conv2D(filters=8, kernel_size=[3, 3], strides=[1, 1], padding='same', dilation_rate=[2, 2],
kernel_initializer=Constant(np.load('weights/conv2d_47_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_47_Bias')))(prelu18_1)
prelu18_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_56_Alpha')), shared_axes=[1, 2])(conv18_2)
conv18_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_48_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_48_Bias')))(prelu18_2)
add18_1 = Add()([conv18_3, prelu17_4])
prelu18_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_57_Alpha')), shared_axes=[1, 2])(add18_1)
# Block_19
conv19_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_55_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_55_Bias')))(prelu18_3)
prelu19_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_65_Alpha')), shared_axes=[1, 2])(conv19_1)
depthconv19_1 = DepthwiseConv2D(kernel_size=[5, 5], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_25_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_25_Bias')))(prelu19_1)
conv19_2 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_56_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_56_Bias')))(depthconv19_1)
prelu19_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_66_Alpha')), shared_axes=[1, 2])(conv19_2)
conv19_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_57_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_57_Bias')))(prelu19_2)
add19_1 = Add()([conv19_3, prelu18_3])
prelu19_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_67_Alpha')), shared_axes=[1, 2])(add19_1)
# Block_20
conv20_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_58_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_58_Bias')))(prelu19_4)
prelu20_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_68_Alpha')), shared_axes=[1, 2])(conv20_1)
conv20_2 = Conv2D(filters=8, kernel_size=[3, 3], strides=[1, 1], padding='same', dilation_rate=[4, 4],
kernel_initializer=Constant(np.load('weights/conv2d_59_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_59_Bias')))(prelu20_1)
prelu20_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_69_Alpha')), shared_axes=[1, 2])(conv20_2)
conv20_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_60_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_60_Bias')))(prelu20_2)
add20_1 = Add()([conv20_3, prelu19_4])
prelu20_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_70_Alpha')), shared_axes=[1, 2])(add20_1)
# Block_21
conv21_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_61_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_61_Bias')))(prelu20_3)
prelu21_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_71_Alpha')), shared_axes=[1, 2])(conv21_1)
depthconv21_1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_26_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_26_Bias')))(prelu21_1)
conv21_2 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_62_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_62_Bias')))(depthconv21_1)
prelu21_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_72_Alpha')), shared_axes=[1, 2])(conv21_2)
depthconv21_2 = DepthwiseConv2D(kernel_size=[3, 3], strides=[1, 1], padding="same", depth_multiplier=1, dilation_rate=[1, 1],
depthwise_initializer=Constant(np.load('weights/depthwise_conv2d_27_Kernel')),
bias_initializer=Constant(np.load('weights/depthwise_conv2d_27_Bias')))(prelu21_2)
prelu21_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_73_Alpha')), shared_axes=[1, 2])(depthconv21_2)
conv21_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_63_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_63_Bias')))(prelu21_3)
add21_1 = Add()([conv21_3, prelu20_3])
prelu21_4 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_74_Alpha')), shared_axes=[1, 2])(add21_1)
# Block_22
conv22_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_64_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_64_Bias')))(prelu21_4)
prelu22_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_75_Alpha')), shared_axes=[1, 2])(conv22_1)
conv22_2 = Conv2D(filters=8, kernel_size=[3, 3], strides=[1, 1], padding='same', dilation_rate=[8, 8],
kernel_initializer=Constant(np.load('weights/conv2d_65_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_65_Bias')))(prelu22_1)
prelu22_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_76_Alpha')), shared_axes=[1, 2])(conv22_2)
conv22_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='valid', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_66_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_66_Bias')))(prelu22_2)
add22_1 = Add()([conv22_3, prelu21_4])
prelu22_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_77_Alpha')), shared_axes=[1, 2])(add22_1)
# Block_23
conv23_1 = Conv2D(filters=4, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_67_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_67_Bias')))(prelu22_3)
prelu23_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_78_Alpha')), shared_axes=[1, 2])(conv23_1)
conv23_2 = Conv2D(filters=4, kernel_size=[3, 3], strides=[1, 1], padding='same', dilation_rate=[8, 8],
kernel_initializer=Constant(np.load('weights/conv2d_68_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_68_Bias')))(prelu23_1)
prelu23_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_79_Alpha')), shared_axes=[1, 2])(conv23_2)
conv23_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_69_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_69_Bias')))(prelu23_2)
add23_1 = Add()([conv23_3, prelu22_3])
prelu23_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_80_Alpha')), shared_axes=[1, 2])(add23_1)
# Block_24
conv24_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_70_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_70_Bias')))(prelu23_3)
prelu24_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_81_Alpha')), shared_axes=[1, 2])(conv24_1)
convtransbias24_1 = Conv2DTranspose(filters=8, kernel_size=(3, 3), strides=(2, 2), padding='same',
kernel_initializer=Constant(np.load('weights/conv2d_transpose_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_transpose_Bias')))(prelu24_1)
prelu24_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_82_Alpha')), shared_axes=[1, 2])(convtransbias24_1)
conv24_2 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_71_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_71_Bias')))(prelu24_2)
conv24_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_72_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_72_Bias')))(prelu23_3)
maxunpool24_1 = MaxUnpooling2D(size=[2, 2])([conv24_3, maxpoolarg10_1[1]])
add24_1 = Add()([conv24_2, maxunpool24_1])
prelu24_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_77_Alpha')), shared_axes=[1, 2])(add24_1)
concat24_1 = Concatenate()([prelu24_3, prelu5_4])
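# Decoder stage: conv24_2 follows a transposed-convolution upsampling of the deep features,
# while conv24_3 is unpooled with the argmax indices saved at Block_10; the two paths are
# added and then concatenated with the matching encoder map (prelu5_4) as a skip
# connection. Blocks 26 and 28 repeat the unpooling with the indices from Blocks 05 and 02.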
# Block_25
conv25_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_73_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_73_Bias')))(concat24_1)
prelu25_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_84_Alpha')), shared_axes=[1, 2])(conv25_1)
conv25_2 = Conv2D(filters=8, kernel_size=[3, 3], strides=[1, 1], padding='same', dilation_rate=[8, 8],
kernel_initializer=Constant(np.load('weights/conv2d_74_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_74_Bias')))(prelu25_1)
prelu25_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_85_Alpha')), shared_axes=[1, 2])(conv25_2)
conv25_3 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_75_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_75_Bias')))(prelu25_2)
conv25_4 = Conv2D(filters=128, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_76_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_76_Bias')))(concat24_1)
add25_1 = Add()([conv25_3, conv25_4])
prelu25_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_86_Alpha')), shared_axes=[1, 2])(add25_1)
# Block_26
conv26_1 = Conv2D(filters=8, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_77_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_77_Bias')))(prelu25_3)
prelu26_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_87_Alpha')), shared_axes=[1, 2])(conv26_1)
convtransbias26_1 = Conv2DTranspose(filters=8, kernel_size=(3, 3), strides=(2, 2), padding='same',
kernel_initializer=Constant(np.load('weights/conv2d_transpose_1_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_transpose_1_Bias')))(prelu26_1)
prelu26_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_88_Alpha')), shared_axes=[1, 2])(convtransbias26_1)
conv26_2 = Conv2D(filters=64, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_78_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_78_Bias')))(prelu26_2)
conv26_3 = Conv2D(filters=64, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_79_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_79_Bias')))(prelu25_3)
maxunpool26_1 = MaxUnpooling2D(size=[2, 2])([conv26_3, maxpoolarg5_1[1]])
add26_1 = Add()([conv26_2, maxunpool26_1])
prelu26_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_89_Alpha')), shared_axes=[1, 2])(add26_1)
concat26_1 = Concatenate()([prelu26_3, prelu2_4])
# Block_27
conv27_1 = Conv2D(filters=4, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_80_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_80_Bias')))(concat26_1)
prelu27_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_90_Alpha')), shared_axes=[1, 2])(conv27_1)
conv27_2 = Conv2D(filters=4, kernel_size=[3, 3], strides=[1, 1], padding='same', dilation_rate=[8, 8],
kernel_initializer=Constant(np.load('weights/conv2d_81_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_81_Bias')))(prelu27_1)
prelu27_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_91_Alpha')), shared_axes=[1, 2])(conv27_2)
conv27_3 = Conv2D(filters=64, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_82_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_82_Bias')))(prelu27_2)
conv27_4 = Conv2D(filters=64, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_83_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_83_Bias')))(concat26_1)
add27_1 = Add()([conv27_3, conv27_4])
prelu27_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_92_Alpha')), shared_axes=[1, 2])(add27_1)
# Block_28
conv28_1 = Conv2D(filters=4, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_84_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_84_Bias')))(prelu27_3)
prelu28_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_93_Alpha')), shared_axes=[1, 2])(conv28_1)
convtransbias28_1 = Conv2DTranspose(filters=4, kernel_size=(3, 3), strides=(2, 2), padding='same',
kernel_initializer=Constant(np.load('weights/conv2d_transpose_2_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_transpose_2_Bias')))(prelu28_1)
prelu28_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_94_Alpha')), shared_axes=[1, 2])(convtransbias28_1)
conv28_2 = Conv2D(filters=32, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_85_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_85_Bias')))(prelu28_2)
conv28_3 = Conv2D(filters=32, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_86_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_86_Bias')))(prelu27_3)
maxunpool28_1 = MaxUnpooling2D(size=[2, 2])([conv28_3, maxpoolarg2_1[1]])
add28_1 = Add()([conv28_2, maxunpool28_1])
prelu28_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_95_Alpha')), shared_axes=[1, 2])(add28_1)
# Block_29
conv29_1 = Conv2D(filters=4, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_87_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_87_Bias')))(prelu28_3)
prelu29_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_96_Alpha')), shared_axes=[1, 2])(conv29_1)
conv29_2 = Conv2D(filters=4, kernel_size=[3, 3], strides=[1, 1], padding='same', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_88_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_88_Bias')))(prelu29_1)
prelu29_2 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_97_Alpha')), shared_axes=[1, 2])(conv29_2)
conv29_3 = Conv2D(filters=32, kernel_size=[1, 1], strides=[1, 1], padding='same', dilation_rate=[1, 1],
kernel_initializer=Constant(np.load('weights/conv2d_89_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_89_Bias')))(prelu29_2)
add29_1 = Add()([conv29_3, prelu28_3])
prelu29_3 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_98_Alpha')), shared_axes=[1, 2])(add29_1)
# Block_30
convtransbias30_1 = Conv2DTranspose(filters=8, kernel_size=(2, 2), strides=(2, 2), padding='same',
kernel_initializer=Constant(np.load('weights/conv2d_transpose_3_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_transpose_3_Bias')))(prelu29_3)
prelu30_1 = PReLU(alpha_initializer=Constant(np.load('weights/p_re_lu_99_Alpha')), shared_axes=[1, 2])(convtransbias30_1)
convtransbias30_2 = Conv2DTranspose(filters=2, kernel_size=(2, 2), strides=(2, 2), padding='same',
kernel_initializer=Constant(np.load('weights/conv2d_transpose_4_Kernel').transpose(1,2,3,0)),
bias_initializer=Constant(np.load('weights/conv2d_transpose_4_Bias')), name='conv2d_transpose_4')(prelu30_1)
# model = Model(inputs=inputs, outputs=[prelu2_4])
model = Model(inputs=inputs, outputs=[convtransbias30_2])
model.summary()
tf.saved_model.save(model, 'saved_model_{}x{}'.format(height, width))
model.save('hair_segmentation_{}x{}.h5'.format(height, width))
full_model = tf.function(lambda inputs: model(inputs))
full_model = full_model.get_concrete_function(inputs=tf.TensorSpec(model.inputs[0].shape, model.inputs[0].dtype))
frozen_func = convert_variables_to_constants_v2(full_model, lower_control_flow=False)
frozen_func.graph.as_graph_def()
tf.io.write_graph(graph_or_graph_def=frozen_func.graph,
logdir=".",
name="hair_segmentation_{}x{}_float32.pb".format(height, width),
as_text=False)
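# convert_variables_to_constants_v2 folds the Keras variables into graph constants, so the
# .pb written above is a self-contained frozen GraphDef with no checkpoint dependency.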
# No Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
tflite_model = converter.convert()
with open('hair_segmentation_{}x{}_float32.tflite'.format(height, width), 'wb') as w:
w.write(tflite_model)
print("tflite convert complete! - hair_segmentation_{}x{}_float32.tflite".format(height, width))
# Weight Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
tflite_model = converter.convert()
with open('hair_segmentation_{}x{}_weight_quant.tflite'.format(height, width), 'wb') as w:
w.write(tflite_model)
print("Weight Quantization complete! - hair_segmentation_{}x{}_weight_quant.tflite".format(height, width))
# def representative_dataset_gen():
# for image in raw_test_data:
# image = cv2.cvtColor(image, cv2.COLOR_RGB2RGBA)
# image = tf.image.resize(image, (height, width))
# image = image[np.newaxis,:,:,:]
# print('image.shape:', image.shape)
# yield [image]
# raw_test_data = np.load('calibration_data_img_person.npy', allow_pickle=True)
# # Integer Quantization - Input/Output=float32
# converter = tf.lite.TFLiteConverter.from_keras_model(model)
# converter.optimizations = [tf.lite.Optimize.DEFAULT]
# converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8, tf.lite.OpsSet.SELECT_TF_OPS]
# converter.representative_dataset = representative_dataset_gen
# tflite_quant_model = converter.convert()
# with open('hair_segmentation_{}x{}_integer_quant.tflite'.format(height, width), 'wb') as w:
# w.write(tflite_quant_model)
# print("Integer Quantization complete! - hair_segmentation_{}x{}_integer_quant.tflite".format(height, width))
# # Full Integer Quantization - Input/Output=int8
# converter = tf.lite.TFLiteConverter.from_keras_model(model)
# converter.optimizations = [tf.lite.Optimize.DEFAULT]
# converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8, tf.lite.OpsSet.SELECT_TF_OPS]
# converter.inference_input_type = tf.uint8
# converter.inference_output_type = tf.uint8
# converter.representative_dataset = representative_dataset_gen
# tflite_quant_model = converter.convert()
# with open('hair_segmentation_{}x{}_full_integer_quant.tflite'.format(height, width), 'wb') as w:
# w.write(tflite_quant_model)
# print("Full Integer Quantization complete! - hair_segmentation_{}x{}_full_integer_quant.tflite".format(height, width))
# # Float16 Quantization - Input/Output=float32
# converter = tf.lite.TFLiteConverter.from_keras_model(model)
# converter.optimizations = [tf.lite.Optimize.DEFAULT]
# converter.target_spec.supported_types = [tf.float16]
# converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
# tflite_quant_model = converter.convert()
# with open('hair_segmentation_{}x{}_float16_quant.tflite'.format(height, width), 'wb') as w:
# w.write(tflite_quant_model)
# print("Float16 Quantization complete! - hair_segmentation_{}x{}_float16_quant.tflite".format(height, width))
# # EdgeTPU
# import subprocess
# result = subprocess.check_output(["edgetpu_compiler", "-s", "hair_segmentation_{}x{}_full_integer_quant.tflite".format(height, width)])
# print(result)
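# # Quick sanity check of a converted model (a minimal sketch, kept commented out like the
# # optional sections above). The tf.lite.Interpreter bundled with the tensorflow package
# # should include the Flex delegate required by the SELECT_TF_OPS kernels used here.
# interpreter = tf.lite.Interpreter(model_path='hair_segmentation_{}x{}_float32.tflite'.format(height, width))
# interpreter.allocate_tensors()
# input_details = interpreter.get_input_details()
# output_details = interpreter.get_output_details()
# dummy = np.zeros(input_details[0]['shape'], dtype=np.float32)
# interpreter.set_tensor(input_details[0]['index'], dummy)
# interpreter.invoke()
# print('output shape:', interpreter.get_tensor(output_details[0]['index']).shape)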
| 66.589189
| 137
| 0.705593
| 9,058
| 61,595
| 4.513248
| 0.055752
| 0.051222
| 0.110663
| 0.21159
| 0.780778
| 0.750569
| 0.717913
| 0.70441
| 0.700154
| 0.653458
| 0
| 0.079844
| 0.134605
| 61,595
| 924
| 138
| 66.661255
| 0.687097
| 0.094407
| 0
| 0.023529
| 0
| 0
| 0.172137
| 0.159099
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008403
| false
| 0
| 0.018487
| 0
| 0.035294
| 0.003361
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
422f4731cfceb1066f0e78f02822595481fdf794
| 159
|
py
|
Python
|
iwg_blog/thumbnail_lazy/tasks.py
|
razortheory/who-iwg-webapp
|
e2318d286cd9ab87d4d8103bc7b3072cfb99bf76
|
[
"MIT"
] | null | null | null |
iwg_blog/thumbnail_lazy/tasks.py
|
razortheory/who-iwg-webapp
|
e2318d286cd9ab87d4d8103bc7b3072cfb99bf76
|
[
"MIT"
] | null | null | null |
iwg_blog/thumbnail_lazy/tasks.py
|
razortheory/who-iwg-webapp
|
e2318d286cd9ab87d4d8103bc7b3072cfb99bf76
|
[
"MIT"
] | null | null | null |
from celery.task import task
from sorl.thumbnail import get_thumbnail
@task
def generate_thumbnail_lazy(*args, **kwargs):
get_thumbnail(*args, **kwargs)
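# A minimal usage sketch (assuming a configured Celery app; the arguments shown are
# hypothetical): the task forwards its arguments verbatim to sorl's get_thumbnail, so it
# is queued with the same signature, e.g.
#   generate_thumbnail_lazy.delay(image_file, '200x200', crop='center', quality=90)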
| 19.875
| 45
| 0.773585
| 22
| 159
| 5.409091
| 0.545455
| 0.201681
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125786
| 159
| 7
| 46
| 22.714286
| 0.856115
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
42740edfb7fe58c5a11b2a4657784a2203998b0e
| 15,022
|
py
|
Python
|
App/migrations/0008_business_gallery_test_data.py
|
avivz450/A-GBusinessesPromotions
|
0dd6e678af5a95dd0246fd1d448c099d86774263
|
[
"MIT"
] | 1
|
2021-08-18T22:23:57.000Z
|
2021-08-18T22:23:57.000Z
|
App/migrations/0008_business_gallery_test_data.py
|
avivz450/A-GBusinessesPromotions
|
0dd6e678af5a95dd0246fd1d448c099d86774263
|
[
"MIT"
] | 72
|
2021-05-22T18:04:54.000Z
|
2021-09-18T16:32:15.000Z
|
App/migrations/0008_business_gallery_test_data.py
|
avivz450/A-GBusinessesPromotions
|
0dd6e678af5a95dd0246fd1d448c099d86774263
|
[
"MIT"
] | null | null | null |
from django.db import migrations, transaction
class Migration(migrations.Migration):
dependencies = [
("App", "0007_categories_test_data"),
]
def generate_data(apps, schema_editor):
from App.models import Business_Image, Business
from django.shortcuts import get_object_or_404
business_image_test_data = [
(1, "App/images/BusinessesGallery/matt_does_fitness_1.jfif"),
(1, "App/images/BusinessesGallery/matt_does_fitness_2.jfif"),
(1, "App/images/BusinessesGallery/matt_does_fitness_3.jfif"),
(1, "App/images/BusinessesGallery/matt_does_fitness_4.jfif"),
(1, "App/images/BusinessesGallery/matt_does_fitness_5.jfif"),
(2, "App/images/BusinessesGallery/space_florentin_1.jfif"),
(2, "App/images/BusinessesGallery/space_florentin_2.jfif"),
(2, "App/images/BusinessesGallery/space_florentin_3.jfif"),
(2, "App/images/BusinessesGallery/space_florentin_4.jpg"),
(2, "App/images/BusinessesGallery/space_florentin_5.jfif"),
(3, "App/images/BusinessesGallery/lake_tlv_1.png"),
(3, "App/images/BusinessesGallery/lake_tlv_2.jpg"),
(3, "App/images/BusinessesGallery/lake_tlv_3.jpg"),
(3, "App/images/BusinessesGallery/lake_tlv_4.jfif"),
(3, "App/images/BusinessesGallery/lake_tlv_5.jfif"),
(
4,
"App/images/BusinessesGallery/vegan_business_1_1.jpg",
),
(
4,
"App/images/BusinessesGallery/vegan_business_1_2.jpg",
),
(
4,
"App/images/BusinessesGallery/vegan_business_1_3.jfif",
),
(
4,
"App/images/BusinessesGallery/vegan_business_1_4.jpg",
),
(
4,
"App/images/BusinessesGallery/vegan_business_1_5.jpg",
),
(
5,
"App/images/BusinessesGallery/vegan_business_2_1.jfif",
),
(
5,
"App/images/BusinessesGallery/vegan_business_2_2.png",
),
(
5,
"App/images/BusinessesGallery/vegan_business_2_3.jpg",
),
(
5,
"App/images/BusinessesGallery/vegan_business_2_4.jpg",
),
(
5,
"App/images/BusinessesGallery/vegan_business_2_5.jpg",
),
(
6,
"App/images/BusinessesGallery/vegan_business_3_1.jfif",
),
(
6,
"App/images/BusinessesGallery/vegan_business_3_2.png",
),
(
6,
"App/images/BusinessesGallery/vegan_business_3_3.jpeg",
),
(
6,
"App/images/BusinessesGallery/vegan_business_3_4.jpg",
),
(
6,
"App/images/BusinessesGallery/vegan_business_3_5.jfif",
),
(
7,
"App/images/BusinessesGallery/coffe_botique_1.jpg",
),
(
7,
"App/images/BusinessesGallery/coffe_botique_2.jfif",
),
(
7,
"App/images/BusinessesGallery/coffe_botique_3.jfif",
),
(
7,
"App/images/BusinessesGallery/coffe_botique_4.jpg",
),
(
7,
"App/images/BusinessesGallery/coffe_botique_5.jfif",
),
(
8,
"App/images/BusinessesGallery/ninja_1.jpg",
),
(
8,
"App/images/BusinessesGallery/ninja_2.jpg",
),
(
8,
"App/images/BusinessesGallery/ninja_3.jpg",
),
(
8,
"App/images/BusinessesGallery/ninja_4.jfif",
),
(
8,
"App/images/BusinessesGallery/ninja_5.jfif",
),
(
9,
"App/images/BusinessesGallery/butcher_1.jpg",
),
(
9,
"App/images/BusinessesGallery/butcher_2.jpeg",
),
(
9,
"App/images/BusinessesGallery/butcher_3.jpg",
),
(
9,
"App/images/BusinessesGallery/butcher_4.jfif",
),
(
9,
"App/images/BusinessesGallery/butcher_5.jpg",
),
(
10,
"App/images/BusinessesGallery/candy_shop_1.jfif",
),
(
10,
"App/images/BusinessesGallery/candy_shop_2.jfif",
),
(
10,
"App/images/BusinessesGallery/candy_shop_3.jpg",
),
(
10,
"App/images/BusinessesGallery/candy_shop_4.jpg",
),
(
10,
"App/images/BusinessesGallery/candy_shop_5.png",
),
(
11,
"App/images/BusinessesGallery/weights_shop_1.jpg",
),
(
11,
"App/images/BusinessesGallery/weights_shop_2.jfif",
),
(
11,
"App/images/BusinessesGallery/weights_shop_3.jfif",
),
(
11,
"App/images/BusinessesGallery/weights_shop_4.jpg",
),
(
11,
"App/images/BusinessesGallery/weights_shop_5.jpg",
),
(
12,
"App/images/BusinessesGallery/my_protein_1.jfif",
),
(
12,
"App/images/BusinessesGallery/my_protein_2.jfif",
),
(
12,
"App/images/BusinessesGallery/my_protein_3.jfif",
),
(
12,
"App/images/BusinessesGallery/my_protein_4.png",
),
(
12,
"App/images/BusinessesGallery/my_protein_5.jpg",
),
(
13,
"App/images/BusinessesGallery/crafting_shop_1.jfif",
),
(
13,
"App/images/BusinessesGallery/crafting_shop_2.jpg",
),
(
13,
"App/images/BusinessesGallery/crafting_shop_3.jfif",
),
(
13,
"App/images/BusinessesGallery/crafting_shop_4.jfif",
),
(
13,
"App/images/BusinessesGallery/crafting_shop_5.jfif",
),
(
14,
"App/images/BusinessesGallery/chocolate_botique_1.jpg",
),
(
14,
"App/images/BusinessesGallery/chocolate_botique_2.jfif",
),
(
14,
"App/images/BusinessesGallery/chocolate_botique_3.jpg",
),
(
14,
"App/images/BusinessesGallery/chocolate_botique_4.jpg",
),
(
14,
"App/images/BusinessesGallery/chocolate_botique_5.jpg",
),
(
15,
"App/images/BusinessesGallery/dairy_queen_1.jfif",
),
(
15,
"App/images/BusinessesGallery/dairy_queen_2.jpg",
),
(
15,
"App/images/BusinessesGallery/dairy_queen_3.jpg",
),
(
15,
"App/images/BusinessesGallery/dairy_queen_4.jpg",
),
(
15,
"App/images/BusinessesGallery/dairy_queen_5.jpg",
),
(
16,
"App/images/BusinessesGallery/the_old_fisherman_1.jpg",
),
(
16,
"App/images/BusinessesGallery/the_old_fisherman_2.jfif",
),
(
16,
"App/images/BusinessesGallery/the_old_fisherman_3.jpg",
),
(
16,
"App/images/BusinessesGallery/the_old_fisherman_4.jfif",
),
(
16,
"App/images/BusinessesGallery/the_old_fisherman_5.jpg",
),
(
17,
"App/images/BusinessesGallery/knife_master_1.jfif",
),
(
17,
"App/images/BusinessesGallery/knife_master_2.jfif",
),
(
17,
"App/images/BusinessesGallery/knife_master_3.jfif",
),
(
17,
"App/images/BusinessesGallery/knife_master_4.jfif",
),
(
17,
"App/images/BusinessesGallery/knife_master_5.jpg",
),
(
18,
"App/images/BusinessesGallery/grill_store_1.jfif",
),
(
18,
"App/images/BusinessesGallery/grill_store_2.jfif",
),
(
18,
"App/images/BusinessesGallery/grill_store_3.jfif",
),
(
18,
"App/images/BusinessesGallery/grill_store_4.jfif",
),
(
18,
"App/images/BusinessesGallery/grill_store_5.jfif",
),
(
19,
"App/images/BusinessesGallery/shoe_store_1.jpg",
),
(
19,
"App/images/BusinessesGallery/shoe_store_2.jfif",
),
(
19,
"App/images/BusinessesGallery/shoe_store_3.jpg",
),
(
19,
"App/images/BusinessesGallery/shoe_store_4.jfif",
),
(
19,
"App/images/BusinessesGallery/shoe_store_5.jfif",
),
(
20,
"App/images/BusinessesGallery/sports_wear_1.jfif",
),
(
20,
"App/images/BusinessesGallery/sports_wear_2.jfif",
),
(
20,
"App/images/BusinessesGallery/sports_wear_3.jfif",
),
(
20,
"App/images/BusinessesGallery/sports_wear_4.jfif",
),
(
20,
"App/images/BusinessesGallery/sports_wear_5.jfif",
),
(
21,
"App/images/BusinessesGallery/sports_equipment_1.jfif",
),
(
21,
"App/images/BusinessesGallery/sports_equipment_2.jfif",
),
(
21,
"App/images/BusinessesGallery/sports_equipment_3.jfif",
),
(
21,
"App/images/BusinessesGallery/sports_equipment_4.jfif",
),
(
21,
"App/images/BusinessesGallery/sports_equipment_5.jfif",
),
(
22,
"App/images/BusinessesGallery/climbing_store_1.jfif",
),
(
22,
"App/images/BusinessesGallery/climbing_store_2.jfif",
),
(
22,
"App/images/BusinessesGallery/climbing_store_3.jfif",
),
(
22,
"App/images/BusinessesGallery/climbing_store_4.jfif",
),
(
22,
"App/images/BusinessesGallery/climbing_store_5.jfif",
),
(
23,
"App/images/BusinessesGallery/diving_store_1.jfif",
),
(
23,
"App/images/BusinessesGallery/diving_store_2.jfif",
),
(
23,
"App/images/BusinessesGallery/diving_store_3.jfif",
),
(
23,
"App/images/BusinessesGallery/diving_store_4.jfif",
),
(
23,
"App/images/BusinessesGallery/diving_store_5.jfif",
),
(
24,
"App/images/BusinessesGallery/bones_restaurant_1.jfif",
),
(
24,
"App/images/BusinessesGallery/bones_restaurant_2.jfif",
),
(
24,
"App/images/BusinessesGallery/bones_restaurant_3.jfif",
),
(
24,
"App/images/BusinessesGallery/bones_restaurant_4.jfif",
),
(
24,
"App/images/BusinessesGallery/bones_restaurant_5.jfif",
),
(
25,
"App/images/BusinessesGallery/coffee_machine_1.jfif",
),
(
25,
"App/images/BusinessesGallery/coffee_machine_2.jfif",
),
(
25,
"App/images/BusinessesGallery/coffee_machine_3.jfif",
),
(
25,
"App/images/BusinessesGallery/coffee_machine_4.jfif",
),
(
25,
"App/images/BusinessesGallery/coffee_machine_5.jfif",
),
(26, "App/images/BusinessesGallery/coffee_house_1.jfif"),
(26, "App/images/BusinessesGallery/coffee_house_2.jfif"),
(26, "App/images/BusinessesGallery/coffee_house_3.jfif"),
(26, "App/images/BusinessesGallery/coffee_house_4.jfif"),
(26, "App/images/BusinessesGallery/coffee_house_5.jfif"),
]
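        # get_object_or_404 raises Http404 for a missing Business id; transaction.atomic() then rolls back every image created so far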
with transaction.atomic():
for business_id, image in business_image_test_data:
business_image = Business_Image(
business=get_object_or_404(Business, pk=business_id),
image=image,
)
business_image.save()
operations = [
migrations.RunPython(generate_data),
]
| 30.845996
| 73
| 0.429171
| 1,131
| 15,022
| 5.435897
| 0.094607
| 0.190306
| 0.549772
| 0.075634
| 0.906474
| 0.893787
| 0.791314
| 0.139558
| 0
| 0
| 0
| 0.04677
| 0.473372
| 15,022
| 486
| 74
| 30.909465
| 0.730375
| 0
| 0
| 0.458333
| 1
| 0
| 0.420583
| 0.420383
| 0
| 0
| 0
| 0
| 0
| 1
| 0.002083
| false
| 0
| 0.00625
| 0
| 0.014583
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
35fc81fd8a74cd15edc6a2c3a22e078686ec427e
| 157
|
py
|
Python
|
Chapter 7/For Loop.py
|
Jigyanshu17/Python-Ka-Saara-Gyaan
|
d3f5dbb3fef45a7a6953bf6041b0b3bf6c54ad2b
|
[
"Apache-2.0"
] | null | null | null |
Chapter 7/For Loop.py
|
Jigyanshu17/Python-Ka-Saara-Gyaan
|
d3f5dbb3fef45a7a6953bf6041b0b3bf6c54ad2b
|
[
"Apache-2.0"
] | null | null | null |
Chapter 7/For Loop.py
|
Jigyanshu17/Python-Ka-Saara-Gyaan
|
d3f5dbb3fef45a7a6953bf6041b0b3bf6c54ad2b
|
[
"Apache-2.0"
] | null | null | null |
'''
#list1 = ["Jiggu","JJ","gg","GG"]
tuple = ("Jiggu","JJ","gg","GG")
#for item in list1:
#print(item)
for item in tuple:
print(item)
'''
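# NOTE: the block above sits inside a triple-quoted string, so nothing executes; unquoted, the loop would print each tuple item.
# Prefer a name other than "tuple", which shadows the built-in type.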
| 15.7
| 34
| 0.496815
| 22
| 157
| 3.545455
| 0.409091
| 0.179487
| 0.230769
| 0.282051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016529
| 0.229299
| 157
| 9
| 35
| 17.444444
| 0.628099
| 0.88535
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c489d72695f64e56d6dab79d110f67773e7290e2
| 44
|
py
|
Python
|
extraterrestrial_life_equations/__init__.py
|
JMViJi/MachineLearningND-Upload-a-Package-to-PyPi
|
d99df1726b02fc41318b61b62513c525d430bd16
|
[
"MIT"
] | null | null | null |
extraterrestrial_life_equations/__init__.py
|
JMViJi/MachineLearningND-Upload-a-Package-to-PyPi
|
d99df1726b02fc41318b61b62513c525d430bd16
|
[
"MIT"
] | null | null | null |
extraterrestrial_life_equations/__init__.py
|
JMViJi/MachineLearningND-Upload-a-Package-to-PyPi
|
d99df1726b02fc41318b61b62513c525d430bd16
|
[
"MIT"
] | null | null | null |
from .SaraSeagarEquation import SSEquation
| 22
| 43
| 0.863636
| 4
| 44
| 9.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113636
| 44
| 1
| 44
| 44
| 0.974359
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c48ac43b968b6c012b081b88a55a29f705a4ae92
| 33
|
py
|
Python
|
pytorch_privacy/analysis/__init__.py
|
MJHutchinson/PytorchPrivacy
|
b8084914a00b2047054f79d8339609bcdfb9d026
|
[
"Apache-2.0"
] | 2
|
2020-01-06T00:54:54.000Z
|
2020-05-03T14:55:39.000Z
|
pytorch_privacy/analysis/__init__.py
|
MJHutchinson/PytorchPrivacy
|
b8084914a00b2047054f79d8339609bcdfb9d026
|
[
"Apache-2.0"
] | null | null | null |
pytorch_privacy/analysis/__init__.py
|
MJHutchinson/PytorchPrivacy
|
b8084914a00b2047054f79d8339609bcdfb9d026
|
[
"Apache-2.0"
] | 1
|
2019-10-23T00:15:19.000Z
|
2019-10-23T00:15:19.000Z
|
from .online_accountant import *
| 16.5
| 32
| 0.818182
| 4
| 33
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
67b75dd5e0b3deb4eadb195e952ad1abd1f0a815
| 22
|
py
|
Python
|
plugins/assets/__init__.py
|
qrilka/this-week-in-rust
|
f8d52595802c29c7ac950c6d6d48c0e89f3d79c3
|
[
"MIT"
] | 533
|
2020-10-02T23:11:23.000Z
|
2022-03-31T17:25:25.000Z
|
plugins/assets/__init__.py
|
qrilka/this-week-in-rust
|
f8d52595802c29c7ac950c6d6d48c0e89f3d79c3
|
[
"MIT"
] | 614
|
2015-01-09T16:36:44.000Z
|
2022-02-23T14:32:15.000Z
|
plugins/assets/__init__.py
|
qrilka/this-week-in-rust
|
f8d52595802c29c7ac950c6d6d48c0e89f3d79c3
|
[
"MIT"
] | 423
|
2020-10-09T17:09:41.000Z
|
2022-03-30T14:37:52.000Z
|
from .assets import *
| 11
| 21
| 0.727273
| 3
| 22
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
67c723a6a722d2980d1e71565023c7d2278c9985
| 8,724
|
py
|
Python
|
reviewboard/reviews/tests/test_new_review_request_view.py
|
pombredanne/reviewboard
|
15f1d7236ec7a5cb4778ebfeb8b45d13a46ac71d
|
[
"MIT"
] | null | null | null |
reviewboard/reviews/tests/test_new_review_request_view.py
|
pombredanne/reviewboard
|
15f1d7236ec7a5cb4778ebfeb8b45d13a46ac71d
|
[
"MIT"
] | null | null | null |
reviewboard/reviews/tests/test_new_review_request_view.py
|
pombredanne/reviewboard
|
15f1d7236ec7a5cb4778ebfeb8b45d13a46ac71d
|
[
"MIT"
] | null | null | null |
"""Unit tests for reviewboard.reviews.views.NewReviewRequestView."""
from django.contrib.auth.models import User
from djblets.siteconfig.models import SiteConfiguration
from djblets.testing.decorators import add_fixtures
from reviewboard.testing import TestCase
class NewReviewRequestViewTests(TestCase):
"""Unit tests for reviewboard.reviews.views.NewReviewRequestView."""
fixtures = ['test_users']
# TODO: Split this up into multiple unit tests, and do a better job of
# checking for expected results.
def test_get(self):
"""Testing NewReviewRequestView.get"""
with self.siteconfig_settings({'auth_require_sitewide_login': False},
reload_settings=False):
response = self.client.get('/r/new')
self.assertEqual(response.status_code, 301)
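            # 301: Django's APPEND_SLASH redirects /r/new to /r/new/; the 302 below is the login redirect for the anonymous client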
response = self.client.get('/r/new/')
self.assertEqual(response.status_code, 302)
self.client.login(username='grumpy', password='grumpy')
response = self.client.get('/r/new/')
self.assertEqual(response.status_code, 200)
def test_read_only_mode_for_users(self):
"""Testing NewReviewRequestView when in read-only mode for regular
users
"""
self.siteconfig = SiteConfiguration.objects.get_current()
settings = {
'site_read_only': True,
}
with self.siteconfig_settings(settings):
# Ensure user is redirected when trying to create new review
# request.
self.client.logout()
self.client.login(username='doc', password='doc')
resp = self.client.get('/r/new/')
self.assertEqual(resp.status_code, 302)
def test_read_only_mode_for_superusers(self):
"""Testing NewReviewRequestView when in read-only mode for superusers
"""
self.siteconfig = SiteConfiguration.objects.get_current()
settings = {
'site_read_only': True,
}
with self.siteconfig_settings(settings):
# Ensure admin can still access new while in read-only mode.
self.client.logout()
self.client.login(username='admin', password='admin')
resp = self.client.get('/r/new/')
self.assertEqual(resp.status_code, 200)
def test_get_context_data_with_no_repos(self):
"""Testing NewReviewRequestView.get_context_data with no repositories
"""
self.client.login(username='grumpy', password='grumpy')
response = self.client.get('/r/new/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['page_model_attrs'], {
'repositories': [
{
'filesOnly': True,
'localSitePrefix': '',
'name': '(None - File attachments only)',
'scmtoolName': '',
'supportsPostCommit': False,
},
],
})
@add_fixtures(['test_scmtools', 'test_site'])
def test_get_context_data_with_repos(self):
"""Testing NewReviewRequestView.get_context_data with repositories"""
self.client.login(username='grumpy', password='grumpy')
user = User.objects.get(username='grumpy')
# These will be shown in the repository list.
repo1 = self.create_repository(
name='Repository 1',
tool_name='Git')
repo2 = self.create_repository(
name='Repository 2',
tool_name='Subversion')
repo3 = self.create_repository(
name='Repository 3',
tool_name='Perforce',
public=False)
repo3.users.add(user)
# These won't be shown.
self.create_repository(
name='Repository 4',
tool_name='Git',
public=False)
self.create_repository(
name='Repository 5',
tool_name='Git',
with_local_site=True)
response = self.client.get('/r/new/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['page_model_attrs'], {
'repositories': [
{
'filesOnly': True,
'localSitePrefix': '',
'name': '(None - File attachments only)',
'scmtoolName': '',
'supportsPostCommit': False,
},
{
'filesOnly': False,
'id': repo1.pk,
'localSitePrefix': '',
'name': 'Repository 1',
'requiresBasedir': False,
'requiresChangeNumber': False,
'scmtoolName': 'Git',
'supportsPostCommit': False,
},
{
'filesOnly': False,
'id': repo2.pk,
'localSitePrefix': '',
'name': 'Repository 2',
'requiresBasedir': True,
'requiresChangeNumber': False,
'scmtoolName': 'Subversion',
'supportsPostCommit': True,
},
{
'filesOnly': False,
'id': repo3.pk,
'localSitePrefix': '',
'name': 'Repository 3',
'requiresBasedir': False,
'requiresChangeNumber': True,
'scmtoolName': 'Perforce',
'supportsPostCommit': False,
},
],
})
@add_fixtures(['test_scmtools', 'test_site'])
def test_get_context_data_with_repos_and_local_site(self):
"""Testing NewReviewRequestView.get_context_data with repositories
and Local Site
"""
user = User.objects.get(username='grumpy')
self.get_local_site(self.local_site_name).users.add(user)
self.client.login(username='grumpy', password='grumpy')
# These will be shown in the repository list.
repo1 = self.create_repository(
name='Repository 1',
tool_name='Git',
with_local_site=True)
repo2 = self.create_repository(
name='Repository 2',
tool_name='Subversion',
with_local_site=True)
repo3 = self.create_repository(
name='Repository 3',
tool_name='Perforce',
public=False,
with_local_site=True)
repo3.users.add(user)
# These won't be shown.
self.create_repository(
name='Repository 4',
tool_name='Git',
public=False,
with_local_site=True)
self.create_repository(
name='Repository 5',
tool_name='Git')
local_site_prefix = 's/%s/' % self.local_site_name
response = self.client.get('/%sr/new/' % local_site_prefix)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['page_model_attrs'], {
'repositories': [
{
'filesOnly': True,
'localSitePrefix': local_site_prefix,
'name': '(None - File attachments only)',
'scmtoolName': '',
'supportsPostCommit': False,
},
{
'filesOnly': False,
'id': repo1.pk,
'localSitePrefix': local_site_prefix,
'name': 'Repository 1',
'requiresBasedir': False,
'requiresChangeNumber': False,
'scmtoolName': 'Git',
'supportsPostCommit': False,
},
{
'filesOnly': False,
'id': repo2.pk,
'localSitePrefix': local_site_prefix,
'name': 'Repository 2',
'requiresBasedir': True,
'requiresChangeNumber': False,
'scmtoolName': 'Subversion',
'supportsPostCommit': True,
},
{
'filesOnly': False,
'id': repo3.pk,
'localSitePrefix': local_site_prefix,
'name': 'Repository 3',
'requiresBasedir': False,
'requiresChangeNumber': True,
'scmtoolName': 'Perforce',
'supportsPostCommit': False,
},
],
})
| 35.608163
| 77
| 0.518913
| 744
| 8,724
| 5.924731
| 0.185484
| 0.036298
| 0.045372
| 0.054446
| 0.838249
| 0.829401
| 0.788339
| 0.719374
| 0.666969
| 0.622505
| 0
| 0.009892
| 0.374255
| 8,724
| 244
| 78
| 35.754098
| 0.797582
| 0.101903
| 0
| 0.755319
| 0
| 0
| 0.186275
| 0.003483
| 0
| 0
| 0
| 0.004098
| 0.058511
| 1
| 0.031915
| false
| 0.031915
| 0.021277
| 0
| 0.06383
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
67d14dd351c817df9e5d27dd866c6cce83f0e6a4
| 118
|
py
|
Python
|
app/report/views/report_data_views.py
|
michaelscales88/mWreporting_final
|
b0399fb32fd594c2f5a20d47c2c0dceaecb6f326
|
[
"MIT"
] | 2
|
2019-06-10T21:15:03.000Z
|
2020-01-02T13:12:45.000Z
|
app/report/views/report_data_views.py
|
michaelscales88/python-reporting-app
|
b0399fb32fd594c2f5a20d47c2c0dceaecb6f326
|
[
"MIT"
] | 14
|
2018-01-18T19:07:15.000Z
|
2018-05-16T18:44:55.000Z
|
app/report/views/report_data_views.py
|
michaelscales88/mWreporting_final
|
b0399fb32fd594c2f5a20d47c2c0dceaecb6f326
|
[
"MIT"
] | null | null | null |
from app.base_view import BaseView
class CallDataView(BaseView):
pass
class EventDataView(BaseView):
pass
| 11.8
| 34
| 0.754237
| 14
| 118
| 6.285714
| 0.714286
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186441
| 118
| 9
| 35
| 13.111111
| 0.916667
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.4
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
67f47f9b4d1e717f91d6c328c12d02a1f1c3c73e
| 8,470
|
py
|
Python
|
tests/test_position.py
|
nervecell23/qstrader_c
|
8bec7e4fb6d9b326ad5c7efa136d0c30ba41d3f6
|
[
"MIT"
] | 113
|
2019-01-11T05:55:41.000Z
|
2022-03-27T23:49:47.000Z
|
tests/test_position.py
|
nervecell23/qstrader_c
|
8bec7e4fb6d9b326ad5c7efa136d0c30ba41d3f6
|
[
"MIT"
] | 7
|
2019-04-09T05:30:24.000Z
|
2020-09-09T04:52:49.000Z
|
tests/test_position.py
|
nervecell23/qstrader_c
|
8bec7e4fb6d9b326ad5c7efa136d0c30ba41d3f6
|
[
"MIT"
] | 54
|
2019-01-10T17:22:14.000Z
|
2022-03-15T23:47:43.000Z
|
import unittest
from qstrader.position import Position
from qstrader.price_parser import PriceParser
class TestRoundTripXOMPosition(unittest.TestCase):
"""
    Test a round-trip trade in Exxon Mobil where the initial
trade is a buy/long of 100 shares of XOM, at a price of
$74.78, with $1.00 commission.
"""
def setUp(self):
"""
Set up the Position object that will store the PnL.
"""
self.position = Position(
"BOT", "XOM", 100,
PriceParser.parse(74.78), PriceParser.parse(1.00),
PriceParser.parse(74.78), PriceParser.parse(74.80)
)
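        # Arguments appear to be (action, ticker, quantity, init_price, init_commission, bid, ask);
        # PriceParser stores prices as scaled integers to avoid float rounding.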
def test_calculate_round_trip(self):
"""
After the subsequent purchase, carry out two more buys/longs
and then close the position out with two additional sells/shorts.
The following prices have been tested against those calculated
via Interactive Brokers' Trader Workstation (TWS).
"""
self.position.transact_shares(
"BOT", 100, PriceParser.parse(74.63), PriceParser.parse(1.00)
)
self.position.transact_shares(
"BOT", 250, PriceParser.parse(74.620), PriceParser.parse(1.25)
)
self.position.transact_shares(
"SLD", 200, PriceParser.parse(74.58), PriceParser.parse(1.00)
)
self.position.transact_shares(
"SLD", 250, PriceParser.parse(75.26), PriceParser.parse(1.25)
)
self.position.update_market_value(
PriceParser.parse(77.75), PriceParser.parse(77.77)
)
self.assertEqual(self.position.action, "BOT")
self.assertEqual(self.position.ticker, "XOM")
self.assertEqual(self.position.quantity, 0)
self.assertEqual(self.position.buys, 450)
self.assertEqual(self.position.sells, 450)
self.assertEqual(self.position.net, 0)
self.assertEqual(
PriceParser.display(self.position.avg_bot, 5), 74.65778
)
self.assertEqual(
PriceParser.display(self.position.avg_sld, 5), 74.95778
)
self.assertEqual(PriceParser.display(self.position.total_bot), 33596.00)
self.assertEqual(PriceParser.display(self.position.total_sld), 33731.00)
self.assertEqual(PriceParser.display(self.position.net_total), 135.00)
self.assertEqual(PriceParser.display(self.position.total_commission), 5.50)
self.assertEqual(PriceParser.display(self.position.net_incl_comm), 129.50)
self.assertEqual(
PriceParser.display(self.position.avg_price, 3), 74.665
)
self.assertEqual(PriceParser.display(self.position.cost_basis), 0.00)
self.assertEqual(PriceParser.display(self.position.market_value), 0.00)
self.assertEqual(PriceParser.display(self.position.unrealised_pnl), 0.00)
self.assertEqual(PriceParser.display(self.position.realised_pnl), 129.50)
class TestRoundTripPGPosition(unittest.TestCase):
"""
    Test a round-trip trade in Procter & Gamble where the initial
trade is a sell/short of 100 shares of PG, at a price of
$77.69, with $1.00 commission.
"""
def setUp(self):
self.position = Position(
"SLD", "PG", 100,
PriceParser.parse(77.69), PriceParser.parse(1.00),
PriceParser.parse(77.68), PriceParser.parse(77.70)
)
def test_calculate_round_trip(self):
"""
After the subsequent sale, carry out two more sells/shorts
and then close the position out with two additional buys/longs.
The following prices have been tested against those calculated
via Interactive Brokers' Trader Workstation (TWS).
"""
self.position.transact_shares(
"SLD", 100, PriceParser.parse(77.68), PriceParser.parse(1.00)
)
self.position.transact_shares(
"SLD", 50, PriceParser.parse(77.70), PriceParser.parse(1.00)
)
self.position.transact_shares(
"BOT", 100, PriceParser.parse(77.77), PriceParser.parse(1.00)
)
self.position.transact_shares(
"BOT", 150, PriceParser.parse(77.73), PriceParser.parse(1.00)
)
self.position.update_market_value(
PriceParser.parse(77.72), PriceParser.parse(77.72)
)
self.assertEqual(self.position.action, "SLD")
self.assertEqual(self.position.ticker, "PG")
self.assertEqual(self.position.quantity, 0)
self.assertEqual(self.position.buys, 250)
self.assertEqual(self.position.sells, 250)
self.assertEqual(self.position.net, 0)
self.assertEqual(
PriceParser.display(self.position.avg_bot, 3), 77.746
)
self.assertEqual(
PriceParser.display(self.position.avg_sld, 3), 77.688
)
self.assertEqual(PriceParser.display(self.position.total_bot), 19436.50)
self.assertEqual(PriceParser.display(self.position.total_sld), 19422.00)
self.assertEqual(PriceParser.display(self.position.net_total), -14.50)
self.assertEqual(PriceParser.display(self.position.total_commission), 5.00)
self.assertEqual(PriceParser.display(self.position.net_incl_comm), -19.50)
self.assertEqual(
PriceParser.display(self.position.avg_price, 5), 77.67600
)
self.assertEqual(PriceParser.display(self.position.cost_basis), 0.00)
self.assertEqual(PriceParser.display(self.position.market_value), 0.00)
self.assertEqual(PriceParser.display(self.position.unrealised_pnl), 0.00)
self.assertEqual(PriceParser.display(self.position.realised_pnl), -19.50)
class TestShortPosition(unittest.TestCase):
"""
    Test a short position in Procter & Gamble where the initial
trade is a sell/short of 100 shares of PG, at a price of
$77.69, with $1.00 commission.
"""
def setUp(self):
self.position = Position(
"SLD", "PG", 100,
PriceParser.parse(77.69), PriceParser.parse(1.00),
PriceParser.parse(77.68), PriceParser.parse(77.70)
)
def test_open_short_position(self):
self.assertEqual(PriceParser.display(self.position.cost_basis), -7768.00)
self.assertEqual(PriceParser.display(self.position.market_value), -7769.00)
self.assertEqual(PriceParser.display(self.position.unrealised_pnl), -1.00)
self.assertEqual(PriceParser.display(self.position.realised_pnl), 0.00)
self.position.update_market_value(
PriceParser.parse(77.72), PriceParser.parse(77.72)
)
self.assertEqual(PriceParser.display(self.position.cost_basis), -7768.00)
self.assertEqual(PriceParser.display(self.position.market_value), -7772.00)
self.assertEqual(PriceParser.display(self.position.unrealised_pnl), -4.00)
self.assertEqual(PriceParser.display(self.position.realised_pnl), 0.00)
class TestProfitLossBuying(unittest.TestCase):
"""
Tests that the unrealised and realised pnls are
working after position initialization, every
transaction, and every price update
"""
def setUp(self):
self.position = Position(
"BOT", "XOM", 100,
PriceParser.parse(74.78), PriceParser.parse(1.00),
PriceParser.parse(74.77), PriceParser.parse(74.79)
)
def test_realised_unrealised_calcs(self):
self.assertEqual(
PriceParser.display(self.position.unrealised_pnl), -1.00
)
self.assertEqual(
PriceParser.display(self.position.realised_pnl), 0.00
)
self.position.update_market_value(
PriceParser.parse(75.77), PriceParser.parse(75.79)
)
self.assertEqual(
PriceParser.display(self.position.unrealised_pnl), 99.00
)
self.position.transact_shares(
"SLD", 100,
PriceParser.parse(75.78), PriceParser.parse(1.00)
)
self.assertEqual(
PriceParser.display(self.position.unrealised_pnl), 99.00
) # still high
self.assertEqual(
PriceParser.display(self.position.realised_pnl), 98.00
)
self.position.update_market_value(
PriceParser.parse(75.77), PriceParser.parse(75.79)
)
self.assertEqual(
PriceParser.display(self.position.unrealised_pnl), 0.00
)
if __name__ == "__main__":
unittest.main()
| 39.032258
| 83
| 0.651358
| 1,003
| 8,470
| 5.420738
| 0.161515
| 0.150083
| 0.181718
| 0.230642
| 0.839066
| 0.797131
| 0.774324
| 0.767151
| 0.654773
| 0.483539
| 0
| 0.065822
| 0.235891
| 8,470
| 216
| 84
| 39.212963
| 0.774258
| 0.131523
| 0
| 0.428571
| 0
| 0
| 0.009516
| 0
| 0
| 0
| 0
| 0
| 0.324675
| 1
| 0.051948
| false
| 0
| 0.019481
| 0
| 0.097403
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
db2093dd9086025227c94243dbc8a4a8f74ccf61
| 77
|
py
|
Python
|
3-controlling-the-flow/conversation.py
|
elbeg/introduction-to-python
|
c8a88b1c83c573f623b81fbdb4324aefa9bfbb50
|
[
"BSD-2-Clause"
] | 5
|
2015-09-22T19:38:06.000Z
|
2017-04-27T13:14:00.000Z
|
3-controlling-the-flow/conversation.py
|
elbeg/introduction-to-python
|
c8a88b1c83c573f623b81fbdb4324aefa9bfbb50
|
[
"BSD-2-Clause"
] | 3
|
2015-12-14T16:27:54.000Z
|
2018-03-08T16:28:30.000Z
|
3-controlling-the-flow/conversation.py
|
elbeg/introduction-to-python
|
c8a88b1c83c573f623b81fbdb4324aefa9bfbb50
|
[
"BSD-2-Clause"
] | 11
|
2015-09-30T15:24:00.000Z
|
2018-07-12T15:15:44.000Z
|
def say_hello():
print('Hello')
def say_goodbye():
print('Goodbye')
| 12.833333
| 19
| 0.623377
| 10
| 77
| 4.6
| 0.5
| 0.26087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.194805
| 77
| 6
| 20
| 12.833333
| 0.741935
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
c00ada2afbd7bab91f8ada9d5f13941c8710d354
| 10,993
|
py
|
Python
|
bounca/webapp/forms.py
|
warthog9/bounca
|
f83a372fcfa6e9874c81c785fd0ebdb49842eba3
|
[
"Apache-2.0"
] | null | null | null |
bounca/webapp/forms.py
|
warthog9/bounca
|
f83a372fcfa6e9874c81c785fd0ebdb49842eba3
|
[
"Apache-2.0"
] | null | null | null |
bounca/webapp/forms.py
|
warthog9/bounca
|
f83a372fcfa6e9874c81c785fd0ebdb49842eba3
|
[
"Apache-2.0"
] | null | null | null |
"""Web app forms"""
from django import forms
from django.utils import timezone
from djng.forms import NgFormValidationMixin, NgModelForm, NgModelFormMixin
from djng.styling.bootstrap3.forms import Bootstrap3FormMixin
from ..x509_pki.forms import CertificateCRLForm as CertificateCRLFormX509
from ..x509_pki.forms import CertificateForm as CertificateFormX509
from ..x509_pki.forms import CertificateRevokeForm as CertificateRevokeFormX509
from ..x509_pki.forms import DistinguishedNameForm
from ..x509_pki.types import CertificateTypes
class AddDistinguishedNameRootCAForm(
NgModelFormMixin,
NgFormValidationMixin,
Bootstrap3FormMixin,
NgModelForm,
DistinguishedNameForm):
scope_prefix = 'cert_data.dn'
form_name = 'cert_form'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['subjectAltNames'].widget = forms.HiddenInput()
self.fields['commonName'].help_text = \
'The common name of your certification authority.' + \
'This field is used to identify your CA in the chain'
class AddRootCAForm(
NgModelFormMixin,
NgFormValidationMixin,
Bootstrap3FormMixin,
NgModelForm,
CertificateFormX509):
scope_prefix = 'cert_data'
form_name = 'cert_form'
def clean_parent(self):
return None
def clean_type(self):
return CertificateTypes.ROOT
def __init__(self, *args, **kwargs):
kwargs.update(auto_id=False, scope_prefix=self.scope_prefix)
super().__init__(*args, **kwargs)
self.fields.pop('dn')
self.initial['parent'] = None
self.initial['type'] = CertificateTypes.ROOT
        self.initial['expires_at'] = timezone.now() + timezone.timedelta(weeks=1040)
self.fields['expires_at'].help_text = \
'Expiration date of the root certificate, ' + \
'typically 20 years. (format: yyyy-mm-dd)'
self.fields['parent'].widget = forms.HiddenInput()
self.fields['type'].widget = forms.HiddenInput()
self.fields['passphrase_in'].widget = forms.HiddenInput()
if 'scope_prefix' in kwargs:
kwargs.pop('scope_prefix')
if 'prefix' in kwargs:
kwargs.pop('prefix')
if 'initial' in kwargs and 'dn' in kwargs['initial']:
initial = kwargs.pop('initial')
kwargs['initial'] = initial['dn']
self.dn = AddDistinguishedNameRootCAForm(
scope_prefix='cert_data.dn', **kwargs)
def is_valid(self):
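        # surface errors from the nested distinguished-name sub-form on this form before combining validity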
if not self.dn.is_valid():
self.errors.update(self.dn.errors)
return super().is_valid() and self.dn.is_valid()
class AddDistinguishedNameIntermediateCAForm(
NgModelFormMixin,
NgFormValidationMixin,
Bootstrap3FormMixin,
NgModelForm,
DistinguishedNameForm):
scope_prefix = 'cert_data.dn'
form_name = 'cert_form'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['subjectAltNames'].widget = forms.HiddenInput()
self.fields['commonName'].help_text = \
'The common name of your intermediate certification authority. ' + \
'This field is used to identify your intermediate CA in the chain'
self.fields['countryName'].widget.attrs['disabled'] = 'disabled'
self.fields['stateOrProvinceName'].widget.attrs['readonly'] = True
self.fields['organizationName'].widget.attrs['readonly'] = True
self.fields['localityName'].widget.attrs['readonly'] = True
class AddIntermediateCAForm(
NgModelFormMixin,
NgFormValidationMixin,
Bootstrap3FormMixin,
NgModelForm,
CertificateFormX509):
scope_prefix = 'cert_data'
form_name = 'cert_form'
def clean_parent(self):
return None
def clean_type(self):
return CertificateTypes.INTERMEDIATE
def __init__(self, *args, **kwargs):
kwargs.update(auto_id=False, scope_prefix=self.scope_prefix)
super().__init__(*args, **kwargs)
self.fields.pop('dn')
self.initial['type'] = CertificateTypes.INTERMEDIATE
        self.initial['expires_at'] = timezone.now() + timezone.timedelta(weeks=520)
self.fields['expires_at'].help_text = \
'Expiration date of the intermediate certificate, ' + \
'typically 10 years. (format: yyyy-mm-dd)'
self.fields['parent'].widget = forms.HiddenInput()
self.fields['type'].widget = forms.HiddenInput()
self.fields['crl_distribution_url'].widget = forms.HiddenInput()
self.fields['ocsp_distribution_host'].widget = forms.HiddenInput()
if 'scope_prefix' in kwargs:
kwargs.pop('scope_prefix')
if 'prefix' in kwargs:
kwargs.pop('prefix')
if 'initial' in kwargs and 'dn' in kwargs['initial']:
initial = kwargs.pop('initial')
kwargs['initial'] = initial['dn']
self.dn = AddDistinguishedNameIntermediateCAForm(
scope_prefix='cert_data.dn', **kwargs)
def is_valid(self):
if not self.dn.is_valid():
self.errors.update(self.dn.errors)
return super().is_valid() and self.dn.is_valid()
class AddDistinguishedNameServerCertificateForm(
NgModelFormMixin,
NgFormValidationMixin,
Bootstrap3FormMixin,
NgModelForm,
DistinguishedNameForm):
scope_prefix = 'cert_data.dn'
form_name = 'cert_form'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['commonName'].help_text = 'The fully qualified domain name (FQDN) of your server. ' +\
            'This must exactly match the URL (or a matching wildcard), or a name mismatch error will occur.'
class AddServerCertificateForm(
NgModelFormMixin,
NgFormValidationMixin,
Bootstrap3FormMixin,
NgModelForm,
CertificateFormX509):
scope_prefix = 'cert_data'
form_name = 'cert_form'
def clean_parent(self):
return None
def clean_type(self):
return CertificateTypes.SERVER_CERT
def __init__(self, *args, **kwargs):
kwargs.update(auto_id=False, scope_prefix=self.scope_prefix)
super().__init__(*args, **kwargs)
self.fields.pop('dn')
self.initial['type'] = CertificateTypes.SERVER_CERT
self.initial['expires_at'] = timezone.now() + \
timezone.timedelta(weeks=52)
self.initial['passphrase_out'] = ""
self.initial['passphrase_out_confirmation'] = ""
self.fields['expires_at'].help_text = \
'Expiration date of the server certificate, ' + \
'typically 1 year. (format: yyyy-mm-dd)'
self.fields['parent'].widget = forms.HiddenInput()
self.fields['type'].widget = forms.HiddenInput()
self.fields['crl_distribution_url'].widget = forms.HiddenInput()
self.fields['ocsp_distribution_host'].widget = forms.HiddenInput()
self.fields['passphrase_out'].required = False
self.fields['passphrase_out_confirmation'].required = False
if 'scope_prefix' in kwargs:
kwargs.pop('scope_prefix')
if 'prefix' in kwargs:
kwargs.pop('prefix')
if 'initial' in kwargs and 'dn' in kwargs['initial']:
initial = kwargs.pop('initial')
kwargs['initial'] = initial['dn']
self.dn = AddDistinguishedNameServerCertificateForm(
scope_prefix='cert_data.dn', **kwargs)
def is_valid(self):
if not self.dn.is_valid():
self.errors.update(self.dn.errors)
return super().is_valid() and self.dn.is_valid()
class AddDistinguishedNameClientCertificateForm(
NgModelFormMixin,
NgFormValidationMixin,
Bootstrap3FormMixin,
NgModelForm,
DistinguishedNameForm):
scope_prefix = 'cert_data.dn'
form_name = 'cert_form'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        self.fields['commonName'].help_text = \
            'The account name of the client, for example username or email.'
class AddClientCertificateForm(
NgModelFormMixin,
NgFormValidationMixin,
Bootstrap3FormMixin,
NgModelForm,
CertificateFormX509):
scope_prefix = 'cert_data'
form_name = 'cert_form'
def clean_parent(self):
return None
def clean_type(self):
return CertificateTypes.CLIENT_CERT
def __init__(self, *args, **kwargs):
kwargs.update(auto_id=False, scope_prefix=self.scope_prefix)
super().__init__(*args, **kwargs)
self.fields.pop('dn')
self.initial['type'] = CertificateTypes.CLIENT_CERT
self.initial['expires_at'] = timezone.now() + \
timezone.timedelta(weeks=52)
self.initial['passphrase_out'] = ""
self.initial['passphrase_out_confirmation'] = ""
self.fields['expires_at'].help_text = \
'Expiration date of the client certificate, ' + \
'typically 1 year. (format: yyyy-mm-dd)'
self.fields['parent'].widget = forms.HiddenInput()
self.fields['type'].widget = forms.HiddenInput()
self.fields['crl_distribution_url'].widget = forms.HiddenInput()
self.fields['ocsp_distribution_host'].widget = forms.HiddenInput()
self.fields['passphrase_out'].required = False
self.fields['passphrase_out_confirmation'].required = False
if 'scope_prefix' in kwargs:
kwargs.pop('scope_prefix')
if 'prefix' in kwargs:
kwargs.pop('prefix')
if 'initial' in kwargs and 'dn' in kwargs['initial']:
initial = kwargs.pop('initial')
kwargs['initial'] = initial['dn']
self.dn = AddDistinguishedNameClientCertificateForm(
scope_prefix='cert_data.dn', **kwargs)
def is_valid(self):
if not self.dn.is_valid():
self.errors.update(self.dn.errors)
return super().is_valid() and self.dn.is_valid()
class CertificateRevokeForm(
NgModelFormMixin,
NgFormValidationMixin,
Bootstrap3FormMixin,
NgModelForm,
CertificateRevokeFormX509):
scope_prefix = 'cert_data'
form_name = 'cert_form'
def clean_parent(self):
return None
def __init__(self, *args, **kwargs):
kwargs.update(auto_id=False, scope_prefix=self.scope_prefix)
super().__init__(*args, **kwargs)
class CertificateCRLForm(
NgModelFormMixin,
NgFormValidationMixin,
Bootstrap3FormMixin,
NgModelForm,
CertificateCRLFormX509):
scope_prefix = 'cert_data'
form_name = 'cert_form'
def clean_parent(self):
return None
def __init__(self, *args, **kwargs):
kwargs.update(auto_id=False, scope_prefix=self.scope_prefix)
super().__init__(*args, **kwargs)
| 34.898413
| 106
| 0.644592
| 1,139
| 10,993
| 6.019315
| 0.130817
| 0.053967
| 0.054551
| 0.056884
| 0.779755
| 0.766919
| 0.755834
| 0.755834
| 0.755834
| 0.725496
| 0
| 0.008525
| 0.242427
| 10,993
| 314
| 107
| 35.009554
| 0.814721
| 0.001183
| 0
| 0.784314
| 0
| 0
| 0.172408
| 0.015856
| 0
| 0
| 0
| 0
| 0
| 1
| 0.094118
| false
| 0.035294
| 0.035294
| 0.039216
| 0.301961
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fbfc99a1639d2ca94aa25b7ae2773ef6bd2fff9d
| 62
|
py
|
Python
|
auto_pilot/common/param.py
|
farrellsc/zAutoPilot
|
652d93690237dcb21c3cbdbdad95f917b7fec6e3
|
[
"MIT"
] | 1
|
2018-03-05T08:27:58.000Z
|
2018-03-05T08:27:58.000Z
|
auto_pilot/common/param.py
|
farrellsc/zAutoPilot
|
652d93690237dcb21c3cbdbdad95f917b7fec6e3
|
[
"MIT"
] | null | null | null |
auto_pilot/common/param.py
|
farrellsc/zAutoPilot
|
652d93690237dcb21c3cbdbdad95f917b7fec6e3
|
[
"MIT"
] | null | null | null |
from overrides import overrides
class Param(dict):
pass
| 10.333333
| 31
| 0.741935
| 8
| 62
| 5.75
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.209677
| 62
| 5
| 32
| 12.4
| 0.938776
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
2205246880d02595da3d812a9d7b37f22c40a59c
| 18,999
|
py
|
Python
|
tests/hybrid/test_dispatch.py
|
Matthew-Boyd/HOPP
|
de4e40efda5bfb28361dc3d9d68d13aa465dcc52
|
[
"BSD-3-Clause"
] | null | null | null |
tests/hybrid/test_dispatch.py
|
Matthew-Boyd/HOPP
|
de4e40efda5bfb28361dc3d9d68d13aa465dcc52
|
[
"BSD-3-Clause"
] | null | null | null |
tests/hybrid/test_dispatch.py
|
Matthew-Boyd/HOPP
|
de4e40efda5bfb28361dc3d9d68d13aa465dcc52
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
import pyomo.environ as pyomo
from pyomo.environ import units as u
from pyomo.opt import TerminationCondition
from pyomo.util.check_units import assert_units_consistent
from hybrid.sites import SiteInfo, flatirons_site
from hybrid.wind_source import WindPlant
from hybrid.pv_source import PVPlant
from hybrid.battery import Battery
from hybrid.hybrid_simulation import HybridSimulation
from hybrid.dispatch import *
from hybrid.dispatch.hybrid_dispatch_builder_solver import HybridDispatchBuilderSolver
@pytest.fixture
def site():
return SiteInfo(flatirons_site)
technologies = {'pv': {
'system_capacity_kw': 50 * 1000,
},
'wind': {
'num_turbines': 25,
'turbine_rating_kw': 2000
},
'battery': {
'system_capacity_kwh': 200 * 1000,
'system_capacity_kw': 50 * 1000
},
'grid': 50}
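# Shared sizing for the dispatch tests: 50 MW PV, 25 x 2 MW turbines, 50 MW / 200 MWh battery, 50 MW grid limit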
def test_solar_dispatch(site):
expected_objective = 27748.614
dispatch_n_look_ahead = 48
solar = PVPlant(site, technologies['pv'])
model = pyomo.ConcreteModel(name='solar_only')
model.forecast_horizon = pyomo.Set(initialize=range(dispatch_n_look_ahead))
solar._dispatch = PvDispatch(model,
model.forecast_horizon,
solar._system_model,
solar._financial_model)
# Manually creating objective for testing
model.price = pyomo.Param(model.forecast_horizon,
within=pyomo.Reals,
default=60.0, # assuming flat PPA of $60/MWh
mutable=True,
units=u.USD / u.MWh)
def create_test_objective_rule(m):
return sum((m.pv[i].time_duration * m.price[i] * m.pv[i].generation
- m.pv[i].generation_cost) for i in m.pv.index_set())
model.test_objective = pyomo.Objective(
rule=create_test_objective_rule,
sense=pyomo.maximize)
assert_units_consistent(model)
solar.dispatch.initialize_dispatch_model_parameters()
solar.simulate(1)
solar.dispatch.update_time_series_dispatch_model_parameters(0)
print("Total available generation: {}".format(sum(solar.dispatch.available_generation)))
results = HybridDispatchBuilderSolver.glpk_solve_call(model)
assert results.solver.termination_condition == TerminationCondition.optimal
assert pyomo.value(model.test_objective) == pytest.approx(expected_objective, 1e-5)
available_resource = solar.generation_profile[0:dispatch_n_look_ahead]
dispatch_generation = solar.dispatch.generation
for t in model.forecast_horizon:
assert dispatch_generation[t] * 1e3 == pytest.approx(available_resource[t], 1e-3)
def test_wind_dispatch(site):
expected_objective = 21011.222
dispatch_n_look_ahead = 48
wind = WindPlant(site, technologies['wind'])
model = pyomo.ConcreteModel(name='wind_only')
model.forecast_horizon = pyomo.Set(initialize=range(dispatch_n_look_ahead))
wind._dispatch = WindDispatch(model,
model.forecast_horizon,
wind._system_model,
wind._financial_model)
# Manually creating objective for testing
model.price = pyomo.Param(model.forecast_horizon,
within=pyomo.Reals,
default=60.0, # assuming flat PPA of $60/MWh
mutable=True,
units=u.USD / u.MWh)
def create_test_objective_rule(m):
return sum((m.wind[t].time_duration * m.price[t] * m.wind[t].generation
- m.wind[t].generation_cost) for t in m.wind.index_set())
model.test_objective = pyomo.Objective(
rule=create_test_objective_rule,
sense=pyomo.maximize)
assert_units_consistent(model)
wind.dispatch.initialize_dispatch_model_parameters()
wind.simulate(1)
wind.dispatch.update_time_series_dispatch_model_parameters(0)
results = HybridDispatchBuilderSolver.glpk_solve_call(model)
assert results.solver.termination_condition == TerminationCondition.optimal
assert pyomo.value(model.test_objective) == pytest.approx(expected_objective, 1e-5)
available_resource = wind.generation_profile[0:dispatch_n_look_ahead]
dispatch_generation = wind.dispatch.generation
for t in model.forecast_horizon:
assert dispatch_generation[t] * 1e3 == pytest.approx(available_resource[t], 1e-3)
def test_simple_battery_dispatch(site):
expected_objective = 31299.2696
dispatch_n_look_ahead = 48
battery = Battery(site, technologies['battery'])
model = pyomo.ConcreteModel(name='battery_only')
model.forecast_horizon = pyomo.Set(initialize=range(dispatch_n_look_ahead))
battery._dispatch = SimpleBatteryDispatch(model,
model.forecast_horizon,
battery._system_model,
battery._financial_model,
include_lifecycle_count=False)
# Manually creating objective for testing
prices = {}
block_length = 8
index = 0
for i in range(int(dispatch_n_look_ahead / block_length)):
for j in range(block_length):
if i % 2 == 0:
prices[index] = 30.0 # assuming low prices
else:
prices[index] = 100.0 # assuming high prices
index += 1
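    # prices now alternate in 8-period blocks (30 vs. 100 $/MWh) so the optimizer charges cheap and discharges dear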
model.price = pyomo.Param(model.forecast_horizon,
within=pyomo.Reals,
initialize=prices,
mutable=True,
units=u.USD / u.MWh)
def create_test_objective_rule(m):
return sum((m.battery[t].time_duration * m.price[t] * (m.battery[t].discharge_power - m.battery[t].charge_power)
- m.battery[t].discharge_cost - m.battery[t].charge_cost) for t in m.battery.index_set())
model.test_objective = pyomo.Objective(
rule=create_test_objective_rule,
sense=pyomo.maximize)
battery.dispatch.initialize_dispatch_model_parameters()
battery.dispatch.update_time_series_dispatch_model_parameters(0)
model.initial_SOC = battery.dispatch.minimum_soc # Set initial SOC to minimum
assert_units_consistent(model)
results = HybridDispatchBuilderSolver.glpk_solve_call(model)
assert results.solver.termination_condition == TerminationCondition.optimal
assert pyomo.value(model.test_objective) == pytest.approx(expected_objective, 1e-5)
assert sum(battery.dispatch.charge_power) > 0.0
assert sum(battery.dispatch.discharge_power) > 0.0
assert (sum(battery.dispatch.charge_power) * battery.dispatch.round_trip_efficiency / 100.0
== pytest.approx(sum(battery.dispatch.discharge_power)))
battery._simulate_with_dispatch(48, 0)
for i in range(24):
dispatch_power = battery.dispatch.power[i] * 1e3
assert battery.Outputs.P[i] == pytest.approx(dispatch_power, 1e-3 * abs(dispatch_power))
def test_simple_battery_dispatch_lifecycle_count(site):
expected_objective = 26620.7096
expected_lifecycles = 2.339
dispatch_n_look_ahead = 48
battery = Battery(site, technologies['battery'])
model = pyomo.ConcreteModel(name='battery_only')
model.forecast_horizon = pyomo.Set(initialize=range(dispatch_n_look_ahead))
battery._dispatch = SimpleBatteryDispatch(model,
model.forecast_horizon,
battery._system_model,
battery._financial_model,
include_lifecycle_count=True)
# Manually creating objective for testing
prices = {}
block_length = 8
index = 0
for i in range(int(dispatch_n_look_ahead / block_length)):
for j in range(block_length):
if i % 2 == 0:
prices[index] = 30.0 # assuming low prices
else:
prices[index] = 100.0 # assuming high prices
index += 1
model.price = pyomo.Param(model.forecast_horizon,
within=pyomo.Reals,
initialize=prices,
mutable=True,
units=u.USD / u.MWh)
def create_test_objective_rule(m):
return (sum((m.battery[t].time_duration
* m.price[t]
* (m.battery[t].discharge_power - m.battery[t].charge_power)
- m.battery[t].discharge_cost
- m.battery[t].charge_cost) for t in m.battery.index_set())
- m.lifecycle_cost * m.lifecycles)
model.test_objective = pyomo.Objective(
rule=create_test_objective_rule,
sense=pyomo.maximize)
battery.dispatch.initialize_dispatch_model_parameters()
battery.dispatch.update_time_series_dispatch_model_parameters(0)
model.initial_SOC = battery.dispatch.minimum_soc # Set initial SOC to minimum
assert_units_consistent(model)
results = HybridDispatchBuilderSolver.glpk_solve_call(model)
assert results.solver.termination_condition == TerminationCondition.optimal
assert pyomo.value(model.test_objective) == pytest.approx(expected_objective, 1e-5)
assert pyomo.value(battery.dispatch.lifecycles) == pytest.approx(expected_lifecycles, 1e-3)
assert sum(battery.dispatch.charge_power) > 0.0
assert sum(battery.dispatch.discharge_power) > 0.0
assert (sum(battery.dispatch.charge_power) * battery.dispatch.round_trip_efficiency / 100.0
== pytest.approx(sum(battery.dispatch.discharge_power)))
def test_detailed_battery_dispatch(site):
expected_objective = 35221.192
expected_lifecycles = 0.292799
# TODO: McCormick error is large enough to make objective 50% higher than
# the value of simple battery dispatch objective
dispatch_n_look_ahead = 48
battery = Battery(site, technologies['battery'])
model = pyomo.ConcreteModel(name='detailed_battery_only')
model.forecast_horizon = pyomo.Set(initialize=range(dispatch_n_look_ahead))
battery._dispatch = ConvexLinearVoltageBatteryDispatch(model,
model.forecast_horizon,
battery._system_model,
battery._financial_model)
# Manually creating objective for testing
prices = {}
block_length = 8
index = 0
for i in range(int(dispatch_n_look_ahead / block_length)):
for j in range(block_length):
if i % 2 == 0:
prices[index] = 30.0 # assuming low prices
else:
prices[index] = 100.0 # assuming high prices
index += 1
model.price = pyomo.Param(model.forecast_horizon,
within=pyomo.Reals,
initialize=prices,
mutable=True,
units=u.USD / u.MWh)
def create_test_objective_rule(m):
return (sum((m.convex_LV_battery[t].time_duration
* m.price[t]
* (m.convex_LV_battery[t].discharge_power - m.convex_LV_battery[t].charge_power)
- m.convex_LV_battery[t].discharge_cost
- m.convex_LV_battery[t].charge_cost) for t in m.convex_LV_battery.index_set())
- m.lifecycle_cost * m.lifecycles)
model.test_objective = pyomo.Objective(
rule=create_test_objective_rule,
sense=pyomo.maximize)
battery.dispatch.initialize_dispatch_model_parameters()
battery.dispatch.update_time_series_dispatch_model_parameters(0)
model.initial_SOC = battery.dispatch.minimum_soc # Set initial SOC to minimum
assert_units_consistent(model)
results = HybridDispatchBuilderSolver.glpk_solve_call(model)
# TODO: trying to solve the nonlinear problem but solver doesn't work...
# Need to try another nonlinear solver
# results = HybridDispatchBuilderSolver.mindtpy_solve_call(model)
assert results.solver.termination_condition == TerminationCondition.optimal
assert pyomo.value(model.test_objective) == pytest.approx(expected_objective, 1e-3)
assert pyomo.value(battery.dispatch.lifecycles) == pytest.approx(expected_lifecycles, 1e-3)
assert sum(battery.dispatch.charge_power) > 0.0
assert sum(battery.dispatch.discharge_power) > 0.0
assert sum(battery.dispatch.charge_current) > sum(battery.dispatch.discharge_current)
# assert sum(battery.dispatch.charge_power) > sum(battery.dispatch.discharge_power)
# TODO: model cheats too much where last test fails
def test_hybrid_dispatch(site):
expected_objective = 42073.267
hybrid_plant = HybridSimulation(technologies, site, technologies['grid'] * 1000)
hybrid_plant.pv.simulate(1)
hybrid_plant.wind.simulate(1)
hybrid_plant.dispatch_builder.dispatch.update_time_series_dispatch_model_parameters(0)
hybrid_plant.battery.dispatch.initial_SOC = hybrid_plant.battery.dispatch.minimum_soc # Set to min SOC
results = HybridDispatchBuilderSolver.glpk_solve_call(hybrid_plant.dispatch_builder.pyomo_model)
assert results.solver.termination_condition == TerminationCondition.optimal
gross_profit_objective = pyomo.value(hybrid_plant.dispatch_builder.dispatch.objective_value)
assert gross_profit_objective == pytest.approx(expected_objective, 1e-3)
n_look_ahead_periods = hybrid_plant.dispatch_builder.options.n_look_ahead_periods
available_resource = hybrid_plant.pv.generation_profile[0:n_look_ahead_periods]
dispatch_generation = hybrid_plant.pv.dispatch.generation
for t in hybrid_plant.dispatch_builder.pyomo_model.forecast_horizon:
assert dispatch_generation[t] * 1e3 == pytest.approx(available_resource[t], 1e-3)
available_resource = hybrid_plant.wind.generation_profile[0:n_look_ahead_periods]
dispatch_generation = hybrid_plant.wind.dispatch.generation
for t in hybrid_plant.dispatch_builder.pyomo_model.forecast_horizon:
assert dispatch_generation[t] * 1e3 == pytest.approx(available_resource[t], 1e-3)
assert sum(hybrid_plant.battery.dispatch.charge_power) > 0.0
assert sum(hybrid_plant.battery.dispatch.discharge_power) > 0.0
assert (sum(hybrid_plant.battery.dispatch.charge_power)
* hybrid_plant.battery.dispatch.round_trip_efficiency / 100.0
== pytest.approx(sum(hybrid_plant.battery.dispatch.discharge_power)))
transmission_limit = hybrid_plant.grid.value('grid_interconnection_limit_kwac')
system_generation = hybrid_plant.grid.dispatch.system_generation
for t in hybrid_plant.dispatch_builder.pyomo_model.forecast_horizon:
assert system_generation[t] * 1e3 <= transmission_limit
assert system_generation[t] * 1e3 >= 0.0
def test_hybrid_dispatch_heuristic(site):
dispatch_options = {'battery_dispatch': 'heuristic'}
hybrid_plant = HybridSimulation(technologies, site, technologies['grid'] * 1000,
dispatch_options=dispatch_options)
fixed_dispatch = [0.0]*6
fixed_dispatch.extend([-1.0]*6)
fixed_dispatch.extend([1.0]*6)
fixed_dispatch.extend([0.0]*6)
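    # 24-period profile: idle, charge, discharge, idle (6 periods each); sign convention assumed here: negative fractions charge, positive discharge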
hybrid_plant.battery.dispatch.user_fixed_dispatch = fixed_dispatch
hybrid_plant.simulate(1)
assert sum(hybrid_plant.battery.dispatch.charge_power) > 0.0
assert sum(hybrid_plant.battery.dispatch.discharge_power) > 0.0
def test_hybrid_dispatch_one_cycle_heuristic(site):
dispatch_options = {'battery_dispatch': 'one_cycle_heuristic'}
hybrid_plant = HybridSimulation(technologies, site, technologies['grid'] * 1000,
dispatch_options=dispatch_options)
hybrid_plant.simulate(1)
assert sum(hybrid_plant.battery.Outputs.P) < 0.0
def test_hybrid_solar_battery_dispatch(site):
expected_objective = 37394.8194 # 35733.817341
solar_battery_technologies = {k: technologies[k] for k in ('pv', 'battery', 'grid')}
hybrid_plant = HybridSimulation(solar_battery_technologies, site, technologies['grid'] * 1000)
hybrid_plant.pv.simulate(1)
hybrid_plant.dispatch_builder.dispatch.update_time_series_dispatch_model_parameters(0)
hybrid_plant.battery.dispatch.initial_SOC = hybrid_plant.battery.dispatch.minimum_soc # Set to min SOC
n_look_ahead_periods = hybrid_plant.dispatch_builder.options.n_look_ahead_periods
# This was done because the default peak prices coincide with solar production...
available_resource = hybrid_plant.pv.generation_profile[0:n_look_ahead_periods]
prices = [0.] * len(available_resource)
for t in hybrid_plant.dispatch_builder.pyomo_model.forecast_horizon:
if available_resource[t] > 0.0:
prices[t] = 30.0
else:
prices[t] = 110.0
hybrid_plant.grid.dispatch.electricity_sell_price = prices
hybrid_plant.grid.dispatch.electricity_purchase_price = prices
results = HybridDispatchBuilderSolver.glpk_solve_call(hybrid_plant.dispatch_builder.pyomo_model)
assert results.solver.termination_condition == TerminationCondition.optimal
gross_profit_objective = pyomo.value(hybrid_plant.dispatch_builder.dispatch.objective_value)
assert gross_profit_objective == pytest.approx(expected_objective, 1e-3)
available_resource = hybrid_plant.pv.generation_profile[0:n_look_ahead_periods]
dispatch_generation = hybrid_plant.pv.dispatch.generation
for t in hybrid_plant.dispatch_builder.pyomo_model.forecast_horizon:
assert dispatch_generation[t] * 1e3 == pytest.approx(available_resource[t], 1e-3)
assert sum(hybrid_plant.battery.dispatch.charge_power) > 0.0
assert sum(hybrid_plant.battery.dispatch.discharge_power) > 0.0
assert (sum(hybrid_plant.battery.dispatch.charge_power)
* hybrid_plant.battery.dispatch.round_trip_efficiency / 100.0
== pytest.approx(sum(hybrid_plant.battery.dispatch.discharge_power)))
transmission_limit = hybrid_plant.grid.value('grid_interconnection_limit_kwac')
system_generation = hybrid_plant.grid.dispatch.system_generation
for t in hybrid_plant.dispatch_builder.pyomo_model.forecast_horizon:
assert system_generation[t] * 1e3 <= transmission_limit
assert system_generation[t] * 1e3 >= 0.0
def test_hybrid_dispatch_financials(site):
hybrid_plant = HybridSimulation(technologies, site, technologies['grid'] * 1000)
hybrid_plant.simulate(1)
assert sum(hybrid_plant.battery.Outputs.P) < 0.0
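A minimal sketch of the fixed-dispatch convention that test_hybrid_dispatch_heuristic above appears to rely on (an assumption about the convention, not code from the file): each entry is read as a per-period fraction of rated battery power, negative for charging and positive for discharging.

# Hypothetical illustration: six idle hours, six hours charging at full power,
# six hours discharging, six idle hours -- 24 hourly look-ahead periods in total.
fixed_dispatch = [0.0] * 6 + [-1.0] * 6 + [1.0] * 6 + [0.0] * 6
assert len(fixed_dispatch) == 24
assert all(-1.0 <= f <= 1.0 for f in fixed_dispatch)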
| 43.081633
| 120
| 0.679141
| 2,226
| 18,999
| 5.537287
| 0.108266
| 0.050868
| 0.01866
| 0.035859
| 0.818433
| 0.781438
| 0.759452
| 0.757667
| 0.745578
| 0.735194
| 0
| 0.022868
| 0.235855
| 18,999
| 441
| 121
| 43.081633
| 0.826147
| 0.053582
| 0
| 0.658385
| 0
| 0
| 0.020998
| 0.004623
| 0
| 0
| 0
| 0.002268
| 0.158385
| 1
| 0.049689
| false
| 0
| 0.037267
| 0.018634
| 0.10559
| 0.003106
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
227a0f9ba3b73a9e9d9a714e6c93bb09e773a439
| 168
|
py
|
Python
|
learning_object/collections/resources/__init__.py
|
dsvalenciah/ROAp
|
24cbff0e719c5009ec1f1e7190924d4d9297e992
|
[
"MIT"
] | 4
|
2018-04-23T00:04:01.000Z
|
2018-10-28T22:56:51.000Z
|
learning_object/collections/resources/__init__.py
|
dsvalenciah/ROAp
|
24cbff0e719c5009ec1f1e7190924d4d9297e992
|
[
"MIT"
] | 23
|
2017-12-22T08:27:35.000Z
|
2021-12-13T19:57:35.000Z
|
learning_object/collections/resources/__init__.py
|
dsvalenciah/ROAp
|
24cbff0e719c5009ec1f1e7190924d4d9297e992
|
[
"MIT"
] | 1
|
2020-06-03T02:07:26.000Z
|
2020-06-03T02:07:26.000Z
|
from .lo_collection_collection import LOCollectionCollection
from .lo_collection import LOCollection
from .lo_sub_collection_collection import LOSubCollectionCollection
| 56
| 67
| 0.916667
| 18
| 168
| 8.222222
| 0.444444
| 0.121622
| 0.216216
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065476
| 168
| 3
| 67
| 56
| 0.942675
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
97f9367ec01f5749a34dbcad966cdf91827e078f
| 207
|
py
|
Python
|
extra_tests/snippets/name.py
|
dbrgn/RustPython
|
6d371cea8a62d84dbbeec5a53cfd040f45899211
|
[
"CC-BY-4.0",
"MIT"
] | 11,058
|
2018-05-29T07:40:06.000Z
|
2022-03-31T11:38:42.000Z
|
extra_tests/snippets/name.py
|
dbrgn/RustPython
|
6d371cea8a62d84dbbeec5a53cfd040f45899211
|
[
"CC-BY-4.0",
"MIT"
] | 2,105
|
2018-06-01T10:07:16.000Z
|
2022-03-31T14:56:42.000Z
|
extra_tests/snippets/name.py
|
dbrgn/RustPython
|
6d371cea8a62d84dbbeec5a53cfd040f45899211
|
[
"CC-BY-4.0",
"MIT"
] | 914
|
2018-07-27T09:36:14.000Z
|
2022-03-31T19:56:34.000Z
|
# When name.py is run directly, __name__ should equal "__main__"
assert __name__ == "__main__"

from import_name import import_func
# Inside the imported module, __name__ should be set to "import_name"
import_func()
assert __name__ == "__main__"
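For context, a plausible sketch of the companion module the snippet imports from (an assumption; import_name.py itself is not shown in this row):

# import_name.py -- hypothetical companion module
def import_func():
    # Inside an imported module, __name__ is the module's own name,
    # never "__main__".
    assert __name__ == "import_name"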
| 20.7
| 54
| 0.78744
| 31
| 207
| 4.225806
| 0.483871
| 0.229008
| 0.21374
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149758
| 207
| 9
| 55
| 23
| 0.744318
| 0.434783
| 0
| 0.5
| 0
| 0
| 0.13913
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
3f30a5fabf1a420defbeb529c9b64ae88a482b5e
| 2,308
|
py
|
Python
|
epytope/Data/pssms/smm/mat/A_29_02_9.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 7
|
2021-02-01T18:11:28.000Z
|
2022-01-31T19:14:07.000Z
|
epytope/Data/pssms/smm/mat/A_29_02_9.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 22
|
2021-01-02T15:25:23.000Z
|
2022-03-14T11:32:53.000Z
|
epytope/Data/pssms/smm/mat/A_29_02_9.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 4
|
2021-05-28T08:50:38.000Z
|
2022-03-14T11:45:32.000Z
|
A_29_02_9 = {0: {'A': 0.125, 'C': 0.041, 'E': 0.423, 'D': 0.252, 'G': -0.041, 'F': -0.33, 'I': -0.091, 'H': 0.19, 'K': 0.242, 'M': -0.491, 'L': -0.018, 'N': 0.099, 'Q': -0.037, 'P': 0.271, 'S': -0.049, 'R': 0.154, 'T': -0.04, 'W': -0.201, 'V': 0.011, 'Y': -0.511}, 1: {'A': 0.143, 'C': 0.565, 'E': 0.53, 'D': 0.499, 'G': 0.457, 'F': -0.772, 'I': -0.193, 'H': -0.006, 'K': 0.587, 'M': -0.88, 'L': -0.318, 'N': -0.018, 'Q': 0.229, 'P': 0.246, 'S': 0.095, 'R': 0.477, 'T': -0.311, 'W': -0.449, 'V': -0.451, 'Y': -0.432}, 2: {'A': -0.246, 'C': -0.0, 'E': 0.509, 'D': 0.309, 'G': 0.106, 'F': -0.441, 'I': -0.293, 'H': -0.018, 'K': 0.419, 'M': -0.113, 'L': -0.108, 'N': 0.026, 'Q': 0.07, 'P': 0.293, 'S': -0.022, 'R': 0.203, 'T': 0.056, 'W': -0.442, 'V': 0.021, 'Y': -0.329}, 3: {'A': -0.04, 'C': 0.024, 'E': 0.082, 'D': -0.072, 'G': -0.054, 'F': -0.208, 'I': 0.053, 'H': -0.004, 'K': 0.192, 'M': 0.078, 'L': 0.025, 'N': 0.018, 'Q': -0.046, 'P': -0.115, 'S': -0.058, 'R': 0.064, 'T': 0.035, 'W': -0.014, 'V': 0.053, 'Y': -0.011}, 4: {'A': 0.093, 'C': -0.043, 'E': 0.147, 'D': 0.037, 'G': -0.078, 'F': -0.075, 'I': -0.117, 'H': -0.037, 'K': 0.146, 'M': -0.052, 'L': -0.091, 'N': 0.026, 'Q': 0.011, 'P': 0.033, 'S': 0.134, 'R': 0.123, 'T': -0.016, 'W': -0.179, 'V': 0.034, 'Y': -0.095}, 5: {'A': -0.053, 'C': -0.102, 'E': 0.165, 'D': -0.036, 'G': -0.034, 'F': -0.088, 'I': 0.112, 'H': -0.09, 'K': 0.17, 'M': -0.003, 'L': 0.005, 'N': 0.075, 'Q': -0.022, 'P': -0.017, 'S': -0.013, 'R': 0.059, 'T': -0.103, 'W': -0.044, 'V': 0.053, 'Y': -0.035}, 6: {'A': -0.029, 'C': 0.142, 'E': 0.136, 'D': 0.316, 'G': 0.016, 'F': -0.356, 'I': 0.069, 'H': -0.196, 'K': 0.313, 'M': -0.031, 'L': -0.493, 'N': 0.164, 'Q': 0.146, 'P': -0.057, 'S': 0.114, 'R': 0.256, 'T': -0.001, 'W': -0.083, 'V': 0.103, 'Y': -0.529}, 7: {'A': -0.016, 'C': 0.151, 'E': -0.07, 'D': -0.04, 'G': 0.058, 'F': -0.17, 'I': 0.175, 'H': 0.102, 'K': 0.046, 'M': -0.064, 'L': -0.45, 'N': 0.044, 'Q': -0.012, 'P': -0.068, 'S': 0.113, 'R': 0.017, 'T': 0.05, 'W': 0.191, 'V': 0.206, 'Y': -0.263}, 8: {'A': 0.238, 'C': 0.122, 'E': 0.399, 'D': 0.38, 'G': 0.428, 'F': -0.705, 'I': -0.006, 'H': -0.37, 'K': 0.153, 'M': -0.557, 'L': 0.097, 'N': 0.208, 'Q': 0.153, 'P': 0.482, 'S': 0.287, 'R': 0.296, 'T': 0.371, 'W': -0.054, 'V': 0.065, 'Y': -1.989}, -1: {'con': 4.45576}}
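A short sketch of how a position-specific scoring matrix like this is typically applied (an assumed usage pattern, not code from the file): sum the per-position weights over a 9-mer and add the intercept stored under key -1.

def score_peptide(pssm, peptide):
    # Add the weight of each residue at its position, then the 'con' intercept.
    return sum(pssm[i][aa] for i, aa in enumerate(peptide)) + pssm[-1]['con']

# Hypothetical 9-mer; for SMM-style matrices the total is usually interpreted
# as a log-scaled IC50, so lower scores suggest stronger predicted binding.
print(score_peptide(A_29_02_9, 'YLDQVPFSV'))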
| 2,308
| 2,308
| 0.394281
| 557
| 2,308
| 1.628366
| 0.310592
| 0.019846
| 0.011025
| 0.01323
| 0.046307
| 0
| 0
| 0
| 0
| 0
| 0
| 0.373643
| 0.161612
| 2,308
| 1
| 2,308
| 2,308
| 0.09509
| 0
| 0
| 0
| 0
| 0
| 0.079255
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3f42c55897f07386419e8fa33c1be39db0d1ee41
| 10,941
|
py
|
Python
|
tencentcloud/tbp/v20190627/models.py
|
PlasticMem/tencentcloud-sdk-python
|
666db85623d51d640a165907a19aef5fba53b38d
|
[
"Apache-2.0"
] | 465
|
2018-04-27T09:54:59.000Z
|
2022-03-29T02:18:01.000Z
|
tencentcloud/tbp/v20190627/models.py
|
PlasticMem/tencentcloud-sdk-python
|
666db85623d51d640a165907a19aef5fba53b38d
|
[
"Apache-2.0"
] | 91
|
2018-04-27T09:48:11.000Z
|
2022-03-12T08:04:04.000Z
|
tencentcloud/tbp/v20190627/models.py
|
PlasticMem/tencentcloud-sdk-python
|
666db85623d51d640a165907a19aef5fba53b38d
|
[
"Apache-2.0"
] | 232
|
2018-05-02T08:02:46.000Z
|
2022-03-30T08:02:48.000Z
|
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import warnings

from tencentcloud.common.abstract_model import AbstractModel


class Group(AbstractModel):
    """Group is the concrete definition of a message group; it currently contains
    three fields: ContentType, Url, and Content. The ContentType field follows the
    Internet MIME type standard.
    """

    def __init__(self):
        r"""
        :param ContentType: Message type, following the Internet MIME type standard; currently only "text/plain" is supported.
        :type ContentType: str
        :param Url: Returned content, provided as a link.
        Note: this field may return null, meaning no valid value could be obtained.
        :type Url: str
        :param Content: Plain text.
        Note: this field may return null, meaning no valid value could be obtained.
        :type Content: str
        """
        self.ContentType = None
        self.Url = None
        self.Content = None

    def _deserialize(self, params):
        self.ContentType = params.get("ContentType")
        self.Url = params.get("Url")
        self.Content = params.get("Content")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))


class ResponseMessage(AbstractModel):
    """Starting with TBP-RTS v1.3, the bot responds with a list of message groups.
    GroupList contains several message groups, which users may combine in part or
    in full as needed.
    """

    def __init__(self):
        r"""
        :param GroupList: List of message groups.
        Note: this field may return null, meaning no valid value could be obtained.
        :type GroupList: list of Group
        """
        self.GroupList = None

    def _deserialize(self, params):
        if params.get("GroupList") is not None:
            self.GroupList = []
            for item in params.get("GroupList"):
                obj = Group()
                obj._deserialize(item)
                self.GroupList.append(obj)
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))


class SlotInfo(AbstractModel):
    """Slot information.
    """

    def __init__(self):
        r"""
        :param SlotName: Slot name.
        Note: this field may return null, meaning no valid value could be obtained.
        :type SlotName: str
        :param SlotValue: Slot value.
        Note: this field may return null, meaning no valid value could be obtained.
        :type SlotValue: str
        """
        self.SlotName = None
        self.SlotValue = None

    def _deserialize(self, params):
        self.SlotName = params.get("SlotName")
        self.SlotValue = params.get("SlotValue")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))


class TextProcessRequest(AbstractModel):
    """TextProcess request parameter structure.
    """

    def __init__(self):
        r"""
        :param BotId: Bot identifier, used to define an abstract bot.
        :type BotId: str
        :param BotEnv: Bot version, either "dev" or "release" {debug version: dev; production version: release}.
        :type BotEnv: str
        :param TerminalId: Terminal identifier; one per terminal (or thread), used to distinguish concurrent users.
        :type TerminalId: str
        :param InputText: Text of the request.
        :type InputText: str
        :param SessionAttributes: Pass-through field, forwarded to the user-defined WebService service.
        :type SessionAttributes: str
        :param PlatformType: Platform type {Mini Program: MiniProgram; XiaoWei: XiaoWei; Official Account: OfficialAccount; WeCom: WXWork}.
        :type PlatformType: str
        :param PlatformId: When PlatformType is a WeChat Official Account or WeCom, the unique identifier of that Official Account or WeCom instance.
        :type PlatformId: str
        """
        self.BotId = None
        self.BotEnv = None
        self.TerminalId = None
        self.InputText = None
        self.SessionAttributes = None
        self.PlatformType = None
        self.PlatformId = None

    def _deserialize(self, params):
        self.BotId = params.get("BotId")
        self.BotEnv = params.get("BotEnv")
        self.TerminalId = params.get("TerminalId")
        self.InputText = params.get("InputText")
        self.SessionAttributes = params.get("SessionAttributes")
        self.PlatformType = params.get("PlatformType")
        self.PlatformId = params.get("PlatformId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))


class TextProcessResponse(AbstractModel):
    """TextProcess response parameter structure.
    """

    def __init__(self):
        r"""
        :param DialogStatus: Current dialog status {dialog started: START; dialog in progress: COUTINUE; dialog finished: COMPLETE}.
        Note: this field may return null, meaning no valid value could be obtained.
        :type DialogStatus: str
        :param BotName: Name of the matched bot.
        Note: this field may return null, meaning no valid value could be obtained.
        :type BotName: str
        :param IntentName: Name of the matched intent.
        Note: this field may return null, meaning no valid value could be obtained.
        :type IntentName: str
        :param SlotInfoList: Slot information.
        Note: this field may return null, meaning no valid value could be obtained.
        :type SlotInfoList: list of SlotInfo
        :param InputText: Original user utterance.
        Note: this field may return null, meaning no valid value could be obtained.
        :type InputText: str
        :param ResponseMessage: Bot response.
        Note: this field may return null, meaning no valid value could be obtained.
        :type ResponseMessage: :class:`tencentcloud.tbp.v20190627.models.ResponseMessage`
        :param SessionAttributes: Pass-through field, returned by the user-defined WebService service.
        Note: this field may return null, meaning no valid value could be obtained.
        :type SessionAttributes: str
        :param ResultType: Result type {intermediate logic error: 0; task bot: 1; Q&A bot: 2; chit-chat bot: 3; no match, preset fallback reply returned: 5; no match, similar-question list returned: 6}.
        Note: this field may return null, meaning no valid value could be obtained.
        :type ResultType: str
        :param RequestId: Unique request ID, returned with every request. Provide the RequestId of the request when troubleshooting.
        :type RequestId: str
        """
        self.DialogStatus = None
        self.BotName = None
        self.IntentName = None
        self.SlotInfoList = None
        self.InputText = None
        self.ResponseMessage = None
        self.SessionAttributes = None
        self.ResultType = None
        self.RequestId = None

    def _deserialize(self, params):
        self.DialogStatus = params.get("DialogStatus")
        self.BotName = params.get("BotName")
        self.IntentName = params.get("IntentName")
        if params.get("SlotInfoList") is not None:
            self.SlotInfoList = []
            for item in params.get("SlotInfoList"):
                obj = SlotInfo()
                obj._deserialize(item)
                self.SlotInfoList.append(obj)
        self.InputText = params.get("InputText")
        if params.get("ResponseMessage") is not None:
            self.ResponseMessage = ResponseMessage()
            self.ResponseMessage._deserialize(params.get("ResponseMessage"))
        self.SessionAttributes = params.get("SessionAttributes")
        self.ResultType = params.get("ResultType")
        self.RequestId = params.get("RequestId")


class TextResetRequest(AbstractModel):
    """TextReset request parameter structure.
    """

    def __init__(self):
        r"""
        :param BotId: Bot identifier, used to define an abstract bot.
        :type BotId: str
        :param BotEnv: Bot version, either "dev" or "release" {debug version: dev; production version: release}.
        :type BotEnv: str
        :param TerminalId: Terminal identifier; one per terminal (or thread), used to distinguish concurrent users.
        :type TerminalId: str
        :param PlatformType: Platform type {Mini Program: MiniProgram; XiaoWei: XiaoWei; Official Account: OfficialAccount; WeCom: WXWork}.
        :type PlatformType: str
        :param PlatformId: When PlatformType is a WeChat Official Account or WeCom, the unique identifier of that Official Account or WeCom instance.
        :type PlatformId: str
        """
        self.BotId = None
        self.BotEnv = None
        self.TerminalId = None
        self.PlatformType = None
        self.PlatformId = None

    def _deserialize(self, params):
        self.BotId = params.get("BotId")
        self.BotEnv = params.get("BotEnv")
        self.TerminalId = params.get("TerminalId")
        self.PlatformType = params.get("PlatformType")
        self.PlatformId = params.get("PlatformId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))


class TextResetResponse(AbstractModel):
    """TextReset response parameter structure.
    """

    def __init__(self):
        r"""
        :param DialogStatus: Current dialog status {dialog started: START; dialog in progress: COUTINUE; dialog finished: COMPLETE}.
        Note: this field may return null, meaning no valid value could be obtained.
        :type DialogStatus: str
        :param BotName: Name of the matched bot.
        Note: this field may return null, meaning no valid value could be obtained.
        :type BotName: str
        :param IntentName: Name of the matched intent.
        Note: this field may return null, meaning no valid value could be obtained.
        :type IntentName: str
        :param SlotInfoList: Slot information.
        Note: this field may return null, meaning no valid value could be obtained.
        :type SlotInfoList: list of SlotInfo
        :param InputText: Original user utterance.
        Note: this field may return null, meaning no valid value could be obtained.
        :type InputText: str
        :param ResponseMessage: Bot response.
        Note: this field may return null, meaning no valid value could be obtained.
        :type ResponseMessage: :class:`tencentcloud.tbp.v20190627.models.ResponseMessage`
        :param SessionAttributes: Pass-through field, returned by the user-defined WebService service.
        Note: this field may return null, meaning no valid value could be obtained.
        :type SessionAttributes: str
        :param ResultType: Result type {intermediate logic error: 0; task bot: 1; Q&A bot: 2; chit-chat bot: 3; no match, preset fallback reply returned: 5; no match, similar-question list returned: 6}.
        Note: this field may return null, meaning no valid value could be obtained.
        :type ResultType: str
        :param RequestId: Unique request ID, returned with every request. Provide the RequestId of the request when troubleshooting.
        :type RequestId: str
        """
        self.DialogStatus = None
        self.BotName = None
        self.IntentName = None
        self.SlotInfoList = None
        self.InputText = None
        self.ResponseMessage = None
        self.SessionAttributes = None
        self.ResultType = None
        self.RequestId = None

    def _deserialize(self, params):
        self.DialogStatus = params.get("DialogStatus")
        self.BotName = params.get("BotName")
        self.IntentName = params.get("IntentName")
        if params.get("SlotInfoList") is not None:
            self.SlotInfoList = []
            for item in params.get("SlotInfoList"):
                obj = SlotInfo()
                obj._deserialize(item)
                self.SlotInfoList.append(obj)
        self.InputText = params.get("InputText")
        if params.get("ResponseMessage") is not None:
            self.ResponseMessage = ResponseMessage()
            self.ResponseMessage._deserialize(params.get("ResponseMessage"))
        self.SessionAttributes = params.get("SessionAttributes")
        self.ResultType = params.get("ResultType")
        self.RequestId = params.get("RequestId")
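An illustrative round trip (hypothetical identifiers) showing how these models are populated from plain dicts via _deserialize:

req = TextProcessRequest()
req._deserialize({
    "BotId": "bot-123",        # hypothetical values
    "BotEnv": "dev",
    "TerminalId": "terminal-1",
    "InputText": "hello",
})
print(req.BotId, req.InputText)  # -> bot-123 hello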
| 33.356707
| 107
| 0.626817
| 1,161
| 10,941
| 5.850129
| 0.196382
| 0.054329
| 0.040194
| 0.064929
| 0.766784
| 0.725118
| 0.708775
| 0.708775
| 0.708775
| 0.708775
| 0
| 0.006233
| 0.266795
| 10,941
| 328
| 108
| 33.356707
| 0.840439
| 0.413034
| 0
| 0.788079
| 0
| 0
| 0.088043
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092715
| false
| 0
| 0.013245
| 0
| 0.152318
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3f5ba5c47ed0a87f762f7dc0026005905f056dae
| 33
|
py
|
Python
|
cointanalysis/__init__.py
|
vishalbelsare/cointanalysis
|
ae21c520dfe500fe535265e93df4a36f4d012069
|
[
"BSD-3-Clause"
] | 27
|
2020-01-03T03:36:42.000Z
|
2022-03-28T06:47:32.000Z
|
cointanalysis/__init__.py
|
simaki/cointanalysis
|
ae21c520dfe500fe535265e93df4a36f4d012069
|
[
"BSD-3-Clause"
] | 26
|
2020-01-03T09:02:21.000Z
|
2022-02-22T01:01:48.000Z
|
cointanalysis/__init__.py
|
vishalbelsare/cointanalysis
|
ae21c520dfe500fe535265e93df4a36f4d012069
|
[
"BSD-3-Clause"
] | 8
|
2021-02-09T22:19:18.000Z
|
2022-02-23T19:45:24.000Z
|
from .coint import CointAnalysis
| 16.5
| 32
| 0.848485
| 4
| 33
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
58c6179b12a2e8dfb4b11aa9743eff3977e39a6d
| 1,879
|
py
|
Python
|
pkg/pymod_indx.py
|
timy/dm_spec
|
1e717c4ebf38cf847b845daea25d687cb115c245
|
[
"MIT"
] | 2
|
2015-06-05T03:20:38.000Z
|
2020-08-24T23:42:28.000Z
|
pkg/pymod_indx.py
|
timy/dm_spec
|
1e717c4ebf38cf847b845daea25d687cb115c245
|
[
"MIT"
] | null | null | null |
pkg/pymod_indx.py
|
timy/dm_spec
|
1e717c4ebf38cf847b845daea25d687cb115c245
|
[
"MIT"
] | 4
|
2019-10-01T00:18:31.000Z
|
2021-04-04T15:38:29.000Z
|
directions_2 = [
    [ 1, 0 ], [ 0, 1 ],
    [ 2, 0 ], [ 0, 2 ],
    [ 1, 1 ], [ 1, -1 ],
    [ 3, 0 ], [ 0, 3 ],
    [ 1, 2 ], [ 2, 1 ],
    [ 1, -2 ], [-2, 1 ],
    [ 4, 0 ], [ 0, 4 ],
    [ 1, 3 ], [ 3, 1 ],
    [ 1, -3 ], [-3, 1 ],
    [ 2, 2 ], [ 2, -2 ],
    [ 5, 0 ], [ 0, 5 ],
    [ 1, 4 ], [ 4, 1 ],
    [ 1, -4 ], [-4, 1 ],
    [ 2, 3 ], [ 3, 2 ],
    [ 2, -3 ], [-3, 2 ],
    [-1, 0 ], [ 0, -1 ],
    [-2, 0 ], [ 0, -2 ],
    [-1, -1 ], [-1, 1 ],
    [-3, 0 ], [ 0, -3 ],
    [-1, -2 ], [-2, -1 ],
    [-1, 2 ], [ 2, -1 ],
    [-4, 0 ], [ 0, -4 ],
    [-1, -3 ], [-3, -1 ],
    [-1, 3 ], [ 3, -1 ],
    [-2, -2 ], [-2, 2 ],
    [-5, 0 ], [ 0, -5 ],
    [-1, -4 ], [-4, -1 ],
    [-1, 4 ], [ 4, -1 ],
    [-2, -3 ], [-3, -2 ],
    [-2, 3 ], [ 3, -2 ],
    [ 0, 0 ]
]

directions_3 = [
    [ 1, 0, 0], [ 0, 1, 0], [ 0, 0, 1],
    [-1, 1, 1], [ 1, -1, 1], [ 1, 1, -1],
    [-2, 1, 0], [ 1, -2, 0], [-2, 0, 1],
    [ 0, -2, 1], [ 1, 0, -2], [ 0, 1, -2],
    [-1, 0, 0], [ 0, -1, 0], [ 0, 0, -1],
    [ 1, -1, -1], [-1, 1, -1], [-1, -1, 1],
    [ 2, -1, 0], [-1, 2, 0], [ 2, 0, -1],
    [ 0, 2, -1], [-1, 0, 2], [ 0, -1, 2],
    [ 3, 0, 0], [ 0, 3, 0], [ 0, 0, 3],
    [ 2, 1, 0], [ 2, 0, 1], [ 1, 2, 0],
    [ 0, 2, 1], [ 1, 0, 2], [ 0, 1, 2],
    [ 1, 1, 1],
    [-3, 0, 0], [ 0, -3, 0], [ 0, 0, -3],
    [-2, -1, 0], [-2, 0, -1], [-1, -2, 0],
    [ 0, -2, -1], [-1, 0, -2], [ 0, -1, -2],
    [-1, -1, -1]
]


def idx_ppar( directions, idx, coo ):
    """ Return a pair [ index, data_name ]"""
    cood = ["x", "y", "z"]
    return [ directions.index(idx) * 6 + cood.index(coo) * 2,
             "[%s] in %s-axis" %
             ( ', '.join( map( str, idx ) ), coo ) ]
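A quick illustrative call (not part of the file): for the x component of [1, 0], the first entry of directions_2, the index works out to 0 * 6 + 0 * 2 = 0.

print(idx_ppar(directions_2, [1, 0], "x"))  # -> [0, '[1, 0] in x-axis']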
| 25.053333
| 61
| 0.212347
| 292
| 1,879
| 1.35274
| 0.09589
| 0.202532
| 0.167089
| 0.162025
| 0.640506
| 0.640506
| 0.640506
| 0.640506
| 0.640506
| 0.640506
| 0
| 0.253937
| 0.459287
| 1,879
| 74
| 62
| 25.391892
| 0.134843
| 0.018095
| 0
| 0
| 0
| 0
| 0.010881
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017857
| false
| 0
| 0
| 0
| 0.035714
| 0
| 0
| 0
| 1
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
58e64313a37debbc7970de08240ab9582d4b3a60
| 33
|
py
|
Python
|
optimizer/__init__.py
|
zzh237/la
|
f363ef5ff5a540d716b585d752d344def194d31b
|
[
"MIT"
] | null | null | null |
optimizer/__init__.py
|
zzh237/la
|
f363ef5ff5a540d716b585d752d344def194d31b
|
[
"MIT"
] | null | null | null |
optimizer/__init__.py
|
zzh237/la
|
f363ef5ff5a540d716b585d752d344def194d31b
|
[
"MIT"
] | null | null | null |
from .pytorch_optimizer import *
| 33
| 33
| 0.818182
| 4
| 33
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
45198d0766090e926ecb6754d73859bac9158b9b
| 44
|
py
|
Python
|
py2vega/__init__.py
|
martinRenou/py2vega
|
fa90b2670404f510b352e8a7ff1a4353f0040852
|
[
"BSD-3-Clause"
] | 9
|
2019-08-19T07:17:10.000Z
|
2021-03-13T21:46:32.000Z
|
py2vega/__init__.py
|
martinRenou/py2vega
|
fa90b2670404f510b352e8a7ff1a4353f0040852
|
[
"BSD-3-Clause"
] | 16
|
2019-08-19T12:13:07.000Z
|
2021-03-03T08:32:24.000Z
|
py2vega/__init__.py
|
martinRenou/py2vega
|
fa90b2670404f510b352e8a7ff1a4353f0040852
|
[
"BSD-3-Clause"
] | 3
|
2019-08-19T07:17:16.000Z
|
2020-10-15T17:07:05.000Z
|
from .main import py2vega, Variable # noqa
| 22
| 43
| 0.75
| 6
| 44
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027778
| 0.181818
| 44
| 1
| 44
| 44
| 0.888889
| 0.090909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
189283eee6340593df13f51960d40694564fd2a6
| 129
|
py
|
Python
|
stardog/tests/conftest.py
|
chrroberts-pure/integrations-extras
|
d2ff09d5cdc50ad1d2a826ea5404cddca0198afa
|
[
"BSD-3-Clause"
] | null | null | null |
stardog/tests/conftest.py
|
chrroberts-pure/integrations-extras
|
d2ff09d5cdc50ad1d2a826ea5404cddca0198afa
|
[
"BSD-3-Clause"
] | null | null | null |
stardog/tests/conftest.py
|
chrroberts-pure/integrations-extras
|
d2ff09d5cdc50ad1d2a826ea5404cddca0198afa
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest


@pytest.fixture(scope="session")
def dd_environment():
    yield


@pytest.fixture
def instance():
    return {}
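A sketch of how these fixtures would be consumed (the test name is hypothetical): pytest injects each fixture into a test function by argument name.

def test_instance_is_empty(dd_environment, instance):
    # dd_environment is requested only for its session-scoped setup/teardown.
    assert instance == {}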
| 10.75
| 32
| 0.689922
| 15
| 129
| 5.866667
| 0.733333
| 0.295455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178295
| 129
| 11
| 33
| 11.727273
| 0.830189
| 0
| 0
| 0
| 0
| 0
| 0.054264
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| true
| 0
| 0.142857
| 0.142857
| 0.571429
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
18c9be5479bb34fd459f4c1f404bb2d808682dba
| 86
|
py
|
Python
|
CodeForce/code/quiz_281A.py
|
Muzque/Leetcode
|
d06365792c9ef48e0a290da00ba5e71f212554d5
|
[
"MIT"
] | 1
|
2021-05-11T09:52:38.000Z
|
2021-05-11T09:52:38.000Z
|
CodeForce/code/quiz_281A.py
|
Muzque/Leetcode
|
d06365792c9ef48e0a290da00ba5e71f212554d5
|
[
"MIT"
] | null | null | null |
CodeForce/code/quiz_281A.py
|
Muzque/Leetcode
|
d06365792c9ef48e0a290da00ba5e71f212554d5
|
[
"MIT"
] | 1
|
2021-05-05T04:13:17.000Z
|
2021-05-05T04:13:17.000Z
|
i = input(); print(i[0].capitalize()+i[1:])
# i = input(); print(i[0].upper()+i[1:])
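An illustrative run (assumed I/O; the task capitalizes only the first letter and leaves the rest unchanged):

# stdin:  "codeforces"
# stdout: "Codeforces"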
| 21.5
| 43
| 0.523256
| 16
| 86
| 2.8125
| 0.4375
| 0.266667
| 0.488889
| 0.533333
| 0.577778
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 0.116279
| 86
| 3
| 44
| 28.666667
| 0.539474
| 0.44186
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
18ccabdd68f7347ae698efb45a0945a2005f6911
| 17,383
|
py
|
Python
|
tests/test_elements/test_ui_horizontal_scroll_bar.py
|
glipR/pygame_gui
|
0cbf7056518377b455d51a8d20167f4029756ad9
|
[
"MIT"
] | 339
|
2019-10-30T01:42:23.000Z
|
2022-03-31T06:11:18.000Z
|
tests/test_elements/test_ui_horizontal_scroll_bar.py
|
glipR/pygame_gui
|
0cbf7056518377b455d51a8d20167f4029756ad9
|
[
"MIT"
] | 236
|
2019-10-15T18:33:06.000Z
|
2022-03-03T19:18:09.000Z
|
tests/test_elements/test_ui_horizontal_scroll_bar.py
|
glipR/pygame_gui
|
0cbf7056518377b455d51a8d20167f4029756ad9
|
[
"MIT"
] | 55
|
2019-11-02T09:19:56.000Z
|
2022-01-21T18:48:24.000Z
|
import os

import pytest
import pygame

from tests.shared_fixtures import _init_pygame, default_ui_manager
from tests.shared_fixtures import default_display_surface, _display_surface_return_none
from tests.shared_comparators import compare_surfaces

from pygame_gui.ui_manager import UIManager
from pygame_gui.elements.ui_horizontal_scroll_bar import UIHorizontalScrollBar
from pygame_gui.core.ui_container import UIContainer
from pygame_gui.core.interfaces import IUIManagerInterface

try:
    pygame.MOUSEWHEEL
except AttributeError:
    pygame.MOUSEWHEEL = -1


class TestUIHorizontalScrollBar:
    def test_creation(self, _init_pygame, default_ui_manager,
                      _display_surface_return_none):
        scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 100, 150, 30),
                                           visible_percentage=0.7,
                                           manager=default_ui_manager)
        assert scroll_bar.image is not None

    def test_rebuild(self, _init_pygame, default_ui_manager,
                     _display_surface_return_none):
        scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 100, 150, 30),
                                           visible_percentage=0.7,
                                           manager=default_ui_manager)
        scroll_bar.rebuild()
        assert scroll_bar.image is not None

    def test_check_has_moved_recently(self, _init_pygame, default_ui_manager,
                                      _display_surface_return_none):
        scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 100, 150, 30),
                                           visible_percentage=0.7,
                                           manager=default_ui_manager)
        # move the scroll bar a bit
        scroll_bar.right_button.held = True
        scroll_bar.update(0.2)
        assert scroll_bar.check_has_moved_recently() is True

    def test_check_update_buttons(self, _init_pygame, default_ui_manager,
                                  _display_surface_return_none):
        scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 100, 150, 30),
                                           visible_percentage=0.7,
                                           manager=default_ui_manager)
        # scroll down a bit then up again to exercise update
        scroll_bar.right_button.held = True
        scroll_bar.update(0.3)
        scroll_bar.right_button.held = False
        scroll_bar.left_button.held = True
        scroll_bar.update(0.3)
        assert scroll_bar.check_has_moved_recently() is True

    def test_check_update_sliding_bar(self, _init_pygame, default_ui_manager,
                                      _display_surface_return_none):
        scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(0, 0, 150, 30),
                                           visible_percentage=0.7,
                                           manager=default_ui_manager)
        # scroll down a bit then up again to exercise update
        default_ui_manager.mouse_position = (100, 15)
        scroll_bar.sliding_button.held = True
        scroll_bar.update(0.3)
        assert scroll_bar.grabbed_slider is True

        scroll_bar.sliding_button.held = False
        scroll_bar.update(0.3)
        assert scroll_bar.grabbed_slider is False

    def test_redraw_scroll_bar(self, _init_pygame, default_ui_manager,
                               _display_surface_return_none):
        scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 100, 150, 30),
                                           visible_percentage=0.7,
                                           manager=default_ui_manager)
        scroll_bar.redraw_scrollbar()
        assert scroll_bar.sliding_button is not None

    def test_reset_scroll_position(self, _init_pygame, default_ui_manager,
                                   _display_surface_return_none):
        scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 100, 150, 30),
                                           visible_percentage=0.7,
                                           manager=default_ui_manager)
        scroll_bar.reset_scroll_position()
        assert scroll_bar.scroll_position == 0.0 and scroll_bar.start_percentage == 0.0

    def test_set_visible_percentage(self, _init_pygame, default_ui_manager,
                                    _display_surface_return_none):
        scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 100, 150, 30),
                                           visible_percentage=0.7,
                                           manager=default_ui_manager)
        scroll_bar.start_percentage = 0.9
        scroll_bar.set_visible_percentage(0.2)
        assert scroll_bar.visible_percentage == 0.2

        scroll_bar.set_visible_percentage(-0.2)
        assert scroll_bar.visible_percentage == 0.0
        scroll_bar.set_visible_percentage(1.9)
        assert scroll_bar.visible_percentage == 1.0

    def test_kill(self, _init_pygame, default_ui_manager: IUIManagerInterface,
                  _display_surface_return_none):
        scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 100, 150, 30),
                                           visible_percentage=0.7,
                                           manager=default_ui_manager)
        assert len(default_ui_manager.get_root_container().elements) == 2
        assert len(default_ui_manager.get_sprite_group().sprites()) == 6
        scroll_bar_sprites = [default_ui_manager.get_root_container(),
                              scroll_bar,
                              scroll_bar.button_container,
                              scroll_bar.left_button,
                              scroll_bar.right_button,
                              scroll_bar.sliding_button]
        assert default_ui_manager.get_sprite_group().sprites() == scroll_bar_sprites
        scroll_bar.kill()
        assert len(default_ui_manager.get_root_container().elements) == 0
        assert len(default_ui_manager.get_sprite_group().sprites()) == 1
        empty_sprites = [default_ui_manager.get_root_container()]
        assert default_ui_manager.get_sprite_group().sprites() == empty_sprites

    def test_process_event(self, _init_pygame, default_ui_manager,
                           _display_surface_return_none):
        scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 100, 150, 30),
                                           visible_percentage=0.7,
                                           manager=default_ui_manager)
        scroll_bar.hovered = True
        assert scroll_bar.process_event(pygame.event.Event(pygame.MOUSEWHEEL, {'x': 0.5})) is True
        assert scroll_bar.process_event(pygame.event.Event(pygame.MOUSEWHEEL, {'x': -0.5})) is True

    def test_rebuild_from_theme_data_non_default(self, _init_pygame,
                                                 _display_surface_return_none):
        manager = UIManager((800, 600), os.path.join("tests", "data",
                                                     "themes",
                                                     "ui_horizontal_scroll_bar_non_default.json"))
        scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 100, 150, 30),
                                           visible_percentage=0.1,
                                           manager=manager)
        assert scroll_bar.image is not None

    def test_rebuild_from_theme_data_no_arrow_buttons(self, _init_pygame,
                                                      _display_surface_return_none):
        manager = UIManager((800, 600), os.path.join("tests", "data",
                                                     "themes",
                                                     "ui_horizontal_scroll_bar_no_arrows.json"))
        scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 100, 150, 30),
                                           visible_percentage=0.1,
                                           manager=manager)
        assert scroll_bar.left_button is None
        assert scroll_bar.right_button is None
        assert scroll_bar.image is not None

    @pytest.mark.filterwarnings("ignore:Invalid value")
    @pytest.mark.filterwarnings("ignore:Colour hex code")
    def test_rebuild_from_theme_data_bad_values(self, _init_pygame,
                                                _display_surface_return_none):
        manager = UIManager((800, 600), os.path.join("tests", "data",
                                                     "themes",
                                                     "ui_horizontal_scroll_bar_bad_values.json"))
        scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 100, 150, 30),
                                           visible_percentage=1.0,
                                           manager=manager)
        assert scroll_bar.image is not None

    def test_set_position(self, _init_pygame, default_ui_manager, _display_surface_return_none):
        scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(80, 100, 200, 30),
                                           visible_percentage=0.25, manager=default_ui_manager)
        scroll_bar.set_position((200, 200))

        # try to click on the scroll bar's left button
        default_ui_manager.process_events(pygame.event.Event(pygame.MOUSEBUTTONDOWN,
                                                             {'button': 1, 'pos': (205, 215)}))
        # if we successfully clicked on the moved scroll bar then this button should be True
        assert scroll_bar.left_button.held is True

        default_ui_manager.process_events(pygame.event.Event(pygame.MOUSEBUTTONDOWN,
                                                             {'button': 1, 'pos': (395, 215)}))
        # if we successfully clicked on the moved scroll bar then this button should be True
        assert scroll_bar.right_button.held is True

        default_ui_manager.process_events(pygame.event.Event(pygame.MOUSEBUTTONDOWN,
                                                             {'button': 1, 'pos': (250, 215)}))
        # if we successfully clicked on the moved scroll bar then this button should be True
        assert scroll_bar.sliding_button.held is True

    def test_set_relative_position(self, _init_pygame, default_ui_manager,
                                   _display_surface_return_none):
        test_container = UIContainer(relative_rect=pygame.Rect(50, 50, 300, 250),
                                     manager=default_ui_manager)
        scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(80, 100, 200, 30),
                                           visible_percentage=0.25, manager=default_ui_manager,
                                           container=test_container)
        scroll_bar.set_relative_position((50, 50))

        # try to click on the scroll bar's left button
        default_ui_manager.process_events(pygame.event.Event(pygame.MOUSEBUTTONDOWN,
                                                             {'button': 1, 'pos': (105, 115)}))
        # if we successfully clicked on the moved scroll bar then this button should be True
        assert scroll_bar.left_button.held is True

        default_ui_manager.process_events(pygame.event.Event(pygame.MOUSEBUTTONDOWN,
                                                             {'button': 1, 'pos': (295, 115)}))
        # if we successfully clicked on the moved scroll bar then this button should be True
        assert scroll_bar.right_button.held is True

        default_ui_manager.process_events(pygame.event.Event(pygame.MOUSEBUTTONDOWN,
                                                             {'button': 1, 'pos': (150, 115)}))
        # if we successfully clicked on the moved scroll bar then this button should be True
        assert scroll_bar.sliding_button.held is True

    def test_set_dimensions(self, _init_pygame, default_ui_manager,
                            _display_surface_return_none):
        scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 0, 200, 30),
                                           visible_percentage=0.25, manager=default_ui_manager)
        scroll_bar.set_dimensions((100, 60))

        # try to click on the slider
        default_ui_manager.process_events(pygame.event.Event(pygame.MOUSEBUTTONDOWN,
                                                             {'button': 1, 'pos': (195, 40)}))
        # if we successfully clicked on the moved slider then this button should be True
        assert scroll_bar.right_button.held is True

    def test_disable(self, _init_pygame: None, default_ui_manager: UIManager,
                     _display_surface_return_none: None):
        scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(0, 0, 200, 30),
                                           visible_percentage=0.25, manager=default_ui_manager)
        scroll_bar.disable()

        # process a mouse button down event
        scroll_bar.right_button.process_event(
            pygame.event.Event(pygame.MOUSEBUTTONDOWN,
                               {'button': 1, 'pos': scroll_bar.right_button.rect.center}))
        scroll_bar.update(0.1)

        # process a mouse button up event
        scroll_bar.right_button.process_event(
            pygame.event.Event(pygame.MOUSEBUTTONUP,
                               {'button': 1, 'pos': scroll_bar.right_button.rect.center}))
        assert scroll_bar.scroll_position == 0.0 and scroll_bar.is_enabled is False

    def test_enable(self, _init_pygame: None, default_ui_manager: UIManager,
                    _display_surface_return_none: None):
        scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(0, 0, 200, 30),
                                           visible_percentage=0.25, manager=default_ui_manager)
        scroll_bar.disable()
        scroll_bar.enable()

        # process a mouse button down event
        scroll_bar.right_button.process_event(
            pygame.event.Event(pygame.MOUSEBUTTONDOWN,
                               {'button': 1, 'pos': scroll_bar.right_button.rect.center}))
        scroll_bar.update(0.1)

        # process a mouse button up event
        scroll_bar.right_button.process_event(
            pygame.event.Event(pygame.MOUSEBUTTONUP,
                               {'button': 1, 'pos': scroll_bar.right_button.rect.center}))
        assert scroll_bar.scroll_position != 0.0 and scroll_bar.is_enabled is True

    def test_show(self, _init_pygame, default_ui_manager, _display_surface_return_none):
        scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 0, 200, 30),
                                           visible_percentage=0.25, manager=default_ui_manager,
                                           visible=0)
        assert scroll_bar.visible == 0
        assert scroll_bar.button_container.visible == 0
        assert scroll_bar.sliding_button.visible == 0
        assert scroll_bar.left_button.visible == 0
        assert scroll_bar.right_button.visible == 0

        scroll_bar.show()

        assert scroll_bar.visible == 1
        assert scroll_bar.button_container.visible == 1
        assert scroll_bar.sliding_button.visible == 1
        assert scroll_bar.left_button.visible == 1
        assert scroll_bar.right_button.visible == 1

    def test_hide(self, _init_pygame, default_ui_manager, _display_surface_return_none):
        scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(100, 0, 200, 30),
                                           visible_percentage=0.25, manager=default_ui_manager)
        assert scroll_bar.visible == 1
        assert scroll_bar.button_container.visible == 1
        assert scroll_bar.sliding_button.visible == 1
        assert scroll_bar.left_button.visible == 1
        assert scroll_bar.right_button.visible == 1

        scroll_bar.hide()

        assert scroll_bar.visible == 0
        assert scroll_bar.button_container.visible == 0
        assert scroll_bar.sliding_button.visible == 0
        assert scroll_bar.left_button.visible == 0
        assert scroll_bar.right_button.visible == 0

    def test_show_hide_rendering(self, _init_pygame, default_ui_manager, _display_surface_return_none):
        resolution = (400, 400)
        empty_surface = pygame.Surface(resolution)
        empty_surface.fill(pygame.Color(0, 0, 0))

        surface = empty_surface.copy()
        manager = UIManager(resolution)
        scroll_bar = UIHorizontalScrollBar(relative_rect=pygame.Rect(25, 25, 375, 150),
                                           visible_percentage=0.25,
                                           manager=manager,
                                           visible=0)
        manager.update(0.01)
        manager.draw_ui(surface)
        assert compare_surfaces(empty_surface, surface)

        surface.fill(pygame.Color(0, 0, 0))
        scroll_bar.show()
        manager.update(0.01)
        manager.draw_ui(surface)
        assert not compare_surfaces(empty_surface, surface)

        surface.fill(pygame.Color(0, 0, 0))
        scroll_bar.hide()
        manager.update(0.01)
        manager.draw_ui(surface)
        assert compare_surfaces(empty_surface, surface)
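A minimal usage sketch outside the test harness (assumptions: a display can be initialised and pygame_gui exposes UIHorizontalScrollBar under pygame_gui.elements):

import pygame
import pygame_gui

pygame.init()
pygame.display.set_mode((400, 300))
manager = pygame_gui.UIManager((400, 300))
scroll_bar = pygame_gui.elements.UIHorizontalScrollBar(
    relative_rect=pygame.Rect(20, 260, 360, 30),
    visible_percentage=0.5,
    manager=manager)
manager.update(0.01)  # advance the UI one frame so the bar lays itself out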
| 49.383523
| 103
| 0.601162
| 1,937
| 17,383
| 5.080021
| 0.08983
| 0.120732
| 0.086179
| 0.053659
| 0.853659
| 0.816057
| 0.799187
| 0.777134
| 0.768293
| 0.741667
| 0
| 0.038547
| 0.326929
| 17,383
| 351
| 104
| 49.524217
| 0.802479
| 0.054824
| 0
| 0.592308
| 0
| 0
| 0.018767
| 0.007312
| 0
| 0
| 0
| 0
| 0.215385
| 1
| 0.080769
| false
| 0
| 0.038462
| 0
| 0.123077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
18d660575b74732cabd2fede25155edc59c77130
| 5,003
|
py
|
Python
|
python/algos/dssm_net.py
|
meta-soul/MetaSpore
|
e6fbc12c6a3139df76c87215b16f9dba65962ec7
|
[
"Apache-2.0"
] | 32
|
2022-03-30T10:24:00.000Z
|
2022-03-31T16:19:15.000Z
|
python/algos/dssm_net.py
|
meta-soul/MetaSpore
|
e6fbc12c6a3139df76c87215b16f9dba65962ec7
|
[
"Apache-2.0"
] | null | null | null |
python/algos/dssm_net.py
|
meta-soul/MetaSpore
|
e6fbc12c6a3139df76c87215b16f9dba65962ec7
|
[
"Apache-2.0"
] | 3
|
2022-03-30T10:28:57.000Z
|
2022-03-30T11:37:39.000Z
|
#
# Copyright 2022 DMetaSoul
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import torch
import metaspore as ms
import torch.nn.functional as F

from .layers import MLPLayer


class SimilarityModule(torch.nn.Module):
    def __init__(self, tau):
        super().__init__()
        self.tau = tau

    def forward(self, x, y):
        z = torch.sum(x * y, dim=1).reshape(-1, 1)
        s = torch.sigmoid(z / self.tau)
        return s


class UserModule(torch.nn.Module):
    def __init__(self,
                 column_name_path,
                 combine_schema_path,
                 embedding_dim,
                 sparse_init_var=1e-2,
                 dnn_hidden_units=[1024, 512, 256],
                 dnn_hidden_activations="ReLU",
                 use_bias=True,
                 net_dropout=0,
                 batch_norm=False,
                 embedding_regularizer=None,
                 net_regularizer=None,
                 ftrl_l1=1.0,
                 ftrl_l2=120.0,
                 ftrl_alpha=0.5,
                 ftrl_beta=1.0,
                 **kwargs):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.column_name_path = column_name_path
        self.combine_schema_path = combine_schema_path
        ## sparse layers
        self.sparse = ms.EmbeddingSumConcat(self.embedding_dim, self.column_name_path, self.combine_schema_path)
        self.sparse.updater = ms.FTRLTensorUpdater(l1=ftrl_l1, l2=ftrl_l2, alpha=ftrl_alpha, beta=ftrl_beta)
        self.sparse.initializer = ms.NormalTensorInitializer(var=sparse_init_var)
        self.sparse.output_batchsize1_if_only_level0 = True
        ## sparse normalization
        self.sparse_output_dim = self.sparse.feature_count * self.embedding_dim
        self.sparse_embedding_bn = ms.nn.Normalization(self.sparse_output_dim, momentum=0.01, eps=1e-5)
        ## dense layers
        self.dense = MLPLayer(input_dim=self.sparse_output_dim,
                              output_dim=None,
                              hidden_units=dnn_hidden_units,
                              hidden_activations=dnn_hidden_activations,
                              final_activation=None,
                              dropout_rates=net_dropout,
                              batch_norm=batch_norm,
                              use_bias=use_bias)

    def forward(self, x):
        x = self.sparse(x)
        x = self.sparse_embedding_bn(x)
        x = self.dense(x)
        return x


class ItemModule(torch.nn.Module):
    def __init__(self,
                 column_name_path,
                 combine_schema_path,
                 embedding_dim,
                 sparse_init_var=1e-2,
                 dnn_hidden_units=[1024, 512, 256],
                 dnn_hidden_activations="ReLU",
                 use_bias=True,
                 net_dropout=0,
                 batch_norm=False,
                 embedding_regularizer=None,
                 net_regularizer=None,
                 ftrl_l1=1.0,
                 ftrl_l2=120.0,
                 ftrl_alpha=0.5,
                 ftrl_beta=1.0,
                 **kwargs):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.column_name_path = column_name_path
        self.combine_schema_path = combine_schema_path
        ## sparse layers
        self.sparse = ms.EmbeddingSumConcat(self.embedding_dim, self.column_name_path, self.combine_schema_path)
        self.sparse.updater = ms.FTRLTensorUpdater(l1=ftrl_l1, l2=ftrl_l2, alpha=ftrl_alpha, beta=ftrl_beta)
        self.sparse.initializer = ms.NormalTensorInitializer(var=sparse_init_var)
        ## sparse normalization
        self.sparse_output_dim = self.sparse.feature_count * self.embedding_dim
        self.sparse_embedding_bn = ms.nn.Normalization(self.sparse_output_dim, momentum=0.01, eps=1e-5)
        ## dense layers
        self.dense = MLPLayer(input_dim=self.sparse_output_dim,
                              output_dim=None,
                              hidden_units=dnn_hidden_units,
                              hidden_activations=dnn_hidden_activations,
                              final_activation=None,
                              dropout_rates=net_dropout,
                              batch_norm=batch_norm,
                              use_bias=use_bias)

    def forward(self, x):
        x = self.sparse(x)
        x = self.sparse_embedding_bn(x)
        x = self.dense(x)
        return x
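An illustrative check of SimilarityModule on random embeddings (the tau value and 256-dim size are assumptions, not from the file):

import torch

sim = SimilarityModule(tau=0.05)
user_emb = torch.randn(4, 256)    # batch of 4 user-tower outputs
item_emb = torch.randn(4, 256)    # batch of 4 item-tower outputs
scores = sim(user_emb, item_emb)  # shape (4, 1), each value in (0, 1)
print(scores.shape)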
| 40.674797
| 112
| 0.585049
| 582
| 5,003
| 4.747423
| 0.256014
| 0.076004
| 0.040536
| 0.039088
| 0.74629
| 0.74629
| 0.737604
| 0.737604
| 0.737604
| 0.737604
| 0
| 0.024449
| 0.337797
| 5,003
| 122
| 113
| 41.008197
| 0.809538
| 0.128323
| 0
| 0.849462
| 0
| 0
| 0.001846
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0
| 0.043011
| 0
| 0.172043
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7a0b7e45d69c9ff05fa4846b9cd13fc649923c99
| 168
|
py
|
Python
|
dart_fss/api/shareholder/__init__.py
|
dveamer/dart-fss
|
1ea6b937f363d604a7da9c03686fba7f66707efa
|
[
"MIT"
] | 243
|
2019-04-19T09:05:32.000Z
|
2022-03-27T10:51:24.000Z
|
dart_fss/api/shareholder/__init__.py
|
dveamer/dart-fss
|
1ea6b937f363d604a7da9c03686fba7f66707efa
|
[
"MIT"
] | 80
|
2019-04-20T06:37:44.000Z
|
2022-03-25T12:20:47.000Z
|
dart_fss/api/shareholder/__init__.py
|
dveamer/dart-fss
|
1ea6b937f363d604a7da9c03686fba7f66707efa
|
[
"MIT"
] | 92
|
2019-04-18T06:19:52.000Z
|
2022-03-17T07:43:39.000Z
|
from .executive import get_executive_shareholder
from .major_shareholder import get_major_shareholder
__all__ = ['get_executive_shareholder', 'get_major_shareholder']
| 33.6
| 64
| 0.863095
| 20
| 168
| 6.6
| 0.35
| 0.363636
| 0.348485
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077381
| 168
| 5
| 64
| 33.6
| 0.851613
| 0
| 0
| 0
| 0
| 0
| 0.272189
| 0.272189
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e1b1f050f8ff33595de54a7427af9ca2b7126bc0
| 19,049
|
py
|
Python
|
commentary022021/commentary022021-figures.py
|
zhoudanxie/sinclair-xie-sentiment
|
d53d7cea724b32ea69e9c47e8a3b7cec800c7d07
|
[
"MIT"
] | null | null | null |
commentary022021/commentary022021-figures.py
|
zhoudanxie/sinclair-xie-sentiment
|
d53d7cea724b32ea69e9c47e8a3b7cec800c7d07
|
[
"MIT"
] | null | null | null |
commentary022021/commentary022021-figures.py
|
zhoudanxie/sinclair-xie-sentiment
|
d53d7cea724b32ea69e9c47e8a3b7cec800c7d07
|
[
"MIT"
] | null | null | null |
import pandas as pd
import os
import re
import numpy as np
from datetime import datetime
from sklearn.decomposition import PCA
# Plotting Packages
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.cbook as cbook
import numpy as np
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from matplotlib import rcParams
rcParams['font.family'] = "Times New Roman"
colors=['#033C5A','#AA9868','#0190DB','#FFC72C','#A75523','#008364','#78BE20','#C9102F',
        '#033C5A','#AA9868','#0190DB','#FFC72C','#A75523','#008364','#78BE20','#C9102F']
#-----------------------------------------------------------------------------------------------------------------------
#----------------------------------------------------Import Data--------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
# Import monthly data
monthlyIndex=pd.read_csv(r'Data\RegRelevant_MonthlySentimentIndex_Jan2021.csv')
print(monthlyIndex.info())
monthlyIndex['Year-Month']=monthlyIndex['Year'].map(str)+'-'+monthlyIndex['Month'].map(str)
monthlyIndex['date']=monthlyIndex['Year-Month'].astype('datetime64[ns]').dt.date
for dict in ['GI','LM','LSD']:
    monthlyIndex[dict+'index_standardized']=(monthlyIndex[dict+'index']-np.mean(monthlyIndex[dict+'index']))/np.std(monthlyIndex[dict+'index'])
monthlyIndex['UncertaintyIndex_standardized']=(monthlyIndex['UncertaintyIndex']-np.mean(monthlyIndex['UncertaintyIndex']))/np.std(monthlyIndex['UncertaintyIndex'])
# PCA of monthly sentiment indexes
features = ['GIindex', 'LMindex', 'LSDindex']
x = monthlyIndex.loc[:, features].values
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(x)
print("Variance explained by PC1 and PC2:", pca.explained_variance_ratio_)
print("PC1 feature weights:", pca.components_[0])
principalComponents_neg=principalComponents*(-1)
principalDf = pd.DataFrame(data = principalComponents_neg, columns = ['SentimentPC1', 'SentimentPC2'])
monthlyIndex = pd.concat([monthlyIndex, principalDf], axis = 1)
monthlyIndex['SentimentMax']=monthlyIndex[['GIindex','LMindex','LSDindex']].max(axis=1)
monthlyIndex['SentimentMin']=monthlyIndex[['GIindex','LMindex','LSDindex']].min(axis=1)
# Import weekly data
weeklyIndex=pd.read_csv(r'Data\RegRelevant_WeeklySentimentIndex_Jan2021.csv')
print(weeklyIndex.info())
weeklyIndex['date']=weeklyIndex['StartDate'].astype('datetime64[ns]').dt.date
for dict in ['GI','LM','LSD']:
    weeklyIndex[dict+'index_standardized']=(weeklyIndex[dict+'index']-np.mean(weeklyIndex[dict+'index']))/np.std(weeklyIndex[dict+'index'])
weeklyIndex['UncertaintyIndex_standardized']=(weeklyIndex['UncertaintyIndex']-np.mean(weeklyIndex['UncertaintyIndex']))/np.std(weeklyIndex['UncertaintyIndex'])
# PCA of weekly sentiment indexes
features = ['GIindex', 'LMindex', 'LSDindex']
x = weeklyIndex.loc[:, features].values
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(x)
print("Variance explained by PC1 and PC2:", pca.explained_variance_ratio_)
print("PC1 feature weights:", pca.components_[0])
principalComponents_neg=principalComponents*(-1)
principalDf = pd.DataFrame(data = principalComponents_neg, columns = ['SentimentPC1', 'SentimentPC2'])
weeklyIndex = pd.concat([weeklyIndex, principalDf], axis = 1)
#-----------------------------------------------------------------------------------------------------------------------
#---------------------------------------Plot Monthly Sentiment & Uncertainty Indexes--------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
# Plot monthly uncertainty index under Trump with weekly inset
x=monthlyIndex['date'][-49:]
y=monthlyIndex['UncertaintyIndex'][-49:]
fig, ax = plt.subplots(1, figsize=(15,8))
ax.plot(x,y,color=colors[0],marker='D',markersize=8)
# Events
ax.text(datetime(2016,12,1), 0.73, 'Transition\nof power', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2020,4,1), 0.8, 'Coronavirus\noutbreak', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2020,11,1), 0.77, '2020 presidential\nelection', fontsize=13, color=colors[4],horizontalalignment='center')
# format the ticks
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
years_fmt = mdates.DateFormatter('%Y-%m')
#
# ax.xaxis.set_major_locator(years)
# ax.xaxis.set_major_formatter(years_fmt)
# ax.xaxis.set_minor_locator(months)
#
# # round to nearest years.
# datemin = np.datetime64(min(x), 'Y')
# datemax = np.datetime64(max(x), 'Y') + np.timedelta64(1, 'Y')
# ax.set_xlim(datemin, datemax)
# format the coords message box
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.format_ydata = lambda x: '$%1.2f' % x
fig.autofmt_xdate()
# Set tick and label format
ax.tick_params(axis='both',which='major',labelsize=14,color='#d3d3d3')
ax.tick_params(axis='both',which='minor',color='#d3d3d3')
ax.set_ylabel('Monthly Uncertainty Index',fontsize=16)
ax.set_yticks(np.arange(round(min(y),1)-0.1,round(max(y),1)+0.2,0.1))
#ax.set_ylim(bottom=round(min(y),1))
ax.grid(color='#d3d3d3', which='major', axis='y')
# Borders
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_color('#d3d3d3')
ax.spines['bottom'].set_color('#d3d3d3')
# Title
fig.suptitle('Figure 1: Uncertainty about Regulatory Policy',
             x=0.72, y=0.95,fontsize=20)
ax.set_title('(January 2017 - January 2021)',fontsize=18,position=(0.85,1.1))
# Inset plot
xins=weeklyIndex['date'][-52:]
yins=weeklyIndex['UncertaintyIndex'][-52:]
axins=inset_axes(ax, width=5, height=2.5, bbox_to_anchor=(.05, .69, .5, .5),
                 bbox_transform=ax.transAxes,loc=2)
axins.plot(xins,yins,color='#033C5A',linewidth=2,marker='D',markersize=5)
axins.format_xdata = mdates.DateFormatter('%Y-%m')
axins.set_yticks(np.arange(round(min(yins),1)-0.1, round(max(yins),1)+0.2, 0.1))
axins.grid(color='gray', which='major', axis='y', linestyle='dotted')
axins.tick_params(axis='both',which='major',labelsize=10)
axins.set_facecolor('#d3d3d3')
axins.set_alpha(0.2)
axins.set_title('Weekly Index over the Past 12 Months',fontsize=14,position=(0.5,0.85))
# Adjust plot position
plt.subplots_adjust(top=0.81, bottom=0.15)
#Notes
fig.text(0.12, 0.02,'Notes: The uncertainty index was estimated using a dictionary-based sentiment analysis'
         ' approach applied to newspaper text and fixed effects\nregressions. '
         'For details on the methodology, refer to the latest draft of the Sinclair and Xie paper'
         ' on "Sentiment and Uncertainty about Regulation".',
         fontsize=14,style='italic')
plt.savefig('Figures/Figure1.jpg', bbox_inches='tight')
plt.show()
#-----------------------------------------------------------------------------------------------------------------------
# Plot monthly uncertainty index with events by presidential year
x=monthlyIndex['date']
y=monthlyIndex['UncertaintyIndex']
fig, ax = plt.subplots(1, figsize=(15,9))
ax.plot(x,y,color='black')
# Presidential year
ax.axvspan(datetime(1985,1,1),datetime(1989,2,1),alpha=0.1, color=colors[7])
ax.text(datetime(1987,1,1), 0.91, 'Ronald\nReagan', fontsize=13, color=colors[7],horizontalalignment='center')
ax.axvspan(datetime(1989,2,1),datetime(1993,2,1),alpha=0.1, color=colors[7])
ax.text(datetime(1991,1,1), 0.91, 'George H. W.\nBush', fontsize=13, color=colors[7],horizontalalignment='center')
ax.axvspan(datetime(1993,2,1),datetime(2001,2,1),alpha=0.1, color=colors[0])
ax.text(datetime(1997,1,1), 0.91, 'Bill\nClinton', fontsize=13, color=colors[0],horizontalalignment='center')
ax.axvspan(datetime(2001,2,1),datetime(2009,2,1),alpha=0.1, color=colors[7])
ax.text(datetime(2005,1,1), 0.91, 'George W.\nBush', fontsize=13, color=colors[7],horizontalalignment='center')
ax.axvspan(datetime(2009,2,1),datetime(2017,2,1),alpha=0.1, color=colors[0])
ax.text(datetime(2013,1,1), 0.91, 'Barack\nObama', fontsize=13, color=colors[0],horizontalalignment='center')
ax.axvspan(datetime(2017,2,1),datetime(2021,2,1),alpha=0.1, color=colors[7])
ax.text(datetime(2019,1,1),0.91, 'Donald\nTrump', fontsize=13, color=colors[7],horizontalalignment='center')
# events
ax.text(datetime(2008,9,1), 0.8, 'Lehman\nBrothers', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2010,3,1), 0.855, 'Obamacare', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2010,10,1), 0.87, 'Deepwater Horizon\noil spill', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2010,7,1), 0.84, 'Dodd-Frank', fontsize=13, color=colors[4],horizontalalignment='left')
ax.text(datetime(2016,11,1),0.83 , '2016 presidential\nelection', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2020,1,1), 0.79, 'Coronavirus\noutbreak', fontsize=13, color=colors[4],horizontalalignment='center')
# format the ticks
years = mdates.YearLocator(2) # every year
months = mdates.MonthLocator() # every month
years_fmt = mdates.DateFormatter('%Y')
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(years_fmt)
ax.xaxis.set_minor_locator(months)
# round to nearest years.
datemin = np.datetime64(monthlyIndex['date'].iloc[0], 'Y')
datemax = np.datetime64(monthlyIndex['date'].iloc[-1], 'Y')
ax.set_xlim(datemin, datemax)
# format the coords message box
ax.format_xdata = mdates.DateFormatter('%Y')
ax.format_ydata = lambda x: '$%1.2f' % x
fig.autofmt_xdate()
# Set tick and label format
ax.tick_params(axis='both',which='major',labelsize=14,color='#d3d3d3')
ax.tick_params(axis='both',which='minor',color='#d3d3d3')
ax.set_ylabel('Monthly Uncertainty Index',fontsize=16)
ax.set_yticks(np.arange(round(min(y),1),round(max(y),1)+0.1,0.1))
ax.set_ylim(bottom=round(min(y),1))
ax.grid(color='#d3d3d3', which='major', axis='y')
# Borders
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_color('#d3d3d3')
ax.spines['bottom'].set_color('#d3d3d3')
# Title
fig.suptitle('Figure 3: Uncertainty about Regulation by Presidential Year',
             y=0.95,fontsize=20)
ax.set_title('(January 1985 - January 2021)',fontsize=18,position=(0.5,1.12))
#Notes
fig.text(0.12, 0.03,'Notes: The uncertainty index was estimated using a dictionary-based sentiment analysis'
         ' approach applied to newspaper text and fixed effects\nregressions. '
         'For details on the methodology, refer to the latest draft of the Sinclair and Xie paper'
         ' on "Sentiment and Uncertainty about Regulation".',
         fontsize=14,style='italic')
# Adjust plot position
plt.subplots_adjust(top=0.81, bottom=0.15)
plt.savefig('Figures/Figure3.jpg', bbox_inches='tight')
plt.show()
#-----------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
# Plot PC1 under Trump with weekly inset
x = monthlyIndex['date'][-49:]
y = monthlyIndex['SentimentPC1'][-49:]
fig, ax = plt.subplots(1, figsize=(15, 8))
ax.plot(x,y,color=colors[0],marker='D',markersize=8)
# Events
#ax.text(datetime(2016,12,1), 0.73, 'Transition\nof Power', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2018,12,1), -0.45, 'Trump midterm\nelection', fontsize=13, color=colors[4],horizontalalignment='center')
#ax.text(datetime(2020,3,1), -0.15, 'Coronavirus\noutbreak', fontsize=13, color=colors[4],horizontalalignment='center')
#ax.text(datetime(2020,12,1), 0.77, '2020 Presidential Election', fontsize=13, color=colors[4],horizontalalignment='center')
# format the ticks
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
years_fmt = mdates.DateFormatter('%Y-%m')
#
# ax.xaxis.set_major_locator(years)
# ax.xaxis.set_major_formatter(years_fmt)
# ax.xaxis.set_minor_locator(months)
#
# # round to nearest years.
# datemin = np.datetime64(min(x), 'Y')
# datemax = np.datetime64(max(x), 'Y') + np.timedelta64(1, 'Y')
# ax.set_xlim(datemin, datemax)
# format the coords message box
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.format_ydata = lambda v: '%1.2f' % v # plain number: the index is unitless, so drop the leftover '$' currency format
fig.autofmt_xdate()
# Set tick and label format
ax.tick_params(axis='both',which='major',labelsize=14,color='#d3d3d3')
ax.tick_params(axis='both',which='minor',color='#d3d3d3')
ax.set_ylabel('Monthly Sentiment Index',fontsize=16)
ax.set_yticks(np.arange(-0.8,1.4,0.4))
#ax.set_ylim(bottom=round(min(y),1))
ax.grid(color='#d3d3d3', which='major', axis='y')
# Borders
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_color('#d3d3d3')
ax.spines['bottom'].set_color('#d3d3d3')
# Title
fig.suptitle('Figure 2: Sentiment about Regulatory Policy',
x=0.26, y=0.95,fontsize=20)
ax.set_title('(January 2017 - January 2021)',fontsize=18,position=(0.1,1.13))
# Inset plot
xins=weeklyIndex['date'][-52:]
yins=weeklyIndex['SentimentPC1'][-52:]
axins=inset_axes(ax, width=5, height=2.5, bbox_to_anchor=(.52, .75, .5, .5),
bbox_transform=ax.transAxes,loc=2)
axins.plot(xins,yins,color='#033C5A',linewidth=2,marker='D',markersize=5)
axins.format_xdata = mdates.DateFormatter('%Y-%m')
axins.set_yticks(np.arange(-2, 3, 1))
axins.grid(color='gray', which='major', axis='y', linestyle='dotted')
axins.tick_params(axis='both',which='major',labelsize=10)
axins.set_facecolor('#d3d3d3')
axins.patch.set_alpha(0.1) # the background patch carries the alpha; Axes.set_alpha() alone has no visible effect
axins.set_title('Weekly Index over the Past 12 Months',fontsize=14,position=(0.5,0.85))
# Adjust plot position
plt.subplots_adjust(top=0.79, bottom=0.15)
#Notes
fig.text(0.12, 0.02,'Notes: The sentiment index was estimated using a dictionary-based sentiment analysis'
' approach applied to newspaper text and fixed effects\nregressions. '
'For details on the methodology, refer to the latest draft of the Sinclair and Xie paper'
' on "Sentiment and Uncertainty about Regulation".',
fontsize=14,style='italic')
plt.savefig("Figures/Figure2.jpg", bbox_inches='tight')
plt.show()
#-----------------------------------------------------------------------------------------------------------------------
# Plot PC1 with events by presidential year
x = monthlyIndex['date']
y = monthlyIndex['SentimentPC1']
fig, ax = plt.subplots(1, figsize=(15, 9))
ax.plot(x, y, color='black')
# Presidential year
ax.axvspan(datetime(1985,1,1),datetime(1989,2,1),alpha=0.1, color=colors[7])
ax.text(datetime(1987,1,1), 1.6, 'Ronald\nReagan', fontsize=13, color=colors[7],horizontalalignment='center')
ax.axvspan(datetime(1989,2,1),datetime(1993,2,1),alpha=0.1, color=colors[7])
ax.text(datetime(1991,1,1), 1.6, 'George H. W.\nBush', fontsize=13, color=colors[7],horizontalalignment='center')
ax.axvspan(datetime(1993,2,1),datetime(2001,2,1),alpha=0.1, color=colors[0])
ax.text(datetime(1997,1,1), 1.6, 'Bill\nClinton', fontsize=13, color=colors[0],horizontalalignment='center')
ax.axvspan(datetime(2001,2,1),datetime(2009,2,1),alpha=0.1, color=colors[7])
ax.text(datetime(2005,1,1), 1.6, 'George W.\nBush', fontsize=13, color=colors[7],horizontalalignment='center')
ax.axvspan(datetime(2009,2,1),datetime(2017,2,1),alpha=0.1, color=colors[0])
ax.text(datetime(2013,1,1), 1.6, 'Barack\nObama', fontsize=13, color=colors[0],horizontalalignment='center')
ax.axvspan(datetime(2017,2,1),datetime(2021,2,1),alpha=0.1, color=colors[7])
ax.text(datetime(2019,1,1),1.6, 'Donald\nTrump', fontsize=13, color=colors[7],horizontalalignment='center')
# events
ax.text(datetime(1993,9,1), 0.75, 'Clinton\nhealth care plan', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2001,9,1), -0.75, '9/11', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2006,11,1), 0.73, 'Bush midterm\nelection', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2008,9,1), -0.6, 'Lehman\nBrothers', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2010,3,1), -1, 'Obamacare', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2010,10,1),-1.25, 'Deepwater Horizon\noil spill', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2010,12,1), -1.4, 'Dodd-Frank', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2012,6,1), -1, 'Libor\nscandal', fontsize=13, color=colors[4],horizontalalignment='left')
ax.text(datetime(2016,11,1), 0.8 , '2016 presidential\nelection', fontsize=13, color=colors[4],horizontalalignment='center')
#ax.text(datetime(2020,1,1), -0.5, 'Coronavirus\noutbreak', fontsize=13, color=colors[4],horizontalalignment='center')
# format the ticks
years = mdates.YearLocator(2) # a major tick every two years
months = mdates.MonthLocator() # every month
years_fmt = mdates.DateFormatter('%Y')
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(years_fmt)
ax.xaxis.set_minor_locator(months)
# truncate the x-limits to whole years.
datemin = np.datetime64(x.iloc[0], 'Y')
datemax = np.datetime64(x.iloc[-1], 'Y')
ax.set_xlim(datemin, datemax)
# format the coords message box
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.format_ydata = lambda v: '%1.2f' % v # plain number: the index is unitless, so drop the leftover '$' currency format
fig.autofmt_xdate()
# Set tick and label format
ax.tick_params(axis='both',which='major',labelsize=14,color='#d3d3d3')
ax.tick_params(axis='both',which='minor',color='#d3d3d3')
ax.set_ylabel('Monthly Sentiment Index', fontsize=16)
ax.set_yticks(np.arange(round(min(y), 0) - 0.5, round(max(y), 0) + 1, 0.5))
ax.grid(color='#d3d3d3', which='major', axis='y')
# Borders
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_color('#d3d3d3')
ax.spines['bottom'].set_color('#d3d3d3')
# Title
fig.suptitle("Figure 4: Sentiment about Regulation by Presidential Year",
y=0.95, fontsize=20)
ax.set_title('(January 1985 - January 2021)', fontsize=18,position=(0.5,1.12))
# Notes
fig.text(0.12, 0.03, 'Notes: The sentiment index was estimated using a dictionary-based sentiment analysis'
' approach applied to newspaper text and fixed effects\nregressions. '
'For details on the methodology, refer to the latest draft of the Sinclair and Xie paper'
' on "Sentiment and Uncertainty about Regulation".',
fontsize=14, style='italic')
# Adjust plot position
plt.subplots_adjust(top=0.81, bottom=0.15)
plt.savefig("Figures/Figure4.jpg", bbox_inches='tight')
plt.show()
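# Refactoring sketch (not part of the original script): Figures 2-4 above repeat
# the same tick/spine/grid styling inline. A small helper like the hypothetical
# style_axes() below could factor that out; it reproduces the repeated calls.
def style_axes(ax, grey='#d3d3d3'):
    """Apply the shared grey tick/spine/grid styling used by the figures above."""
    ax.tick_params(axis='both', which='major', labelsize=14, color=grey)
    ax.tick_params(axis='both', which='minor', color=grey)
    ax.grid(color=grey, which='major', axis='y')
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['left'].set_color(grey)
    ax.spines['bottom'].set_color(grey)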
[row metadata for the preceding file: avg_line_length 44.3 | max_line_length 163 | alphanum_fraction 0.678 | per-file quality signals omitted]
[next file: hexsha e1b60fa8cc323af51be5d79d363a4f67e68e4b7a | 10,789 bytes | py | Python | test/test_models.py | tianjuchen/pyoptmat | head 6f34205f450fd884679f37522ccd0d0b65ecdb71 | ["MIT"] | star/issue/fork counts null]
import unittest
import numpy as np
import torch
import torch.nn
from pyoptmat import models, flowrules, utility, hardening, damage
from pyoptmat.temperature import ConstantParameter as CP
torch.set_default_dtype(torch.float64)
class CommonModel:
def test_derivs_strain(self):
strain_rates = torch.cat(
(
torch.zeros(1, self.strains.shape[1]),
(self.strains[1:] - self.strains[:-1])
/ (self.times[1:] - self.times[:-1]),
)
)
strain_rates[torch.isnan(strain_rates)] = 0
erate_interpolator = utility.CheaterBatchTimeSeriesInterpolator(
self.times, strain_rates
)
temperature_interpolator = utility.CheaterBatchTimeSeriesInterpolator(
self.times, self.temperatures
)
use = models.StrainBasedModel(
self.model, erate_interpolator, temperature_interpolator
)
v, dv = use.forward(self.t, self.state_strain)
ddv = utility.new_differentiate(
lambda x: use.forward(self.t, x)[0], self.state_strain
)
self.assertTrue(np.allclose(dv, ddv, rtol=1e-4, atol=1e-4))
def test_derivs_stress(self):
stress_rates = torch.cat(
(
torch.zeros(1, self.stresses.shape[1]),
(self.stresses[1:] - self.stresses[:-1])
/ (self.times[1:] - self.times[:-1]),
)
)
stress_rates[torch.isnan(stress_rates)] = 0
stress_rate_interpolator = utility.CheaterBatchTimeSeriesInterpolator(
self.times, stress_rates
)
stress_interpolator = utility.CheaterBatchTimeSeriesInterpolator(
self.times, self.stresses
)
temperature_interpolator = utility.CheaterBatchTimeSeriesInterpolator(
self.times, self.temperatures
)
use = models.StressBasedModel(
self.model,
stress_rate_interpolator,
stress_interpolator,
temperature_interpolator,
)
v, dv = use.forward(self.t, self.state_stress)
ddv = utility.new_differentiate(
lambda x: use.forward(self.t, x)[0], self.state_stress
)
self.assertTrue(np.allclose(dv, ddv, rtol=1e-4, atol=1e-4))
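# Note (illustrative, an assumption about pyoptmat internals): both tests above
# compare the analytic Jacobian dv returned by forward() against a numerical one
# from utility.new_differentiate. Conceptually that helper is assumed to perform
# a forward finite difference over each state component, along these lines:
#
# def finite_difference_jacobian(f, x, eps=1e-6):
#     y0 = f(x)
#     jac = torch.zeros(y0.shape + x.shape[-1:])
#     for i in range(x.shape[-1]):
#         xp = x.clone()
#         xp[..., i] += eps           # perturb one state component
#         jac[..., i] = (f(xp) - y0) / eps
#     return jac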
class TestPerfectViscoplasticity(unittest.TestCase, CommonModel):
def setUp(self):
self.E = torch.tensor(100000.0)
self.n = torch.tensor(5.2)
self.eta = torch.tensor(110.0)
self.times = torch.transpose(
torch.tensor(np.array([np.linspace(0, 1, 4) for i in range(3)])), 1, 0
)
self.strains = (
torch.transpose(
torch.tensor(np.array([np.linspace(0, 1, 4) for i in range(3)])), 1, 0
)
/ 10.0
)
self.temperatures = torch.zeros_like(self.strains)
self.stresses = (
torch.transpose(
torch.tensor(np.array([np.linspace(0, 1, 4) for i in range(3)])), 1, 0
)
* 0
)
self.state_strain = torch.tensor([[90.0], [100.0], [101.0]])
self.state_stress = torch.tensor([[0.0], [0.0], [0.0]])
self.t = self.times[2]
self.flowrule = flowrules.PerfectViscoplasticity(CP(self.n), CP(self.eta))
self.model = models.InelasticModel(CP(self.E), self.flowrule)
class TestIsoKinViscoplasticity(unittest.TestCase, CommonModel):
def setUp(self):
self.E = torch.tensor(100000.0)
self.n = torch.tensor(5.2)
self.eta = torch.tensor(110.0)
self.s0 = torch.tensor(0.0)
self.R = torch.tensor(101.0)
self.d = torch.tensor(1.3)
self.iso = hardening.VoceIsotropicHardeningModel(CP(self.R), CP(self.d))
self.C = torch.tensor(12000.0)
self.g = torch.tensor(10.1)
self.kin = hardening.FAKinematicHardeningModel(CP(self.C), CP(self.g))
self.flowrule = flowrules.IsoKinViscoplasticity(
CP(self.n), CP(self.eta), CP(self.s0), self.iso, self.kin
)
self.model = models.InelasticModel(CP(self.E), self.flowrule)
self.times = torch.transpose(
torch.tensor(np.array([np.linspace(0, 1, 4) for i in range(3)])), 1, 0
)
self.strains = torch.transpose(
torch.tensor(np.array([np.linspace(0, 1, 4) for i in range(3)])), 1, 0
)
self.temperatures = torch.zeros_like(self.times)
self.stresses = (
torch.transpose(
torch.tensor(np.array([np.linspace(0, 1, 4) for i in range(3)])), 1, 0
)
* 200
)
self.state_strain = (
torch.tensor(
[[90.0, 30.0, 10.0, 0], [100.0, 10.0, 15.0, 0], [101.0, 50.0, 60.0, 0]]
)
/ 3
)
self.state_stress = (
torch.tensor(
[[0.05, 30.0, 10.0, 0], [0.07, 10.0, 15.0, 0], [0.08, 50.0, 60.0, 0]]
)
/ 3
)
self.t = self.times[2]
class TestIsoKinViscoplasticityRecovery(unittest.TestCase, CommonModel):
def setUp(self):
self.E = torch.tensor(100000.0)
self.n = torch.tensor(5.2)
self.eta = torch.tensor(110.0)
self.s0 = torch.tensor(0.0)
self.tau0 = torch.tensor(101.0)
self.theta0 = torch.tensor(1000.0)
self.R0 = torch.tensor(0.0)
self.r1 = torch.tensor(1.0e-6)
self.r2 = torch.tensor(2.0)
self.iso = hardening.Theta0RecoveryVoceIsotropicHardeningModel(
CP(self.tau0), CP(self.theta0), CP(self.R0), CP(self.r1), CP(self.r2)
)
self.C = torch.tensor(12000.0)
self.g = torch.tensor(10.1)
self.kin = hardening.FAKinematicHardeningModel(CP(self.C), CP(self.g))
self.flowrule = flowrules.IsoKinViscoplasticity(
CP(self.n), CP(self.eta), CP(self.s0), self.iso, self.kin
)
self.model = models.InelasticModel(CP(self.E), self.flowrule)
self.times = torch.transpose(
torch.tensor(np.array([np.linspace(0, 1, 4) for i in range(3)])), 1, 0
)
self.strains = torch.transpose(
torch.tensor(np.array([np.linspace(0, 1, 4) for i in range(3)])), 1, 0
)
self.temperatures = torch.zeros_like(self.times)
self.stresses = (
torch.transpose(
torch.tensor(np.array([np.linspace(0, 1, 4) for i in range(3)])), 1, 0
)
* 200
)
self.state_strain = (
torch.tensor(
[[90.0, 30.0, 10.0, 0], [100.0, 10.0, 15.0, 0], [101.0, 50.0, 60.0, 0]]
)
/ 3
)
self.state_stress = (
torch.tensor(
[[0.05, 30.0, 10.0, 0], [0.07, 10.0, 15.0, 0], [0.08, 50.0, 60.0, 0]]
)
/ 3
)
self.t = self.times[2]
class TestDamage(unittest.TestCase, CommonModel):
def setUp(self):
self.E = torch.tensor(100000.0)
self.n = torch.tensor(5.2)
self.eta = torch.tensor(110.0)
self.s0 = torch.tensor(0.0)
self.R = torch.tensor(101.0)
self.d = torch.tensor(1.3)
self.iso = hardening.VoceIsotropicHardeningModel(CP(self.R), CP(self.d))
self.C = torch.tensor(1200.0)
self.g = torch.tensor(10.1)
self.kin = hardening.FAKinematicHardeningModel(CP(self.C), CP(self.g))
self.A = torch.tensor(3000.0)
self.xi = torch.tensor(6.5)
self.phi = torch.tensor(1.7)
self.dmodel = damage.HayhurstLeckie(CP(self.A), CP(self.xi), CP(self.phi))
self.flowrule = flowrules.IsoKinViscoplasticity(
CP(self.n), CP(self.eta), CP(self.s0), self.iso, self.kin
)
self.model = models.InelasticModel(
CP(self.E), self.flowrule, dmodel=self.dmodel
)
self.times = torch.transpose(
torch.tensor(np.array([np.linspace(0, 1, 4) for i in range(3)])), 1, 0
)
self.strains = torch.transpose(
torch.tensor(np.array([np.linspace(0, 1, 4) for i in range(3)])), 1, 0
)
self.temperatures = torch.zeros_like(self.strains)
self.stresses = (
torch.transpose(
torch.tensor(np.array([np.linspace(0, 1, 4) for i in range(3)])), 1, 0
)
* 200
)
self.state_strain = torch.tensor(
[[90.0, 30.0, 10.0, 0.05], [100.0, 10.0, 15.0, 0.1], [20, -10.0, -10, 0.2]]
)
self.state_stress = torch.tensor(
[[0.1, 30.0, 10.0, 0.05], [0.11, 10.0, 15.0, 0.1], [0.12, -10.0, -10, 0.2]]
)
self.t = self.times[2]
class TestAll(unittest.TestCase, CommonModel):
def setUp(self):
self.E = torch.tensor(100000.0)
self.n = torch.tensor(5.2)
self.eta = torch.tensor(110.0)
self.s0 = torch.tensor(0.0)
self.R = torch.tensor(101.0)
self.d = torch.tensor(1.3)
self.iso = hardening.VoceIsotropicHardeningModel(CP(self.R), CP(self.d))
self.C = torch.tensor([1200.0, 200.0, 10.0])
self.g = torch.tensor([10.1, 100.0, 50.0])
self.kin = hardening.ChabocheHardeningModel(CP(self.C), CP(self.g))
self.A = torch.tensor(3000.0)
self.xi = torch.tensor(6.5)
self.phi = torch.tensor(1.7)
self.dmodel = damage.HayhurstLeckie(CP(self.A), CP(self.xi), CP(self.phi))
self.flowrule = flowrules.IsoKinViscoplasticity(
CP(self.n), CP(self.eta), CP(self.s0), self.iso, self.kin
)
self.model = models.InelasticModel(
CP(self.E), self.flowrule, dmodel=self.dmodel
)
self.times = torch.transpose(
torch.tensor(np.array([np.linspace(0, 1, 4) for i in range(3)])), 1, 0
)
self.strains = torch.transpose(
torch.tensor(np.array([np.linspace(0, 1, 4) for i in range(3)])), 1, 0
)
self.temperatures = torch.zeros_like(self.strains)
self.stresses = (
torch.transpose(
torch.tensor(np.array([np.linspace(0, 1, 4) for i in range(3)])), 1, 0
)
* 200
)
self.state_strain = torch.tensor(
[
[90.0, 30.0, 10.0, 10.0, -10.0, 0.2],
[100.0, 10.0, 15.0, 5.0, -10.0, 0.3],
[101.0, 50.0, 60.0, -50.0, 10.0, 0.4],
]
)
self.state_stress = torch.tensor(
[
[0.05, 30.0, 10.0, 10.0, -10.0, 0.2],
[0.08, 10.0, 15.0, 5.0, -10.0, 0.3],
[0.07, 50.0, 60.0, -50.0, 10.0, 0.4],
]
)
self.t = self.times[2]
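# Standard unittest entry point (added for convenience; the file can also be run
# through a test runner such as `python -m pytest test/test_models.py` or
# `python -m unittest`):
if __name__ == "__main__":
    unittest.main()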
[row metadata for the preceding file: avg_line_length 33.51 | max_line_length 87 | alphanum_fraction 0.541 | per-file quality signals omitted]
[next file: hexsha 830b227bec06e2a2415838c9272fbe667f5d6c18 | 39,381 bytes | py | Python | pykg2vec/config/hyperparams.py | kyzhouhzau/pykg2vec | head 337a5e630f820fac7d64ef407cb92a08cd86096c | ["MIT"] | 1 star (2020-06-26T16:50:38Z) | issues null | 1 fork (2020-06-26T16:50:39Z)]
"""
hyperparams.py
====================================
It provides configuration for the tunable hyper-parameter ranges for all the algorithms.
"""
from argparse import ArgumentParser
from hyperopt import hp
from hyperopt.pyll.base import scope
import numpy as np
class HyperparamterLoader:
def __init__(self):
# These hyperparameter settings aim to reproduce the experimental setups of the original papers.
self.hyperparams_paper = {
'freebase15k':
{
'transe' : {'learning_rate': 0.01,'L1_flag': True,'hidden_size':50,'batch_size': 128,'epochs':1000,'margin':1.00,'optimizer': 'sgd','sampling':"uniform",'neg_rate':1},
'transh' : {'learning_rate': 0.005,'L1_flag':False,'hidden_size':50,'batch_size':1200,'epochs':1000,'margin': 0.5,'optimizer': 'sgd','sampling':"uniform",'neg_rate':1,'C': 0.015625},
'hole' : {'learning_rate': 0.01,'L1_flag': True,'hidden_size':50,'batch_size': 512,'epochs':1000,'margin': 1.0,'optimizer':'adam','sampling':"uniform",'neg_rate':1},
'transm' : {'learning_rate': 0.001,'L1_flag': True,'hidden_size':50,'batch_size': 128,'epochs':1000,'margin': 1.0,'optimizer':'adam','sampling':"uniform",'neg_rate':1},
'rescal' : {'learning_rate': 0.001,'L1_flag': True,'hidden_size':50,'batch_size': 128,'epochs':1000,'margin': 1.0,'optimizer':'adam','sampling':"uniform",'neg_rate':1},
'rotate' : {'learning_rate': 0.01,'L1_flag': True,'hidden_size':50,'batch_size': 128,'epochs':1000,'margin': 1.0,'optimizer':'adam','sampling':"uniform",'neg_rate':1},
'sme' : {'learning_rate': 0.001,'L1_flag': True,'hidden_size':50,'batch_size': 128,'epochs':1000,'margin': 1.0,'optimizer':'adam','sampling':"uniform",'neg_rate':1,'bilinear':False},
'transr' : {'learning_rate': 0.001,'L1_flag': True,'ent_hidden_size':50,'rel_hidden_size':50,'batch_size': 4800,'epochs': 1000,'margin': 1.0,'optimizer': 'sgd','sampling': "bern",'neg_rate':1},
'transd' : {'learning_rate': 0.001,'L1_flag':False,'ent_hidden_size':50,'rel_hidden_size':50,'batch_size': 200,'epochs': 1000,'margin': 1.0,'optimizer': 'sgd','sampling':"uniform",'neg_rate':1},
'ntn' : {'learning_rate': 0.01,'L1_flag': True,'ent_hidden_size':64,'rel_hidden_size':32,'batch_size': 128,'epochs': 1000,'margin': 1.0,'optimizer':'adam','sampling':"uniform",'neg_rate':1}, # problematic
'slm' : {'learning_rate': 0.01,'L1_flag': True,'ent_hidden_size':64,'rel_hidden_size':32,'batch_size': 128,'epochs': 1000,'margin': 1.0,'optimizer':'adam','sampling':"uniform",'neg_rate':1},
'kg2e' : {'learning_rate': 0.01,'L1_flag': True,'hidden_size':50,'batch_size':1440,'epochs':1000,'margin': 4.0,'optimizer': 'sgd','sampling':"uniform",'distance_measure': "kl_divergence",'cmax': 0.05,'cmin': 5.00,'neg_rate': 1},
'complex' : {'learning_rate': 0.5,'hidden_size':100,'batch_size':5000,'epochs':1000,'optimizer':'adagrad','sampling':"uniform",'neg_rate':10,'lmbda':0.0001},
'distmult': {'learning_rate': 0.1,'hidden_size':100,'batch_size':50000,'epochs':1000,'data':'Freebase15k','optimizer':'adagrad','sampling':"uniform",'neg_rate':1,'lmbda':0.0001},
}
}
self.hyperparams_paper['fb15k'] = self.hyperparams_paper['freebase15k']
def load_hyperparameter(self, dataset_name, algorithm):
d_name = dataset_name.lower()
a_name = algorithm.lower()
if d_name in self.hyperparams_paper and a_name in self.hyperparams_paper[d_name]:
params = self.hyperparams_paper[d_name][a_name]
return params
else:
raise Exception("We have not explored this experimental setting! (%s, %s)"%(dataset_name, algorithm))
class KGETuneArgParser:
"""The class defines the arguements accepted for the bayesian optimizer.
KGETuneArgParser utilizes the ArgumentParser module and add the arguments
accepted for tuning the model.
Args:
model (str): Name of the model/algorithm to be tuned.
debug (bool): If True, tunes the model in debugging mode.
Examples:
>>> from pykg2vec.config.hyperparams import KGETuneArgParser
>>> from pykg2vec.utils.bayesian_optimizer import BaysOptimizer
>>> args = KGETuneArgParser().get_args()
>>> bays_opt = BaysOptimizer(args=args)
Todo:
* Add more arguments.
"""
def __init__(self):
self.parser = ArgumentParser(description='Knowledge Graph Embedding tunable configs.')
''' basic configs '''
self.parser.add_argument('-mn', dest='model', default='TransE', type=str, help='Model to tune')
self.parser.add_argument('-db', dest='debug', default=False, type=lambda x: (str(x).lower() == 'true'),
help='To use debug mode or not.')
self.parser.add_argument('-ds', dest='dataset_name', default='Freebase15k', type=str, help='The dataset name (choice: fb15k/wn18/wn18_rr/yago/fb15k_237/ks/nations/umls)')
self.parser.add_argument('-dsp', dest='dataset_path', default=None, type=str, help='The path to custom dataset.')
self.parser.add_argument('-mt', dest='max_number_trials', default=100, type=int, help='The maximum times of trials for bayesian optimizer.')
def get_args(self, args):
"""Gets the arguments from the console and parses it."""
return self.parser.parse_args(args)
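# Usage sketch (illustrative): parse a tuning configuration from an explicit
# argument list rather than the console.
#
# args = KGETuneArgParser().get_args(['-mn', 'TransE', '-mt', '50'])
# args.model, args.max_number_trials  # -> ('TransE', 50)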
class TransEParams:
"""This class defines the hyperameters and its ranges for tuning TranE algorithm.
TransEParams defines all the possibel values to be tuned for the algorithm. User may
change these values directly for performing the bayesian optimization of the hyper-parameters
Args:
learning_rate (list): List of floating point values.
L1_flag (list): List of boolean values.
hidden_size (list): List of integer values.
batch_size (list): List of integer values.
epochs (list): List of integer values.
margin (list): List of floating point values.
optimizer (list): List of strings defining the optimization algorithm to be used.
sampling (list): List of strings defining the sampling strategy used to generate negative examples.
"""
def __init__(self):
self.search_space = {
'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
'L1_flag': hp.choice('L1_flag', [True, False]),
'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(512),1)),
'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
'margin': hp.uniform('margin', 0.0, 10.0),
'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
'epochs': hp.choice('epochs', [500]) # fixed: always train for 500 epochs.
}
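# Sanity-check sketch (illustrative): hyperopt can draw a random configuration
# from a search space, which is handy for eyeballing the ranges defined above.
#
# from hyperopt.pyll.stochastic import sample
# print(sample(TransEParams().search_space))
# # e.g. {'L1_flag': True, 'batch_size': 256, 'epochs': 500, ...}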
class TransHParams:
"""This class defines the hyperameters and its ranges for tuning TranH algorithm.
TransHParams defines all the possibel values to be tuned for the algorithm. User may
change these values directly for performing the bayesian optimization of the hyper-parameters
Args:
learning_rate (list): List of floating point values.
L1_flag (list): List of boolean values.
hidden_size (list): List of integer values.
batch_size (list): List of integer values.
epochs (list): List of integer values.
margin (list): List of floating point values.
optimizer (list): List of strings defining the optimization algorithm to be used.
sampling (list): List of strings defining the sampling strategy used to generate negative examples.
"""
def __init__(self):
self.search_space = {
'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
'L1_flag': hp.choice('L1_flag', [True, False]),
'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(512),1)),
'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
'margin': hp.uniform('margin', 0.0, 2.0),
'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
}
# self.hidden_size = [8, 16, 32, 64, 128, 256]
# self.batch_size = [128, 256, 512]
# self.epochs = [2, 5, 10]
# self.margin = [0.4, 1.0, 2.0]
# self.optimizer = ["adam", "sgd", 'rms']
# self.sampling = ["uniform", "bern"]
class TransMParams:
"""This class defines the hyperameters and its ranges for tuning TranM algorithm.
TransMParams defines all the possibel values to be tuned for the algorithm. User may
change these values directly for performing the bayesian optimization of the hyper-parameters
Args:
learning_rate (list): List of floating point values.
L1_flag (list): List of boolean values.
hidden_size (list): List of integer values.
batch_size (list): List of integer values.
epochs (list): List of integer values.
margin (list): List of floating point values.
optimizer (list): List of strings defining the optimization algorithm to be used.
sampling (list): List of strings defining the sampling strategy used to generate negative examples.
"""
def __init__(self):
self.search_space = {
'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
'L1_flag': hp.choice('L1_flag', [True, False]),
'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(512),1)),
'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
'margin': hp.uniform('margin', 0.0, 2.0),
'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
}
# self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
# self.L1_flag = [True, False]
# self.hidden_size = [8, 16, 32, 64, 128, 256]
# self.batch_size = [128, 256, 512]
# self.epochs = [2, 5, 10]
# self.margin = [0.4, 1.0, 2.0]
# self.optimizer = ["adam", "sgd", 'rms']
# self.sampling = ["uniform", "bern"]
class RescalParams:
"""This class defines the hyperameters and its ranges for tuning Rescal algorithm.
Rescal defines all the possibel values to be tuned for the algorithm. User may
change these values directly for performing the bayesian optimization of the hyper-parameters
Args:
learning_rate (list): List of floating point values.
L1_flag (list): List of boolean values.
hidden_size (list): List of integer values.
batch_size (list): List of integer values.
epochs (list): List of integer values.
margin (list): List of floating point values.
optimizer (list): List of strings defining the optimization algorithm to be used.
sampling (list): List of strings defining the sampling strategy used to generate negative examples.
"""
def __init__(self):
self.search_space = {
'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
'L1_flag': hp.choice('L1_flag', [True, False]),
'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(512),1)),
'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
'margin': hp.uniform('margin', 0.0, 2.0),
'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
}
# self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
# self.L1_flag = [True, False]
# self.hidden_size = [8, 16, 32, 64, 128, 256]
# self.batch_size = [128, 256, 512]
# self.epochs = [2, 5, 10]
# self.margin = [0.4, 1.0, 2.0]
# self.optimizer = ["adam", "sgd", 'rms']
# self.sampling = ["uniform", "bern"]
class SMEParams:
"""This class defines the hyperameters and its ranges for tuning SME algorithm.
SME defines all the possibel values to be tuned for the algorithm. User may
change these values directly for performing the bayesian optimization of the hyper-parameters
Args:
learning_rate (list): List of floating point values.
L1_flag (list): List of boolean values.
hidden_size (list): List of integer values.
batch_size (list): List of integer values.
epochs (list): List of integer values.
margin (list): List of floating point values.
optimizer (list): List of strings defining the optimization algorithm to be used.
sampling (list): List of strings defining the sampling strategy used to generate negative examples.
bilinear (list): List of boolean values.
"""
def __init__(self):
self.search_space = {
'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
'L1_flag': hp.choice('L1_flag', [True, False]),
'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(512),1)),
'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
'margin': hp.uniform('margin', 0.0, 2.0),
'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
'bilinear': hp.choice('bilinear', [True, False]),
'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
}
# self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
# self.L1_flag = [True, False]
# self.hidden_size = [8, 16, 32, 64, 128, 256]
# self.batch_size = [128, 256, 512]
# self.epochs = [2, 5, 10]
# self.margin = [0.4, 1.0, 2.0]
# self.optimizer = ["adam", "sgd", 'rms']
# self.sampling = ["uniform", "bern"]
# self.bilinear = [True, False]
class TransDParams:
"""This class defines the hyperameters and its ranges for tuning TranD algorithm.
TransDParams defines all the possibel values to be tuned for the algorithm. User may
change these values directly for performing the bayesian optimization of the hyper-parameters
Args:
learning_rate (list): List of floating point values.
L1_flag (list): List of boolean values.
hidden_size (list): List of integer values.
batch_size (list): List of integer values.
epochs (list): List of integer values.
margin (list): List of floating point values.
optimizer (list): List of strings defining the optimization algorithm to be used.
sampling (list): List of strings defining the sampling strategy used to generate negative examples.
"""
def __init__(self):
self.search_space = {
'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
'L1_flag': hp.choice('L1_flag', [True, False]),
'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(512),1)),
'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
'margin': hp.uniform('margin', 0.0, 2.0),
'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
}
# self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
# self.L1_flag = [True, False]
# self.hidden_size = [8, 16, 32, 64, 128, 256]
# self.batch_size = [128, 256, 512]
# self.epochs = [2, 5, 10]
# self.margin = [0.4, 1.0, 2.0]
# self.optimizer = ["adam", "sgd", 'rms']
# self.sampling = ["uniform", "bern"]
class TransRParams:
"""This class defines the hyperameters and its ranges for tuning TranR algorithm.
TransRParams defines all the possibel values to be tuned for the algorithm. User may
change these values directly for performing the bayesian optimization of the hyper-parameters
Args:
learning_rate (list): List of floating point values.
L1_flag (list): List of boolean values.
ent_hidden_size (list): List of integer values.
rel_hidden_size (list): List of integer values.
batch_size (list): List of integer values.
epochs (list): List of integer values.
margin (list): List of floating point values.
optimizer (list): List of strings defining the optimization algorithm to be used.
sampling (list): List of strings defining the sampling strategy used to generate negative examples.
"""
def __init__(self):
self.search_space = {
'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
'L1_flag': hp.choice('L1_flag', [True, False]),
'ent_hidden_size': scope.int(hp.qloguniform('ent_hidden_size', np.log(8), np.log(512),1)),
'rel_hidden_size': scope.int(hp.qloguniform('rel_hidden_size', np.log(8), np.log(512),1)),
'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
'margin': hp.uniform('margin', 0.0, 2.0),
'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
}
# self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
# self.L1_flag = [True, False]
# self.ent_hidden_size = [8, 16, 32, 64, 128, 256]
# self.rel_hidden_size = [8, 16, 32, 64, 128, 256]
# self.batch_size = [128, 256, 512]
# self.epochs = [2, 5, 10]
# self.margin = [0.4, 1.0, 2.0]
# self.optimizer = ["adam", "sgd", 'rms']
# self.sampling = ["uniform", "bern"]
class NTNParams:
"""This class defines the hyperameters and its ranges for tuning NTN algorithm.
NTNParams defines all the possibel values to be tuned for the algorithm. User may
change these values directly for performing the bayesian optimization of the hyper-parameters
Args:
learning_rate (list): List of floating point values.
L1_flag (list): List of boolean values.
ent_hidden_size (list): List of integer values.
rel_hidden_size (list): List of integer values.
batch_size (list): List of integer values.
epochs (list): List of integer values.
margin (list): List of floating point values.
optimizer (list): List of strings defining the optimization algorithm to be used.
sampling (list): List of strings defining the sampling strategy used to generate negative examples.
"""
def __init__(self):
self.search_space = {
'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
'L1_flag': hp.choice('L1_flag', [True, False]),
'ent_hidden_size': scope.int(hp.qloguniform('ent_hidden_size', np.log(8), np.log(64),1)),
'rel_hidden_size': scope.int(hp.qloguniform('rel_hidden_size', np.log(8), np.log(64),1)),
'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
'margin': hp.uniform('margin', 0.0, 2.0),
'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
}
# self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
# self.L1_flag = [True, False]
# self.ent_hidden_size = [8, 16, 32]
# self.rel_hidden_size = [8, 16, 32]
# self.batch_size = [128, 256, 512]
# self.epochs = [2, 5, 10]
# self.margin = [0.4, 1.0, 2.0]
# self.optimizer = ["adam", "sgd", 'rms']
# self.sampling = ["uniform", "bern"]
class SLMParams:
"""This class defines the hyperameters and its ranges for tuning SLM algorithm.
SLMParams defines all the possibel values to be tuned for the algorithm. User may
change these values directly for performing the bayesian optimization of the hyper-parameters
Args:
learning_rate (list): List of floating point values.
L1_flag (list): List of boolean values.
ent_hidden_size (list): List of integer values.
rel_hidden_size (list): List of integer values.
batch_size (list): List of integer values.
epochs (list): List of integer values.
margin (list): List of floating point values.
optimizer (list): List of strings defining the optimization algorithm to be used.
sampling (list): List of strings defining the sampling strategy used to generate negative examples.
"""
def __init__(self):
self.search_space = {
'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
'L1_flag': hp.choice('L1_flag', [True, False]),
'ent_hidden_size': scope.int(hp.qloguniform('ent_hidden_size', np.log(8), np.log(512),1)),
'rel_hidden_size': scope.int(hp.qloguniform('rel_hidden_size', np.log(8), np.log(512),1)),
'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
'margin': hp.uniform('margin', 0.0, 2.0),
'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
}
# self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
# self.L1_flag = [True, False]
# self.ent_hidden_size = [8, 16, 32, 64, 128, 256]
# self.rel_hidden_size = [8, 16, 32, 64, 128, 256]
# self.batch_size = [128, 256, 512]
# self.epochs = [2, 5, 10]
# self.margin = [0.4, 1.0, 2.0]
# self.optimizer = ["adam", "sgd", 'rms']
# self.sampling = ["uniform", "bern"]
class HoLEParams:
"""This class defines the hyperameters and its ranges for tuning HoLE algorithm.
HoLEParams defines all the possibel values to be tuned for the algorithm. User may
change these values directly for performing the bayesian optimization of the hyper-parameters
Args:
learning_rate (list): List of floating point values.
L1_flag (list): List of boolean values.
hidden_size (list): List of integer values.
batch_size (list): List of integer values.
epochs (list): List of integer values.
margin (list): List of floating point values.
optimizer (list): List of strings defining the optimization algorithm to be used.
sampling (list): List of strings defining the sampling strategy used to generate negative examples.
"""
def __init__(self):
self.search_space = {
'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
'L1_flag': hp.choice('L1_flag', [True, False]),
'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(512),1)),
'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
'margin': hp.uniform('margin', 0.0, 2.0),
'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
}
# self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
# self.L1_flag = [True, False]
# self.hidden_size = [8, 16, 32, 64, 128, 256]
# self.batch_size = [128, 256, 512]
# self.epochs = [2, 5, 10]
# self.margin = [0.4, 1.0, 2.0]
# self.optimizer = ["adam", "sgd", 'rms']
# self.sampling = ["uniform", "bern"]
class RotatEParams:
"""This class defines the hyperameters and its ranges for tuning RotatE algorithm.
RotatEParams defines all the possibel values to be tuned for the algorithm. User may
change these values directly for performing the bayesian optimization of the hyper-parameters
Args:
learning_rate (list): List of floating point values.
L1_flag (list): List of boolean values.
hidden_size (list): List of integer values.
batch_size (list): List of integer values.
epochs (list): List of integer values.
margin (list): List of floating point values.
optimizer (list): List of strings defining the optimization algorithm to be used.
sampling (list): List of strings defining the sampling strategy used to generate negative examples.
"""
def __init__(self):
self.search_space = {
'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
'L1_flag': hp.choice('L1_flag', [True, False]),
'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(512),1)),
'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
'margin': hp.uniform('margin', 0.0, 2.0),
'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
}
# self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
# self.L1_flag = [True, False]
# self.hidden_size = [8, 16, 32, 64, 128, 256]
# self.batch_size = [128, 256, 512]
# self.epochs = [2, 5, 10]
# self.margin = [0.4, 1.0, 2.0]
# self.optimizer = ["adam", "sgd", 'rms']
# self.sampling = ["uniform", "bern"]
class ConvEParams:
"""This class defines the hyperameters and its ranges for tuning ConvE algorithm.
ConvEParams defines all the possibel values to be tuned for the algorithm. User may
change these values directly for performing the bayesian optimization of the hyper-parameters
Args:
lmbda (list): List of floating point values.
feature_map_dropout (list): List of floating point values.
input_dropout (list): List of floating point values.
hidden_dropout (list): List of floating point values.
use_bias (list): List of boolean values.
label_smoothing (list): List of floating point values.
lr_decay (list): List of floating point values.
learning_rate (list): List of floating point values.
L1_flag (list): List of boolean values.
hidden_size (list): List of integer values.
batch_size (list): List of integer values.
epochs (list): List of integer values.
margin (list): List of floating point values.
optimizer (list): List of strings defining the optimization algorithm to be used.
sampling (list): List of strings defining the sampling strategy used to generate negative examples.
"""
def __init__(self):
self.lmbda = [0.1, 0.2]
self.feature_map_dropout = [0.1, 0.2, 0.5]
self.input_dropout = [0.1, 0.2, 0.5]
self.hidden_dropout = [0.1, 0.2, 0.5]
self.use_bias = [True, False]
self.label_smoothing = [0.1, 0.2, 0.5]
self.lr_decay = [0.95, 0.9, 0.8]
self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
self.L1_flag = [True, False]
self.hidden_size = [50]
self.batch_size = [200, 400, 600]
self.epochs = [2, 5, 10]
self.margin = [0.4, 1.0, 2.0]
self.optimizer = ["adam", "sgd", 'rms']
self.sampling = ["uniform", "bern"]
class ProjE_pointwiseParams:
"""This class defines the hyperameters and its ranges for tuning ProjE_pointwise algorithm.
ProjE_pointwise defines all the possibel values to be tuned for the algorithm. User may
change these values directly for performing the bayesian optimization of the hyper-parameters
Args:
lmbda (list): List of floating point values.
feature_map_dropout (list): List of floating point values.
input_dropout (list): List of floating point values.
hidden_dropout (list): List of floating point values.
use_bias (list): List of boolean values.
label_smoothing (list): List of floating point values.
lr_decay (list): List of floating point values.
learning_rate (list): List of floating point values.
L1_flag (list): List of boolean values.
hidden_size (list): List of integer values.
batch_size (list): List of integer values.
epochs (list): List of integer values.
margin (list): List of floating point values.
optimizer (list): List of strings defining the optimization algorithm to be used.
sampling (list): List of strings defining the sampling strategy used to generate negative examples.
"""
def __init__(self):
self.lmbda = [0.1, 0.2]
self.feature_map_dropout = [0.1, 0.2, 0.5]
self.input_dropout = [0.1, 0.2, 0.5]
self.hidden_dropout = [0.1, 0.2, 0.5]
self.use_bias = [True, False]
self.label_smoothing = [0.1, 0.2, 0.5]
self.lr_decay = [0.95, 0.9, 0.8]
self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
self.L1_flag = [True, False]
self.hidden_size = [8, 16]
self.batch_size = [256, 512]
self.epochs = [2, 5, 10]
self.margin = [0.4, 1.0, 2.0]
self.optimizer = ["adam", "sgd", 'rms']
self.sampling = ["uniform", "bern"]
class KG2EParams:
"""This class defines the hyperameters and its ranges for tuning KG2E algorithm.
KG2E defines all the possibel values to be tuned for the algorithm. User may
change these values directly for performing the bayesian optimization of the hyper-parameters
Args:
learning_rate (list): List of floating point values.
L1_flag (list): List of boolean values.
hidden_size (list): List of integer values.
batch_size (list): List of integer values.
epochs (list): List of integer values.
margin (list): List of floating point values.
optimizer (list): List of strings defining the optimization algorithm to be used.
sampling (list): List of strings defining the sampling strategy used to generate negative examples.
bilinear (list): List of boolean values.
distance_measure (list): Choice between "kl_divergence" and "expected_likelihood".
cmax (list): List of floating point values.
cmin (list): List of floating point values.
"""
def __init__(self):
# self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
# self.L1_flag = [True, False]
# self.hidden_size = [8, 16, 32, 64, 128, 256]
# self.batch_size = [128, 256, 512]
# self.epochs = [2, 5, 10]
# self.margin = [0.4, 1.0, 2.0]
# self.optimizer = ["adam", "sgd", 'rms']
# self.distance_measure = ["kl_divergence", "expected_likelihood"]
# self.cmax = [0.05, 0.1, 0.2]
# self.cmin = [5.00, 3.00, 2.00, 1.00]
self.search_space = {
'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
'L1_flag': hp.choice('L1_flag', [True, False]),
'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(512),1)),
'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
'lmbda': hp.loguniform('lmbda', np.log(0.00001), np.log(0.001)),
'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
'margin': hp.uniform('margin', 0.5, 8.0),
'distance_measure': hp.choice('distance_measure', ["kl_divergence", "expected_likelihood"]),
'cmax': hp.loguniform('cmax', np.log(0.05), np.log(0.2)),
'cmin': hp.loguniform('cmin', np.log(1), np.log(5)),
'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
}
class ComplexParams:
"""This class defines the hyperameters and its ranges for tuning Complex algorithm.
Complex defines all the possibel values to be tuned for the algorithm. User may
change these values directly for performing the bayesian optimization of the hyper-parameters
Args:
lmbda (list): List of floating point values.
feature_map_dropout (list): List of floating point values.
input_dropout (list): List of floating point values.
hidden_dropout (list): List of floating point values.
use_bias (list): List of boolean values.
label_smoothing (list): List of floating point values.
lr_decay (list): List of floating point values.
learning_rate (list): List of floating point values.
L1_flag (list): List of boolean values.
hidden_size (list): List of integer values.
batch_size (list): List of integer values.
epochs (list): List of integer values.
margin (list): List of floating point values.
optimizer (list): List of strings defining the optimization algorithm to be used.
sampling (list): List of strings defining the sampling strategy used to generate negative examples.
"""
def __init__(self):
self.search_space = {
'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(512),1)),
'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
'lmbda': hp.loguniform('lmbda', np.log(0.00001), np.log(0.001)),
'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
}
# self.lmbda = [0.1, 0.2]
# self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
# self.hidden_size = [8, 16, 32, 64, 128, 256]
# self.batch_size = [128, 256, 512]
# self.epochs = [2, 5, 10]
# self.optimizer = ["adam", "sgd", 'rms']
# self.sampling = ["uniform", "bern"]
class DistMultParams:
"""This class defines the hyperameters and its ranges for tuning DistMult algorithm.
DistMultParams defines all the possibel values to be tuned for the algorithm. User may
change these values directly for performing the bayesian optimization of the hyper-parameters
Args:
lmbda (list): List of floating point values.
feature_map_dropout (list): List of floating point values.
input_dropout (list): List of floating point values.
hidden_dropout (list): List of floating point values.
use_bias (list): List of boolean values.
label_smoothing (list): List of floating point values.
lr_decay (list): List of floating point values.
learning_rate (list): List of floating point values.
L1_flag (list): List of boolean values.
hidden_size (list): List of integer values.
batch_size (list): List of integer values.
epochs (list): List of integer values.
margin (list): List of floating point values.
optimizer (list): List of strings defining the optimization algorithm to be used.
sampling (list): List of strings defining the sampling strategy used to generate negative examples.
"""
def __init__(self):
self.search_space = {
'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(512),1)),
'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
'lmbda': hp.loguniform('lmbda', np.log(0.00001), np.log(0.001)),
'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
}
# self.lmbda = [0.1, 0.2]
# self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
# self.hidden_size = [8, 16, 32, 64, 128, 256]
# self.batch_size = [128, 256, 512]
# self.epochs = [2, 5, 10]
# self.optimizer = ["adam", "sgd", 'rms']
# self.sampling = ["uniform", "bern"]
class TuckERParams:
"""This class defines the hyperameters and its ranges for tuning TuckER algorithm.
TuckERParams defines all the possibel values to be tuned for the algorithm. User may
change these values directly for performing the bayesian optimization of the hyper-parameters
Args:
lmbda (list): List of floating point values.
feature_map_dropout (list): List of floating point values.
input_dropout (list): List of floating point values.
hidden_dropout (list): List of boolean values.
use_bias (list): List of boolean values.
label_smoothing (list): List of floating point values.
lr_decay (list): List of floating point values.
learning_rate (list): List of floating point values.
L1_flag (list): List of boolean values.
hidden_size (list): List of integer values.
batch_size (list): List of integer values.
epochs (list): List of integer values.
margin (list): List of floating point values.
optimizer (list): List of strings defining the optimization algorithm to be used.
sampling (list): List of strings defining the sampling strategy used to generate negative examples.
"""
def __init__(self):
self.lmbda = [0.1, 0.2]
self.feature_map_dropout = [0.1, 0.2, 0.5]
self.input_dropout = [0.1, 0.2, 0.5]
self.hidden_dropout = [0.1, 0.2, 0.5]
self.use_bias = [True, False]
self.label_smoothing = [0.1, 0.2, 0.5]
self.lr_decay = [0.95, 0.9, 0.8]
self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
self.L1_flag = [True, False]
self.hidden_size = [8, 16, 32, 64, 128, 256]
self.batch_size = [128, 256, 512]
self.epochs = [2, 5, 10]
self.margin = [0.4, 1.0, 2.0]
self.optimizer = ["adam", "sgd", 'rms']
self.sampling = ["uniform", "bern"]
class TransGParams:
"""This class defines the hyperameters and its ranges for tuning TransG algorithm.
TransGParams defines all the possibel values to be tuned for the algorithm. User may
change these values directly for performing the bayesian optimization of the hyper-parameters
Args:
learning_rate (list): List of floating point values.
L1_flag (list): List of boolean values.
hidden_size (list): List of integer values.
batch_size (list): List of integer values.
epochs (list): List of integer values.
margin (list): List of floating point values.
optimizer (list): List of strings defining the optimization algorithm to be used.
sampling (list): List of strings defining the sampling strategy used to generate negative examples.
training_threshold (list): List of floating point values.
ncluster (list): List of integer values.
CRP_factor (list): List of floating point values.
weight_norm (list): List of boolean values.
"""
def __init__(self):
self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
self.L1_flag = [True, False]
self.hidden_size = [8, 16, 32, 64, 128, 256]
self.batch_size = [128, 256, 512]
self.epochs = [2, 5, 10]
self.margin = [0.4, 1.0, 2.0]
self.optimizer = ["adam", "sgd", 'rms']
self.sampling = ["uniform", "bern"]
self.training_threshold = [1.0, 2.0, 3.0]
self.ncluster = [3, 4, 5, 6, 7]
self.CRP_factor = [0.01, 0.05, 0.1]
self.weight_norm = [True, False]
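# End-to-end sketch (illustrative; assumes an `objective` function defined
# elsewhere that trains a model on a sampled configuration and returns a loss):
# the search spaces above plug straight into hyperopt's fmin.
#
# from hyperopt import fmin, tpe, Trials
# trials = Trials()
# best = fmin(fn=objective,                      # hypothetical objective
#             space=TransEParams().search_space,
#             algo=tpe.suggest,
#             max_evals=100,
#             trials=trials)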
[row metadata for the preceding file: avg_line_length 48.74 | max_line_length 242 | alphanum_fraction 0.635 | per-file quality signals omitted]
[next file: hexsha 83111ec3b5aa23a04b0ca9f68ed7f30102126b7c | 45 bytes | py | Python | bills/utils.py | xNovax/RoomScout | head 287240a9d13f2b8f6ce9abdc95cf611671970fc3 | ["MIT"] | 24 stars (2020-02-01..2020-10-24) | 16 issues (2020-02-01..2020-08-13) | 6 forks via aaronspindler/RoomScout (2020-02-01..2021-03-05)]
def notify_members_of_bill_added():
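# NOTE: empty stub in the source; the notification logic is presumably pending or implemented elsewhere.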
pass
[row metadata for the preceding file: avg_line_length 15 | max_line_length 35 | alphanum_fraction 0.778 | per-file quality signals omitted]
[next file: hexsha 8366db50ac4b1194c3df1fdec12778c65680811a | 5,353 bytes | py | Python | task-7-rest-films/tests/test_views.py | luxlunaris/fintech-tasks | head be865e358e4ca76f401aae06098a2825e904c3d8 | ["MIT"] | star/issue/fork counts null]
import pytest
import json
ns = "http://127.0.0.1:5000/movieratings/"
def test_users(client):
assert "No user with such id" in client.get(ns + "users/1").get_data(as_text=True)
assert b'{"users":[]}\n' == client.get(ns + "users").get_data()
assert (
"successfully"
in client.post(
ns + "users",
data=json.dumps(dict(username="string", email="string")),
content_type="application/json",
).get_data(as_text=True)
)
assert '"email":"string"' in client.get(ns + "users").get_data(as_text=True)
assert '"email":"string"' in client.get(ns + "users/1").get_data(as_text=True)
assert (
"successfully"
in client.put(
ns + "users/1",
data=json.dumps(dict(username="string1", email="string1")),
content_type="application/json",
).get_data(as_text=True)
)
assert '"email":"string1"' in client.get(ns + "users/1").get_data(as_text=True)
assert (
"No user with such id"
in client.put(
ns + "users/21",
data=json.dumps(dict(username="string1", email="string1")),
content_type="application/json",
).get_data(as_text=True)
)
assert "successfully" in client.delete(ns + "users/1").get_data(as_text=True)
assert b'{"users":[]}\n' == client.get(ns + "users").get_data()
def test_movies(client):
assert "No movie with such id" in client.get(ns + "movies/1").get_data(as_text=True)
assert b'{"movies":[]}\n' == client.get(ns + "movies").get_data()
assert (
"successfully"
in client.post(
ns + "movies",
data=json.dumps(dict(name="string", country="string", year=1000)),
content_type="application/json",
).get_data(as_text=True)
)
assert '"country":"string"' in client.get(ns + "movies").get_data(as_text=True)
assert '"country":"string"' in client.get(ns + "movies/1").get_data(as_text=True)
assert (
"successfully"
in client.put(
ns + "movies/1",
data=json.dumps(dict(name="string1", country="string1", year=1000)),
content_type="application/json",
).get_data(as_text=True)
)
assert '"name":"string1"' in client.get(ns + "movies/1").get_data(as_text=True)
assert (
"No movie with such id"
in client.put(
ns + "movies/21",
data=json.dumps(dict(name="string1", country="string1", year=1000)),
content_type="application/json",
).get_data(as_text=True)
)
assert "successfully" in client.delete(ns + "movies/1").get_data(as_text=True)
assert b'{"movies":[]}\n' == client.get(ns + "movies").get_data()
def test_ratings(client):
assert "No rating with such ids" in client.get(ns + "ratings/1/1").get_data(
as_text=True
)
assert "No such movie" in client.get(ns + "ratings/1").get_data(as_text=True)
assert b'{"ratings":[]}\n' == client.get(ns + "ratings").get_data()
assert (
"successfully"
in client.post(
ns + "users",
data=json.dumps(dict(username="string", email="string")),
content_type="application/json",
).get_data(as_text=True)
)
assert (
"successfully"
in client.post(
ns + "movies",
data=json.dumps(dict(name="string", country="string", year=1000)),
content_type="application/json",
).get_data(as_text=True)
)
assert b'{"ratings":[]}\n' in client.get(ns + "ratings/1").get_data()
assert (
"successfully"
in client.post(
ns + "ratings",
data=json.dumps(dict(user_id=1, movie_id=1, value=10)),
content_type="application/json",
).get_data(as_text=True)
)
assert '"user_id":1' in client.get(ns + "ratings").get_data(as_text=True)
assert (
"successfully"
in client.put(
ns + "ratings/1/1",
data=json.dumps(dict(user_id=1, movie_id=1, value=9)),
content_type="application/json",
).get_data(as_text=True)
)
assert '"value":9' in client.get(ns + "ratings/1/1").get_data(as_text=True)
assert '"value":9' in client.get(ns + "ratings").get_data(as_text=True)
assert (
"No rating with such ids"
in client.put(
ns + "ratings/12/12",
data=json.dumps(dict(user_id=1, movie_id=1, value=9)),
content_type="application/json",
).get_data(as_text=True)
)
assert (
"Wrong rating"
in client.post(
ns + "ratings",
data=json.dumps(dict(user_id=1, movie_id=1, value=12)),
content_type="application/json",
).get_data(as_text=True)
)
assert (
"Wrong rating"
in client.put(
ns + "ratings/1/1",
data=json.dumps(dict(user_id=1, movie_id=1, value=12)),
content_type="application/json",
).get_data(as_text=True)
)
assert "No rating with such ids" in client.get(ns + "ratings/10/10").get_data(
as_text=True
)
assert "successfully" in client.delete(ns + "ratings/1/1").get_data(as_text=True)
assert b'{"ratings":[]}\n' == client.get(ns + "ratings").get_data()
| 29.738889
| 88
| 0.571642
| 709
| 5,353
| 4.183357
| 0.078984
| 0.087323
| 0.091032
| 0.13149
| 0.95381
| 0.935604
| 0.935604
| 0.932232
| 0.877613
| 0.874579
| 0
| 0.023114
| 0.264525
| 5,353
| 179
| 89
| 29.905028
| 0.730251
| 0
| 0
| 0.652174
| 0
| 0
| 0.222492
| 0
| 0
| 0
| 0
| 0
| 0.268116
| 1
| 0.021739
| false
| 0
| 0.014493
| 0
| 0.036232
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
365dbae8dce158d422f0d16443bb209b2bc85aa2
| 33,886
|
py
|
Python
|
1_id_helice_simple_nosympy_corrected.py
|
altlnt/id_modele_reel
|
f67fdc66a207108b1fb6af0a7197bf590997cfbd
|
[
"MIT"
] | null | null | null |
1_id_helice_simple_nosympy_corrected.py
|
altlnt/id_modele_reel
|
f67fdc66a207108b1fb6af0a7197bf590997cfbd
|
[
"MIT"
] | null | null | null |
1_id_helice_simple_nosympy_corrected.py
|
altlnt/id_modele_reel
|
f67fdc66a207108b1fb6af0a7197bf590997cfbd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 7 15:59:50 2021
@author: alex
"""
import numpy as np
mass=369 # battery, grams
mass+=1640-114 # airframe minus cardboard packaging, grams
mass/=1e3 # grams to kg
Area=np.pi*(11.0e-02)**2
r0=11e-02
rho0=1.204
kv_motor=800.0
pwmmin=1075.0
pwmmax=1950.0
U_batt=16.8
b10=14.44
# %% ####### IMPORT DATA
print("LOADING DATA...")
import pandas as pd
log_path="./logs/copter/vol12/log_real_processed.csv"
raw_data=pd.read_csv(log_path)
print("PROCESSING DATA...")
prep_data=raw_data.drop(columns=[i for i in raw_data.keys() if (("forces" in i ) or ('pos' in i) or ("joy" in i)) ])
prep_data=prep_data.drop(columns=[i for i in raw_data.keys() if (("level" in i ) or ('Unnamed' in i) or ("index" in i)) ])
# print(prep_data)
if "vol12" in log_path:
tmin,tmax=(-1,1e10)
elif "vol1" in log_path:
tmin,tmax=(41,265)
elif "vol2" in log_path:
tmin,tmax=(10,140)
prep_data=prep_data[prep_data['t']>tmin]
prep_data=prep_data[prep_data['t']<tmax]
prep_data=prep_data.reset_index()
for i in range(3):
prep_data['speed_pred[%i]'%(i)]=np.r_[prep_data['speed[%i]'%(i)].values[1:len(prep_data)],0]
prep_data['dt']=np.r_[prep_data['t'].values[1:]-prep_data['t'].values[:-1],0]
prep_data['t']-=prep_data['t'][0]
prep_data=prep_data.drop(index=[0,len(prep_data)-1])
for i in range(6):
prep_data['omega_c[%i]'%(i+1)]=(prep_data['PWM_motor[%i]'%(i+1)]-pwmmin)/(pwmmax-pwmmin)*U_batt*kv_motor*2*np.pi/60 # normalized PWM -> volts -> rpm (via kv) -> rad/s
# %% ####### Identify Thrust
def compute_single_motor_thrust_MT(c1,vak,omega,c2=0,vanilla_test=False):
eta=vak/2-r0*omega*c2/4
eta=eta+0.5*np.sqrt((vak+0.5*r0*omega*c2)**2+2*c1*r0**2*omega**2)
T=2*rho0*Area*eta*(eta-vak)
if vanilla_test:
T=c1*omega**2
return T
def compute_single_motor_thrust_BET(c1,vak,omega,c2=0,vanilla_test=False):
eta=vak/2-r0*omega*c2/4
eta=eta+0.5*np.sqrt((vak+0.5*r0*omega*c2)**2+2*c1*r0**2*omega**2)
T=rho0*Area*r0*omega*(c1*r0*omega-c2*(eta-vak))
if vanilla_test:
T=c1*omega**2
return T
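# Sketch of where the eta expression above comes from (my reconstruction, not
# the original author's note): momentum theory gives T = 2*rho0*Area*eta*(eta-vak),
# while the blade-element model gives T = rho0*Area*r0*omega*(c1*r0*omega - c2*(eta-vak)).
# Equating the two yields a quadratic in eta whose positive root is
# eta = vak/2 - r0*omega*c2/4 + 0.5*sqrt((vak + 0.5*r0*omega*c2)**2 + 2*c1*(r0*omega)**2),
# exactly the expression used in both functions. Quick numeric self-check with
# hypothetical sample values:
_vak,_omega,_c1,_c2=1.0,900.0,6e-6,1e-3
_eta=_vak/2-r0*_omega*_c2/4+0.5*np.sqrt((_vak+0.5*r0*_omega*_c2)**2+2*_c1*r0**2*_omega**2)
assert abs(2*rho0*Area*_eta*(_eta-_vak)-rho0*Area*r0*_omega*(_c1*r0*_omega-_c2*(_eta-_vak)))<1e-9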
def compute_acc_k(c1,c2=0,df=prep_data,vanilla=False,model="MT"):
vak=df["speed_body[2]"]
gamma=df["gamma[2]"]
if model=="MT":
T_sum=sum([compute_single_motor_thrust_MT(c1,vak,df['omega_c[%i]'%(i+1)],c2,vanilla_test=vanilla) for i in range(6)])
elif model=="BET":
T_sum=sum([compute_single_motor_thrust_BET(c1,vak,df['omega_c[%i]'%(i+1)],c2,vanilla_test=vanilla) for i in range(6)])
else:
raise ValueError("model must be 'MT' or 'BET'")
acc_k=-T_sum/mass+gamma
return acc_k
from scipy.optimize import minimize
import matplotlib.pyplot as plt
def cost_vanilla(X):
c1=X
Y=compute_acc_k(c1,vanilla=True)
c=np.mean((Y-prep_data['acc_body_grad[2]'])**2,axis=0)
print("c1 :%f ,c2: VANILLA ,cost :%f"%(c1,c))
return c
X0_vanilla=np.array([6e-6])
sol_vanilla=minimize(cost_vanilla,X0_vanilla,method="SLSQP")
c1vanilla=sol_vanilla['x']
print("\n \n")
def cost(X):
c1,c2=X
Y=compute_acc_k(c1,c2=c2)
c=np.mean((Y-prep_data['acc_body_grad[2]'])**2,axis=0)
print("c1 :%f ,c2: %f,cost :%f"%(c1,c2,c))
return c
X0=np.zeros(2)
sol_custom=minimize(cost,X0,method="SLSQP")
c1sol,c2sol=sol_custom['x']
# %%% Comparison
f=plt.figure()
f.suptitle("No drag")
ax=f.add_subplot(2,1,1)
ax.plot(prep_data["t"],prep_data['acc_body_grad[2]'],color="black",label="log")
ax.plot(prep_data["t"],compute_acc_k(c1vanilla,vanilla=True),color="red",label="pred",alpha=0.5)
ax.plot(prep_data["t"],compute_acc_k(c1sol,c2=c2sol,model="MT"),color="blue",label="optimized, MT",alpha=0.5)
ax.plot(prep_data["t"],compute_acc_k(c1sol,c2=c2sol,model="BET"),color="green",label="optimized, MT",alpha=0.5)
ax.legend(),ax.grid()
print("\nPerformances: ")
print("RMS error on acc pred is : ")
s="%f for vanilla, %f for custom model"%(sol_vanilla['fun'],sol_custom['fun'])
print(s)
ax.set_title(s)
print('\n\nConsistent with the ct2=ct1*b1-2/b1 formula?\n')
print('from the formula: ')
print("ct2=%f"%(c1sol*b10-2/b10))
print("from the identification: ")
print("ct2=%f"%(c2sol))
print('\n\nConsistent with T_MT=T_BET?\n')
yrms=np.sqrt(np.mean((compute_acc_k(c1sol,c2=c2sol,model="MT")-compute_acc_k(c1sol,c2=c2sol,model="BET"))**2))
print("output difference rms : %s m/s"%(yrms))
# %% ####### Identify Thrust(with dk)
def compute_single_motor_thrust_MT_wdrag(c1,vak,omega,c2=0,vanilla_test=False):
eta=vak/2-r0*omega*c2/4
eta=eta+0.5*np.sqrt((vak+0.5*r0*omega*c2)**2+2*c1*r0**2*omega**2)
T=2*rho0*Area*eta*(eta-vak)
if vanilla_test:
T=c1*omega**2
return T
def compute_single_motor_thrust_BET_wdrag(c1,vak,omega,c2=0,vanilla_test=False):
eta=vak/2-r0*omega*c2/4
eta=eta+0.5*np.sqrt((vak+0.5*r0*omega*c2)**2+2*c1*r0**2*omega**2)
T=rho0*Area*r0*omega*(c1*r0*omega-c2*(eta-vak))
if vanilla_test:
T=c1*omega**2
return T
def compute_acc_k_wdrag(c1,dk,c2=0,df=prep_data,vanilla=False,model="MT"):
vak=df["speed_body[2]"]
gamma=df["gamma[2]"]
if model=="MT":
T_sum=sum([compute_single_motor_thrust_MT(c1,vak,df['omega_c[%i]'%(i+1)],c2,vanilla_test=vanilla) for i in range(6)])
elif model=="BET":
T_sum=sum([compute_single_motor_thrust_BET(c1,vak,df['omega_c[%i]'%(i+1)],c2,vanilla_test=vanilla) for i in range(6)])
else:
return print("FIX MODEL")
acc_k=-T_sum/mass+gamma-rho0*Area*dk*np.abs(vak)*vak
return acc_k
from scipy.optimize import minimize
import matplotlib.pyplot as plt
def cost_vanilla_wdrag(X):
c1,dk=X
Y=compute_acc_k_wdrag(c1,dk,vanilla=True)
c=np.mean((Y-prep_data['acc_body_grad[2]'])**2,axis=0)
print("c1 :%f ,c2: VANILLA , dk: %f ,cost :%f"%(c1,dk,c))
return c
X0_vanilla=np.array([6e-6,0])
sol_vanilla_drag=minimize(cost_vanilla_wdrag,X0_vanilla,method="SLSQP")
c1vanilla,dkvanilla=sol_vanilla_drag['x']
def cost_wdrag(X):
c1,c2,dk=X
Y=compute_acc_k_wdrag(c1,dk,c2=c2)
c=np.mean((Y-prep_data['acc_body_grad[2]'])**2,axis=0)
print("c1 :%f ,c2: %f, dk: %f , cost :%f"%(c1,c2,dk,c))
return c
X0=np.zeros(3)
sol_custom_drag=minimize(cost_wdrag,X0,method="SLSQP")
c1sol,c2sol,dksol=sol_custom_drag['x']
# %%% Comparison
f.suptitle("Thrust no drag / With drag")
ax=f.add_subplot(2,1,2)
ax.plot(prep_data["t"],prep_data['acc_body_grad[2]'],color="black",label="log")
ax.plot(prep_data["t"],compute_acc_k_wdrag(c1vanilla,dkvanilla,vanilla=True),color="red",label="pred",alpha=0.5)
ax.plot(prep_data["t"],compute_acc_k_wdrag(c1sol,dksol,c2=c2sol,model="MT"),color="blue",label="optimized, MT",alpha=0.5)
ax.plot(prep_data["t"],compute_acc_k_wdrag(c1sol,dksol,c2=c2sol,model="BET"),color="green",label="optimized, MT",alpha=0.5)
ax.legend()
print("\nPerformances: ")
print("RMS error on acc pred is : ")
s="%f for vanilla, %f for custom model"%(sol_vanilla_drag['fun'],sol_custom_drag['fun'])
ax.set_title(s)
print(s)
print('\n\nConsistent with the ct2=ct1*b1-2/b1 formula?\n')
print('from the formula: ')
print("ct2=%f"%(c1sol*b10-2/b10))
print("from the identification: ")
print("ct2=%f"%(c2sol))
print('\n\nConsistent with T_MT=T_BET?\n')
yrms=np.sqrt(np.mean((compute_acc_k_wdrag(c1sol,dksol,c2=c2sol,model="MT")-compute_acc_k_wdrag(c1sol,dksol,c2=c2sol,model="BET"))**2))
print("output difference rms : %s m/s"%(yrms))
# %%% Comparison
f.suptitle("Vanilla / Augmented with drag")
ax=f.add_subplot(2,1,2)
ax.plot(prep_data["t"],prep_data['acc_body_grad[2]'],color="black",label="log")
ax.plot(prep_data["t"],compute_acc_k_wdrag(c1vanilla,dkvanilla,vanilla=True),color="darkred",label="pred",alpha=0.5)
ax.plot(prep_data["t"],compute_acc_k_wdrag(c1sol,dksol,c2=c2sol,model="MT"),color="darkblue",label="optimized, MT",alpha=0.5)
ax.plot(prep_data["t"],compute_acc_k_wdrag(c1sol,dksol,c2=c2sol,model="BET"),color="darkgreen",label="optimized, MT",alpha=0.5)
ax.legend(),ax.grid()
print("\nPerformances: ")
print("RMS error on acc pred is : ")
print("%f for vanilla, %f for custom model"%(sol_vanilla_drag['fun'],sol_custom_drag['fun']))
print('\n\nConsistent with the ct2=ct1*b1-2/b1 formula?\n')
print('from the formula: ')
print("ct2=%f"%(c1sol*b10-2/b10))
print("from the identification: ")
print("ct2=%f"%(c2sol))
print('\n\nConsistent with T_MT=T_BET?\n')
yrms=np.sqrt(np.mean((compute_acc_k_wdrag(c1sol,dksol,c2=c2sol,model="MT")-compute_acc_k_wdrag(c1sol,dksol,c2=c2sol,model="BET"))**2))
print("output difference rms : %s m/s"%(yrms))
# %%% ai
# %% ai
# %%% ####### Identify pure drag
def compute_ai_od(di,df=prep_data):
vak=df["speed_body[0]"]
Fa=-rho0*Area*di*np.abs(vak)*vak
gamma=df["gamma[0]"]
return Fa+gamma
def cost_ai_onlydrag(X):
di=X
Y=compute_ai_od(di)
c=np.mean((Y-prep_data['acc_body_grad[0]'])**2,axis=0)
print("di :%f , cost :%f"%(di,c))
return c
X0_di_onlydrag=np.array([0])
sol_ai_od=minimize(cost_ai_onlydrag,X0_di_onlydrag,method="SLSQP")
di_only_=sol_ai_od['x']
print("\n \n")
# %%% ####### Identify H-force nodrag
def compute_eta(vak,omega,c1=c1sol,c2=c2sol):
eta=vak/2-r0*omega*c2/4
eta=eta+0.5*np.sqrt((vak+0.5*r0*omega*c2)**2+2*c1*r0**2*omega**2)
return eta
def compute_H(vak,omega,ch1,ch2):
eta=compute_eta(vak,omega)
H=rho0*Area*(ch1*r0*omega-ch2*(eta-vak))
return H
def compute_ai_H_only(ch1,ch2,df=prep_data):
vai=df["speed_body[0]"]
vak=df["speed_body[2]"]
gamma=df["gamma[0]"]
H=sum([compute_H(vak,df['omega_c[%i]'%(i+1)],ch1,ch2) for i in range(6)])
H_vect=-vai*H
return H_vect+gamma
def cost_ai_h_only(X):
ch1,ch2=X
Y=compute_ai_H_only(ch1,ch2)
c=np.mean((Y-prep_data['acc_body_grad[0]'])**2,axis=0)
print("ch1 :%f , ch2 :%f , cost :%f"%(ch1,ch2,c))
return c
X0_ai_onlyh=np.array([0,0])
sol_ai_oh=minimize(cost_ai_h_only,X0_ai_onlyh,method="SLSQP")
ch1_ai_only_,ch2_ai_only_=sol_ai_oh['x']
print("\n \n")
# %%% ####### Identify H-force wdrag
def compute_eta(vak,omega,c1=c1sol,c2=c2sol):
eta=vak/2-r0*omega*c2/4
eta=eta+0.5*np.sqrt((vak+0.5*r0*omega*c2)**2+2*c1*r0**2*omega**2)
return eta
def compute_H(vak,omega,ch1,ch2):
eta=compute_eta(vak,omega)
H=rho0*Area*(ch1*r0*omega-ch2*(eta-vak))
return H
def compute_ai_H_wdrag(ch1,ch2,di,df=prep_data):
vai=df["speed_body[0]"]
vak=df["speed_body[2]"]
gamma=df["gamma[0]"]
H=sum([compute_H(vak,df['omega_c[%i]'%(i+1)],ch1,ch2) for i in range(6)])
H_vect=-vai*H
Fa=-rho0*Area*di*np.abs(vai)*vai
return H_vect+gamma+Fa
def cost_ai_h_wdrag(X):
ch1,ch2,di=X
Y=compute_ai_H_wdrag(ch1,ch2,di)
c=np.mean((Y-prep_data['acc_body_grad[0]'])**2,axis=0)
print("ch1 :%f , ch2 :%f , di :%f , cost :%f"%(ch1,ch2,di,c))
return c
X0_ai_hwd=np.array([0,0,0])
sol_ai_hwd=minimize(cost_ai_h_wdrag,X0_ai_hwd,method="SLSQP")
ch1_ai_wd_,ch2_ai_wd_,di_wd_=sol_ai_hwd['x']
# %%% ####### Comparison
f=plt.figure()
f.suptitle("Ai drag vs H force fit")
ax=f.add_subplot(1,1,1)
ax.plot(prep_data["t"],prep_data['acc_body_grad[0]'],color="black",label="log")
ax.plot(prep_data["t"],compute_ai_od(di_only_),color="darkred",label="pure drag",alpha=0.5)
ax.plot(prep_data["t"],compute_ai_H_only(ch1_ai_only_,ch2_ai_only_),color="darkblue",label="pure h force",alpha=0.5)
ax.plot(prep_data["t"],compute_ai_H_wdrag(ch1_ai_only_,ch2_ai_only_,di_wd_),color="darkgreen",label="drag + h force",alpha=0.5)
ax.legend(),ax.grid()
print("\nPerformances: ")
print("RMS error on acc pred is : ")
s="%f for vanilla, %f for custom model, %f for full model"%(sol_ai_od['fun'],sol_ai_oh['fun'],sol_ai_hwd['fun'])
ax.set_title(s)
print(s)
# %% aj
# %%% ####### Identify pure drag
def compute_aj_od(dj,df=prep_data):
vak=df["speed_body[1]"]
Fa=-rho0*Area*dj*np.abs(vak)*vak
gamma=df["gamma[1]"]
return Fa+gamma
def cost_aj_onlydrag(X):
dj=X
Y=compute_aj_od(dj)
c=np.mean((Y-prep_data['acc_body_grad[1]'])**2,axis=0)
print("dj :%f , cost :%f"%(dj,c))
return c
X0_dj_onlydrag=np.array([1])
sol_aj_od=minimize(cost_aj_onlydrag,X0_dj_onlydrag,method="SLSQP")
dj_only_=sol_aj_od['x']
print("\n \n")
# %%% ####### Identify H-force nodrag
def compute_eta(vak,omega,c1=c1sol,c2=c2sol):
eta=vak/2-r0*omega*c2/4
eta=eta+0.5*np.sqrt((vak+0.5*r0*omega*c2)**2+2*c1*r0**2*omega**2)
return eta
def compute_H(vak,omega,ch1,ch2):
eta=compute_eta(vak,omega)
H=rho0*Area*(ch1*r0*omega-ch2*(eta-vak))
return H
def compute_aj_H_only(ch1,ch2,df=prep_data):
vak=df["speed_body[2]"]
vaj=df["speed_body[1]"]
gamma=df["gamma[1]"]
H=sum([compute_H(vak,df['omega_c[%i]'%(i+1)],ch1,ch2) for i in range(6)])
H_vect=-vaj*H
return H_vect+gamma
def cost_aj_h_only(X):
ch1,ch2=X
Y=compute_aj_H_only(ch1,ch2)
c=np.mean((Y-prep_data['acc_body_grad[1]'])**2,axis=0)
print("ch1 :%f , ch2 :%f , cost :%f"%(ch1,ch2,c))
return c
X0_aj_onlyh=np.array([0,0])
sol_aj_oh=minimize(cost_aj_h_only,X0_aj_onlyh,method="SLSQP")
ch1_aj_only_,ch2_aj_only_=sol_aj_oh['x']
print("\n \n")
# %%% ####### Identify H-force wdrag
def compute_eta(vak,omega,c1=c1sol,c2=c2sol):
eta=vak/2-r0*omega*c2/4
eta=eta+0.5*np.sqrt((vak+0.5*r0*omega*c2)**2+2*c1*r0**2*omega**2)
return eta
def compute_H(vak,omega,ch1,ch2):
eta=compute_eta(vak,omega)
H=rho0*Area*(ch1*r0*omega-ch2*(eta-vak))
return H
def compute_aj_H_wdrag(ch1,ch2,dj,df=prep_data):
vak=df["speed_body[2]"]
vaj=df["speed_body[1]"]
gamma=df["gamma[1]"]
H=sum([compute_H(vak,df['omega_c[%i]'%(i+1)],ch1,ch2) for i in range(6)])
H_vect=-vaj*H
Fa=-rho0*Area*dj*np.abs(vaj)*vaj
return H_vect+gamma+Fa
def cost_aj_h_wdrag(X):
ch1,ch2,dj=X
Y=compute_aj_H_wdrag(ch1,ch2,dj)
c=np.mean((Y-prep_data['acc_body_grad[1]'])**2,axis=0)
print("ch1 :%f , ch2 :%f , dj :%f , cost :%f"%(ch1,ch2,dj,c))
return c
X0_aj_hwd=np.array([0,0,0])
sol_aj_hwd=minimize(cost_aj_h_wdrag,X0_aj_hwd,method="SLSQP")
ch1_aj_wd_,ch2_aj_wd_,dj_wd_=sol_aj_hwd['x']
# %%% ####### Comparison
f=plt.figure()
f.suptitle("Aj drag vs H force fit")
ax=f.add_subplot(1,1,1)
ax.plot(prep_data["t"],prep_data['acc_body_grad[1]'],color="black",label="log")
ax.plot(prep_data["t"],compute_aj_od(dj_only_),color="darkred",label="pure drag",alpha=0.5)
ax.plot(prep_data["t"],compute_aj_H_only(ch1_aj_only_,ch2_aj_only_),color="darkblue",label="pure h force",alpha=0.5)
ax.plot(prep_data["t"],compute_aj_H_wdrag(ch1_aj_only_,ch2_aj_only_,dj_wd_),color="darkgreen",label="drag +h force",alpha=0.5)
ax.legend(),ax.grid()
print("\nPerformances: ")
print("RMS error on acc pred is : ")
s="%f for vanilla \n %f for custom model \n %f for full model"%(sol_aj_od['fun'],sol_aj_oh['fun'],sol_aj_hwd["fun"])
ax.set_title(s)
print(s)
# %% aij
# %%% H nodrag
def compute_aij_H_wdrag(ch1,ch2,di=0,dj=0,df=prep_data):
vai=df["speed_body[0]"]
vaj=df["speed_body[1]"]
vak=df["speed_body[2]"]
gammai=df["gamma[0]"]
gammaj=df["gamma[1]"]
H=sum([compute_H(vak,df['omega_c[%i]'%(i+1)],ch1,ch2) for i in range(6)])
H_vect=np.c_[-vai*H,-vaj*H]
Fa=-rho0*Area*np.c_[di*np.abs(vai)*vai,dj*np.abs(vaj)*vaj]
return H_vect+np.c_[gammai,gammaj]+Fa
def cost_aij_h_nodrag(X):
ch1,ch2=X
Y=compute_aij_H_wdrag(ch1,ch2,di=0,dj=0)
ci=np.mean((Y[:,0]-prep_data['acc_body_grad[0]'])**2,axis=0)
cj=np.mean((Y[:,1]-prep_data['acc_body_grad[1]'])**2,axis=0)
c=ci+cj
print("ch1 :%f , ch2 :%f , cost :%f"%(ch1,ch2,c))
return c
X0_aij_nodrag=np.array([0,0])
sol_aij_nodrag=minimize(cost_aij_h_nodrag,X0_aij_nodrag,method="SLSQP")
ch1_aij_nodrag_,ch2_aij_nodrag_=sol_aij_nodrag['x']
# %%% H wd
def cost_aij_h_wdrag(X):
ch1,ch2,di,dj=X
Y=compute_aij_H_wdrag(ch1,ch2,di,dj)
ci=np.mean((Y[:,0]-prep_data['acc_body_grad[0]'])**2,axis=0)
cj=np.mean((Y[:,1]-prep_data['acc_body_grad[1]'])**2,axis=0)
c=ci+cj
print("ch1 :%f , ch2 :%f , di :%f , dj : %f , cost :%f"%(ch1,ch2,di,dj,c))
return c
X0_aij_hwd=np.array([0,0,0,0])
sol_aij_hwd=minimize(cost_aij_h_wdrag,X0_aij_hwd,method="SLSQP")
ch1_aij_wd_,ch2_aij_wd_,di_aij_wd_,dj_aij_wd_=sol_aij_hwd['x']
# %%% Comparison ai
aind,ajnd=compute_aij_H_wdrag(ch1_aij_nodrag_,ch2_aij_nodrag_).T
aid,ajd=compute_aij_H_wdrag(ch1_aij_wd_,ch2_aij_wd_,di_aij_wd_,dj_aij_wd_).T
f=plt.figure()
f.suptitle("Aij drag vs H force fit, nodrag")
ax=f.add_subplot(1,2,1)
ax.plot(prep_data["t"],prep_data['acc_body_grad[0]'],color="black",label="log")
ax.plot(prep_data["t"],compute_ai_od(di_only_),color="darkred",label="pure drag",alpha=0.5)
ax.plot(prep_data["t"],aind,color="darkblue",label="pure h force",alpha=0.5)
ax.plot(prep_data["t"],aid,color="darkgreen",label="drag +h force",alpha=0.5)
ax.legend(),ax.grid()
print("\nPerformances: ")
print("RMS error on acc pred is : ")
c_i_nd=np.mean((aind-prep_data['acc_body_grad[0]'])**2,axis=0)
c_i_d=np.mean((aid-prep_data['acc_body_grad[0]'])**2,axis=0)
s="%f for vanilla \n %f for custom model \n %f for full model"%(sol_ai_od['fun'],c_i_nd,c_i_d)
ax.set_title(s)
print(s)
# %%% Comparison aj
ax=f.add_subplot(1,2,2)
ax.plot(prep_data["t"],prep_data['acc_body_grad[1]'],color="black",label="log")
ax.plot(prep_data["t"],compute_aj_od(dj_only_),color="darkred",label="pure drag",alpha=0.5)
ax.plot(prep_data["t"],ajnd,color="darkblue",label="pure h force",alpha=0.5)
ax.plot(prep_data["t"],ajd,color="darkgreen",label="drag +h force",alpha=0.5)
ax.legend(),ax.grid()
print("\nPerformances: ")
print("RMS error on acc pred is : ")
c_j_nd=np.mean((ajnd-prep_data['acc_body_grad[0]'])**2,axis=0)
c_j_d=np.mean((ajd-prep_data['acc_body_grad[0]'])**2,axis=0)
s="%f for vanilla \n %f for custom model \n %f for full model"%(sol_aj_od['fun'],c_j_nd,c_j_d)
ax.set_title(s)
print(s)
# %% aij (di_eq_dj)
# %%% H nodrag
def compute_aij_H_wdrag(ch1,ch2,di=0,dj=0,df=prep_data):
vai=df["speed_body[0]"]
vaj=df["speed_body[1]"]
vak=df["speed_body[2]"]
gammai=df["gamma[0]"]
gammaj=df["gamma[1]"]
H=sum([compute_H(vak,df['omega_c[%i]'%(i+1)],ch1,ch2) for i in range(6)])
H_vect=np.c_[-vai*H,-vaj*H]
Fa=-rho0*Area*np.c_[di*np.abs(vai)*vai,dj*np.abs(vaj)*vaj]
return H_vect+np.c_[gammai,gammaj]+Fa
# %%% H wd
def cost_aij_h_wdrag_di_eq_dj_(X):
ch1,ch2,di=X
Y=compute_aij_H_wdrag(ch1,ch2,di,di)
ci=np.mean((Y[:,0]-prep_data['acc_body_grad[0]'])**2,axis=0)
cj=np.mean((Y[:,1]-prep_data['acc_body_grad[1]'])**2,axis=0)
c=ci+cj
print("ch1 :%f , ch2 :%f , dij :%f , cost :%f"%(ch1,ch2,di,c))
return c
X0_aij_hwd_di_eq_dj_=np.array([0,0,0])
sol_aij_hwd_di_eq_dj_=minimize(cost_aij_h_wdrag_di_eq_dj_,X0_aij_hwd_di_eq_dj_,method="SLSQP")
ch1_aij_wd_di_eq_dj_,ch2_aij_wd_di_eq_dj_,dij_aij_wd_di_eq_dj_=sol_aij_hwd_di_eq_dj_['x']
# %%% Comparison ai
aind,ajnd=compute_aij_H_wdrag(ch1_aij_wd_di_eq_dj_,ch2_aij_wd_di_eq_dj_).T
aid,ajd=compute_aij_H_wdrag(ch1_aij_wd_di_eq_dj_,ch2_aij_wd_di_eq_dj_,dij_aij_wd_di_eq_dj_,dij_aij_wd_di_eq_dj_).T
f=plt.figure()
f.suptitle("Aij drag vs H force fit wdrag")
ax=f.add_subplot(1,2,1)
ax.plot(prep_data["t"],prep_data['acc_body_grad[0]'],color="black",label="log")
ax.plot(prep_data["t"],compute_ai_od(di_only_),color="darkred",label="pure drag",alpha=0.5)
ax.plot(prep_data["t"],aind,color="darkblue",label="pure h force",alpha=0.5)
ax.plot(prep_data["t"],aid,color="darkgreen",label="drag +h force",alpha=0.5)
ax.legend(),ax.grid()
print("\nPerformances: ")
print("RMS error on acc pred is : ")
c_i_nd=np.mean((aind-prep_data['acc_body_grad[0]'])**2,axis=0)
c_i_d=np.mean((aid-prep_data['acc_body_grad[0]'])**2,axis=0)
s="%f for vanilla \n %f for custom model \n %f for full model"%(sol_aij_hwd_di_eq_dj_['fun'],c_i_nd,c_i_d)
ax.set_title(s)
print(s)
# %%% Comparison aj
ax=f.add_subplot(1,2,2)
ax.plot(prep_data["t"],prep_data['acc_body_grad[1]'],color="black",label="log")
ax.plot(prep_data["t"],compute_aj_od(dj_only_),color="darkred",label="pure drag",alpha=0.5)
ax.plot(prep_data["t"],ajnd,color="darkblue",label="pure h force",alpha=0.5)
ax.plot(prep_data["t"],ajd,color="darkgreen",label="drag +h force",alpha=0.5)
ax.legend(),ax.grid()
print("\nPerformances: ")
print("RMS error on acc pred is : ")
c_j_nd=np.mean((ajnd-prep_data['acc_body_grad[0]'])**2,axis=0)
c_j_d=np.mean((ajd-prep_data['acc_body_grad[0]'])**2,axis=0)
s="%f for vanilla \n %f for custom model \n %f for full model"%(sol_aij_hwd_di_eq_dj_['fun'],c_j_nd,c_j_d)
ax.set_title(s)
print(s)
# %% Global
def compute_eta(vak,omega,c1=c1sol,c2=c2sol):
eta=vak/2-r0*omega*c2/4
eta=eta+0.5*np.sqrt((vak+0.5*r0*omega*c2)**2+2*c1*r0**2*omega**2)
return eta
def compute_H(vak,omega,ch1,ch2):
eta=compute_eta(vak,omega)
H=rho0*Area*(ch1*r0*omega-ch2*(eta-vak))
return H
def compute_single_motor_thrust_MT(c1,vak,omega,c2=0,vanilla_test=False):
eta=vak/2-r0*omega*c2/4
eta=eta+0.5*np.sqrt((vak+0.5*r0*omega*c2)**2+2*c1*r0**2*omega**2)
T=2*rho0*Area*eta*(eta-vak)
if vanilla_test:
T=c1*omega**2
return T
def compute_acc_k(c1,c2=0,df=prep_data,vanilla=False,model="MT"):
vak=df["speed_body[2]"]
gamma=df["gamma[2]"]
if model=="MT":
T_sum=sum([compute_single_motor_thrust_MT(c1,vak,df['omega_c[%i]'%(i+1)],c2,vanilla_test=vanilla) for i in range(6)])
elif model=="BET":
T_sum=sum([compute_single_motor_thrust_BET(c1,vak,df['omega_c[%i]'%(i+1)],c2,vanilla_test=vanilla) for i in range(6)])
else:
raise ValueError("model must be 'MT' or 'BET'")
acc_k=-T_sum/mass+gamma
return acc_k
def compute_acc_global(ct1,ct2,ch1,ch2,di=0,dj=0,dk=0,df=prep_data):
vai=df["speed_body[0]"]
vaj=df["speed_body[1]"]
vak=df["speed_body[2]"]
gammai=df["gamma[0]"]
gammaj=df["gamma[1]"]
gammak=df["gamma[2]"]
T=sum([compute_single_motor_thrust_MT(ct1,vak,df['omega_c[%i]'%(i+1)],ct2) for i in range(6)])
H=sum([compute_H(vak,df['omega_c[%i]'%(i+1)],ch1,ch2) for i in range(6)])
H_vect=np.c_[-vai*H,-vaj*H,np.zeros(H.shape)]
T_vect=np.c_[np.zeros(T.shape),np.zeros(T.shape),T]
absva=np.sqrt(vai**2+vaj**2+vak**2)
Fa=-rho0*Area*np.c_[di*absva*vai,dj*absva*vaj,dk*absva*vak]
return -T_vect/mass+H_vect+np.c_[gammai,gammaj,gammak]+Fa
def cost_global_(X):
ct1,ct2,ch1,ch2,di,dj,dk=X
Y=compute_acc_global(ct1,ct2,ch1,ch2,di,dj,dk)
ci=np.mean((Y[:,0]-prep_data['acc_body_grad[0]'])**2/max(abs(prep_data['acc_body_grad[0]']))**2,axis=0)
cj=np.mean((Y[:,1]-prep_data['acc_body_grad[1]'])**2/max(abs(prep_data['acc_body_grad[1]']))**2,axis=0)
ck=np.mean((Y[:,2]-prep_data['acc_body_grad[2]'])**2/max(abs(prep_data['acc_body_grad[2]']))**2,axis=0)
c=ci+cj+ck
print("ct1 :%f, ct2 :%f , ch1 :%f , ch2 :%f , di :%f , dj : %f , dk : %f , cost :%f"%(ct1,ct2,ch1,ch2,di,dj,dk,c))
return c
X0_global_=np.zeros(7)
sol_global_=minimize(cost_global_,X0_global_,method="SLSQP")
ct1_global,ct2_global,ch1_global,ch2_global,di_global,dj_global,dk_global=sol_global_['x']
Y=compute_acc_global(ct1_global,ct2_global,ch1_global,ch2_global,di_global,dj_global,dk_global)
# %%% Comparison a i j k
f=plt.figure()
ax=f.add_subplot(1,3,1)
ax.plot(prep_data["t"],prep_data['acc_body_grad[0]'],color="black",label="log")
ax.plot(prep_data["t"],Y[:,0],color="darkred",label="global",alpha=0.5)
ax.legend(),ax.grid()
ax=f.add_subplot(1,3,2)
ax.plot(prep_data["t"],prep_data['acc_body_grad[1]'],color="black",label="log")
ax.plot(prep_data["t"],Y[:,1],color="darkred",label="global",alpha=0.5)
ax.legend(),ax.grid()
ax=f.add_subplot(1,3,3)
ax.plot(prep_data["t"],prep_data['acc_body_grad[2]'],color="black",label="log")
ax.plot(prep_data["t"],Y[:,2],color="darkred",label="global",alpha=0.5)
ax.legend(),ax.grid()
print("\nPerformances: ")
print("RMS error on acc pred is : ")
c_i_=np.mean((Y[:,0]-prep_data['acc_body_grad[0]'])**2,axis=0)
c_j_=np.mean((Y[:,1]-prep_data['acc_body_grad[1]'])**2,axis=0)
c_k_=np.mean((Y[:,2]-prep_data['acc_body_grad[2]'])**2,axis=0)
s="%f for i \n %f for j \n %f for k"%(c_i_,c_j_,c_k_)
f.suptitle(s)
print(s)
# %% Global
def compute_eta(vak,omega,c1=c1sol,c2=c2sol):
eta=vak/2-r0*omega*c2/4
eta=eta+0.5*np.sqrt((vak+0.5*r0*omega*c2)**2+2*c1*r0**2*omega**2)
return eta
def compute_H(vak,omega,ch1,ch2):
eta=compute_eta(vak,omega)
H=rho0*Area*(ch1*r0*omega-ch2*(eta-vak))
return H
def compute_single_motor_thrust_MT(c1,vak,omega,c2=0,vanilla_test=False):
eta=vak/2-r0*omega*c2/4
eta=eta+0.5*np.sqrt((vak+0.5*r0*omega*c2)**2+2*c1*r0**2*omega**2)
T=2*rho0*Area*eta*(eta-vak)
if vanilla_test:
T=c1*omega**2
return T
def compute_acc_k(c1,c2=0,df=prep_data,vanilla=False,model="MT"):
vak=df["speed_body[2]"]
gamma=df["gamma[2]"]
if model=="MT":
T_sum=sum([compute_single_motor_thrust_MT(c1,vak,df['omega_c[%i]'%(i+1)],c2,vanilla_test=vanilla) for i in range(6)])
elif model=="BET":
T_sum=sum([compute_single_motor_thrust_BET(c1,vak,df['omega_c[%i]'%(i+1)],c2,vanilla_test=vanilla) for i in range(6)])
else:
raise ValueError("model must be 'MT' or 'BET'")
acc_k=-T_sum/mass+gamma
return acc_k
def compute_acc_global(ct1,ct2,ch1,ch2,di=0,dj=0,dk=0,df=prep_data,vwi=0,vwj=0):
vai=df["speed_body[0]"]
vaj=df["speed_body[1]"]
vak=df["speed_body[2]"]
gammai=df["gamma[0]"]
gammaj=df["gamma[1]"]
gammak=df["gamma[2]"]
T=sum([compute_single_motor_thrust_MT(ct1,vak,df['omega_c[%i]'%(i+1)],ct2) for i in range(6)])
H=sum([compute_H(vak,df['omega_c[%i]'%(i+1)],ch1,ch2) for i in range(6)])
H_vect=np.c_[-vai*H,-vaj*H,np.zeros(H.shape)]
T_vect=np.c_[np.zeros(T.shape),np.zeros(T.shape),T]
absva=np.sqrt(vai**2+vaj**2+vak**2)
Fa=-rho0*Area*np.c_[di*absva*vai,dj*absva*vaj,dk*absva*vak]
return -T_vect/mass+H_vect+np.c_[gammai,gammaj,gammak]+Fa
def cost_global_dij_(X):
ct1,ct2,ch1,ch2,dij,dk=X
Y=compute_acc_global(ct1,ct2,ch1,ch2,dij,dij,dk)
ci=np.mean((Y[:,0]-prep_data['acc_body_grad[0]'])**2/max(abs(prep_data['acc_body_grad[0]']))**2,axis=0)
cj=np.mean((Y[:,1]-prep_data['acc_body_grad[1]'])**2/max(abs(prep_data['acc_body_grad[1]']))**2,axis=0)
ck=np.mean((Y[:,2]-prep_data['acc_body_grad[2]'])**2/max(abs(prep_data['acc_body_grad[2]']))**2,axis=0)
c=ci+cj+ck
print("ct1 :%f, ct2 :%f , ch1 :%f , ch2 :%f , di :%f , dj : %f , dk : %f , cost :%f"%(ct1,ct2,ch1,ch2,dij,dij,dk,c))
return c
X0_global_dij_=np.zeros(6)
sol_global_dij_=minimize(cost_global_dij_,X0_global_dij_,method="SLSQP")
ct1_global,ct2_global,ch1_global,ch2_global,di_global,dk_global=sol_global_dij_['x']
dj_global=di_global
Y=compute_acc_global(ct1_global,ct2_global,ch1_global,ch2_global,di_global,dj_global,dk_global)
# %%% Comparison a i j k ij equal
f=plt.figure()
ax=f.add_subplot(3,1,1)
ax.plot(prep_data["t"],prep_data['acc_body_grad[0]'],color="black",label="log")
ax.plot(prep_data["t"],Y[:,0],color="darkred",label="global",alpha=0.5)
ax.legend(),ax.grid()
ax=f.add_subplot(3,1,2)
ax.plot(prep_data["t"],prep_data['acc_body_grad[1]'],color="black",label="log")
ax.plot(prep_data["t"],Y[:,1],color="darkred",label="global",alpha=0.5)
ax.legend(),ax.grid()
ax=f.add_subplot(3,1,3)
ax.plot(prep_data["t"],prep_data['acc_body_grad[2]'],color="black",label="log")
ax.plot(prep_data["t"],Y[:,2],color="darkred",label="global",alpha=0.5)
ax.legend(),ax.grid()
print("\nPerformances: ")
print("RMS error on acc pred is : ")
c_i_=np.mean((Y[:,0]-prep_data['acc_body_grad[0]'])**2,axis=0)
c_j_=np.mean((Y[:,1]-prep_data['acc_body_grad[1]'])**2,axis=0)
c_k_=np.mean((Y[:,2]-prep_data['acc_body_grad[2]'])**2,axis=0)
s="IJ EQUAL \n %f for i \n %f for j \n %f for k"%(c_i_,c_j_,c_k_)
f.suptitle(s)
print(s)
# %% WITH WIND
import transforms3d as tf3d
def compute_acc_global_wind(ct1,ct2,ch1,ch2,di=0,dj=0,dk=0,df=prep_data,vwi=0,vwj=0):
q0,q1,q2,q3=(prep_data['q[0]'],prep_data['q[1]'],
prep_data['q[2]'],prep_data['q[3]'])
"precomputing transposition"
R_transpose=np.array([tf3d.quaternions.quat2mat([i,j,k,l]).T for i,j,k,l in zip(q0,q1,q2,q3)])
vw_earth=np.array([vwi,vwj,0])
vw_body=R_transpose@vw_earth
vai=df["speed_body[0]"]-vw_body[:,0]
vaj=df["speed_body[1]"]-vw_body[:,1]
vak=df["speed_body[2]"]-vw_body[:,2]
gammai=df["gamma[0]"]
gammaj=df["gamma[1]"]
gammak=df["gamma[2]"]
T=sum([compute_single_motor_thrust_MT(ct1,vak,df['omega_c[%i]'%(i+1)],ct2) for i in range(6)])
H=sum([compute_H(vak,df['omega_c[%i]'%(i+1)],ch1,ch2) for i in range(6)])
H_vect=np.c_[-vai*H,-vaj*H,np.zeros(H.shape)]
T_vect=np.c_[np.zeros(T.shape),np.zeros(T.shape),T]
absva=np.sqrt(vai**2+vaj**2+vak**2)
Fa=-rho0*Area*np.c_[di*absva*vai,dj*absva*vaj,dk*absva*vak]
return -T_vect/mass+H_vect+np.c_[gammai,gammaj,gammak]+Fa
def cost_global_dij_wind_(X):
ct1,ct2,ch1,ch2,dij,dk,vwi,vwj=X
Y=compute_acc_global_wind(ct1,ct2,ch1,ch2,dij,dij,dk,vwi=vwi,vwj=vwj)
ci=np.mean((Y[:,0]-prep_data['acc_body_grad[0]'])**2/max(abs(prep_data['acc_body_grad[0]']))**2,axis=0)
cj=np.mean((Y[:,1]-prep_data['acc_body_grad[1]'])**2/max(abs(prep_data['acc_body_grad[1]']))**2,axis=0)
ck=np.mean((Y[:,2]-prep_data['acc_body_grad[2]'])**2/max(abs(prep_data['acc_body_grad[2]']))**2,axis=0)
c=ci+cj+ck
print("ct1 :%f, ct2 :%f , ch1 :%f , ch2 :%f , di :%f , dj : %f , dk : %f , vwi : %f ,vwj : %f cost :%f"%(ct1,ct2,ch1,ch2,dij,dij,dk,vwi,vwj,c))
return c
X0_global_dij_wind_=np.zeros(8)
sol_global_dij_wind_=minimize(cost_global_dij_wind_,X0_global_dij_wind_,method="SLSQP")
ct1_global,ct2_global,ch1_global,ch2_global,di_global,dk_global,vwi_global_,vwj_global_=sol_global_dij_wind_['x']
dj_global=di_global
Y=compute_acc_global_wind(ct1_global,ct2_global,ch1_global,ch2_global,di_global,dj_global,dk_global,vwi=vwi_global_,vwj=vwj_global_)
f=plt.figure()
ax=f.add_subplot(3,1,1)
ax.plot(prep_data["t"],prep_data['acc_body_grad[0]'],color="black",label="log")
ax.plot(prep_data["t"],Y[:,0],color="darkred",label="global")
ax.legend(),ax.grid()
ax=f.add_subplot(3,1,2)
ax.plot(prep_data["t"],prep_data['acc_body_grad[1]'],color="black",label="log")
ax.plot(prep_data["t"],Y[:,1],color="darkred",label="global")
ax.legend(),ax.grid()
ax=f.add_subplot(3,1,3)
ax.plot(prep_data["t"],prep_data['acc_body_grad[2]'],color="black",label="log")
ax.plot(prep_data["t"],Y[:,2],color="darkred",label="global")
ax.legend(),ax.grid()
print("\nPerformances: ")
print("RMS error on acc pred is : ")
# c_i_=np.sqrt(np.mean((Y[:,0]-prep_data['acc_body_grad[0]'])**2,axis=0))
# c_j_=np.sqrt(np.mean((Y[:,1]-prep_data['acc_body_grad[1]'])**2,axis=0) )
# c_k_=np.sqrt(np.mean((Y[:,2]-prep_data['acc_body_grad[2]'])**2,axis=0) )
c_i_=np.mean(np.abs(Y[:,0]-prep_data['acc_body_grad[0]']),axis=0)
c_j_=np.mean(np.abs(Y[:,1]-prep_data['acc_body_grad[1]']),axis=0)
c_k_=np.mean(np.abs(Y[:,2]-prep_data['acc_body_grad[2]']),axis=0)
s="WIND \n %f for i \n %f for j \n %f for k"%(c_i_,c_j_,c_k_)
f.suptitle(s)
print(s)
# %% Synthesis
bilan=pd.DataFrame(data=None,
columns=['ct1','ct2',
'ch1','ch2',
'di','dj','dk','vwi','vwj',
'cost'],
index=['vanilla','custom',
'vanilla_dk','custom_with_dk',
'ai_drag','ai_h','ai_drag_and_h',
'aj_drag','aj_h','aj_drag_and_h',
'aij_h','aij_h_and_drag',
'aij_h_drag_equal_coeffs',
"global","global_equal_coeffs","global_wind"])
bilan.loc["vanilla"]['ct1','cost']=np.r_[sol_vanilla['x'],sol_vanilla['fun']]
bilan.loc["custom"]['ct1','ct2','cost']=np.r_[sol_custom['x'],sol_custom['fun']]
bilan.loc["vanilla_dk"]['ct1','dk','cost']=np.r_[sol_vanilla_drag['x'],sol_vanilla_drag['fun']]
bilan.loc["custom_with_dk"]['ct1','ct2','dk','cost']=np.r_[sol_custom_drag['x'],sol_custom_drag['fun']]
bilan.loc['ai_drag']['di','cost']=np.r_[sol_ai_od['x'],sol_ai_od['fun']]
bilan.loc['ai_h']['ch1','ch2','cost']=np.r_[sol_ai_oh['x'],sol_ai_oh['fun']]
bilan.loc['ai_drag_and_h']['ch1','ch2','di','cost']=np.r_[sol_ai_hwd['x'],sol_ai_hwd['fun']]
bilan.loc['aj_drag']['dj','cost']=np.r_[sol_aj_od['x'],sol_aj_od['fun']]
bilan.loc['aj_h']['ch1','ch2','cost']=np.r_[sol_aj_oh['x'],sol_aj_oh['fun']]
bilan.loc['aj_drag_and_h']['ch1','ch2','dj','cost']=np.r_[sol_aj_hwd['x'],sol_aj_hwd['fun']]
bilan.loc['aij_h']['ch1','ch2','cost']=np.r_[sol_aij_nodrag['x'],sol_aij_nodrag['fun']]
bilan.loc['aij_h_and_drag']['ch1','ch2','di','dj','cost']=np.r_[sol_aij_hwd['x'],sol_aij_hwd['fun']]
bilan.loc['aij_h_drag_equal_coeffs']['ch1','ch2','di','cost']=np.r_[sol_aij_hwd_di_eq_dj_['x'],sol_aij_hwd_di_eq_dj_['fun']]
bilan.loc['aij_h_drag_equal_coeffs']['dj']=bilan.loc['aij_h_drag_equal_coeffs']['di']
bilan.loc['global']['ct1','ct2',
'ch1','ch2',
'di','dj','dk',
'cost']=np.r_[sol_global_['x'],sol_global_['fun']]
bilan.loc['global_equal_coeffs']['ct1','ct2',
'ch1','ch2',
'di','dk',
'cost']=np.r_[sol_global_dij_['x'],sol_global_dij_['fun']]
bilan.loc['global_equal_coeffs']["dj"]=bilan.loc['global_equal_coeffs']["di"]
bilan.loc['global_wind']['ct1','ct2',
'ch1','ch2',
'di','dk','vwi','vwj',
'cost']=np.r_[sol_global_dij_wind_['x'],sol_global_dij_wind_['fun']]
bilan.loc['global_wind']["dj"]=bilan.loc['global_wind']["di"]
print(bilan)
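# Optional persistence sketch (not in the original script); the directory is
# assumed to exist since log_path was read from it:
bilan.to_csv("./logs/copter/vol12/bilan_identification.csv")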
| 30.527928
| 147
| 0.654548
| 6,581
| 33,886
| 3.147698
| 0.043762
| 0.065653
| 0.038233
| 0.052136
| 0.868356
| 0.826068
| 0.789283
| 0.748926
| 0.725078
| 0.712527
| 0
| 0.044588
| 0.123709
| 33,886
| 1,109
| 148
| 30.555455
| 0.653028
| 0.027829
| 0
| 0.632857
| 0
| 0.004286
| 0.191553
| 0.004083
| 0
| 0
| 0
| 0
| 0
| 1
| 0.07
| false
| 0
| 0.01
| 0
| 0.155714
| 0.121429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
366568d751e447a07278169fb45281db86a49691
| 1,425
|
py
|
Python
|
Calebs Tests/POV VISION/FFMpegWraper.py
|
cboy116/Team-4480-Code-2018
|
8c4bd92bc65695544e8a176205d9685e80e3dcb2
|
[
"MIT"
] | 4
|
2018-01-14T01:14:13.000Z
|
2018-10-06T03:07:26.000Z
|
Calebs Tests/POV VISION/FFMpegWraper.py
|
cboy116/Team-4480-Code-2018
|
8c4bd92bc65695544e8a176205d9685e80e3dcb2
|
[
"MIT"
] | 16
|
2018-01-14T01:15:49.000Z
|
2018-03-09T17:39:38.000Z
|
Calebs Tests/POV VISION/FFMpegWraper.py
|
cboy116/Team-4480-Code-2018
|
8c4bd92bc65695544e8a176205d9685e80e3dcb2
|
[
"MIT"
] | 4
|
2018-01-29T20:27:01.000Z
|
2018-10-06T03:07:23.000Z
|
import subprocess as sp
#the cmd>>>>> ffmpeg -f dshow -pixel_format yuyv422 -i video="USB_Camera" -vcodec libx264 -f h264 -preset fast -tune zerolatency pipe:1
# ffmpeg -f dshow -pixel_format yuyv422 -i video="USB_Camera" -vcodec libx264 -f h264 -preset ultrafast -tune zerolatency -threads 4 -f mpegts udp://192.168.56.1:8888
#\ -x264opts crf=20:vbv-maxrate=3000:vbv-bufsize=100:intra-refresh=1:slice-max-size=1500:keyint=30:ref=1 \
#ffmpeg -f dshow -pixel_format yuyv422 -i video="USB_Camera" -vcodec libx264 -f h264 -preset ultrafast -tune zerolatency -threads 4 \ -x264opts crf=20:vbv-maxrate=3000:vbv-bufsize=100:intra-refresh=1:slice-max-size=1500:keyint=30:ref=1 \ -f mpegts udp://192.168.56.1:8888
# ffmpeg -f dshow -pixel_format yuyv422 -i video="USB_Camera" -vcodec libx264 -f h264 -preset ultrafast -tune zerolatency -threads 4 -f mpegts rtsp://192.168.56.1:8888
#-f rtp rtp://10.0.0.2:6005
#ffmpeg -f dshow -pixel_format yuyv422 -i video="USB_Camera" -vcodec libx264 -f h264 -preset ultrafast -tune zerolatency -threads 4 http://localhost:8090/feed1.ffm
#http://localhost:8090/feed1.ffm
# variable settings (note: frameRate, resolutionX and resolutionY are defined
# here but the command below currently hardcodes 1280x720 at 24 fps)
frameRate = 30
resolutionX = 1920
resolutionY = 1080
filePath = "ffmpeg.exe"
# with an argv list (shell=False) the device name must not carry embedded
# quotes, so "video=USB_Camera" rather than 'video="USB_Camera"'
cmd = [filePath,"-f","dshow","-video_size","1280x720","-framerate","24","-pixel_format",
"yuyv422","-i","video=USB_Camera","-vcodec","libx264","-f","h264","-preset","fast","-tune","zerolatency","pipe:1"]
| 49.137931
| 271
| 0.724912
| 225
| 1,425
| 4.533333
| 0.324444
| 0.035294
| 0.105882
| 0.111765
| 0.839216
| 0.777451
| 0.777451
| 0.777451
| 0.739216
| 0.739216
| 0
| 0.13959
| 0.110175
| 1,425
| 28
| 272
| 50.892857
| 0.664827
| 0.757895
| 0
| 0
| 0
| 0
| 0.419643
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
36ed138dc0dcb78ec76943c38fb4f21c48ab23a6
| 87
|
py
|
Python
|
app/api/__init__.py
|
cmyui/gulag
|
ff3b39fb6304354694379c3f8cc74dfb73e670ce
|
[
"MIT"
] | 187
|
2020-07-27T18:59:35.000Z
|
2022-02-02T16:15:13.000Z
|
app/api/__init__.py
|
cmyui/gulag
|
ff3b39fb6304354694379c3f8cc74dfb73e670ce
|
[
"MIT"
] | 119
|
2020-08-15T16:32:50.000Z
|
2022-02-02T05:19:55.000Z
|
app/api/__init__.py
|
cmyui/gulag
|
ff3b39fb6304354694379c3f8cc74dfb73e670ce
|
[
"MIT"
] | 123
|
2020-07-23T21:47:52.000Z
|
2022-02-05T13:59:32.000Z
|
# type: ignore
from . import ava
from . import cho
from . import map
from . import osu
| 14.5
| 17
| 0.712644
| 14
| 87
| 4.428571
| 0.571429
| 0.645161
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.218391
| 87
| 5
| 18
| 17.4
| 0.911765
| 0.137931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1880f531c1ed6fb7ac8d5f47440c0581380c602c
| 43
|
py
|
Python
|
colosseum/agents/episodic/__init__.py
|
MichelangeloConserva/Colosseum
|
b0711fd9ce75520deb74cda75c148984a8e4152f
|
[
"MIT"
] | null | null | null |
colosseum/agents/episodic/__init__.py
|
MichelangeloConserva/Colosseum
|
b0711fd9ce75520deb74cda75c148984a8e4152f
|
[
"MIT"
] | null | null | null |
colosseum/agents/episodic/__init__.py
|
MichelangeloConserva/Colosseum
|
b0711fd9ce75520deb74cda75c148984a8e4152f
|
[
"MIT"
] | null | null | null |
from colosseum.agents.episodic import psrl
| 21.5
| 42
| 0.860465
| 6
| 43
| 6.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 43
| 1
| 43
| 43
| 0.948718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a12c59bb59f465304c03c242896ff386dbca5994
| 168
|
py
|
Python
|
modules/regularizers/regularizer.py
|
df424/ml
|
e12232ca4b90f983bfb14718afd314d3d6cc1bf9
|
[
"MIT"
] | null | null | null |
modules/regularizers/regularizer.py
|
df424/ml
|
e12232ca4b90f983bfb14718afd314d3d6cc1bf9
|
[
"MIT"
] | null | null | null |
modules/regularizers/regularizer.py
|
df424/ml
|
e12232ca4b90f983bfb14718afd314d3d6cc1bf9
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
import numpy as np
class Regularizer(ABC):
@abstractmethod
def regularize(self, weights: np.ndarray) -> None:
pass
| 21
| 54
| 0.708333
| 21
| 168
| 5.666667
| 0.761905
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.214286
| 168
| 8
| 55
| 21
| 0.901515
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0.166667
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
a14e26d59959c53b18043c8d35179c0477c46317
| 324
|
py
|
Python
|
qdeep/utils/__init__.py
|
Talendar/qdeep
|
7228edc9cc7d7e6c6bc59e93a3eb726fda15704d
|
[
"MIT"
] | null | null | null |
qdeep/utils/__init__.py
|
Talendar/qdeep
|
7228edc9cc7d7e6c6bc59e93a3eb726fda15704d
|
[
"MIT"
] | null | null | null |
qdeep/utils/__init__.py
|
Talendar/qdeep
|
7228edc9cc7d7e6c6bc59e93a3eb726fda15704d
|
[
"MIT"
] | null | null | null |
from qdeep.utils.env_loop import EnvironmentLoop
from qdeep.utils.utils import find_best_policy
from qdeep.utils.utils import format_eta
from qdeep.utils.utils import save_module
from qdeep.utils.utils import restore_module
from qdeep.utils.visualization import visualize_policy
from acme.tf.networks import DQNAtariNetwork
| 40.5
| 54
| 0.87037
| 49
| 324
| 5.612245
| 0.428571
| 0.196364
| 0.305455
| 0.276364
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08642
| 324
| 7
| 55
| 46.285714
| 0.929054
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a1b771951bfc84097a9c597d631d8f47ac3c3505
| 189
|
py
|
Python
|
src/Laptop.py
|
TestowanieAutomatyczneUG/laboratorium-9-cati97
|
27b2a515cd1887f1b35671ddb273b22cc7e04373
|
[
"MIT"
] | null | null | null |
src/Laptop.py
|
TestowanieAutomatyczneUG/laboratorium-9-cati97
|
27b2a515cd1887f1b35671ddb273b22cc7e04373
|
[
"MIT"
] | null | null | null |
src/Laptop.py
|
TestowanieAutomatyczneUG/laboratorium-9-cati97
|
27b2a515cd1887f1b35671ddb273b22cc7e04373
|
[
"MIT"
] | null | null | null |
class Laptop:
def getTime(self):
pass
def playWavFile(self, file):
pass
def wavWasPlayed(self, file):
pass
def resetWav(self, file):
pass
| 14.538462
| 33
| 0.555556
| 21
| 189
| 5
| 0.47619
| 0.2
| 0.342857
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.359788
| 189
| 12
| 34
| 15.75
| 0.867769
| 0
| 0
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.444444
| false
| 0.444444
| 0
| 0
| 0.555556
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
a1c3e06d55ed9b5f9bd5121267eacdb543dac038
| 6,096
|
py
|
Python
|
dojo/unittests/test_ossindex_devaudit_parser.py
|
uditmishra128/django-DefectDojo
|
a009b08f97a5f5ee5096cff8e0b17e1ed934df72
|
[
"BSD-3-Clause"
] | 3
|
2020-07-15T12:57:14.000Z
|
2020-10-14T14:32:40.000Z
|
dojo/unittests/test_ossindex_devaudit_parser.py
|
uditmishra128/django-DefectDojo
|
a009b08f97a5f5ee5096cff8e0b17e1ed934df72
|
[
"BSD-3-Clause"
] | 20
|
2020-10-12T09:59:55.000Z
|
2021-03-22T08:31:00.000Z
|
dojo/unittests/test_ossindex_devaudit_parser.py
|
sandeshreads/dojotest
|
10f309c00c822e5200458c7fa4e1e33de8850a81
|
[
"BSD-3-Clause"
] | null | null | null |
from django.test import TestCase
from dojo.tools.ossindex_devaudit.parser import OssIndexDevauditParser
from dojo.models import Test
class TestOssIndexDevauditParser(TestCase):
def test_ossindex_devaudit_parser_without_file_has_no_findings(self):
parser = OssIndexDevauditParser(None, Test())
self.assertEqual(0, len(parser.items))
def test_ossindex_devaudit_parser_with_no_vulns_has_no_findings(self):
testfile = open("dojo/unittests/scans/ossindex_devaudit_sample/ossindex_devaudit_no_vuln.json")
parser = OssIndexDevauditParser(testfile, Test())
testfile.close()
self.assertEqual(0, len(parser.items))
def test_ossindex_devaudit_parser_with_one_critical_vuln_has_one_finding(self):
testfile = open("dojo/unittests/scans/ossindex_devaudit_sample/ossindex_devaudit_one_vuln.json")
parser = OssIndexDevauditParser(testfile, Test())
testfile.close()
self.assertEqual(1, len(parser.items))
def test_ossindex_devaudit_parser_with_multiple_vulns_has_multiple_finding(self):
testfile = open("dojo/unittests/scans/ossindex_devaudit_sample/ossindex_devaudit_multiple_vulns.json")
parser = OssIndexDevauditParser(testfile, Test())
testfile.close()
self.assertTrue(len(parser.items) > 1)
def test_ossindex_devaudit_parser_with_no_cve_returns_info_severity(self):
testfile = open("dojo/unittests/scans/ossindex_devaudit_sample/ossindex_devaudit_vuln_no_cvssscore.json")
parser = OssIndexDevauditParser(testfile, Test())
testfile.close()
self.assertTrue(len(parser.items) == 1)
def test_ossindex_devaudit_parser_with_reference_shows_reference(self):
testfile = open("dojo/unittests/scans/ossindex_devaudit_sample/ossindex_devaudit_one_vuln.json")
parser = OssIndexDevauditParser(testfile, Test())
testfile.close()
if len(parser.items) > 0:
for item in parser.items:
self.assertTrue(item.references != "")
def test_ossindex_devaudit_parser_with_empty_reference_shows_empty_reference(self):
testfile = open("dojo/unittests/scans/ossindex_devaudit_sample/ossindex_devaudit_empty_reference.json")
parser = OssIndexDevauditParser(testfile, Test())
testfile.close()
if len(parser.items) > 0:
for item in parser.items:
self.assertTrue(item.references == "")
def test_ossindex_devaudit_parser_with_missing_reference_shows_empty(self):
testfile = open("dojo/unittests/scans/ossindex_devaudit_sample/ossindex_devaudit_missing_reference.json")
parser = OssIndexDevauditParser(testfile, Test())
testfile.close()
if len(parser.items) > 0:
for item in parser.items:
self.assertTrue(item.references == "")
def test_ossindex_devaudit_parser_with_missing_cwe_shows_1035(self):
testfile = open("dojo/unittests/scans/ossindex_devaudit_sample/ossindex_devaudit_missing_cwe.json")
parser = OssIndexDevauditParser(testfile, Test())
testfile.close()
if len(parser.items) > 0:
for item in parser.items:
self.assertTrue(item.cwe == 1035)
def test_ossindex_devaudit_parser_with_null_cwe_shows_1035(self):
testfile = open("dojo/unittests/scans/ossindex_devaudit_sample/ossindex_devaudit_null_cwe.json")
parser = OssIndexDevauditParser(testfile, Test())
testfile.close()
if len(parser.items) > 0:
for item in parser.items:
self.assertTrue(item.cwe == 1035)
def test_ossindex_devaudit_parser_with_empty_cwe_shows_1035(self):
testfile = open("dojo/unittests/scans/ossindex_devaudit_sample/ossindex_devaudit_empty_cwe.json")
parser = OssIndexDevauditParser(testfile, Test())
testfile.close()
if len(parser.items) > 0:
for item in parser.items:
self.assertTrue(item.cwe == 1035)
def test_ossindex_devaudit_parser_get_severity_shows_info(self):
testfile = open("dojo/unittests/scans/ossindex_devaudit_sample/ossindex_devaudit_severity_info.json")
parser = OssIndexDevauditParser(testfile, Test())
testfile.close()
if len(parser.items) > 0:
for item in parser.items:
self.assertTrue(item.severity == "Info")
def test_ossindex_devaudit_parser_get_severity_shows_critical(self):
testfile = open("dojo/unittests/scans/ossindex_devaudit_sample/ossindex_devaudit_severity_critical.json")
parser = OssIndexDevauditParser(testfile, Test())
testfile.close()
if len(parser.items) > 0:
for item in parser.items:
self.assertTrue(item.severity == "Critical")
def test_ossindex_devaudit_parser_get_severity_shows_high(self):
testfile = open("dojo/unittests/scans/ossindex_devaudit_sample/ossindex_devaudit_severity_high.json")
parser = OssIndexDevauditParser(testfile, Test())
testfile.close()
if len(parser.items) > 0:
for item in parser.items:
self.assertTrue(item.severity == "High")
def test_ossindex_devaudit_parser_get_severity_shows_medium(self):
testfile = open("dojo/unittests/scans/ossindex_devaudit_sample/ossindex_devaudit_severity_medium.json")
parser = OssIndexDevauditParser(testfile, Test())
testfile.close()
if len(parser.items) > 0:
for item in parser.items:
self.assertTrue(item.severity == "Medium")
def test_ossindex_devaudit_parser_get_severity_shows_low(self):
testfile = open("dojo/unittests/scans/ossindex_devaudit_sample/ossindex_devaudit_severity_low.json")
parser = OssIndexDevauditParser(testfile, Test())
testfile.close()
if len(parser.items) > 0:
for item in parser.items:
self.assertTrue(item.severity == "Low")
| 49.16129
| 114
| 0.695702
| 679
| 6,096
| 5.944035
| 0.092784
| 0.186323
| 0.092666
| 0.091179
| 0.883796
| 0.876611
| 0.876611
| 0.873142
| 0.824579
| 0.812934
| 0
| 0.008363
| 0.215387
| 6,096
| 123
| 115
| 49.560976
| 0.835459
| 0
| 0
| 0.586538
| 0
| 0
| 0.208271
| 0.204085
| 0
| 0
| 0
| 0
| 0.153846
| 1
| 0.153846
| false
| 0
| 0.028846
| 0
| 0.192308
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|