hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
523fb5a3756a3fc268fd9b81ca6a21b90f7a96bb
| 52
|
py
|
Python
|
pytourney/tie/__init__.py
|
SzieberthAdam/pysportsscheduling
|
256fb2505fb93f795226ae58513c0ce8c31d722e
|
[
"MIT"
] | null | null | null |
pytourney/tie/__init__.py
|
SzieberthAdam/pysportsscheduling
|
256fb2505fb93f795226ae58513c0ce8c31d722e
|
[
"MIT"
] | null | null | null |
pytourney/tie/__init__.py
|
SzieberthAdam/pysportsscheduling
|
256fb2505fb93f795226ae58513c0ce8c31d722e
|
[
"MIT"
] | null | null | null |
from . import hth_quilici
from . import hth_sweep
| 17.333333
| 26
| 0.769231
| 8
| 52
| 4.75
| 0.625
| 0.526316
| 0.684211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.192308
| 52
| 2
| 27
| 26
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
526571c45e95d4bbbe0c87a3b1f3ee18a52b8c86
| 15,129
|
py
|
Python
|
vdp/mgmt/v1alpha/mgmt_service_pb2_grpc.py
|
instill-ai/protogen-python
|
6e118d34566b8d59e8bcd40e0ae28e0fc1a5d50f
|
[
"Apache-2.0"
] | 1
|
2022-03-22T09:09:46.000Z
|
2022-03-22T09:09:46.000Z
|
vdp/mgmt/v1alpha/mgmt_service_pb2_grpc.py
|
instill-ai/protogen-python
|
6e118d34566b8d59e8bcd40e0ae28e0fc1a5d50f
|
[
"Apache-2.0"
] | 4
|
2022-03-16T12:36:12.000Z
|
2022-03-22T10:53:12.000Z
|
vdp/mgmt/v1alpha/mgmt_service_pb2_grpc.py
|
instill-ai/protogen-python
|
6e118d34566b8d59e8bcd40e0ae28e0fc1a5d50f
|
[
"Apache-2.0"
] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from vdp.mgmt.v1alpha import healthcheck_pb2 as vdp_dot_mgmt_dot_v1alpha_dot_healthcheck__pb2
from vdp.mgmt.v1alpha import mgmt_pb2 as vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2
class UserServiceStub(object):
"""User service responds to incoming user requests.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Liveness = channel.unary_unary(
'/vdp.mgmt.v1alpha.UserService/Liveness',
request_serializer=vdp_dot_mgmt_dot_v1alpha_dot_healthcheck__pb2.LivenessRequest.SerializeToString,
response_deserializer=vdp_dot_mgmt_dot_v1alpha_dot_healthcheck__pb2.LivenessResponse.FromString,
)
self.Readiness = channel.unary_unary(
'/vdp.mgmt.v1alpha.UserService/Readiness',
request_serializer=vdp_dot_mgmt_dot_v1alpha_dot_healthcheck__pb2.ReadinessRequest.SerializeToString,
response_deserializer=vdp_dot_mgmt_dot_v1alpha_dot_healthcheck__pb2.ReadinessResponse.FromString,
)
self.ListUser = channel.unary_unary(
'/vdp.mgmt.v1alpha.UserService/ListUser',
request_serializer=vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.ListUserRequest.SerializeToString,
response_deserializer=vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.ListUserResponse.FromString,
)
self.CreateUser = channel.unary_unary(
'/vdp.mgmt.v1alpha.UserService/CreateUser',
request_serializer=vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.CreateUserRequest.SerializeToString,
response_deserializer=vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.CreateUserResponse.FromString,
)
self.GetUser = channel.unary_unary(
'/vdp.mgmt.v1alpha.UserService/GetUser',
request_serializer=vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.GetUserRequest.SerializeToString,
response_deserializer=vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.GetUserResponse.FromString,
)
self.UpdateUser = channel.unary_unary(
'/vdp.mgmt.v1alpha.UserService/UpdateUser',
request_serializer=vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.UpdateUserRequest.SerializeToString,
response_deserializer=vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.UpdateUserResponse.FromString,
)
self.DeleteUser = channel.unary_unary(
'/vdp.mgmt.v1alpha.UserService/DeleteUser',
request_serializer=vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.DeleteUserRequest.SerializeToString,
response_deserializer=vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.DeleteUserResponse.FromString,
)
self.LookUpUser = channel.unary_unary(
'/vdp.mgmt.v1alpha.UserService/LookUpUser',
request_serializer=vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.LookUpUserRequest.SerializeToString,
response_deserializer=vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.LookUpUserResponse.FromString,
)
class UserServiceServicer(object):
"""User service responds to incoming user requests.
"""
def Liveness(self, request, context):
"""Liveness method receives a LivenessRequest message and returns a
LivenessResponse message.
See https://github.com/grpc/grpc/blob/master/doc/health-checking.md
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Readiness(self, request, context):
"""Readiness method receives a ReadinessRequest message and returns a
ReadinessResponse message.
See https://github.com/grpc/grpc/blob/master/doc/health-checking.md
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListUser(self, request, context):
"""ListUser method receives a ListUserRequest message and returns a
ListUserResponse message.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateUser(self, request, context):
"""CreateUser receives a CreateUserRequest message and returns a
aGetUserResponse
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetUser(self, request, context):
"""GetUser method receives a GetUserRequest message and returns
a GetUserResponse message.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateUser(self, request, context):
"""UpdateUser method receives a UpdateUserRequest message and returns
a UpdateUserResponse
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteUser(self, request, context):
"""DeleteUser method receives a DeleteUserRequest message and returns a
DeleteUserResponse
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def LookUpUser(self, request, context):
"""LookUpUser method receives a LookUpUserRequest message and returns a
LookUpUserResponse
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_UserServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Liveness': grpc.unary_unary_rpc_method_handler(
servicer.Liveness,
request_deserializer=vdp_dot_mgmt_dot_v1alpha_dot_healthcheck__pb2.LivenessRequest.FromString,
response_serializer=vdp_dot_mgmt_dot_v1alpha_dot_healthcheck__pb2.LivenessResponse.SerializeToString,
),
'Readiness': grpc.unary_unary_rpc_method_handler(
servicer.Readiness,
request_deserializer=vdp_dot_mgmt_dot_v1alpha_dot_healthcheck__pb2.ReadinessRequest.FromString,
response_serializer=vdp_dot_mgmt_dot_v1alpha_dot_healthcheck__pb2.ReadinessResponse.SerializeToString,
),
'ListUser': grpc.unary_unary_rpc_method_handler(
servicer.ListUser,
request_deserializer=vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.ListUserRequest.FromString,
response_serializer=vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.ListUserResponse.SerializeToString,
),
'CreateUser': grpc.unary_unary_rpc_method_handler(
servicer.CreateUser,
request_deserializer=vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.CreateUserRequest.FromString,
response_serializer=vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.CreateUserResponse.SerializeToString,
),
'GetUser': grpc.unary_unary_rpc_method_handler(
servicer.GetUser,
request_deserializer=vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.GetUserRequest.FromString,
response_serializer=vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.GetUserResponse.SerializeToString,
),
'UpdateUser': grpc.unary_unary_rpc_method_handler(
servicer.UpdateUser,
request_deserializer=vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.UpdateUserRequest.FromString,
response_serializer=vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.UpdateUserResponse.SerializeToString,
),
'DeleteUser': grpc.unary_unary_rpc_method_handler(
servicer.DeleteUser,
request_deserializer=vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.DeleteUserRequest.FromString,
response_serializer=vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.DeleteUserResponse.SerializeToString,
),
'LookUpUser': grpc.unary_unary_rpc_method_handler(
servicer.LookUpUser,
request_deserializer=vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.LookUpUserRequest.FromString,
response_serializer=vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.LookUpUserResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'vdp.mgmt.v1alpha.UserService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class UserService(object):
"""User service responds to incoming user requests.
"""
@staticmethod
def Liveness(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/vdp.mgmt.v1alpha.UserService/Liveness',
vdp_dot_mgmt_dot_v1alpha_dot_healthcheck__pb2.LivenessRequest.SerializeToString,
vdp_dot_mgmt_dot_v1alpha_dot_healthcheck__pb2.LivenessResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Readiness(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/vdp.mgmt.v1alpha.UserService/Readiness',
vdp_dot_mgmt_dot_v1alpha_dot_healthcheck__pb2.ReadinessRequest.SerializeToString,
vdp_dot_mgmt_dot_v1alpha_dot_healthcheck__pb2.ReadinessResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListUser(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/vdp.mgmt.v1alpha.UserService/ListUser',
vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.ListUserRequest.SerializeToString,
vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.ListUserResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CreateUser(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/vdp.mgmt.v1alpha.UserService/CreateUser',
vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.CreateUserRequest.SerializeToString,
vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.CreateUserResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetUser(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/vdp.mgmt.v1alpha.UserService/GetUser',
vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.GetUserRequest.SerializeToString,
vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.GetUserResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateUser(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/vdp.mgmt.v1alpha.UserService/UpdateUser',
vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.UpdateUserRequest.SerializeToString,
vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.UpdateUserResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteUser(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/vdp.mgmt.v1alpha.UserService/DeleteUser',
vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.DeleteUserRequest.SerializeToString,
vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.DeleteUserResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def LookUpUser(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/vdp.mgmt.v1alpha.UserService/LookUpUser',
vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.LookUpUserRequest.SerializeToString,
vdp_dot_mgmt_dot_v1alpha_dot_mgmt__pb2.LookUpUserResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 47.278125
| 122
| 0.682266
| 1,496
| 15,129
| 6.511364
| 0.08623
| 0.062519
| 0.051329
| 0.066728
| 0.820142
| 0.800431
| 0.800431
| 0.734729
| 0.722
| 0.684632
| 0
| 0.010656
| 0.249455
| 15,129
| 319
| 123
| 47.426332
| 0.847204
| 0.081631
| 0
| 0.505929
| 1
| 0
| 0.08
| 0.047766
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071146
| false
| 0
| 0.011858
| 0.031621
| 0.126482
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
87049636adb445e08e2eb0a1209bd795c90758e4
| 549
|
py
|
Python
|
template/sample.py
|
TomokiHirose/poetry_template
|
380e795ae047681ff12f1445273300db4234763f
|
[
"MIT"
] | 2
|
2021-08-16T12:14:22.000Z
|
2021-09-14T00:51:47.000Z
|
template/sample.py
|
TomokiHirose/poetry_template
|
380e795ae047681ff12f1445273300db4234763f
|
[
"MIT"
] | null | null | null |
template/sample.py
|
TomokiHirose/poetry_template
|
380e795ae047681ff12f1445273300db4234763f
|
[
"MIT"
] | null | null | null |
class Foo:
def say(self) -> str:
"""[summary]
Returns:
str: [description]
"""
return "foo"
def say2(self) -> str:
"""[summary]
Returns:
str: [description]
"""
return "foo2"
class Hoge:
def say(self) -> str:
"""[summary]
Returns:
str: [description]
"""
return "hoge"
def say2(self) -> str:
"""[summary]
Returns:
str: [description]
"""
return "hoge2"
| 15.685714
| 30
| 0.406193
| 44
| 549
| 5.068182
| 0.318182
| 0.125561
| 0.251121
| 0.376682
| 0.852018
| 0.852018
| 0.852018
| 0.852018
| 0.852018
| 0
| 0
| 0.013289
| 0.45173
| 549
| 34
| 31
| 16.147059
| 0.727575
| 0.311475
| 0
| 0.4
| 0
| 0
| 0.062257
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
8723d8496da8d904c918bb1ce958f66157ac0d77
| 6,350
|
py
|
Python
|
tests/test_protocol.py
|
SamuelMarks/enforce
|
8ca1f93cd082d016affd0001b400b6ea2920c4fd
|
[
"MIT"
] | 7
|
2020-10-27T14:38:23.000Z
|
2021-12-21T18:12:57.000Z
|
tests/test_protocol.py
|
sg-s/enforce
|
6abdcfe5f15e42cfa6826a002883283c946f7c8d
|
[
"MIT"
] | null | null | null |
tests/test_protocol.py
|
sg-s/enforce
|
6abdcfe5f15e42cfa6826a002883283c946f7c8d
|
[
"MIT"
] | 1
|
2021-12-21T18:14:21.000Z
|
2021-12-21T18:14:21.000Z
|
from __future__ import absolute_import
import sys
import unittest
import typing
from enforce.protocol import register, is_registered, deregister_all
class TestProtocol(unittest.TestCase):
def setUp(self):
deregister_all(do_it=True)
def test_register_deregister(self):
class A(object):
__protocol_name__ = "main.A"
protocol_definition = register(A)
self.assertTrue(is_registered(A))
deregister_all()
self.assertTrue(is_registered(A))
deregister_all(do_it=True)
self.assertFalse(is_registered(A))
self.assertEqual(protocol_definition.id, A.__protocol_name__)
def test_simple_protocol_registration(self):
class A(object):
def foo(self):
pass
def foo_1(self, data: int):
pass
def foo_2(self, data: str) -> int:
pass
__protocol_name__ = "main.A"
self.assertFalse(is_registered(A.__protocol_name__))
protocol_definition = register(A)
self.assertTrue(is_registered(A.__protocol_name__))
p_id, fields, extra_tests = protocol_definition
expected_result = {
"__class__": "(Assertion) Field Guard for: typing.Callable",
"__delattr__": "(Assertion) Field Guard for: typing.Callable",
"__dir__": "(Assertion) Field Guard for: typing.Callable",
"__doc__": "(Assertion) Field Guard for: typing.Any",
"__eq__": "(Assertion) Field Guard for: typing.Callable",
"__format__": "(Assertion) Field Guard for: typing.Callable",
"__ge__": "(Assertion) Field Guard for: typing.Callable",
"__getattribute__": "(Assertion) Field Guard for: typing.Callable",
"__gt__": "(Assertion) Field Guard for: typing.Callable",
"__hash__": "(Assertion) Field Guard for: typing.Callable",
"__init__": "(Assertion) Field Guard for: typing.Callable",
"__init_subclass__": "(Assertion) Field Guard for: typing.Callable",
"__le__": "(Assertion) Field Guard for: typing.Callable",
"__lt__": "(Assertion) Field Guard for: typing.Callable",
"__ne__": "(Assertion) Field Guard for: typing.Callable",
"__new__": "(Assertion) Field Guard for: typing.Callable",
"__reduce__": "(Assertion) Field Guard for: typing.Callable",
"__reduce_ex__": "(Assertion) Field Guard for: typing.Callable",
"__repr__": "(Assertion) Field Guard for: typing.Callable",
"__setattr__": "(Assertion) Field Guard for: typing.Callable",
"__sizeof__": "(Assertion) Field Guard for: typing.Callable",
"__str__": "(Assertion) Field Guard for: typing.Callable",
"__subclasshook__": "(Assertion) Field Guard for: typing.Callable",
"foo": "(Assertion) Field Guard for: typing.Callable",
"foo_1": "(Assertion) Field Guard for: typing.Callable[[int], typing.Any]",
"foo_2": "(Assertion) Field Guard for: typing.Callable[[str], int]",
}
self.assertEqual(p_id, A.__protocol_name__)
self.assertEqual(len(fields), len(expected_result))
for k, v in expected_result.items():
with self.subTest(k=k):
self.assertEqual(str(fields[k]), v)
self.assertIsNone(extra_tests)
@unittest.skipIf(sys.version_info < (3, 8), "not compatible with Python < 3.8")
def test_parent(self):
T = typing.TypeVar("T", bound="A")
class A(object):
var: int = 12
something_else: typing.Optional[str] = None
val: typing.Callable[[typing.Type[T], int], bool]
def bar(self, data: str) -> str:
pass
__protocol_name__ = "main.A"
protocol_definition = register(A)
p_id, fields, extra_tests = protocol_definition
expected_result = {
"__class__": "(Assertion) Field Guard for: typing.Callable",
"__delattr__": "(Assertion) Field Guard for: typing.Callable",
"__dir__": "(Assertion) Field Guard for: typing.Callable",
"__doc__": "(Assertion) Field Guard for: typing.Any",
"__eq__": "(Assertion) Field Guard for: typing.Callable",
"__format__": "(Assertion) Field Guard for: typing.Callable",
"__ge__": "(Assertion) Field Guard for: typing.Callable",
"__getattribute__": "(Assertion) Field Guard for: typing.Callable",
"__gt__": "(Assertion) Field Guard for: typing.Callable",
"__hash__": "(Assertion) Field Guard for: typing.Callable",
"__init__": "(Assertion) Field Guard for: typing.Callable",
"__init_subclass__": "(Assertion) Field Guard for: typing.Callable",
"__le__": "(Assertion) Field Guard for: typing.Callable",
"__lt__": "(Assertion) Field Guard for: typing.Callable",
"__ne__": "(Assertion) Field Guard for: typing.Callable",
"__new__": "(Assertion) Field Guard for: typing.Callable",
"__reduce__": "(Assertion) Field Guard for: typing.Callable",
"__reduce_ex__": "(Assertion) Field Guard for: typing.Callable",
"__repr__": "(Assertion) Field Guard for: typing.Callable",
"__setattr__": "(Assertion) Field Guard for: typing.Callable",
"__sizeof__": "(Assertion) Field Guard for: typing.Callable",
"__str__": "(Assertion) Field Guard for: typing.Callable",
"__subclasshook__": "(Assertion) Field Guard for: typing.Callable",
"var": "(Assertion) Field Guard for: <class 'int'>",
"something_else": "(Assertion) Field Guard for: typing.Union[str, NoneType]",
"val": "(Assertion) Field Guard for: typing.Callable[[typing.Type[~T], int], bool]",
"bar": "(Assertion) Field Guard for: typing.Callable[[typing.Type[~T], str], str]",
}
self.assertEqual(p_id, A.__protocol_name__)
self.assertEqual(len(fields), len(expected_result))
for k, v in expected_result.items():
with self.subTest(k=k):
self.assertEqual(str(fields[k]), v)
self.assertIsNone(extra_tests)
if __name__ == "__main__":
unittest.main()
| 43.493151
| 96
| 0.611811
| 676
| 6,350
| 5.323965
| 0.159763
| 0.206168
| 0.2798
| 0.323979
| 0.819672
| 0.77466
| 0.764657
| 0.716588
| 0.699639
| 0.643512
| 0
| 0.00213
| 0.260787
| 6,350
| 145
| 97
| 43.793103
| 0.764593
| 0
| 0
| 0.692982
| 0
| 0.017544
| 0.462992
| 0.017323
| 0
| 0
| 0
| 0
| 0.587719
| 1
| 0.070175
| false
| 0.035088
| 0.04386
| 0
| 0.149123
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
8737ff08ad2af149bec1f8041ec6b4415bf633f3
| 5,222
|
py
|
Python
|
flying_files.py
|
lidongyv/PSSM
|
61ef78bc465fd53fb128d0aa1b913f787c8c7f74
|
[
"Apache-2.0"
] | null | null | null |
flying_files.py
|
lidongyv/PSSM
|
61ef78bc465fd53fb128d0aa1b913f787c8c7f74
|
[
"Apache-2.0"
] | null | null | null |
flying_files.py
|
lidongyv/PSSM
|
61ef78bc465fd53fb128d0aa1b913f787c8c7f74
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 8 14:52:01 2018
@author: lidong
"""
import argparse
import os
import sys
from python_pfm import *
import cv2
import numpy as np
import matplotlib.pyplot as plt
from skimage.morphology import disk
from skimage.data import camera
from skimage.filters import roberts, sobel, scharr, prewitt
from skimage.filters.rank import median
from skimage.segmentation import felzenszwalb
from skimage.segmentation import mark_boundaries
from skimage.util import img_as_float
from skimage import feature
pathl=[
r'/home/lidong/Documents/datasets/Driving/frames_finalpass/15mm_focallength/scene_forwards/slow/left',
r'/home/lidong/Documents/datasets/Driving/frames_finalpass/15mm_focallength/scene_forwards/fast/left',
r'/home/lidong/Documents/datasets/Driving/frames_finalpass/15mm_focallength/scene_backwards/slow/left',
r'/home/lidong/Documents/datasets/Driving/frames_finalpass/15mm_focallength/scene_backwards/fast/left',
r'/home/lidong/Documents/datasets/Driving/frames_finalpass/35mm_focallength/scene_forwards/slow/left',
r'/home/lidong/Documents/datasets/Driving/frames_finalpass/35mm_focallength/scene_forwards/fast/left',
r'/home/lidong/Documents/datasets/Driving/frames_finalpass/35mm_focallength/scene_backwards/slow/left',
r'/home/lidong/Documents/datasets/Driving/frames_finalpass/35mm_focallength/scene_backwards/fast/left'
]
pathl.sort()
pathr=[
r'/home/lidong/Documents/datasets/Driving/frames_finalpass/15mm_focallength/scene_forwards/slow/right',
r'/home/lidong/Documents/datasets/Driving/frames_finalpass/15mm_focallength/scene_forwards/fast/right',
r'/home/lidong/Documents/datasets/Driving/frames_finalpass/15mm_focallength/scene_backwards/slow/right',
r'/home/lidong/Documents/datasets/Driving/frames_finalpass/15mm_focallength/scene_backwards/fast/right',
r'/home/lidong/Documents/datasets/Driving/frames_finalpass/35mm_focallength/scene_forwards/slow/right',
r'/home/lidong/Documents/datasets/Driving/frames_finalpass/35mm_focallength/scene_forwards/fast/right',
r'/home/lidong/Documents/datasets/Driving/frames_finalpass/35mm_focallength/scene_backwards/slow/right',
r'/home/lidong/Documents/datasets/Driving/frames_finalpass/35mm_focallength/scene_backwards/fast/right',
]
pathr.sort()
pathd=[
r'/home/lidong/Documents/datasets/Driving/disparity/15mm_focallength/scene_forwards/slow/left',
r'/home/lidong/Documents/datasets/Driving/disparity/15mm_focallength/scene_forwards/fast/left',
r'/home/lidong/Documents/datasets/Driving/disparity/15mm_focallength/scene_backwards/slow/left',
r'/home/lidong/Documents/datasets/Driving/disparity/15mm_focallength/scene_backwards/fast/left',
r'/home/lidong/Documents/datasets/Driving/disparity/35mm_focallength/scene_forwards/slow/left',
r'/home/lidong/Documents/datasets/Driving/disparity/35mm_focallength/scene_forwards/fast/left',
r'/home/lidong/Documents/datasets/Driving/disparity/35mm_focallength/scene_backwards/slow/left',
r'/home/lidong/Documents/datasets/Driving/disparity/35mm_focallength/scene_backwards/fast/left'
]
pathd.sort()
paths=[
    r'/home/lidong/Documents/datasets/Driving/object_index/15mm_focallength/scene_forwards/slow/left',
    r'/home/lidong/Documents/datasets/Driving/object_index/15mm_focallength/scene_forwards/fast/left',
    r'/home/lidong/Documents/datasets/Driving/object_index/15mm_focallength/scene_backwards/slow/left',
    r'/home/lidong/Documents/datasets/Driving/object_index/15mm_focallength/scene_backwards/fast/left',
    r'/home/lidong/Documents/datasets/Driving/object_index/35mm_focallength/scene_forwards/slow/left',
    r'/home/lidong/Documents/datasets/Driving/object_index/35mm_focallength/scene_forwards/fast/left',
    r'/home/lidong/Documents/datasets/Driving/object_index/35mm_focallength/scene_backwards/slow/left',
    r'/home/lidong/Documents/datasets/Driving/object_index/35mm_focallength/scene_backwards/fast/left'
]
paths.sort()

output_dir=r'/home/lidong/Documents/datasets/Driving/train_data/'


def _collect_files(dirs):
    """Return the full path of every file under each directory in *dirs*.

    Directories are visited in the given order; files within each directory
    are visited in sorted (lexicographic) order, matching the original
    listdir()+sort() loops this replaces.
    """
    collected = []
    for directory in dirs:
        for name in sorted(os.listdir(directory)):
            collected.append(os.path.join(directory, name))
    return collected


# One flat, sorted-per-directory list of file paths per modality.  The
# directory lists themselves were sorted above, so overall ordering is
# identical to the original four copy-pasted loops.
p_disparity = _collect_files(pathd)
p_semantic = _collect_files(paths)
p_left_image = _collect_files(pathl)
p_right_image = _collect_files(pathr)

# The original loops left `path` bound to the last list processed; keep that
# binding in case later code in this script reads it.
path = pathr
| 38.681481
| 116
| 0.751436
| 715
| 5,222
| 5.33986
| 0.13007
| 0.043216
| 0.095076
| 0.172865
| 0.840492
| 0.839183
| 0.824777
| 0.824777
| 0.824777
| 0.824777
| 0
| 0.016971
| 0.119877
| 5,222
| 135
| 117
| 38.681481
| 0.813751
| 0.045959
| 0
| 0.181818
| 0
| 0
| 0.62837
| 0.62837
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.181818
| 0.170455
| 0
| 0.170455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 10
|
87584950a772557d8e8bf6630329a8119e2113ff
| 1,404
|
py
|
Python
|
tests/test_provider_cyrilgdn_postgresql.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 507
|
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
tests/test_provider_cyrilgdn_postgresql.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 135
|
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
tests/test_provider_cyrilgdn_postgresql.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 81
|
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# tests/test_provider_cyrilgdn_postgresql.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:25:01 UTC)
def test_provider_import():
    """Smoke-test: the postgresql provider module must be importable."""
    import terrascript.provider.cyrilgdn.postgresql as provider_module  # noqa: F401
def test_resource_import():
    """Smoke-test: every postgresql resource class must be importable."""
    from terrascript.resource.cyrilgdn.postgresql import (  # noqa: F401
        postgresql_database,
        postgresql_default_privileges,
        postgresql_extension,
        postgresql_grant,
        postgresql_grant_role,
        postgresql_physical_replication_slot,
        postgresql_replication_slot,
        postgresql_role,
        postgresql_schema,
    )
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.cyrilgdn.postgresql
#
# t = terrascript.provider.cyrilgdn.postgresql.postgresql()
# s = str(t)
#
# assert 'https://github.com/cyrilgdn/terraform-provider-postgresql' in s
# assert '1.14.0' in s
| 32.651163
| 86
| 0.792735
| 164
| 1,404
| 6.640244
| 0.402439
| 0.214876
| 0.190083
| 0.256198
| 0.594123
| 0.515152
| 0.515152
| 0.358127
| 0.132231
| 0
| 0
| 0.013234
| 0.138889
| 1,404
| 42
| 87
| 33.428571
| 0.88751
| 0.366809
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02381
| 0
| 1
| 0.142857
| true
| 0
| 0.857143
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
5e75bdd3b9c7ce6e980e07e6832566eec397b245
| 10,269
|
py
|
Python
|
dymos/phase/test/test_set_time_options.py
|
naylor-b/dymos
|
56ee72041056ae20c3332d060e291c4da93844b1
|
[
"Apache-2.0"
] | null | null | null |
dymos/phase/test/test_set_time_options.py
|
naylor-b/dymos
|
56ee72041056ae20c3332d060e291c4da93844b1
|
[
"Apache-2.0"
] | null | null | null |
dymos/phase/test/test_set_time_options.py
|
naylor-b/dymos
|
56ee72041056ae20c3332d060e291c4da93844b1
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function, division, absolute_import
import os
import unittest
import warnings
from openmdao.api import Problem, Group, IndepVarComp, ScipyOptimizeDriver, DirectSolver
from openmdao.utils.assert_utils import assert_rel_error
from dymos import Phase, GaussLobatto
from dymos.examples.brachistochrone.brachistochrone_ode import BrachistochroneODE
from dymos.examples.double_integrator.double_integrator_ode import DoubleIntegratorODE
class TestPhaseTimeOptions(unittest.TestCase):
    """Tests for Phase time-option validation and value propagation.

    Every test builds the same brachistochrone phase; only the arguments to
    ``set_time_options`` differ, so the common construction is factored into
    ``_make_brachistochrone_problem``.
    """

    @classmethod
    def tearDownClass(cls):
        # Remove simulation databases that tests in this class may leave behind.
        for filename in ['phase0_sim.db', 'brachistochrone_sim.db']:
            if os.path.exists(filename):
                os.remove(filename)

    @staticmethod
    def _make_brachistochrone_problem(**time_options):
        """Build the standard brachistochrone test problem.

        All keyword arguments are forwarded verbatim to
        ``phase.set_time_options``.  Returns ``(problem, phase)``;
        ``problem.setup()`` has NOT been called yet so each test controls
        when setup-time warnings are emitted.
        """
        p = Problem(model=Group())
        p.driver = ScipyOptimizeDriver()
        p.driver.options['dynamic_simul_derivs'] = True

        phase = Phase(ode_class=BrachistochroneODE,
                      transcription=GaussLobatto(num_segments=8, order=3))
        p.model.add_subsystem('phase0', phase)

        phase.set_time_options(**time_options)

        phase.set_state_options('x', fix_initial=True, fix_final=True)
        phase.set_state_options('y', fix_initial=True, fix_final=True)
        phase.set_state_options('v', fix_initial=True, fix_final=False)

        phase.add_control('theta', continuity=True, rate_continuity=True,
                          units='deg', lower=0.01, upper=179.9)
        phase.add_design_parameter('g', units='m/s**2', opt=False, val=9.80665)

        # Minimize time at the end of the phase
        phase.add_objective('time', loc='final', scaler=10)
        phase.add_boundary_constraint('time', loc='initial', equals=0)

        p.model.linear_solver = DirectSolver()
        return p, phase

    def test_fixed_time_invalid_options(self):
        """Scaling/bounds options must warn when fix_initial/fix_duration are set."""
        p, _ = self._make_brachistochrone_problem(
            fix_initial=True, fix_duration=True,
            initial_bounds=(1.0, 5.0), initial_adder=0.0,
            initial_scaler=1.0, initial_ref0=0.0,
            initial_ref=1.0, duration_bounds=(1.0, 5.0),
            duration_adder=0.0, duration_scaler=1.0,
            duration_ref0=0.0, duration_ref=1.0)

        expected_msg0 = 'Phase time options have no effect because fix_initial=True for ' \
                        'phase \'phase0\': initial_bounds, initial_scaler, initial_adder, ' \
                        'initial_ref, initial_ref0'

        expected_msg1 = 'Phase time options have no effect because fix_duration=True for' \
                        ' phase \'phase0\': duration_bounds, duration_scaler, ' \
                        'duration_adder, duration_ref, duration_ref0'

        with warnings.catch_warnings(record=True) as ctx:
            warnings.simplefilter('always')
            p.setup(check=True)

        self.assertIn(expected_msg0, [str(w.message) for w in ctx])
        self.assertIn(expected_msg1, [str(w.message) for w in ctx])

    def test_initial_val_and_final_val_stick(self):
        """initial_val/duration_val must survive setup unchanged."""
        p, _ = self._make_brachistochrone_problem(
            fix_initial=False, fix_duration=False,
            initial_val=0.01, duration_val=1.9)

        p.setup(check=True)

        assert_rel_error(self, p['phase0.t_initial'], 0.01)
        assert_rel_error(self, p['phase0.t_duration'], 1.9)

    def test_ex_double_integrator_input_and_fixed_times_warns(self):
        """
        Tests that time optimization options cause a ValueError to be raised when t_initial and
        t_duration are connected to external sources.
        """
        p, _ = self._make_brachistochrone_problem(
            input_initial=True, fix_initial=True,
            input_duration=True, fix_duration=True)

        with warnings.catch_warnings(record=True) as ctx:
            warnings.simplefilter('always')
            p.setup(check=True)

        expected_msg0 = 'Phase \'phase0\' initial time is an externally-connected input, therefore ' \
                        'fix_initial has no effect.'

        expected_msg1 = 'Phase \'phase0\' time duration is an externally-connected input, ' \
                        'therefore fix_duration has no effect.'

        self.assertIn(expected_msg0, [str(w.message) for w in ctx])
        self.assertIn(expected_msg1, [str(w.message) for w in ctx])

    def test_input_time_invalid_options(self):
        """Scaling/bounds options must warn when time values are external inputs."""
        p, _ = self._make_brachistochrone_problem(
            input_initial=True, input_duration=True,
            initial_bounds=(1.0, 5.0), initial_adder=0.0,
            initial_scaler=1.0, initial_ref0=0.0,
            initial_ref=1.0, duration_bounds=(1.0, 5.0),
            duration_adder=0.0, duration_scaler=1.0,
            duration_ref0=0.0, duration_ref=1.0)

        # NOTE(review): these messages mention fix_initial/fix_duration even
        # though this test sets input_initial/input_duration; the wording is
        # kept byte-identical to the original test — confirm against dymos.
        expected_msg0 = 'Phase time options have no effect because fix_initial=True for ' \
                        'phase \'phase0\': initial_bounds, initial_scaler, initial_adder, ' \
                        'initial_ref, initial_ref0'

        expected_msg1 = 'Phase time options have no effect because fix_duration=True for' \
                        ' phase \'phase0\': duration_bounds, duration_scaler, ' \
                        'duration_adder, duration_ref, duration_ref0'

        with warnings.catch_warnings(record=True) as ctx:
            warnings.simplefilter('always')
            p.setup(check=True)

        self.assertIn(expected_msg0, [str(w.message) for w in ctx])
        self.assertIn(expected_msg1, [str(w.message) for w in ctx])

    def test_unbounded_time(self):
        """The optimization must succeed with no bounds on either time option."""
        p, phase = self._make_brachistochrone_problem(
            fix_initial=False, fix_duration=False)

        p.setup(check=True)

        p['phase0.t_initial'] = 0.0
        p['phase0.t_duration'] = 2.0

        p['phase0.states:x'] = phase.interpolate(ys=[0, 10], nodes='state_input')
        p['phase0.states:y'] = phase.interpolate(ys=[10, 5], nodes='state_input')
        p['phase0.states:v'] = phase.interpolate(ys=[0, 9.9], nodes='state_input')
        p['phase0.controls:theta'] = phase.interpolate(ys=[5, 100], nodes='control_input')
        p['phase0.design_parameters:g'] = 9.80665

        p.run_driver()

        self.assertTrue(p.driver.result.success,
                        msg='Brachistochrone with outbounded times has failed')
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 41.240964
| 102
| 0.627325
| 1,285
| 10,269
| 4.800778
| 0.142412
| 0.035662
| 0.043119
| 0.044091
| 0.801751
| 0.788296
| 0.779219
| 0.757821
| 0.757821
| 0.757821
| 0
| 0.028575
| 0.260493
| 10,269
| 248
| 103
| 41.407258
| 0.783777
| 0.031551
| 0
| 0.716981
| 0
| 0
| 0.14179
| 0.006963
| 0
| 0
| 0
| 0
| 0.062893
| 1
| 0.037736
| false
| 0
| 0.056604
| 0
| 0.100629
| 0.006289
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5e8453686bce7ca3afbfadd5a9b86fc15f9d4b2f
| 118,325
|
py
|
Python
|
V1/R2/N2HF_locations_v1_r2.py
|
N2HF-OFFICIAL/n2hf
|
de4a26a3b70082cd2375cc3fe7a5c2fd09cec085
|
[
"MIT"
] | null | null | null |
V1/R2/N2HF_locations_v1_r2.py
|
N2HF-OFFICIAL/n2hf
|
de4a26a3b70082cd2375cc3fe7a5c2fd09cec085
|
[
"MIT"
] | null | null | null |
V1/R2/N2HF_locations_v1_r2.py
|
N2HF-OFFICIAL/n2hf
|
de4a26a3b70082cd2375cc3fe7a5c2fd09cec085
|
[
"MIT"
] | null | null | null |
from PIL import Image
import os
import numpy as np
import pandas as pd
from random import seed
from random import randint
# NOTE(review): os.path.dirname('') is always '' — presumably intended to be
# os.path.dirname(__file__); confirm before relying on `dirname`.
dirname = os.path.dirname('')
# Image dimensions tuple; presumably (width, height) in pixels — TODO confirm
# against how `dimensions` is consumed later in this file.
dimensions = 600, 600
def make_locations(ma, ma2, ma3):
count_jinjusung = 0
for x in range(0, ma2):
b=ma3
seed(x+b)
## 1이 진한거 2가 연한거
## a : 벽돌
a1 = (randint(80, 110), randint(80, 110), randint(70, 100))
a2 = (randint(120, 150), randint(115, 145), randint(70, 100))
a3 = (randint(170, 200), randint(165, 195), randint(105, 135))
## b : 검은색
b = (0, 0, 0)
## c : 기와
c1 = (randint(20, 50), randint(20, 50), randint(20, 50))
c2 = (randint(50, 80), randint(50, 80), randint(50, 80))
c3 = (randint(85, 115), randint(85, 115), randint(85, 115))
## d : 기둥
d1 = (randint(15, 45), randint(0, 20), randint(0, 20))
d2 = (randint(35, 65), randint(0, 20), randint(0, 20))
## e : 무늬
e1 = (randint(0, 10), randint(15, 45), randint(5, 35))
e2 = (randint(0, 10), randint(30, 60), randint(20, 50))
e3 = (randint(0, 10), randint(50, 80), randint(35, 65))
## f : 나무
f1 = (randint(0, 30), randint(0, 20), randint(0, 10))
f2 = (randint(25, 55), randint(0, 30), randint(0, 10))
f3 = (randint(30, 60), randint(10, 40), randint(0, 15))
## g : 깃발
g1 = (randint(0, 10), randint(0, 10), randint(45, 75))
## h : 풀밭
h1 = (randint(0, 10), randint(50, 80), randint(5, 35))
h2 = (randint(0, 20), randint(60, 90), randint(5, 35))
## i : 흙
i1 = (randint(40, 70), randint(35, 65), randint(0, 10))
i2 = (randint(60, 90), randint(55, 85), randint(0, 10))
## 배경색
bgq = randint(0, 400)
if bgq < 100:
bg1_1 = (randint(50, 80), randint(135, 165), randint(155, 185))
elif 100<=bgq<200:
bg1_1 = (randint(140, 170), randint(40, 70), randint(0, 30))
elif 200<=bgq<300:
bg1_1 = (randint(135, 165), randint(140, 170), randint(0, 30))
else:
bg1_1 = (randint(0, 30), randint(10, 40), randint(140, 170))
# Randomly pick one of four warm/earth-tone palettes for the secondary
# background color bg1_2 (each branch has ~25% probability).
bge = randint(0, 400)
if bge < 100:
    bg1_2 = (randint(155, 185), randint(120, 150), randint(80, 110))  # sandy tan
elif bge < 200:  # 100 <= bge < 200 (the first branch already excluded < 100)
    bg1_2 = (randint(80, 110), randint(45, 75), randint(0, 30))  # dark brown
elif bge < 300:  # 200 <= bge < 300
    bg1_2 = (randint(60, 90), randint(55, 85), randint(0, 10))  # olive
else:  # 300 <= bge <= 400
    bg1_2 = (randint(0, 10), randint(60, 90), randint(20, 50))  # deep green

# Blue-tinted gradient shades for the second background layer, ordered
# darkest (bg2_1) to brightest (bg2_8, a near-white highlight). Each
# component is jittered within a 30-point window so every run differs
# slightly while keeping the overall gradient shape.
bg2_1 = (randint(30, 60), randint(70, 100), randint(80, 110))
bg2_2 = (randint(40, 70), randint(85, 115), randint(95, 125))
bg2_3 = (randint(50, 80), randint(105, 135), randint(120, 150))
bg2_4 = (randint(60, 90), randint(125, 155), randint(140, 170))
bg2_5 = (randint(65, 95), randint(150, 180), randint(170, 200))
bg2_6 = (randint(70, 100), randint(155, 185), randint(170, 200))
bg2_7 = (randint(75, 105), randint(155, 185), randint(175, 205))
bg2_8 = (randint(185, 255), randint(185, 255), randint(185, 255))
JinJuSung1 = [
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, c1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, c1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, c1, b, b, b, b, b, b, b, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, b, b, b, b, b, b, b, c1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, a3, a3, a3, a3, a3, a3, a3, a3, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, a3, a3, a3, a3, a3, a3, a3, a3, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, b, b, a1, c1, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, b, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, b, b, b, b, b, b, b, a1, a1, a1, c1, c1, c2, c1, c2, c1, c2, c1, c2, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, c1, c2, c1, c2, c1, c2, c1, c1, a3, a3, a3, a3, b, b, b, b, b, b, b, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, a1, a1, a1, a1, a1, a1, a1, a1, c1, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, a1, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c1, c2, c1, c1, a3, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, c1, c1, c2, c2, c1, c2, c1, c2, c2, c2, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c2, c2, c2, c1, c2, c2, c1, c2, c2, c1, c1, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, b, b, b, b, c1, c2, c1, c2, c1, c1, c2, c1, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c1, c2, c1, c1, c2, c1, c2, c2, c1, b, b, b, b, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, f2, f3, e1, b, b, b, b, b, b, b, b, c2, c1, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c1, c2, c1, b, b, b, b, b, b, b, b, e1, f3, f2, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, f3, f1, f3, f3, f3, e1, e1, e1, e1, e1, e1, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, e1, e1, e1, e1, e1, e1, f3, f3, f3, f1, f3, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, f2, f1, f1, f1, f3, f3, f3, f3, f2, e1, e1, e2, e1, e1, e1, e1, e2, e1, e1, e1, e1, e2, e1, e1, e1, e1, e1, e2, e1, e1, e1, e1, e1, e2, e1, e1, e1, e1, e1, e2, e1, e1, e1, e1, e2, e1, e1, f2, f3, f3, f3, f3, f1, f1, f1, f2, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, f3, f1, f1, f1, f1, f1, f1, f2, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, f2, f1, f1, f1, f1, f1, f1, f3, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, f2, f1, f1, f1, f1, f2, e1, e1, e1, e2, e1, e1, e1, e1, e2, e1, e1, e1, e1, e2, e1, e1, e1, e1, e1, e2, e1, e1, e1, e1, e1, e2, e1, e1, e1, e1, e1, e2, e1, e1, e1, e1, e2, e1, e1, e1, f2, f1, f1, f1, f1, f2, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, f3, f1, f1, f1, f1, f2, e3, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e3, f2, f1, f1, f1, f1, f3, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, f2, f1, f1, f1, f1, b, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, b, f1, f1, f1, f1, f2, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, f3, f1, f1, f1, b, e3, e3, e3, e3, e3, e3, e3, e3, b, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, b, e3, e3, e3, e3, e3, e3, e3, e3, b, f1, f1, f1, f3, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, f3, b, b, f1, b, b, d2, d2, d2, d2, d2, d2, d2, d2, b, d2, d2, d2, d2, d2, d2, d2, d2, d2, d2, d2, d2, d2, d2, d2, d2, d2, d2, d2, d2, b, d2, d2, d2, d2, d2, d2, d2, d2, b, b, f1, b, b, f3, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, bg1_1, bg1_1, b, bg1_1, b, d2, e3, e3, e3, e3, e3, d2, d2, b, d2, d2, e3, e3, e3, e3, e3, d2, e3, e3, e3, e3, e3, d2, e3, e3, e3, e3, d2, d2, b, d2, d2, e3, e3, e3, e3, e3, d2, b, bg1_1, b, bg1_1, bg1_1, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, d2, e3, e3, e3, e3, e3, d2, d2, b, d2, d2, e3, e3, e3, e3, e3, d2, e3, e3, e3, e3, e3, d2, e3, e3, e3, e3, d2, d2, b, d2, d2, e3, e3, e3, e3, e3, d2, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, b, b, b, b, b, a1, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, a1, b, b, b, b, b, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, b, b, b, b, b, a1, a1, a1, a1, a1, a1, c1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, c1, a1, a1, a3, a3, a3, a3, b, b, b, b, b, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, a1, a1, a1, a1, a1, a1, c2, c1, c1, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c1, c1, c2, a3, a3, a3, a3, a3, a3, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, c1, c1, c1, c2, c1, c1, c2, c1, c1, c2, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c2, c1, c1, c2, c2, c1, c1, c2, c1, a3, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, c1, c1, c2, c2, c1, c2, c2, c1, c2, c2, c1, c2, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c2, c2, c1, c1, c2, c2, c1, c1, c2, c2, c1, c2, c2, c1, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, b, c1, c1, c2, c1, c1, c2, c1, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c1, c1, c2, c2, c1, c1, c2, c1, b, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, b, b, b, b, b, b, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c1, b, b, b, b, b, b, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, f2, f3, f3, f3, f3, b, b, b, b, b, b, b, b, b, b, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c1, b, b, b, b, b, b, b, b, b, b, f3, f3, f3, f3, f2, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, f3, f1, f1, f1, f3, e1, e1, e1, e1, e2, e1, e1, e1, e1, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, e1, e2, e1, e1, e1, e1, e2, e1, e1, f3, f1, f1, f1, f3, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, f2, f1, f1, f1, f3, f2, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, f2, f3, f1, f1, f1, f2, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, f3, f1, f1, f1, f2, e1, e1, e2, e1, e1, e1, e1, e2, e1, e1, e1, e1, e2, e1, e1, e1, e1, e2, e1, e1, e1, e1, e1, e2, e1, e1, e1, e1, e1, e2, e1, e1, e1, e1, e1, e2, e1, e1, e1, e1, e2, e1, e1, e1, e1, e2, f2, f1, f1, f1, f3, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, f2, f3, f2, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, f2, f3, f2, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, b, f2, e2, e3, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e3, e2, f2, b, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, b, b, d1, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, d1, b, b, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, b, bg1_1, d1, d1, d1, d1, bg1_1, bg1_1, bg1_1, d1, d1, bg1_1, bg1_1, bg1_1, d1, d1, bg1_1, bg1_1, bg1_1, d1, d1, bg1_1, bg1_1, bg1_1, bg1_1, d1, d1, bg1_1, bg1_1, bg1_1, d1, d1, bg1_1, bg1_1, bg1_1, d1, d1, bg1_1, bg1_1, bg1_1, d1, d1, d1, d1, bg1_1, b, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, b, bg1_1, bg1_1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, bg1_1, bg1_1, b, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, a3, a3, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, a3, a3, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, a3, a3, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, a3, a3, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, a3, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, a3, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, a3, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, b, b, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, a3, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, a3, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, b, a3, a3, a3, a3, b, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, a3, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, a3, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, b, a3, a3, b, b, b, b, a3, a3, b, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, a3, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, a3, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, a3, b, b, b, b, b, b, b, b, a3, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, a3, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, a3, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, b, b, b, b, b, b, b, b, b, b, b, b, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, a3, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, a3, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, b, b, b, b, b, b, c1, c1, c1, c1, b, b, b, b, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, a3, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, a3, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, b, b, b, b, b, c1, c1, c1, c1, c1, c1, c1, c1, b, b, b, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, a3, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, a3, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, b, b, b, b, b, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, b, b, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, a3, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, bg1_1, b, a3, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, b, b, b, b, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, b, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, a3, b, bg1_1, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, bg1_1, b, a3, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, b, c1, b, b, b, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, b, b, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, a3, b, bg1_1, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, bg1_1, b, a3, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, b, c1, b, b, b, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, b, b, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, a3, b, bg1_1, bg1_1, bg1_1],
[bg1_1, bg1_1, b, a3, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, b, c1, b, b, b, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, b, b, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, a3, b, bg1_1, bg1_1],
[bg1_1, b, a3, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, b, c1, b, b, b, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, b, b, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, a3, b, bg1_1],
[b, a3, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, b, c1, b, b, b, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, b, b, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, a3, b],
[a3, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, b, c1, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, a3],
[a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, b, c1, b, b, b, a2, a2, a2, a2, a2, a2, a2, a2, a2, b, b, b, b, b, b, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3],
[b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, a3, b, b, b, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, b, b, b, b, a3, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b],
[b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b],
[bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2],
[bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2],
[bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2],
[bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2],
[bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2],
[bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2],
[bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2],
[bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2],
[bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2],
[bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2],
[bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2],
[bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2],
[bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2, bg1_2],
]
JinJuSung2 = [
[bg2_3, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7],
[bg2_3, bg2_3, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_8, bg2_8, bg2_8, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_8, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7],
[bg2_3, bg2_3, bg2_3, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7],
[bg2_3, bg2_3, bg2_3, bg2_3, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7],
[bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7],
[bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_4, bg2_4, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_4, bg2_8, bg2_8, bg2_8, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_8, bg2_8, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7],
[bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_4, bg2_8, bg2_8, bg2_8, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_8, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7],
[bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_8, bg2_4, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7],
[bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_8, bg2_3, bg2_3, bg2_8, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7],
[bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_8, bg2_3, bg2_8, bg2_3, bg2_8, bg2_4, bg2_8, bg2_4, bg2_4, bg2_4, bg2_8, bg2_8, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_8, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_8, bg2_6, bg2_8, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7],
[bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_8, bg2_3, bg2_3, bg2_3, bg2_8, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_4, bg2_4, bg2_8, bg2_8, bg2_8, bg2_4, bg2_4, bg2_4, bg2_8, bg2_8, bg2_8, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_8, bg2_8, bg2_8, bg2_6, bg2_6, bg2_6, bg2_8, bg2_8, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7],
[bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_8, bg2_8, bg2_8, bg2_3, bg2_3, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_8, bg2_8, bg2_8, bg2_4, bg2_4, bg2_4, bg2_8, bg2_8, bg2_8, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_8, bg2_8, bg2_8, bg2_5, bg2_5, bg2_5, bg2_8, bg2_8, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_8, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7],
[bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_8, bg2_3, bg2_3, bg2_4, bg2_4, bg2_8, bg2_8, bg2_8, bg2_8, bg2_8, bg2_4, bg2_4, bg2_4, bg2_8, bg2_8, bg2_8, bg2_8, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_8, bg2_8, bg2_8, bg2_8, bg2_5, bg2_6, bg2_6, bg2_8, bg2_8, bg2_8, bg2_8, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7],
[bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_8, bg2_8, bg2_3, bg2_4, bg2_4, bg2_4, bg2_4, bg2_8, bg2_8, bg2_8, bg2_8, bg2_8, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_8, bg2_8, bg2_8, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_8, bg2_8, bg2_8, bg2_8, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_7, bg2_8, bg2_8, bg2_8, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7],
[bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_8, bg2_8, bg2_8, bg2_8, bg2_8, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_5, bg2_8, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_6, bg2_8, bg2_8, bg2_8, bg2_8, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_7, bg2_7, bg2_7, bg2_8, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7],
[bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_4, bg2_4, bg2_5, bg2_8, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_7, bg2_7, bg2_7, bg2_8, bg2_7, bg2_7, bg2_7, bg2_7, bg2_7],
[bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_4, bg2_8, bg2_4, bg2_5, bg2_8, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6, bg2_8, bg2_7, bg2_7, bg2_8, bg2_7, bg2_7, bg2_7, bg2_7],
[bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_8, bg2_8, bg2_8, bg2_4, bg2_4, bg2_4, bg2_8, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_8, bg2_8, bg2_6, bg2_7, bg2_7, bg2_8, bg2_7, bg2_7, bg2_7, bg2_7],
[bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_8, bg2_8, bg2_8, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_7, bg2_8, bg2_8, bg2_8, bg2_7, bg2_7],
[bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_5, bg2_8, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6, bg2_8, bg2_7, bg2_7, bg2_7, bg2_8, bg2_7],
[bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_8, bg2_4, bg2_5, bg2_8, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_8, bg2_7, bg2_7, bg2_8],
[bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_8, bg2_8, bg2_4, bg2_4, bg2_4, bg2_8, bg2_4, bg2_8, bg2_4, bg2_8, bg2_4, bg2_8, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_8, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_8, bg2_6, bg2_8, bg2_7, bg2_8],
[bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_4, bg2_4, bg2_4, bg2_4, bg2_8, bg2_8, bg2_8, bg2_4, bg2_4, bg2_4, bg2_8, bg2_8, bg2_8, bg2_4, bg2_4, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_4, bg2_8, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_8, bg2_8, bg2_6, bg2_6, bg2_6, bg2_8, bg2_8, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_7, bg2_8],
[bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_8, bg2_8, bg2_8, bg2_4, bg2_4, bg2_4, bg2_8, bg2_8, bg2_8, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_4, bg2_4, bg2_8, bg2_8, bg2_8, bg2_4, bg2_4, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_6, bg2_8, bg2_8, bg2_8, bg2_6, bg2_6, bg2_6, bg2_8, bg2_8, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_8, bg2_8, bg2_7],
[bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_4, bg2_4, bg2_8, bg2_8, bg2_8, bg2_8, bg2_4, bg2_4, bg2_4, bg2_8, bg2_8, bg2_8, bg2_8, bg2_8, bg2_4, bg2_4, bg2_4, bg2_4, bg2_8, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_8, bg2_8, bg2_8, bg2_6, bg2_6, bg2_6, bg2_8, bg2_8, bg2_8, bg2_8, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6],
[bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_8, bg2_8, bg2_8, bg2_8, bg2_8, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_8, bg2_8, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_8, bg2_8, bg2_8, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6],
[bg2_2, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, c1, bg2_4, bg2_4, bg2_8, bg2_8, bg2_8, bg2_8, bg2_8, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, c1, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_8, bg2_8, bg2_8, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6],
[bg2_2, bg2_2, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_4, bg2_4, bg2_4, bg2_4, c1, b, b, b, b, b, b, b, b, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, b, b, b, b, b, b, b, b, c1, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6],
[bg2_2, bg2_2, bg2_2, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_4, bg2_4, bg2_4, b, a3, a3, a3, a3, a3, a3, a3, a3, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, a3, a3, a3, a3, a3, a3, a3, a3, b, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6],
[bg2_2, bg2_2, bg2_2, bg2_2, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, b, b, b, a1, c1, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, b, b, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6],
[bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, b, b, b, b, b, b, b, b, a1, a1, a1, c1, c1, c2, c1, c2, c1, c2, c1, c2, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, c1, c2, c1, c2, c1, c2, c1, c1, a3, a3, a3, a3, b, b, b, b, b, b, b, b, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6],
[bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, b, a1, a1, a1, a1, a1, a1, a1, a1, c1, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6],
[bg2_2, bg2_2, bg2_2, bg2_2, bg2_8, bg2_8, bg2_8, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, b, a1, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c1, c2, c1, c1, a3, b, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6],
[bg2_2, bg2_2, bg2_2, bg2_8, bg2_2, bg2_2, bg2_2, bg2_8, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, b, c1, c1, c2, c2, c1, c2, c1, c2, c2, c2, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c2, c2, c2, c1, c2, c2, c1, c2, c2, c1, c1, b, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_8, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6],
[bg2_2, bg2_2, bg2_8, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_8, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, b, b, b, b, b, c1, c2, c1, c2, c1, c1, c2, c1, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c1, c2, c1, c1, c2, c1, c2, c2, c1, b, b, b, b, b, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6],
[bg2_2, bg2_2, bg2_8, bg2_2, bg2_2, bg2_2, bg2_8, bg2_2, bg2_2, bg2_8, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, b, d2, e3, d2, b, b, b, b, b, b, b, b, c2, c1, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c1, c2, c1, b, b, b, b, b, b, b, b, a3, d2, a3, b, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6],
[bg2_2, bg2_2, bg2_2, bg2_8, bg2_8, bg2_8, bg2_2, bg2_2, bg2_2, bg2_8, bg2_2, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, b, f3, f1, f3, e3, d2, e3, d2, e3, d2, e3, d2, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, a3, d2, a3, d2, a3, d2, a3, d2, f3, f1, f3, b, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6],
[bg2_2, bg2_2, bg2_8, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_8, bg2_8, bg2_8, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, b, f2, f1, f1, f1, f3, f3, f3, f3, f2, e1, e3, d2, e3, d2, e3, d2, e3, d2, e3, d2, e3, d2, e3, d2, e3, d2, e3, d2, e3, d2, a3, d2, a3, d2, a3, d2, a3, d2, a3, d2, a3, d2, a3, d2, a3, d2, e1, f2, f3, f3, f3, f3, f1, f1, f1, f2, b, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6, bg2_8, bg2_8, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6],
[bg2_2, bg2_8, bg2_2, bg2_2, bg2_8, bg2_2, bg2_2, bg2_2, bg2_8, bg2_2, bg2_2, bg2_2, bg2_8, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, b, f3, f1, f1, f1, f1, f1, f1, f2, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, f2, f1, f1, f1, f1, f1, f1, f3, b, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_8, bg2_8, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_6],
[bg2_2, bg2_8, bg2_2, bg2_2, bg2_8, bg2_2, bg2_2, bg2_8, bg2_2, bg2_2, bg2_8, bg2_2, bg2_2, bg2_8, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, b, f2, f1, f1, f1, f1, f2, e1, e1, e1, e2, e1, e1, e1, e1, e2, e1, e1, e1, e1, e2, e1, e1, e1, e1, e1, e2, e1, e1, e1, e1, e1, e2, e1, e1, e1, e1, e1, e2, e1, e1, e1, e1, e2, e1, e1, e1, f2, f1, f1, f1, f1, f2, b, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_8, bg2_5, bg2_5, bg2_5, bg2_8, bg2_6, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6],
[bg2_2, bg2_2, bg2_8, bg2_8, bg2_2, bg2_2, bg2_2, bg2_8, bg2_2, bg2_8, bg2_2, bg2_8, bg2_2, bg2_8, bg2_2, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, b, f3, f1, f1, f1, f1, f2, e3, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e3, f2, f1, f1, f1, f1, f3, b, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_8, bg2_5, bg2_5, bg2_8, bg2_5, bg2_5, bg2_8, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6, bg2_8, bg2_6, bg2_6],
[bg2_8, bg2_8, bg2_2, bg2_2, bg2_2, bg2_2, bg2_8, bg2_2, bg2_2, bg2_8, bg2_2, bg2_2, bg2_2, bg2_8, bg2_2, bg2_2, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, b, f2, f1, f1, f1, f1, b, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, b, f1, f1, f1, f1, f2, b, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_8, bg2_5, bg2_8, bg2_5, bg2_8, bg2_5, bg2_8, bg2_6, bg2_6, bg2_6, bg2_8, bg2_8, bg2_6, bg2_6, bg2_6],
[bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_8, bg2_2, bg2_2, bg2_2, bg2_2, bg2_8, bg2_8, bg2_8, bg2_2, bg2_2, bg2_2, bg2_2, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, b, f3, f1, f1, f1, b, e3, e3, e3, e3, e3, e3, e3, e3, b, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, b, e3, e3, e3, e3, e3, e3, e3, e3, b, f1, f1, f1, f3, b, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_8, bg2_5, bg2_5, bg2_5, bg2_8, bg2_5, bg2_5, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_8, bg2_8, bg2_8],
[bg2_8, bg2_8, bg2_8, bg2_8, bg2_8, bg2_2, bg2_2, bg2_2, bg2_2, bg2_8, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, b, f3, b, b, f1, b, b, d2, d2, d2, d2, d2, d2, d2, d2, b, d2, d2, d2, d2, d2, d2, d2, d2, d2, d2, d2, d2, d2, d2, d2, d2, d2, d2, d2, d2, b, d2, d2, d2, d2, d2, d2, d2, d2, b, b, f1, b, b, f3, b, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_8, bg2_8, bg2_8, bg2_5, bg2_5, bg2_5, bg2_5, bg2_8, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6, bg2_6],
[bg2_8, bg2_8, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_8, bg2_8, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, b, bg2_3, bg2_3, b, bg2_3, b, d2, e3, e3, e3, e3, e3, d2, d2, b, d2, d2, e3, e3, e3, e3, e3, d2, e3, e3, e3, e3, e3, d2, e3, e3, e3, e3, d2, d2, b, d2, d2, e3, e3, e3, e3, e3, d2, b, bg2_5, b, bg2_5, bg2_5, b, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_8, bg2_5, bg2_5, bg2_5, bg2_5, bg2_8, bg2_8, bg2_8, bg2_8, bg2_8, bg2_6],
[bg2_2, bg2_2, bg2_8, bg2_8, bg2_8, bg2_8, bg2_8, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, bg2_3, b, d2, e3, e3, e3, e3, e3, d2, d2, b, d2, d2, e3, e3, e3, e3, e3, d2, e3, e3, e3, e3, e3, d2, e3, e3, e3, e3, d2, d2, b, d2, d2, e3, e3, e3, e3, e3, d2, b, bg2_4, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_8, bg2_8, bg2_5, bg2_5, bg2_5, bg2_5, bg2_7, bg2_8, bg2_8, bg2_8],
[bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_3, bg2_3, bg2_3, b, b, b, b, b, b, a1, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, a1, b, b, b, b, b, b, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_8, bg2_8, bg2_8, bg2_8, bg2_8, bg2_7, bg2_7, bg2_7],
[bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, b, b, b, b, b, b, a1, a1, a1, a1, a1, a1, c1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, a1, c1, a1, a1, a3, a3, a3, a3, b, b, b, b, b, b, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_7, bg2_7],
[bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, b, a1, a1, a1, a1, a1, a1, c2, c1, c1, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c1, c1, c2, a3, a3, a3, a3, a3, a3, b, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_7],
[bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, b, c1, c1, c1, c2, c1, c1, c2, c1, c1, c2, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c2, c1, c1, c2, c2, c1, c1, c2, c1, a3, b, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5],
[bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, b, c1, c1, c2, c2, c1, c2, c2, c1, c2, c2, c1, c2, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c2, c2, c1, c1, c2, c2, c1, c1, c2, c2, c1, c2, c2, c1, b, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5],
[bg2_1, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, b, b, c1, c1, c2, c1, c1, c2, c1, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c1, c1, c2, c2, c1, c1, c2, c1, b, b, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5],
[bg2_1, bg2_1, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, b, b, b, b, b, b, b, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c2, c1, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c1, b, b, b, b, b, b, b, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5],
[bg2_1, bg2_1, bg2_1, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, b, e3, d2, e3, d2, e3, b, b, b, b, b, b, b, b, b, b, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c2, c1, c1, c2, c1, c1, b, b, b, b, b, b, b, b, b, b, d2, e3, d2, e3, d2, b, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5],
[bg2_1, bg2_1, bg2_1, bg2_1, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, b, f3, f1, f1, f1, d2, e3, d2, e3, d2, e3, d2, e3, d2, e3, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, d2, e3, d2, e3, d2, e3, d2, e3, d2, e3, f1, f1, f1, f3, b, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5],
[bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, d2, d2, bg2_2, bg2_2, b, f2, f1, f1, f1, f3, f2, e1, e1, e1, e3, e1, e1, e1, d2, e3, d2, e3, d2, e3, d2, e3, d2, e3, d2, e3, d2, e3, d2, e3, d2, e3, d2, e3, d2, e3, d2, e3, d2, e3, d2, e3, d2, e3, d2, e3, e3, e1, e1, e1, e1, e3, e1, f2, f3, f1, f1, f1, f2, b, bg2_4, bg2_5, bg2_5, d2, d2, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5],
[bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, f3, d2, bg2_2, bg2_2, bg2_2, b, f3, f1, f1, f1, f2, e1, e1, e2, e1, e1, e1, e1, e2, e1, e1, e1, e1, e2, e1, e1, e1, e1, e2, e1, e1, e1, e1, e1, e2, e1, e1, e1, e1, e1, e2, e1, e1, e1, e1, e1, e2, e1, e1, e1, e1, e2, e1, e1, e1, e1, e2, f2, f1, f1, f1, f3, b, bg2_4, bg2_4, bg2_4, bg2_5, f3, d2, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5],
[bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, f3, d2, bg2_2, bg2_2, bg2_2, bg2_2, b, f2, f3, f2, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, e1, e1, e3, e1, e1, f2, f3, f2, b, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, f3, d2, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5],
[bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, g1, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, f3, d2, d2, bg2_2, bg2_2, bg2_2, bg2_2, b, b, f2, e2, e3, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e2, e3, e2, f2, b, b, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, f3, d2, d2, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, g1, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5],
[bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, f3, g1, g1, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, f3, d2, d2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, b, b, b, d1, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, e3, d1, b, b, b, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, f3, d2, d2, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, f3, g1, g1, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5],
[bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, f3, g1, g1, g1, bg2_2, bg2_2, bg2_2, bg2_2, f3, bg2_2, d2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, b, b, bg2_2, d1, d1, d1, d1, bg2_2, bg2_2, bg2_3, d1, d1, bg2_3, bg2_3, bg2_3, d1, d1, bg2_3, bg2_3, bg2_3, d1, d1, bg2_3, bg2_3, bg2_3, bg2_3, d1, d1, bg2_3, bg2_3, bg2_3, d1, d1, bg2_3, bg2_3, bg2_4, d1, d1, bg2_4, bg2_4, bg2_4, d1, d1, d1, d1, bg2_4, b, b, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, f3, bg2_4, d2, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, f3, g1, g1, g1, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5],
[bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, f3, g1, g1, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, f3, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, bg2_2, b, b, bg2_2, bg2_2, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, d1, bg2_4, bg2_4, b, b, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, f3, bg2_4, bg2_4, bg2_4, bg2_5, bg2_5, bg2_5, bg2_5, f3, g1, g1, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5],
[e1, e1, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, f3, g1, bg2_1, bg2_1, bg2_2, bg2_2, bg2_2, bg2_2, f3, bg2_2, bg2_2, bg2_2, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, bg2_4, bg2_4, bg2_4, f3, bg2_4, bg2_4, bg2_4, bg2_4, bg2_5, bg2_5, bg2_5, f3, g1, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, e1],
[f3, e1, e1, e1, e1, bg2_1, bg2_1, bg2_1, g1, bg2_1, bg2_1, bg2_1, bg2_1, bg2_2, bg2_2, bg2_2, f3, bg2_2, bg2_2, bg2_2, b, a3, a3, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, c2, a3, a3, b, bg2_4, bg2_4, bg2_4, f3, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_5, bg2_5, g1, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, f3],
[f3, e1, e1, e1, bg2_1, bg2_1, bg2_1, bg2_1, f3, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, bg2_2, bg2_2, f3, bg2_2, bg2_2, bg2_2, b, a3, a3, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, a3, a3, b, bg2_4, bg2_4, bg2_4, f3, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_5, f3, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, f3],
[e1, e1, e1, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, f3, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, bg2_2, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, f3, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, e1],
[f3, bg2_1, e1, e1, bg2_1, bg2_1, bg2_1, bg2_1, f3, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, b, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, b, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, f3, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, f3],
[f3, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, f3, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, bg2_4, f3, bg2_4, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, f3],
[f3, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, b, b, b, b, b, b, b, b, b, a3, a3, b, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, b, a3, a3, b, b, b, b, b, b, b, b, b, bg2_5, bg2_5, bg2_5, bg2_5, bg2_5, f3],
[f3, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, b, c2, c2, c2, c2, c2, c2, b, a3, a3, b, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, b, b, b, b, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, b, a3, a3, b, c2, c2, c2, c2, c2, c2, b, bg2_4, bg2_5, bg2_5, bg2_5, bg2_5, f3],
[f3, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, b, c3, c3, c3, c2, c3, b, a3, a3, b, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, b, b, a3, a3, a3, a3, b, b, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, b, a3, a3, b, c3, c2, c3, c3, c3, b, bg2_4, bg2_4, bg2_5, bg2_5, bg2_5, f3],
[f3, bg2_1, bg2_1, bg2_1, bg2_1, bg2_1, b, c3, c3, c3, c2, b, a3, a3, b, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, b, b, a3, a3, b, b, b, b, a3, a3, b, b, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, b, a3, a3, b, c2, c3, c3, c3, b, bg2_4, bg2_4, bg2_4, bg2_5, bg2_5, f3],
[b, b, b, b, b, b, b, c2, c2, c2, b, a3, a3, b, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, b, a3, a3, b, b, b, b, b, b, b, b, a3, a3, b, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, b, a3, a3, b, c2, c2, c2, b, b, b, b, b, b, b],
[c2, c2, c2, c2, c2, c2, c2, c3, c3, b, a3, a3, b, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, b, a3, b, b, b, b, b, b, b, b, b, b, b, b, a3, b, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, b, a3, a3, b, c3, c3, c2, c2, c2, c2, c2, c2, c2],
[c3, c3, c3, c2, c3, c3, c3, c2, b, a3, a3, b, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, b, a3, b, b, b, b, b, b, c1, c1, c1, c1, b, b, b, b, a3, b, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, b, a3, a3, b, c2, c3, c3, c3, c2, c3, c3, c3],
[c3, c3, c3, c2, c3, c3, c3, b, a3, a3, b, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, b, a3, b, b, b, b, b, c1, c1, c1, c1, c1, c1, c1, c1, b, b, b, a3, b, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, b, a3, a3, b, c3, c3, c3, c2, c3, c3, c3],
[c2, c2, c2, c3, c2, c3, b, a3, a3, b, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, b, a3, b, b, b, b, b, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, b, b, a3, b, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, b, a3, a3, b, c3, c2, c3, c2, c2, c2],
[c3, c2, c3, c3, c3, b, a3, a3, b, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, b, a3, b, b, b, b, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, b, a3, b, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, b, a3, a3, b, c3, c3, c3, c2, c3],
[c3, c2, c3, c3, b, a3, a3, b, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, b, a3, b, c1, b, b, b, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, b, b, a3, b, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, b, a3, a3, b, c3, c3, c2, c3],
[c2, c3, c2, b, a3, a3, b, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, b, a3, b, c1, b, b, b, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, b, b, a3, b, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, b, a3, a3, b, c2, c3, c2],
[c3, c3, b, a3, a3, b, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, b, a3, b, c1, b, b, b, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, b, b, a3, b, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, b, a3, a3, b, c3, c3],
[c3, b, a3, a3, b, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, b, a3, b, c1, b, b, b, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, b, b, a3, b, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, b, a3, a3, b, c3],
[b, a3, a3, b, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, b, a3, b, c1, b, b, b, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, c1, b, b, a3, b, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, b, a3, a3, b],
[a3, a3, b, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, b, a3, b, c1, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, a3, b, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a3, b, a3, a3],
[a3, b, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, b, a3, b, c1, b, b, b, a2, a2, a2, a2, a2, a2, a2, a2, a2, b, b, b, b, b, b, a3, b, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a3, a3, b, a3],
[b, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, b, a3, b, b, b, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, b, b, b, b, a3, b, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a2, a3, a3, a3, a3, a3, a3, a3, b],
[b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b],
[e1, e1, e1, e1, e1, e1, e1, e1, e1, e1, e1, e1, e1, e1, e1, e1, e1, a2, a2, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a2, e1, e1, e1, e1, e1, e1, e1, e1, e1, e1, e1, e1, e1, e1, e1, e1, e1, e1],
[h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, a2, a2, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a3, a2, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1],
[h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, a2, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1],
[h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, e1, e1, e1, e1, e1, e1, e1, e1, e1, e1, e1, e1, i1, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i1, e1, e1, e1, e1, e1, e1, e1, e1, e1, e1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1],
[h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, i1, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1],
[h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, i1, i1, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1],
[h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, i1, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1],
[h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h2, i1, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1],
[h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h2, h2, h2, h2, i1, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1],
[h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h2, h2, h2, h2, h2, h2, i1, i1, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i1, h2, h2, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1],
[h1, h1, h1, h1, h1, h1, h1, h2, h2, h2, h2, h2, h2, h2, h2, h2, i1, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i1, h2, h2, h2, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1],
[h1, h1, h1, h2, h2, h2, h2, h2, h2, h2, h2, h2, h2, h2, h2, i1, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i1, h2, h2, h2, h2, h2, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1],
[h2, h2, h2, h2, h2, h2, h2, h2, h2, h2, h2, h2, h2, i1, i1, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i2, i1, h2, h2, h2, h2, h2, h2, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1, h1],
]
y = randint(12501,13000)
seed(y)
if ma == "jinjusung":
pixels = JinJuSung1
p = "jinjusung"
count_jinjusung += 1
elif ma == "jinjusungB":
pixels = JinJuSung2
p = "jinjusung"
count_jinjusung += 1
array = np.array(pixels, dtype=np.uint8)
new_image = Image.fromarray(array)
new_image = new_image.resize(dimensions, resample=0)
if p == "jinjusung":
imgname = dirname + '/Locations/' + p + '_' + str(count_jinjusung) + '.png'
new_image.save(imgname)
# Generate the "jinjusung" location images; arguments appear to be
# (variant name, count, numeric id) -- NOTE(review): confirm against
# make_locations' signature defined earlier in this file.
make_locations("jinjusung", 100, 5234)
#make_locations("jinjusungB", 50, 54)
| 334.251412
| 714
| 0.611891
| 31,193
| 118,325
| 1.98442
| 0.004777
| 0.30475
| 0.524491
| 0.599418
| 0.978417
| 0.975509
| 0.972165
| 0.970275
| 0.968756
| 0.965897
| 0
| 0.315978
| 0.207902
| 118,325
| 353
| 715
| 335.1983
| 0.344465
| 0.010074
| 0
| 0.175
| 0
| 0
| 0.000608
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.003571
| false
| 0
| 0.021429
| 0
| 0.025
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 13
|
217727a4e132823729a6528e0ca03b127d442620
| 217
|
py
|
Python
|
oml/serializers/__init__.py
|
getumen/oml
|
f4ec7fd3f04ff528353c0475c9330fe1ed3b63f9
|
[
"MIT"
] | 1
|
2017-07-25T21:53:28.000Z
|
2017-07-25T21:53:28.000Z
|
oml/serializers/__init__.py
|
getumen/oml
|
f4ec7fd3f04ff528353c0475c9330fe1ed3b63f9
|
[
"MIT"
] | null | null | null |
oml/serializers/__init__.py
|
getumen/oml
|
f4ec7fd3f04ff528353c0475c9330fe1ed3b63f9
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import generators
from __future__ import division
from . import (
serializer
)
| 21.7
| 39
| 0.843318
| 26
| 217
| 6.153846
| 0.423077
| 0.3125
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 217
| 9
| 40
| 24.111111
| 0.860215
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0.125
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
df2bb139d5b506337814761e91978ec4d245720c
| 1,247
|
py
|
Python
|
2021/06/code.py
|
ErikBavenstrand/Advent-of-Code
|
d4879dfb8d70d817cf57ab6a601f22e91d5ed8e1
|
[
"MIT"
] | null | null | null |
2021/06/code.py
|
ErikBavenstrand/Advent-of-Code
|
d4879dfb8d70d817cf57ab6a601f22e91d5ed8e1
|
[
"MIT"
] | null | null | null |
2021/06/code.py
|
ErikBavenstrand/Advent-of-Code
|
d4879dfb8d70d817cf57ab6a601f22e91d5ed8e1
|
[
"MIT"
] | null | null | null |
# Advent of Code 2021 Day 06
# Author: Erik Båvenstrand
# URL: https://adventofcode.com/2021/day/6
def part_a(data: list[str], days: int = 80):
    """Solve part A: count lanternfish after ``days`` days (default 80).

    ``data[0]`` is a comma-separated list of the starting fish timers.
    A fish spawns when its timer reaches 0, resetting to 6; every
    newborn starts at 8. Fish are tracked as counts per timer value,
    so the run time is O(days) regardless of population size.

    Args:
        data: puzzle input lines; only data[0] is used.
        days: number of days to simulate (kept as a default so existing
            callers are unaffected).

    Returns:
        Total number of fish after the simulation.
    """
    counts = [0] * 9  # counts[t] = number of fish whose timer is t
    for timer in data[0].split(","):
        counts[int(timer)] += 1
    for _ in range(days):
        # Rotating the list shifts every timer down by one day; this
        # replaces the previous nine hand-written shift assignments.
        spawning = counts.pop(0)   # fish at timer 0 reproduce today
        counts.append(spawning)    # newborns enter at timer 8
        counts[6] += spawning      # parents restart at timer 6
    return sum(counts)
def part_b(data: list[str], days: int = 256):
    """Solve part B: count lanternfish after ``days`` days (default 256).

    Identical simulation to part_a; only the day count differs.
    ``data[0]`` is a comma-separated list of the starting fish timers.

    Args:
        data: puzzle input lines; only data[0] is used.
        days: number of days to simulate (default preserves the old
            hard-coded 256 for existing callers).

    Returns:
        Total number of fish after the simulation.
    """
    counts = [0] * 9  # counts[t] = number of fish whose timer is t
    for timer in data[0].split(","):
        counts[int(timer)] += 1
    for _ in range(days):
        # One rotation = one day: all timers decrease, zeros wrap around.
        spawning = counts.pop(0)   # fish at timer 0 reproduce today
        counts.append(spawning)    # newborns enter at timer 8
        counts[6] += spawning      # parents restart at timer 6
    return sum(counts)
| 24.45098
| 43
| 0.535686
| 254
| 1,247
| 2.314961
| 0.165354
| 0.442177
| 0.22449
| 0.081633
| 0.782313
| 0.782313
| 0.782313
| 0.782313
| 0.782313
| 0.693878
| 0
| 0.087558
| 0.303929
| 1,247
| 50
| 44
| 24.94
| 0.589862
| 0.073777
| 0
| 0.777778
| 0
| 0
| 0.001738
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
df3e05e437f3e6fdfc613eae72a4f1bc82c2a75f
| 569
|
py
|
Python
|
revgraph/dl/core/regularizers.py
|
shhoalex/revgraph
|
7060945aa46fbd9584861715f15b6fc8037ba53f
|
[
"MIT"
] | 9
|
2020-06-27T07:01:00.000Z
|
2020-10-23T13:50:04.000Z
|
revgraph/dl/core/regularizers.py
|
shinghinho/revgraph
|
7060945aa46fbd9584861715f15b6fc8037ba53f
|
[
"MIT"
] | null | null | null |
revgraph/dl/core/regularizers.py
|
shinghinho/revgraph
|
7060945aa46fbd9584861715f15b6fc8037ba53f
|
[
"MIT"
] | null | null | null |
from .utils import *
@register
def l1(l1: float = 0.01) -> Regularizer:
    """Build an L1 (lasso) regularizer: penalty(x) = l1 * sum(|x|)."""
    def penalty(x: rc.tensor) -> rc.tensor:
        return l1 * rc.sum(rc.abs(x))
    return penalty
@register
def l2(l2: float = 0.01) -> Regularizer:
    """Build an L2 (ridge) regularizer: penalty(x) = l2 * sum(x^2)."""
    def penalty(x: rc.tensor) -> rc.tensor:
        return l2 * rc.sum(rc.square(x))
    return penalty
@register
def l1_l2(l1: float = 0.01,
          l2: float = 0.01) -> Regularizer:
    """Build a combined L1 + L2 regularizer (elastic-net style penalty)."""
    def penalty(x: rc.tensor) -> rc.tensor:
        lasso = l1 * rc.sum(rc.abs(x))
        ridge = l2 * rc.sum(rc.square(x))
        return (lasso +
                ridge)
    return penalty
| 22.76
| 44
| 0.58348
| 86
| 569
| 3.848837
| 0.22093
| 0.145015
| 0.096677
| 0.172205
| 0.861027
| 0.752266
| 0.752266
| 0.752266
| 0.570997
| 0.570997
| 0
| 0.057143
| 0.261863
| 569
| 24
| 45
| 23.708333
| 0.730952
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.055556
| 0.166667
| 0.722222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
df7b4df75a9cedb61f69b1090758b9c13ff5d42e
| 36
|
py
|
Python
|
day-22-pong/test.py
|
jskolnicki/100-Days-of-Python
|
146af2b73914a525121f1c91737abd4857dc2f89
|
[
"CNRI-Python"
] | null | null | null |
day-22-pong/test.py
|
jskolnicki/100-Days-of-Python
|
146af2b73914a525121f1c91737abd4857dc2f89
|
[
"CNRI-Python"
] | null | null | null |
day-22-pong/test.py
|
jskolnicki/100-Days-of-Python
|
146af2b73914a525121f1c91737abd4857dc2f89
|
[
"CNRI-Python"
] | null | null | null |
# Quick manual arithmetic checks (scratch/test file).
print(204.0 + 180)  # float + int promotes to float: prints 384.0
print(384 % 360)    # modulo wraps into [0, 360): prints 24
| 12
| 18
| 0.638889
| 7
| 36
| 3.285714
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.433333
| 0.166667
| 36
| 3
| 19
| 12
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
df9379a3162e1dc949ff1d773e25e66933daeb21
| 14,190
|
py
|
Python
|
caffe2darknet.py
|
Alin1102/Yolov3_Dartnet2Caffe
|
b4284b080f53c1ac73c1930b1b1c4e07dcd97559
|
[
"MIT"
] | 21
|
2018-11-28T13:57:37.000Z
|
2021-05-17T15:08:33.000Z
|
caffe2darknet.py
|
Alin1102/Yolov3_Dartnet2Caffe
|
b4284b080f53c1ac73c1930b1b1c4e07dcd97559
|
[
"MIT"
] | 5
|
2018-02-26T11:54:03.000Z
|
2020-12-16T05:27:44.000Z
|
caffe2darknet.py
|
Alin1102/Yolov3_Dartnet2Caffe
|
b4284b080f53c1ac73c1930b1b1c4e07dcd97559
|
[
"MIT"
] | 9
|
2019-01-03T03:57:00.000Z
|
2021-05-07T02:27:47.000Z
|
#!/home/ubuntu/anaconda2/bin/python -f
from collections import OrderedDict
from cfg import *
from prototxt import *
import numpy as np
def caffe2darknet(protofile, caffemodel):
    """Convert a Caffe network (prototxt + caffemodel) to darknet form.

    Returns a tuple (blocks, wdata):
      blocks -- list of OrderedDicts, one per darknet cfg section
      wdata  -- 1-D numpy array of weight values in darknet file order

    NOTE(review): the original indentation of this file was lost during
    extraction; the block structure below was reconstructed from the
    control flow -- verify against the upstream repository.
    """
    model = parse_caffemodel(caffemodel)
    layers = model.layer
    if len(layers) == 0:
        # Legacy caffemodels keep layers under the V1 field name.
        print 'Using V1LayerParameter'
        layers = model.layers
    # Map layer name -> caffemodel layer for weight lookup by name.
    lmap = {}
    for l in layers:
        lmap[l.name] = l
    net_info = parse_prototxt(protofile)
    props = net_info['props']
    wdata = []    # flat list of weight values, darknet file order
    blocks = []   # darknet cfg sections
    # [net] section: input geometry from input_shape or input_dim.
    block = OrderedDict()
    block['type'] = 'net'
    if props.has_key('input_shape'):
        block['batch'] = props['input_shape']['dim'][0]
        block['channels'] = props['input_shape']['dim'][1]
        block['height'] = props['input_shape']['dim'][2]
        block['width'] = props['input_shape']['dim'][3]
    else:
        block['batch'] = props['input_dim'][0]
        block['channels'] = props['input_dim'][1]
        block['height'] = props['input_dim'][2]
        block['width'] = props['input_dim'][3]
    if props.has_key('mean_file'):
        block['mean_file'] = props['mean_file']
    blocks.append(block)
    layers = net_info['layers']
    layer_num = len(layers)
    i = 0 # layer id
    # layer_id maps a blob (top) name -> index of the block producing it.
    layer_id = dict()
    layer_id[props['input']] = 0
    while i < layer_num:
        layer = layers[i]
        print i,layer['name'], layer['type']
        if layer['type'] == 'Convolution':
            # If the conv's input is not the previous block, insert a
            # [route] section pointing back to the producing block.
            if layer_id[layer['bottom']] != len(blocks)-1:
                block = OrderedDict()
                block['type'] = 'route'
                block['layers'] = str(layer_id[layer['bottom']] - len(blocks))
                blocks.append(block)
            #assert(i+1 < layer_num and layers[i+1]['type'] == 'BatchNorm')
            #assert(i+2 < layer_num and layers[i+2]['type'] == 'Scale')
            conv_layer = layers[i]
            block = OrderedDict()
            block['type'] = 'convolutional'
            block['filters'] = conv_layer['convolution_param']['num_output']
            block['size'] = conv_layer['convolution_param']['kernel_size']
            block['stride'] = conv_layer['convolution_param']['stride']
            block['pad'] = '1'
            last_layer = conv_layer
            m_conv_layer = lmap[conv_layer['name']]
            # Caffe Conv + BatchNorm + Scale folds into a single darknet
            # batch-normalized convolutional section.
            if i+2 < layer_num and layers[i+1]['type'] == 'BatchNorm' and layers[i+2]['type'] == 'Scale':
                print i+1,layers[i+1]['name'], layers[i+1]['type']
                print i+2,layers[i+2]['name'], layers[i+2]['type']
                block['batch_normalize'] = '1'
                bn_layer = layers[i+1]
                scale_layer = layers[i+2]
                last_layer = scale_layer
                m_scale_layer = lmap[scale_layer['name']]
                m_bn_layer = lmap[bn_layer['name']]
                # darknet order: biases, bn scales, bn means, bn variances.
                wdata += list(m_scale_layer.blobs[1].data) ## conv_bias <- sc_beta
                wdata += list(m_scale_layer.blobs[0].data) ## bn_scale <- sc_alpha
                wdata += (np.array(m_bn_layer.blobs[0].data) / m_bn_layer.blobs[2].data[0]).tolist() ## bn_mean <- bn_mean/bn_scale
                wdata += (np.array(m_bn_layer.blobs[1].data) / m_bn_layer.blobs[2].data[0]).tolist() ## bn_var <- bn_var/bn_scale
                i = i + 2
            else:
                wdata += list(m_conv_layer.blobs[1].data) ## conv_bias
            # Convolution weights always follow the bias / bn parameters.
            wdata += list(m_conv_layer.blobs[0].data) ## conv_weights
            # A following ReLU is folded into this section's activation.
            if i+1 < layer_num and layers[i+1]['type'] == 'ReLU':
                print i+1,layers[i+1]['name'], layers[i+1]['type']
                act_layer = layers[i+1]
                block['activation'] = 'relu'
                top = act_layer['top']
                layer_id[top] = len(blocks)
                blocks.append(block)
                i = i + 1
            else:
                block['activation'] = 'linear'
                top = last_layer['top']
                layer_id[top] = len(blocks)
                blocks.append(block)
            i = i + 1
        elif layer['type'] == 'Pooling':
            assert(layer_id[layer['bottom']] == len(blocks)-1)
            block = OrderedDict()
            if layer['pooling_param']['pool'] == 'AVE':
                block['type'] = 'avgpool'
            elif layer['pooling_param']['pool'] == 'MAX':
                block['type'] = 'maxpool'
            block['size'] = layer['pooling_param']['kernel_size']
            block['stride'] = layer['pooling_param']['stride']
            if layer['pooling_param'].has_key('pad'):
                pad = int(layer['pooling_param']['pad'])
                if pad > 0:
                    block['pad'] = '1'
            top = layer['top']
            layer_id[top] = len(blocks)
            blocks.append(block)
            i = i + 1
        elif layer['type'] == 'Eltwise':
            # Eltwise over (previous block, earlier block) -> [shortcut].
            bottoms = layer['bottom']
            bottom1 = layer_id[bottoms[0]] - len(blocks)
            bottom2 = layer_id[bottoms[1]] - len(blocks)
            assert(bottom1 == -1 or bottom2 == -1)
            from_id = bottom2 if bottom1 == -1 else bottom1
            block = OrderedDict()
            block['type'] = 'shortcut'
            block['from'] = str(from_id)
            assert(i+1 < layer_num and layers[i+1]['type'] == 'ReLU')
            block['activation'] = 'relu'
            top = layers[i+1]['top']
            layer_id[top] = len(blocks)
            blocks.append(block)
            i = i + 2
        elif layer['type'] == 'InnerProduct':
            assert(layer_id[layer['bottom']] == len(blocks)-1)
            block = OrderedDict()
            block['type'] = 'connected'
            block['output'] = layer['inner_product_param']['num_output']
            m_fc_layer = lmap[layer['name']]
            wdata += list(m_fc_layer.blobs[1].data) ## fc_bias
            wdata += list(m_fc_layer.blobs[0].data) ## fc_weights
            if i+1 < layer_num and layers[i+1]['type'] == 'ReLU':
                act_layer = layers[i+1]
                block['activation'] = 'relu'
                top = act_layer['top']
                layer_id[top] = len(blocks)
                blocks.append(block)
                i = i + 2
            else:
                block['activation'] = 'linear'
                top = layer['top']
                layer_id[top] = len(blocks)
                blocks.append(block)
                i = i + 1
        elif layer['type'] == 'Softmax':
            assert(layer_id[layer['bottom']] == len(blocks)-1)
            block = OrderedDict()
            block['type'] = 'softmax'
            block['groups'] = 1
            top = layer['top']
            layer_id[top] = len(blocks)
            blocks.append(block)
            i = i + 1
        else:
            # Unknown layer: carry it through verbatim (with a [route]
            # first if its input is not the previous block).
            print('unknown type %s' % layer['type'])
            if layer_id[layer['bottom']] != len(blocks)-1:
                block = OrderedDict()
                block['type'] = 'route'
                block['layers'] = str(layer_id[layer['bottom']] - len(blocks))
                blocks.append(block)
            block = OrderedDict()
            block['type'] = layer['type']
            top = layer['top']
            layer_id[top] = len(blocks)
            blocks.append(block)
            i = i + 1
    print 'done'
    return blocks, np.array(wdata)
def prototxt2cfg(protofile):
    """Build darknet cfg blocks from a Caffe prototxt alone (no weights).

    Same layer traversal as caffe2darknet(), minus the caffemodel weight
    extraction; returns the list of cfg section OrderedDicts.

    NOTE(review): indentation was lost during extraction; the structure
    below is reconstructed from the control flow -- verify upstream.
    """
    net_info = parse_prototxt(protofile)
    props = net_info['props']
    blocks = []
    # [net] section: input geometry from input_shape or input_dim.
    block = OrderedDict()
    block['type'] = 'net'
    if props.has_key('input_shape'):
        block['batch'] = props['input_shape']['dim'][0]
        block['channels'] = props['input_shape']['dim'][1]
        block['height'] = props['input_shape']['dim'][2]
        block['width'] = props['input_shape']['dim'][3]
    else:
        block['batch'] = props['input_dim'][0]
        block['channels'] = props['input_dim'][1]
        block['height'] = props['input_dim'][2]
        block['width'] = props['input_dim'][3]
    if props.has_key('mean_file'):
        block['mean_file'] = props['mean_file']
    blocks.append(block)
    layers = net_info['layers']
    layer_num = len(layers)
    i = 0 # layer id
    # layer_id maps a blob (top) name -> index of the block producing it.
    layer_id = dict()
    layer_id[props['input']] = 0
    while i < layer_num:
        layer = layers[i]
        print i,layer['name'], layer['type']
        if layer['type'] == 'Convolution':
            # Insert a [route] when the input is not the previous block.
            if layer_id[layer['bottom']] != len(blocks)-1:
                block = OrderedDict()
                block['type'] = 'route'
                block['layers'] = str(layer_id[layer['bottom']] - len(blocks))
                blocks.append(block)
            conv_layer = layers[i]
            block = OrderedDict()
            block['type'] = 'convolutional'
            block['filters'] = conv_layer['convolution_param']['num_output']
            block['size'] = conv_layer['convolution_param']['kernel_size']
            # Caffe defaults stride to 1 when it is not specified.
            block['stride'] = '1'
            if conv_layer['convolution_param'].has_key('stride'):
                block['stride'] = conv_layer['convolution_param']['stride']
            block['pad'] = '1'
            last_layer = conv_layer
            # Conv + BatchNorm + Scale folds into one bn conv section.
            if i+2 < layer_num and layers[i+1]['type'] == 'BatchNorm' and layers[i+2]['type'] == 'Scale':
                print i+1,layers[i+1]['name'], layers[i+1]['type']
                print i+2,layers[i+2]['name'], layers[i+2]['type']
                block['batch_normalize'] = '1'
                bn_layer = layers[i+1]
                scale_layer = layers[i+2]
                last_layer = scale_layer
                i = i + 2
            # A following ReLU becomes this section's activation.
            if i+1 < layer_num and layers[i+1]['type'] == 'ReLU':
                print i+1,layers[i+1]['name'], layers[i+1]['type']
                act_layer = layers[i+1]
                block['activation'] = 'relu'
                top = act_layer['top']
                layer_id[top] = len(blocks)
                blocks.append(block)
                i = i + 1
            else:
                block['activation'] = 'linear'
                top = last_layer['top']
                layer_id[top] = len(blocks)
                blocks.append(block)
            i = i + 1
        elif layer['type'] == 'Pooling':
            assert(layer_id[layer['bottom']] == len(blocks)-1)
            block = OrderedDict()
            if layer['pooling_param']['pool'] == 'AVE':
                block['type'] = 'avgpool'
            elif layer['pooling_param']['pool'] == 'MAX':
                block['type'] = 'maxpool'
            block['size'] = layer['pooling_param']['kernel_size']
            block['stride'] = layer['pooling_param']['stride']
            if layer['pooling_param'].has_key('pad'):
                pad = int(layer['pooling_param']['pad'])
                if pad > 0:
                    block['pad'] = '1'
            top = layer['top']
            layer_id[top] = len(blocks)
            blocks.append(block)
            i = i + 1
        elif layer['type'] == 'Eltwise':
            # Eltwise over (previous block, earlier block) -> [shortcut].
            bottoms = layer['bottom']
            bottom1 = layer_id[bottoms[0]] - len(blocks)
            bottom2 = layer_id[bottoms[1]] - len(blocks)
            assert(bottom1 == -1 or bottom2 == -1)
            from_id = bottom2 if bottom1 == -1 else bottom1
            block = OrderedDict()
            block['type'] = 'shortcut'
            block['from'] = str(from_id)
            assert(i+1 < layer_num and layers[i+1]['type'] == 'ReLU')
            block['activation'] = 'relu'
            top = layers[i+1]['top']
            layer_id[top] = len(blocks)
            blocks.append(block)
            i = i + 2
        elif layer['type'] == 'InnerProduct':
            assert(layer_id[layer['bottom']] == len(blocks)-1)
            block = OrderedDict()
            block['type'] = 'connected'
            block['output'] = layer['inner_product_param']['num_output']
            if i+1 < layer_num and layers[i+1]['type'] == 'ReLU':
                act_layer = layers[i+1]
                block['activation'] = 'relu'
                top = act_layer['top']
                layer_id[top] = len(blocks)
                blocks.append(block)
                i = i + 2
            else:
                block['activation'] = 'linear'
                top = layer['top']
                layer_id[top] = len(blocks)
                blocks.append(block)
                i = i + 1
        elif layer['type'] == 'Softmax':
            assert(layer_id[layer['bottom']] == len(blocks)-1)
            block = OrderedDict()
            block['type'] = 'softmax'
            block['groups'] = 1
            top = layer['top']
            layer_id[top] = len(blocks)
            blocks.append(block)
            i = i + 1
        else:
            # Unknown layer: carry it through verbatim (with a [route]
            # first if its input is not the previous block).
            print('unknown type %s' % layer['type'])
            if layer_id[layer['bottom']] != len(blocks)-1:
                block = OrderedDict()
                block['type'] = 'route'
                block['layers'] = str(layer_id[layer['bottom']] - len(blocks))
                blocks.append(block)
            block = OrderedDict()
            block['type'] = layer['type']
            top = layer['top']
            layer_id[top] = len(blocks)
            blocks.append(block)
            i = i + 1
    print 'done'
    return blocks
def save_weights(data, weightfile):
print 'Save to ', weightfile
wsize = data.size
weights = np.zeros((wsize+4,), dtype=np.int32)
## write info
weights[0] = 0
weights[1] = 1
weights[2] = 0 ## revision
weights[3] = 0 ## net.seen
weights.tofile(weightfile)
weights = np.fromfile(weightfile, dtype=np.float32)
weights[4:] = data
weights.tofile(weightfile)
if __name__ == '__main__':
    # Command-line entry point: convert a Caffe model into a darknet
    # cfg file plus a darknet weight file.
    import sys
    if len(sys.argv) != 5:
        print('try:')
        print(' python caffe2darknet.py ResNet-50-deploy.prototxt ResNet-50-model.caffemodel ResNet-50-model.cfg ResNet-50-model.weights')
        exit()
    protofile = sys.argv[1]    # input Caffe .prototxt definition
    caffemodel = sys.argv[2]   # input Caffe .caffemodel weights
    cfgfile = sys.argv[3]      # output darknet .cfg path
    weightfile = sys.argv[4]   # output darknet .weights path
    blocks, data = caffe2darknet(protofile, caffemodel)
    save_weights(data, weightfile)
    save_cfg(blocks, cfgfile)
    print_cfg(blocks)
    print_cfg_nicely(blocks)
| 39.971831
| 139
| 0.5
| 1,618
| 14,190
| 4.249691
| 0.093325
| 0.013962
| 0.029087
| 0.061082
| 0.836097
| 0.833479
| 0.807592
| 0.797411
| 0.796102
| 0.782723
| 0
| 0.01922
| 0.343693
| 14,190
| 354
| 140
| 40.084746
| 0.719102
| 0.024383
| 0
| 0.829787
| 0
| 0.00304
| 0.147499
| 0.005356
| 0
| 0
| 0
| 0
| 0.030395
| 0
| null | null | 0
| 0.015198
| null | null | 0.054711
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
10c637bbbbc238e8ebd45a741307e4953fa9af9b
| 4,055
|
py
|
Python
|
60_Grainsize_project/DL_functions/DL_helper_functions.py
|
htorodriguez/grainsize_measure
|
ea72ae09fb6414dbd695d29d40362be9d5b451c1
|
[
"MIT"
] | null | null | null |
60_Grainsize_project/DL_functions/DL_helper_functions.py
|
htorodriguez/grainsize_measure
|
ea72ae09fb6414dbd695d29d40362be9d5b451c1
|
[
"MIT"
] | null | null | null |
60_Grainsize_project/DL_functions/DL_helper_functions.py
|
htorodriguez/grainsize_measure
|
ea72ae09fb6414dbd695d29d40362be9d5b451c1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 5 11:28:03 2020
@author: hto_r
"""
# =============================================================================
# Note: I wrote these helper functions without knowing about itertools.
# Now, knowing that it exists, they are obsolete. In the next version they
# will disappear.
# =============================================================================
def make_param_list_5(l1, l2, l3, l4, l5):
    """Return the Cartesian product of five lists.

    args: five iterables
    return: list of 5-tuples, one per combination, in nested-loop order
    """
    # itertools.product yields exactly the tuples the original five nested
    # loops produced, in the same order. Local import: this file has no
    # top-level import section.
    from itertools import product
    return list(product(l1, l2, l3, l4, l5))
def make_param_list_6(l1, l2, l3, l4, l5, l6):
    """Return the Cartesian product of six lists.

    args: six iterables
    return: list of 6-tuples, one per combination, in nested-loop order
    """
    # Equivalent to the original six nested loops; same tuples, same order.
    from itertools import product
    return list(product(l1, l2, l3, l4, l5, l6))
def make_param_list_9(l1, l2, l3, l4, l5, l6, l7, l8, l9):
    """Return the Cartesian product of nine lists.

    args: nine iterables
    return: list of 9-tuples, one per combination, in nested-loop order
    """
    # Equivalent to the original nine nested loops; same tuples, same order.
    from itertools import product
    return list(product(l1, l2, l3, l4, l5, l6, l7, l8, l9))
def make_param_list_11(l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11):
    """Return the Cartesian product of eleven lists.

    args: eleven iterables
    return: list of 11-tuples, one per combination, in nested-loop order
    """
    # Equivalent to the original eleven nested loops; same tuples, same order.
    from itertools import product
    return list(product(l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11))
def make_param_list_15(l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15):
    """Return the Cartesian product of fifteen lists.

    args: fifteen iterables
    return: list of 15-tuples, one per combination, in nested-loop order
    """
    # Equivalent to the original fifteen nested loops; same tuples, same order.
    from itertools import product
    return list(product(l1, l2, l3, l4, l5, l6, l7, l8,
                        l9, l10, l11, l12, l13, l14, l15))
| 39.754902
| 121
| 0.417756
| 501
| 4,055
| 3.319361
| 0.177645
| 0.135298
| 0.138304
| 0.048106
| 0.837643
| 0.83163
| 0.824414
| 0.735418
| 0.719784
| 0.692724
| 0
| 0.060275
| 0.480395
| 4,055
| 102
| 122
| 39.754902
| 0.728999
| 0.274969
| 0
| 0.787879
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075758
| false
| 0
| 0
| 0
| 0.075758
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
10ebf10908fa578faa7d4e3048b7adc0eced2bbc
| 30,585
|
py
|
Python
|
nfdapi/nfdcore/migrations/0005_auto_20170919_1506.py
|
kappu72/clevmetro-nfd
|
584c638190eaa077d010a24fe7f209b1cbb3725d
|
[
"BSD-2-Clause"
] | 3
|
2018-02-11T21:18:11.000Z
|
2019-01-19T06:58:58.000Z
|
nfdapi/nfdcore/migrations/0005_auto_20170919_1506.py
|
ricardogsilva/clevmetro-nfd
|
a7fc4de3f7930899cb3725ca8359f420d924aa12
|
[
"BSD-2-Clause"
] | 108
|
2018-02-02T15:42:39.000Z
|
2019-01-21T13:22:55.000Z
|
nfdapi/nfdcore/migrations/0005_auto_20170919_1506.py
|
ricardogsilva/clevmetro-nfd
|
a7fc4de3f7930899cb3725ca8359f420d924aa12
|
[
"BSD-2-Clause"
] | 5
|
2018-02-02T11:52:48.000Z
|
2022-03-01T16:09:09.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-19 15:06
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 1.11.4, 2017-09-19).

    Adds plant detail models (conifer, fern, flowering plant, moss) with
    shared habitat lookup tables (Aspect, CanopyCover, GroundSurface, ...),
    reworks slime-mold lifestages from a code/name pair into per-stage
    color/size fields, adds a ``verified`` flag to occurrences, and
    normalizes many animal-lifestage and habitat columns to nullable
    floats/decimals. Auto-generated: edit with care.
    """
    dependencies = [
        ('nfdcore', '0004_auto_20170901_1541'),
    ]
    operations = [
        # --- new code/name lookup tables and percentage-breakdown models ---
        migrations.CreateModel(
            name='Aspect',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.TextField(unique=True)),
                ('name', models.TextField()),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='CanopyCover',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.TextField(unique=True)),
                ('name', models.TextField()),
            ],
            options={
                'abstract': False,
            },
        ),
        # ConiferDetails extends TaxonDetails via multi-table inheritance
        # (OneToOne parent link); its lookup FKs are filled in by the
        # AddField operations at the end of this migration.
        migrations.CreateModel(
            name='ConiferDetails',
            fields=[
                ('taxondetails_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='nfdcore.TaxonDetails')),
                ('area_ranges', models.TextField(blank=True, null=True)),
                ('leap_land_cover_category', models.TextField(blank=True, null=True)),
                ('aspect', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.Aspect')),
            ],
            options={
                'abstract': False,
            },
            bases=('nfdcore.taxondetails',),
        ),
        migrations.CreateModel(
            name='ConiferLifestages',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('vegetative', models.FloatField(blank=True, default=0.0, null=True)),
                ('immature_ovulate_cones', models.FloatField(blank=True, default=0.0, null=True)),
                ('mature_ovulate_cones', models.FloatField(blank=True, default=0.0, null=True)),
                ('spent_ovulate_cones', models.FloatField(blank=True, default=0.0, null=True)),
                ('immature_pollen_cones', models.FloatField(blank=True, default=0.0, null=True)),
                ('mature_pollen_cones', models.FloatField(blank=True, default=0.0, null=True)),
                ('spent_pollen_cones', models.FloatField(blank=True, default=0.0, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='DisturbanceType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('browse', models.FloatField(blank=True, default=0.0, null=True)),
                ('collecting', models.FloatField(blank=True, default=0.0, null=True)),
                ('disease_pest', models.FloatField(blank=True, default=0.0, null=True)),
                ('mowing', models.FloatField(blank=True, default=0.0, null=True)),
                ('trampling', models.FloatField(blank=True, default=0.0, null=True)),
                ('vehicle_traffic', models.FloatField(blank=True, default=0.0, null=True)),
                ('woody_plant_removal', models.FloatField(blank=True, default=0.0, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='EarthwormEvidence',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('casting_piles', models.FloatField(blank=True, default=0.0, null=True)),
                ('compacted_soil', models.FloatField(blank=True, default=0.0, null=True)),
                ('individuals', models.FloatField(blank=True, default=0.0, null=True)),
                ('layered_castings', models.FloatField(blank=True, default=0.0, null=True)),
                ('middens', models.FloatField(blank=True, default=0.0, null=True)),
                ('no_evidence', models.FloatField(blank=True, default=0.0, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='FernDetails',
            fields=[
                ('taxondetails_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='nfdcore.TaxonDetails')),
                ('area_ranges', models.TextField(blank=True, null=True)),
                ('leap_land_cover_category', models.TextField(blank=True, null=True)),
                ('aspect', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.Aspect')),
                ('disturbance_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.DisturbanceType')),
                ('earthworm_evidence', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.EarthwormEvidence')),
            ],
            options={
                'abstract': False,
            },
            bases=('nfdcore.taxondetails',),
        ),
        migrations.CreateModel(
            name='FernLifestages',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.TextField(unique=True)),
                ('name', models.TextField()),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='FloweringPlantDetails',
            fields=[
                ('taxondetails_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='nfdcore.TaxonDetails')),
                ('area_ranges', models.TextField(blank=True, null=True)),
                ('leap_land_cover_category', models.TextField(blank=True, null=True)),
                ('aspect', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.Aspect')),
                ('disturbance_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.DisturbanceType')),
                ('earthworm_evidence', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.EarthwormEvidence')),
            ],
            options={
                'abstract': False,
            },
            bases=('nfdcore.taxondetails',),
        ),
        migrations.CreateModel(
            name='FloweringPlantLifestages',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.TextField(unique=True)),
                ('name', models.TextField()),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='GeneralHabitatCategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.TextField(unique=True)),
                ('name', models.TextField()),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='GroundSurface',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.TextField(unique=True)),
                ('name', models.TextField()),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='LandscapePosition',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.TextField(unique=True)),
                ('name', models.TextField()),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='MoistureRegime',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.TextField(unique=True)),
                ('name', models.TextField()),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='MossDetails',
            fields=[
                ('taxondetails_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='nfdcore.TaxonDetails')),
                ('area_ranges', models.TextField(blank=True, null=True)),
                ('leap_land_cover_category', models.TextField(blank=True, null=True)),
                ('aspect', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.Aspect')),
                ('disturbance_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.DisturbanceType')),
                ('earthworm_evidence', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.EarthwormEvidence')),
                ('general_habitat_category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.GeneralHabitatCategory')),
                ('ground_surface', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.GroundSurface')),
                ('landscape_position', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.LandscapePosition')),
            ],
            options={
                'abstract': False,
            },
            bases=('nfdcore.taxondetails',),
        ),
        migrations.CreateModel(
            name='MossLifestages',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.TextField(unique=True)),
                ('name', models.TextField()),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='PlantCount',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.TextField(unique=True)),
                ('name', models.TextField()),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Slope',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.TextField(unique=True)),
                ('name', models.TextField()),
            ],
            options={
                'abstract': False,
            },
        ),
        # Replaces the (typo-named) StreamSubstracte model deleted below.
        migrations.CreateModel(
            name='StreamSubstrate',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('artificial', models.FloatField(blank=True, default=0.0, null=True)),
                ('bedrock', models.FloatField(blank=True, default=0.0, null=True)),
                ('boulder', models.FloatField(blank=True, default=0.0, null=True)),
                ('boulder_slab', models.FloatField(blank=True, default=0.0, null=True)),
                ('clay_hardpan', models.FloatField(blank=True, default=0.0, null=True)),
                ('cobble', models.FloatField(blank=True, default=0.0, null=True)),
                ('fine_detritus', models.FloatField(blank=True, default=0.0, null=True)),
                ('gravel', models.FloatField(blank=True, default=0.0, null=True)),
                ('leafpack_woody_debris', models.FloatField(blank=True, default=0.0, null=True)),
                ('muck', models.FloatField(blank=True, default=0.0, null=True)),
                ('sand', models.FloatField(blank=True, default=0.0, null=True)),
                ('silt', models.FloatField(blank=True, default=0.0, null=True)),
            ],
        ),
        # --- removals preparing for the PlantDetails deletion and the
        # slime-mold lifestages rework ---
        migrations.RemoveField(
            model_name='plantdetails',
            name='taxondetails_ptr',
        ),
        migrations.RemoveField(
            model_name='slimemoldlifestages',
            name='code',
        ),
        migrations.RemoveField(
            model_name='slimemoldlifestages',
            name='name',
        ),
        # --- verification flag on both occurrence types ---
        migrations.AddField(
            model_name='occurrencenaturalarea',
            name='verified',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='occurrencetaxon',
            name='verified',
            field=models.BooleanField(default=False),
        ),
        # --- slime-mold lifestages: per-stage color/size fields ---
        migrations.AddField(
            model_name='slimemolddetails',
            name='lifestages',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.SlimeMoldLifestages'),
        ),
        migrations.AddField(
            model_name='slimemoldlifestages',
            name='sclerotium_color',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='slimemoldlifestages',
            name='sclerotium_size',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AddField(
            model_name='slimemoldlifestages',
            name='sporangia_color',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='slimemoldlifestages',
            name='sporangia_size',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AddField(
            model_name='slimemoldlifestages',
            name='streaming_body_color',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='slimemoldlifestages',
            name='streaming_body_size',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        # --- normalize animal lifestage counters to nullable floats ---
        migrations.AlterField(
            model_name='animallifestages',
            name='adult',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AlterField(
            model_name='animallifestages',
            name='adult_pregnant_or_young',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AlterField(
            model_name='animallifestages',
            name='early_instar_larva',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AlterField(
            model_name='animallifestages',
            name='early_instar_nymph',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AlterField(
            model_name='animallifestages',
            name='early_pupa',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AlterField(
            model_name='animallifestages',
            name='egg',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AlterField(
            model_name='animallifestages',
            name='egg_mass',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AlterField(
            model_name='animallifestages',
            name='immature',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AlterField(
            model_name='animallifestages',
            name='juvenile',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AlterField(
            model_name='animallifestages',
            name='larva',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AlterField(
            model_name='animallifestages',
            name='late_instar_larva',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AlterField(
            model_name='animallifestages',
            name='late_instar_nymph',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AlterField(
            model_name='animallifestages',
            name='late_pupa',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AlterField(
            model_name='animallifestages',
            name='na',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AlterField(
            model_name='animallifestages',
            name='nest',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AlterField(
            model_name='animallifestages',
            name='nymph',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AlterField(
            model_name='animallifestages',
            name='pupa',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AlterField(
            model_name='animallifestages',
            name='senescent',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AlterField(
            model_name='animallifestages',
            name='subadult',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AlterField(
            model_name='animallifestages',
            name='unknown',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        # --- habitat sizing and naming fields on animal detail models ---
        migrations.AlterField(
            model_name='pondlakeanimaldetails',
            name='lentic_size_acres_aprox',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='pondlakeanimaldetails',
            name='lentic_size_acres_exact',
            field=models.DecimalField(blank=True, decimal_places=1, max_digits=6, null=True),
        ),
        migrations.AlterField(
            model_name='pondlakeanimaldetails',
            name='lentic_size_squaremeters_aprox',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='pondlakeanimaldetails',
            name='lentic_size_squaremeters_exact',
            field=models.DecimalField(blank=True, decimal_places=1, max_digits=8, null=True),
        ),
        migrations.AlterField(
            model_name='pondlakeanimaldetails',
            name='microhabitat_comments',
            field=models.TextField(blank=True, default='', null=True),
        ),
        migrations.AlterField(
            model_name='pondlakeanimaldetails',
            name='pond_lake_name',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='streamanimaldetails',
            name='stream_name_1',
            field=models.TextField(blank=True, null=True),
        ),
        # Repoint the FK to the correctly-spelled StreamSubstrate model.
        migrations.AlterField(
            model_name='streamanimaldetails',
            name='substrate',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='nfdcore.StreamSubstrate'),
        ),
        migrations.AlterField(
            model_name='wetlandanimaldetails',
            name='lentic_size_acres_aprox',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='wetlandanimaldetails',
            name='lentic_size_acres_exact',
            field=models.DecimalField(blank=True, decimal_places=1, max_digits=6, null=True),
        ),
        migrations.AlterField(
            model_name='wetlandanimaldetails',
            name='lentic_size_squaremeters_aprox',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='wetlandanimaldetails',
            name='lentic_size_squaremeters_exact',
            field=models.DecimalField(blank=True, decimal_places=1, max_digits=8, null=True),
        ),
        migrations.AlterField(
            model_name='wetlandanimaldetails',
            name='wetland_name',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='wetlandvetegationstructure',
            name='buttonbush',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AlterField(
            model_name='wetlandvetegationstructure',
            name='cattail',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AlterField(
            model_name='wetlandvetegationstructure',
            name='ferns',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AlterField(
            model_name='wetlandvetegationstructure',
            name='forbs',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AlterField(
            model_name='wetlandvetegationstructure',
            name='phragmites',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AlterField(
            model_name='wetlandvetegationstructure',
            name='sedges',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        # --- drop the superseded models ---
        migrations.DeleteModel(
            name='PlantDetails',
        ),
        migrations.DeleteModel(
            name='StreamSubstracte',
        ),
        # --- wire the new plant detail models to their lookup tables ---
        migrations.AddField(
            model_name='mossdetails',
            name='lifestages',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.MossLifestages'),
        ),
        migrations.AddField(
            model_name='mossdetails',
            name='moisture_regime',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.MoistureRegime'),
        ),
        migrations.AddField(
            model_name='mossdetails',
            name='plant_count',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.PlantCount'),
        ),
        migrations.AddField(
            model_name='mossdetails',
            name='slope',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.Slope'),
        ),
        migrations.AddField(
            model_name='mossdetails',
            name='tree_canopy_cover',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.CanopyCover'),
        ),
        migrations.AddField(
            model_name='floweringplantdetails',
            name='general_habitat_category',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.GeneralHabitatCategory'),
        ),
        migrations.AddField(
            model_name='floweringplantdetails',
            name='ground_surface',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.GroundSurface'),
        ),
        migrations.AddField(
            model_name='floweringplantdetails',
            name='landscape_position',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.LandscapePosition'),
        ),
        migrations.AddField(
            model_name='floweringplantdetails',
            name='lifestages',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.FloweringPlantLifestages'),
        ),
        migrations.AddField(
            model_name='floweringplantdetails',
            name='moisture_regime',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.MoistureRegime'),
        ),
        migrations.AddField(
            model_name='floweringplantdetails',
            name='plant_count',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.PlantCount'),
        ),
        migrations.AddField(
            model_name='floweringplantdetails',
            name='slope',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.Slope'),
        ),
        migrations.AddField(
            model_name='floweringplantdetails',
            name='tree_canopy_cover',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.CanopyCover'),
        ),
        migrations.AddField(
            model_name='ferndetails',
            name='general_habitat_category',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.GeneralHabitatCategory'),
        ),
        migrations.AddField(
            model_name='ferndetails',
            name='ground_surface',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.GroundSurface'),
        ),
        migrations.AddField(
            model_name='ferndetails',
            name='landscape_position',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.LandscapePosition'),
        ),
        migrations.AddField(
            model_name='ferndetails',
            name='lifestages',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.FernLifestages'),
        ),
        migrations.AddField(
            model_name='ferndetails',
            name='moisture_regime',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.MoistureRegime'),
        ),
        migrations.AddField(
            model_name='ferndetails',
            name='plant_count',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.PlantCount'),
        ),
        migrations.AddField(
            model_name='ferndetails',
            name='slope',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.Slope'),
        ),
        migrations.AddField(
            model_name='ferndetails',
            name='tree_canopy_cover',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.CanopyCover'),
        ),
        migrations.AddField(
            model_name='coniferdetails',
            name='disturbance_type',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.DisturbanceType'),
        ),
        migrations.AddField(
            model_name='coniferdetails',
            name='earthworm_evidence',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.EarthwormEvidence'),
        ),
        migrations.AddField(
            model_name='coniferdetails',
            name='general_habitat_category',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.GeneralHabitatCategory'),
        ),
        migrations.AddField(
            model_name='coniferdetails',
            name='ground_surface',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.GroundSurface'),
        ),
        migrations.AddField(
            model_name='coniferdetails',
            name='landscape_position',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.LandscapePosition'),
        ),
        migrations.AddField(
            model_name='coniferdetails',
            name='lifestages',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.ConiferLifestages'),
        ),
        migrations.AddField(
            model_name='coniferdetails',
            name='moisture_regime',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.MoistureRegime'),
        ),
        migrations.AddField(
            model_name='coniferdetails',
            name='plant_count',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.PlantCount'),
        ),
        migrations.AddField(
            model_name='coniferdetails',
            name='slope',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.Slope'),
        ),
        migrations.AddField(
            model_name='coniferdetails',
            name='tree_canopy_cover',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='nfdcore.CanopyCover'),
        ),
    ]
| 45.785928
| 203
| 0.588556
| 2,941
| 30,585
| 5.988779
| 0.073784
| 0.066428
| 0.047238
| 0.061773
| 0.924715
| 0.923068
| 0.880259
| 0.876285
| 0.867882
| 0.808551
| 0
| 0.007462
| 0.28138
| 30,585
| 667
| 204
| 45.854573
| 0.793894
| 0.002223
| 0
| 0.795455
| 1
| 0
| 0.164515
| 0.059251
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.004545
| 0
| 0.009091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
8001065737562a9ac6400a46ac82cb85ea35cd06
| 204
|
py
|
Python
|
omoide/migration_engine/classes/__init__.py
|
TaXeH/Omoide
|
8ccc9d47e802433bb2de21ff930e6630658cd5e3
|
[
"MIT"
] | null | null | null |
omoide/migration_engine/classes/__init__.py
|
TaXeH/Omoide
|
8ccc9d47e802433bb2de21ff930e6630658cd5e3
|
[
"MIT"
] | null | null | null |
omoide/migration_engine/classes/__init__.py
|
TaXeH/Omoide
|
8ccc9d47e802433bb2de21ff930e6630658cd5e3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from omoide.migration_engine.classes.class_relocation import *
from omoide.migration_engine.classes.class_renderer import *
from omoide.migration_engine.classes.class_sql import *
| 40.8
| 62
| 0.813725
| 27
| 204
| 5.925926
| 0.481481
| 0.1875
| 0.35625
| 0.46875
| 0.76875
| 0.76875
| 0.5375
| 0
| 0
| 0
| 0
| 0.005348
| 0.083333
| 204
| 4
| 63
| 51
| 0.850267
| 0.102941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
33f9d78f0289cbade8b10952f73211de74bbf5cc
| 16,221
|
py
|
Python
|
models/rotation_model.py
|
r-o-s-h-a-n/semisupervisedFL
|
4c568b4a9cead5aa57f403c1e1bc10e2eaac07e3
|
[
"MIT"
] | 5
|
2020-02-25T00:24:11.000Z
|
2021-03-19T12:28:14.000Z
|
models/rotation_model.py
|
r-o-s-h-a-n/semisupervisedFL
|
4c568b4a9cead5aa57f403c1e1bc10e2eaac07e3
|
[
"MIT"
] | 9
|
2020-02-11T02:33:56.000Z
|
2021-11-10T19:54:17.000Z
|
models/rotation_model.py
|
r-o-s-h-a-n/semisupervisedFL
|
4c568b4a9cead5aa57f403c1e1bc10e2eaac07e3
|
[
"MIT"
] | 2
|
2020-02-13T15:12:02.000Z
|
2020-05-28T18:23:17.000Z
|
import random
import collections
import warnings
from six.moves import range
import numpy as np
import math
import six
import tensorflow as tf
import tensorflow_federated as tff
from models.model import Model
from models.initializers import ConvInitializer, DenseInitializer
from models.layers import create_NIN_block, GlobalAveragePooling
'''
"Deep model" refers to our implementation of the full NIN rotation net model described in
Gidaris, Spyros, Praveer Singh, and Nikos Komodakis. "Unsupervised representation
learning by predicting image rotations." arXiv preprint arXiv:1803.07728 (2018).
"Simple model" refers to a shallower network used in our experiments which is based on the
deep model.
'''
# Channel widths for the full ("deep") NIN rotation net (Gidaris et al. 2018).
DEEP_NCHANNELS1 = 192
DEEP_NCHANNELS2 = 160
DEEP_NCHANNELS3 = 96
# Channel widths for the shallower "simple" variant used in our experiments.
SIMPLE_NCHANNELS1 = 32
SIMPLE_NCHANNELS2 = 64
# Per-dataset single-image input shapes and number of label classes.
INPUT_SHAPES = {'emnist': [28,28,1], 'cifar100': [32,32,3], 'cifar10central': [32,32,3]}
OUTPUT_SHAPES = {'emnist': 10, 'cifar100': 20, 'cifar10central': 10}
def create_deep_feature_extractor_block(input_shape):
    """Build the shared NIN feature-extractor trunk of the deep rotation net.

    args: input_shape - shape of a single input image (e.g. [32, 32, 3])
    return: a tf.keras Sequential model named 'Feature_Extractor'
    """
    layers = [tf.keras.layers.InputLayer(input_shape)]
    # block 1
    layers.append(create_NIN_block(DEEP_NCHANNELS1, 5, name='F_Block1_Conv1', input_shape=input_shape))
    layers.append(create_NIN_block(DEEP_NCHANNELS2, 1, name='F_Block1_Conv2'))
    layers.append(create_NIN_block(DEEP_NCHANNELS3, 1, name='F_Block1_Conv3'))
    layers.append(tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same', name='F_Block1_MaxPool'))
    # block 2
    layers.append(create_NIN_block(DEEP_NCHANNELS1, 5, name='F_Block2_Conv1'))
    layers.append(create_NIN_block(DEEP_NCHANNELS1, 1, name='F_Block2_Conv2'))
    layers.append(create_NIN_block(DEEP_NCHANNELS1, 1, name='F_Block2_Conv3'))
    layers.append(tf.keras.layers.AveragePooling2D(pool_size=3, strides=2, padding='same', name='F_Block2_AvgPool'))
    return tf.keras.models.Sequential(layers, name='Feature_Extractor')
def create_deep_label_classifier_block(input_shape, num_classes=10):
    """Build the supervised label-classification head of the deep NIN net.

    args: input_shape - not used here (kept for a signature parallel to the
            other builders); the head infers its input from the trunk
        num_classes - number of label classes (default 10)
    return: a tf.keras Sequential model named 'Label_Classifier'
    """
    head = [
        # block 3
        create_NIN_block(DEEP_NCHANNELS1, 3, name='L_Block3_Conv1'),
        create_NIN_block(DEEP_NCHANNELS1, 1, name='L_Block3_Conv2'),
        create_NIN_block(DEEP_NCHANNELS1, 1, name='L_Block3_Conv3'),
        GlobalAveragePooling(name='L_Global_Avg_Pool'),
        tf.keras.layers.Dense(num_classes,
                              name='L_Linear_Classifier',
                              activation='softmax',
                              kernel_initializer=DenseInitializer(num_classes)),
    ]
    return tf.keras.models.Sequential(head, name='Label_Classifier')
def create_deep_rotation_classifier_block(input_shape, num_classes=4):
    """Build the self-supervised rotation-prediction head of the deep NIN net.

    args: input_shape - not used here (kept for a parallel signature)
        num_classes - number of rotation classes (default 4: 0/90/180/270)
    return: a tf.keras Sequential model named 'Rotation_Classifier'
    """
    head = [
        # block 3
        create_NIN_block(DEEP_NCHANNELS1, 3, name='R_Block3_Conv1'),
        create_NIN_block(DEEP_NCHANNELS1, 1, name='R_Block3_Conv2'),
        create_NIN_block(DEEP_NCHANNELS1, 1, name='R_Block3_Conv3'),
        # block 4
        create_NIN_block(DEEP_NCHANNELS1, 3, name='R_Block4_Conv1'),
        create_NIN_block(DEEP_NCHANNELS1, 1, name='R_Block4_Conv2'),
        create_NIN_block(DEEP_NCHANNELS1, 1, name='R_Block4_Conv3'),
        GlobalAveragePooling(name='R_Global_Avg_Pool'),
        tf.keras.layers.Dense(num_classes,
                              name='R_Linear_Classifier',
                              activation='softmax',
                              kernel_initializer=DenseInitializer(num_classes)),
    ]
    return tf.keras.models.Sequential(head, name='Rotation_Classifier')
def create_simple_feature_extractor_block(input_shape):
    """Build NIN block 1 of the *simple* architecture as a Sequential model."""
    extractor = tf.keras.models.Sequential(name='Feature_Extractor')
    # block 1
    extractor.add(create_NIN_block(SIMPLE_NCHANNELS1, 3, name='F_Block1_Conv1', input_shape=input_shape))
    extractor.add(create_NIN_block(SIMPLE_NCHANNELS2, 1, name='F_Block1_Conv2'))
    extractor.add(create_NIN_block(SIMPLE_NCHANNELS2, 1, name='F_Block1_Conv3'))
    extractor.add(tf.keras.layers.MaxPooling2D(3, strides=2, padding='same', name='F_maxpool'))
    return extractor
def create_simple_label_classifier_block(input_shape, num_classes=10):
    """Build NIN block 2 plus a dense softmax label head (simple variant).

    `input_shape` is accepted for symmetry with the other builders but is not
    used here.
    """
    head = tf.keras.models.Sequential(name='Label_Classifier')
    # block 2
    head.add(create_NIN_block(SIMPLE_NCHANNELS2, 3, name='L_Block2_Conv1'))
    head.add(create_NIN_block(SIMPLE_NCHANNELS2, 1, name='L_Block2_Conv2'))
    head.add(create_NIN_block(SIMPLE_NCHANNELS2, 1, name='L_Block2_Conv3'))
    head.add(tf.keras.layers.MaxPooling2D(3, strides=2, padding='same', name='L_maxpool'))
    head.add(tf.keras.layers.Flatten())
    head.add(tf.keras.layers.Dense(512, name='L_Hidden_Layer', activation='relu'))
    head.add(tf.keras.layers.Dense(num_classes, name='L_Linear_Classifier', activation='softmax'))
    return head
def create_simple_rotation_classifier_block(input_shape, num_classes=4):
    """Build NIN block 2 plus a dense softmax rotation head (simple variant).

    `input_shape` is accepted for symmetry with the other builders but is not
    used here. `num_classes` defaults to 4 (the 0/90/180/270 degree rotations).
    """
    model = tf.keras.models.Sequential([
        # block 2
        create_NIN_block(SIMPLE_NCHANNELS1, 5, name='R_Block2_Conv1'),
        create_NIN_block(SIMPLE_NCHANNELS2, 1, name='R_Block2_Conv2'),
        create_NIN_block(SIMPLE_NCHANNELS2, 1, name='R_Block2_Conv3'),
        # Renamed from 'L_maxpool' (copy-paste from the label classifier) so the
        # rotation head's layers all carry the 'R_' prefix.  MaxPooling2D holds
        # no weights, so by_name checkpoint loading is unaffected by the rename.
        tf.keras.layers.MaxPooling2D(3, strides=2, padding='same', name='R_maxpool'),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(512,
                              name='R_Hidden_Layer',
                              activation='relu',
                              ),
        tf.keras.layers.Dense(num_classes,
                              name='R_Linear_Classifier',
                              activation='softmax',
                              )
    ],
        name='Rotation_Classifier')
    return model
class RotationSupervisedModel(Model):
    """Base class for the supervised (label-prediction) models in the rotation
    self-supervision experiments.

    Provides per-dataset tf.data preprocessing pipelines and records an
    optional pretrained-checkpoint path (ph['pretrained_model_fp']) that the
    concrete subclasses load in __call__.
    """
    def __init__(self, ph):
        # ph: hyper-parameter dict; ph['dataset'] selects input/output shapes
        # from the module-level INPUT_SHAPES / OUTPUT_SHAPES tables.
        Model.__init__(self, ph)
        self.input_shape = INPUT_SHAPES[ph['dataset']]
        # NOTE(review): mixes ph[...] and self.ph[...] — presumably the base
        # Model stores ph as self.ph so both refer to the same dict; confirm.
        self.output_shape = OUTPUT_SHAPES[self.ph['dataset']]
        # setdefault also writes the key back into ph when it was absent.
        self.pretrained_model_fp = self.ph.setdefault('pretrained_model_fp', None)
        if self.pretrained_model_fp:
            print('training on a pretrained model')
    def preprocess_emnist(self,
                          dataset,
                          num_epochs,
                          shuffle_buffer,
                          batch_size,
                          learning_env):
        """Supervised EMNIST pipeline: filter masked examples, map to
        (image, label), then repeat/shuffle/batch.

        In the 'central' setting a single pass over the data is forced.
        """
        assert learning_env in ('central', 'federated')
        if learning_env == 'central':
            num_epochs = 1
        def element_fn(element):
            # Add a trailing channel axis to the 2-D pixel grid; labels become
            # shape [1] for sparse categorical cross-entropy.
            return (tf.expand_dims(element['pixels'], 2),
                    tf.reshape(element['label'], [1]))
        # Examples flagged 'is_masked_supervised' are dropped; examples without
        # the key are kept (the conditional expression falls back to True).
        return dataset.filter(lambda x: not x['is_masked_supervised'] if 'is_masked_supervised' in x else True).map(element_fn
                              ).repeat(num_epochs).shuffle(shuffle_buffer).batch(batch_size)
    def preprocess_cifar100(self,
                            dataset,
                            num_epochs,
                            shuffle_buffer,
                            batch_size,
                            learning_env):
        """Supervised CIFAR-100 pipeline: filter masked examples, normalise
        per channel, then shuffle/repeat/batch.

        The subtract/divide constants appear to be 255 x the standard CIFAR
        per-channel mean/std statistics (inputs are 0-255 uint8 images).
        """
        assert learning_env in ('central', 'federated')
        if learning_env == 'central':
            num_epochs = 1
        def element_fn(element):
            img = element['image']
            img = tf.cast(img, tf.float32)
            img = tf.math.subtract(img, tf.convert_to_tensor([255*0.49139968, 255*0.48215841, 255*0.44653091], dtype=tf.float32))
            img = tf.math.divide(img, tf.convert_to_tensor([255*0.24703223, 255*0.24348513, 255*0.26158784], dtype=tf.float32))
            return (img,
                    tf.reshape(element['label'], [1]))
        # NOTE(review): shuffle comes before repeat here, while the emnist
        # pipeline repeats before shuffling — presumably intentional; confirm.
        return dataset.filter(lambda x: not x['is_masked_supervised'] if 'is_masked_supervised' in x else True).map(element_fn
                              ).shuffle(shuffle_buffer).repeat(num_epochs).batch(batch_size)
    def preprocess_cifar10central(self,
                                  dataset,
                                  num_epochs,
                                  shuffle_buffer,
                                  batch_size,
                                  learning_env):
        """Supervised centralized-CIFAR-10 pipeline; identical transform to
        preprocess_cifar100 (same normalisation constants and ordering).
        """
        assert learning_env in ('central', 'federated')
        if learning_env == 'central':
            num_epochs = 1
        def element_fn(element):
            img = element['image']
            img = tf.cast(img, tf.float32)
            img = tf.math.subtract(img, tf.convert_to_tensor([255*0.49139968, 255*0.48215841, 255*0.44653091], dtype=tf.float32))
            img = tf.math.divide(img, tf.convert_to_tensor([255*0.24703223, 255*0.24348513, 255*0.26158784], dtype=tf.float32))
            return (img,
                    tf.reshape(element['label'], [1]))
        return dataset.filter(lambda x: not x['is_masked_supervised'] if 'is_masked_supervised' in x else True).map(element_fn
                              ).shuffle(shuffle_buffer).repeat(num_epochs).batch(batch_size)
class DeepRotationSupervisedModel(RotationSupervisedModel):
    """Supervised label classifier assembled from the deep NIN blocks."""
    def __init__(self, ph):
        super(DeepRotationSupervisedModel, self).__init__(ph)
    def __call__(self):
        """Return a compiled keras model (deep feature extractor + label head)."""
        opt_kwargs = dict(learning_rate=self.learning_rate,
                          nesterov=self.nesterov,
                          momentum=self.momentum,
                          decay=self.decay)
        net = tf.keras.models.Sequential([
            create_deep_feature_extractor_block(self.input_shape),
            create_deep_label_classifier_block(self.input_shape, self.output_shape),
        ])
        net.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                    optimizer=self.optimizer(**opt_kwargs),
                    metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
        if self.pretrained_model_fp:
            # by_name matching initialises only layers whose names coincide,
            # e.g. the shared 'F_'-prefixed feature-extractor layers.
            net.load_weights(self.pretrained_model_fp, by_name=True)
        return net
class SimpleRotationSupervisedModel(RotationSupervisedModel):
    """Supervised label classifier assembled from the simple NIN blocks."""
    def __init__(self, ph):
        super(SimpleRotationSupervisedModel, self).__init__(ph)
    def __call__(self):
        """Return a compiled keras model (simple feature extractor + label head)."""
        net = tf.keras.models.Sequential([
            create_simple_feature_extractor_block(self.input_shape),
            create_simple_label_classifier_block(self.input_shape, self.output_shape),
        ])
        optimizer = self.optimizer(learning_rate=self.learning_rate,
                                   nesterov=self.nesterov,
                                   momentum=self.momentum,
                                   decay=self.decay)
        net.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                    optimizer=optimizer,
                    metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
        if self.pretrained_model_fp:
            # by_name matching initialises only layers whose names coincide.
            net.load_weights(self.pretrained_model_fp, by_name=True)
        return net
class RotationSelfSupervisedModel(Model):
    """Base class for self-supervised rotation-prediction models.

    Each image is expanded into four copies rotated by 0/90/180/270 degrees
    (labels 0-3); a model is trained to predict the rotation index.
    """
    def __init__(self, ph):
        # ph: hyper-parameter dict; ph['dataset'] selects the input shape.
        Model.__init__(self, ph)
        self.input_shape = INPUT_SHAPES[ph['dataset']]
    def preprocess_emnist(self,
                          dataset,
                          num_epochs,
                          shuffle_buffer,
                          batch_size,
                          learning_env):
        """Self-supervised EMNIST pipeline: each image becomes four rotated
        copies with labels 0-3 via flat_map.

        In the 'central' setting a single pass over the data is forced.
        """
        assert learning_env in ('central', 'federated')
        if learning_env == 'central':
            num_epochs = 1
        def element_fn(element):
            img = tf.expand_dims(element['pixels'], 2)
            # Four rotated copies zipped with rotation labels 0..3; returning a
            # Dataset lets flat_map flatten the four examples into the stream.
            rotated_elements = (
                tf.data.Dataset.from_tensor_slices([tf.image.rot90(img, rot) for rot in [0, 1, 2, 3]]),
                tf.data.Dataset.from_tensor_slices([[0],[1],[2],[3]])
            )
            return tf.data.Dataset.zip(rotated_elements)
        # Examples flagged 'is_masked_unsupervised' are dropped; examples
        # without the key are kept.
        return dataset.filter(lambda x: not x['is_masked_unsupervised'] if 'is_masked_unsupervised' in x else True).shuffle(
            shuffle_buffer).flat_map(element_fn).repeat(num_epochs).batch(batch_size)
    def preprocess_cifar100(self,
                            dataset,
                            num_epochs,
                            shuffle_buffer,
                            batch_size,
                            learning_env):
        """Self-supervised CIFAR-100 pipeline: per-channel normalisation, then
        four rotated copies per image with labels 0-3.
        """
        assert learning_env in ('central', 'federated')
        if learning_env == 'central':
            num_epochs = 1
        def element_fn(element):
            img = tf.cast(element['image'], tf.float32)
            # Constants appear to be 255 x standard CIFAR per-channel mean/std.
            img = tf.math.subtract(img, tf.convert_to_tensor([255*0.49139968, 255*0.48215841, 255*0.44653091], dtype=tf.float32))
            img = tf.math.divide(img, tf.convert_to_tensor([255*0.24703223, 255*0.24348513, 255*0.26158784], dtype=tf.float32))
            rotated_elements = (
                tf.data.Dataset.from_tensor_slices([tf.image.rot90(img, rot) for rot in [0, 1, 2, 3]]),
                tf.data.Dataset.from_tensor_slices([[0],[1],[2],[3]])
            )
            return tf.data.Dataset.zip(rotated_elements)
        # NOTE(review): this variant batches before repeating, unlike the
        # emnist/cifar10central variants — presumably intentional; confirm.
        return dataset.filter(lambda x: not x['is_masked_unsupervised'] if 'is_masked_unsupervised' in x else True).shuffle(
            shuffle_buffer).flat_map(element_fn).batch(batch_size).repeat(num_epochs)
    def preprocess_cifar10central(self,
                                  dataset,
                                  num_epochs,
                                  shuffle_buffer,
                                  batch_size,
                                  learning_env):
        """Self-supervised centralized-CIFAR-10 pipeline; same transform as
        preprocess_cifar100 but repeats before batching.
        """
        assert learning_env in ('central', 'federated')
        if learning_env == 'central':
            num_epochs = 1
        def element_fn(element):
            img = tf.cast(element['image'], tf.float32)
            img = tf.math.subtract(img, tf.convert_to_tensor([255*0.49139968, 255*0.48215841, 255*0.44653091], dtype=tf.float32))
            img = tf.math.divide(img, tf.convert_to_tensor([255*0.24703223, 255*0.24348513, 255*0.26158784], dtype=tf.float32))
            rotated_elements = (
                tf.data.Dataset.from_tensor_slices([tf.image.rot90(img, rot) for rot in [0, 1, 2, 3]]),
                tf.data.Dataset.from_tensor_slices([[0],[1],[2],[3]])
            )
            return tf.data.Dataset.zip(rotated_elements)
        return dataset.filter(lambda x: not x['is_masked_unsupervised'] if 'is_masked_unsupervised' in x else True).shuffle(
            shuffle_buffer).flat_map(element_fn).repeat(num_epochs).batch(batch_size)
class SimpleRotationSelfSupervisedModel(RotationSelfSupervisedModel):
    """Predicts rotation of images using the simple NIN architecture."""
    def __init__(self, ph):
        super(SimpleRotationSelfSupervisedModel, self).__init__(ph)
    def __call__(self):
        """Return a compiled keras model (feature extractor + 4-way rotation head)."""
        opt_kwargs = dict(learning_rate=self.learning_rate,
                          nesterov=self.nesterov,
                          momentum=self.momentum,
                          decay=self.decay)
        net = tf.keras.models.Sequential([
            create_simple_feature_extractor_block(self.input_shape),
            create_simple_rotation_classifier_block(self.input_shape, num_classes=4),
        ])
        net.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                    optimizer=self.optimizer(**opt_kwargs),
                    metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
        return net
class DeepRotationSelfSupervisedModel(RotationSelfSupervisedModel):
    """Predicts rotation of images using the deep NIN architecture."""
    def __init__(self, ph):
        super(DeepRotationSelfSupervisedModel, self).__init__(ph)
    def __call__(self):
        """Return a compiled keras model (feature extractor + 4-way rotation head)."""
        net = tf.keras.models.Sequential([
            create_deep_feature_extractor_block(self.input_shape),
            create_deep_rotation_classifier_block(self.input_shape, num_classes=4),
        ])
        optimizer = self.optimizer(learning_rate=self.learning_rate,
                                   nesterov=self.nesterov,
                                   momentum=self.momentum,
                                   decay=self.decay)
        net.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                    optimizer=optimizer,
                    metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
        return net
| 38.899281
| 129
| 0.606374
| 1,808
| 16,221
| 5.168695
| 0.122235
| 0.02397
| 0.037453
| 0.028892
| 0.843767
| 0.824612
| 0.803852
| 0.786196
| 0.76886
| 0.69053
| 0
| 0.048668
| 0.291906
| 16,221
| 417
| 130
| 38.899281
| 0.764931
| 0.017015
| 0
| 0.701695
| 0
| 0
| 0.081569
| 0.008573
| 0
| 0
| 0
| 0
| 0.020339
| 1
| 0.094915
| false
| 0
| 0.040678
| 0.00339
| 0.230508
| 0.00339
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
33fc18c9018161d3711db4d35f5efe245a5c8b5a
| 135
|
py
|
Python
|
gigasecond/gigasecond.py
|
Isaac-Tolu/exercism-python
|
17c26b446e1f79a24daf6736dcf9982c16d06c50
|
[
"MIT"
] | null | null | null |
gigasecond/gigasecond.py
|
Isaac-Tolu/exercism-python
|
17c26b446e1f79a24daf6736dcf9982c16d06c50
|
[
"MIT"
] | null | null | null |
gigasecond/gigasecond.py
|
Isaac-Tolu/exercism-python
|
17c26b446e1f79a24daf6736dcf9982c16d06c50
|
[
"MIT"
] | null | null | null |
from datetime import timedelta
def add(moment):
    """Return the datetime exactly one gigasecond (10**9 seconds) after *moment*."""
    return moment + timedelta(seconds=10 ** 9)
| 19.285714
| 48
| 0.733333
| 16
| 135
| 6.0625
| 0.75
| 0.247423
| 0.329897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092593
| 0.2
| 135
| 7
| 49
| 19.285714
| 0.805556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.5
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
d50c780c2b041f164180028e9e692fdfb56b780e
| 19,749
|
py
|
Python
|
IntCode/__init__.py
|
guilhermebaos/AoC-2020-Python-Solution
|
4d473e254b88bacd728338f94788d5592776c4ff
|
[
"MIT"
] | null | null | null |
IntCode/__init__.py
|
guilhermebaos/AoC-2020-Python-Solution
|
4d473e254b88bacd728338f94788d5592776c4ff
|
[
"MIT"
] | null | null | null |
IntCode/__init__.py
|
guilhermebaos/AoC-2020-Python-Solution
|
4d473e254b88bacd728338f94788d5592776c4ff
|
[
"MIT"
] | null | null | null |
def intcode(memory=(), inputs=(), com_pos=0, rel_bas=0, output_vars=False, prints=True):
    """Run a complete IntCode program (Advent of Code 2019 machine).

    Supports opcodes 1-9 and 99 with position (0), immediate (1) and
    relative (2) parameter modes.

    Args:
        memory: the program, an iterable of ints; copied and zero-padded.
        inputs: values consumed by opcode 3; when exhausted, falls back to
            interactive input() from stdin.
        com_pos: instruction pointer to start execution from.
        rel_bas: initial relative base (adjusted by opcode 9).
        output_vars: when True, also return com_pos and rel_bas.
        prints: when False, suppress the console print on opcode 4.

    Returns:
        (memory, outputs), or (memory, outputs, com_pos, rel_bas) when
        output_vars is True.
    """
    # 999999 is a sentinel meaning "parameter slot not yet assigned".
    param1 = param2 = param3 = stored = 999999
    # com_pos      Command Position
    # rel_bas      Relative base for relative position parameters
    outputs = []
    inputs = list(inputs)
    memory = list(memory)
    # Extra zeroed memory beyond the program, for writes past its end.
    if len(memory) < 10000:
        memory += [0] * 10000
    while True:
        # What is the command
        instruction = str(memory[com_pos])
        # Identify the Opcode and its increase value (instruction width)
        opcode = int(instruction[-2:])
        if opcode == 99:
            opcode_len = 1
        elif opcode == 3 or opcode == 4 or opcode == 9:
            opcode_len = 2
        elif opcode == 5 or opcode == 6:
            opcode_len = 3
        else:
            opcode_len = 4
        # Identify the parameters modes for the Opcodes' parameters;
        # left-pad with '0' so every parameter has an explicit mode digit.
        parameters = str(instruction[:-2])
        if len(parameters) < opcode_len:
            parameters = parameters.rjust(opcode_len, '0')
        if len(parameters) < 3:
            parameters = parameters.rjust(3, '0')
        # Identify the parameters that are going to be used by the Opcodes.
        # `stored` tracks the write address used by opcode 3.
        try:
            if opcode != 99:
                if parameters[-1] == '0':  # 0-> Position Mode (Stores the address of the parameter)
                    param1 = memory[memory[com_pos + 1]]
                    stored = memory[com_pos + 1]
                elif parameters[-1] == '1':  # 1-> Absolute Mode (Stores the value of the parameter)
                    param1 = memory[com_pos + 1]
                    stored = com_pos + 1
                elif parameters[-1] == '2':  # 2-> Relative Mode (Stores the address relative to the base)
                    param1 = memory[memory[com_pos + 1] + rel_bas]
                    stored = memory[com_pos + 1] + rel_bas
                if opcode != 3 and opcode != 4 and opcode != 9:
                    if parameters[-2] == '0':  # 0-> Position Mode
                        param2 = memory[memory[com_pos + 2]]
                    elif parameters[-2] == '1':  # 1-> Absolute Mode
                        param2 = memory[com_pos + 2]
                    elif parameters[-2] == '2':
                        param2 = memory[memory[com_pos + 2] + rel_bas]
                if opcode != 5 and opcode != 6:
                    if parameters[-3] == '0':  # 0-> Position Mode
                        param3 = memory[com_pos + 3]
                    elif parameters[-3] == '1':  # 1-> Absolute Mode
                        param3 = com_pos + 3
                    elif parameters[-3] == '2':
                        param3 = memory[com_pos + 3] + rel_bas
        except IndexError as error:
            # NOTE(review): execution continues with stale param values after
            # this message — presumably never hit with the 10000-cell pad.
            print(f'ERROR: {error}, Opcode: {opcode}')
        # Execute the Opcodes
        # print('IntCode info:', 'Opcode:', opcode, 'Com_pos:', com_pos, 'Rel_bas:', rel_bas)
        if opcode == 99:  # Opcode 99 -> Finish the program
            break
        elif opcode == 1:  # Opcode 01 -> Sum two numbers and store them
            result = param1 + param2
            memory[param3] = result
            com_pos += opcode_len
        elif opcode == 2:  # Opcode 02 -> Multiply two numbers and store them
            result = param1 * param2
            memory[param3] = result
            com_pos += opcode_len
        elif opcode == 3:  # Opcode 03 -> Get user input and store it
            if len(inputs) == 0:
                # Queue exhausted: fall back to interactive stdin input.
                result = int(input('Input value: ').strip())
                print('\n')
                memory[stored] = result
                com_pos += opcode_len
            else:
                result = inputs[0]
                memory[stored] = result
                inputs.pop(0)
                com_pos += opcode_len
        elif opcode == 4:  # Opcode 04 -> Print a result
            if prints:
                print('\nOutput:', param1,
                      f' (stored in adress {stored})', '\n')
            outputs += [param1]
            com_pos += opcode_len
        elif opcode == 5:  # Opcode 05 -> Jump to address param2 if param1 != 0
            if param1 != 0:
                com_pos = param2
            else:
                com_pos += opcode_len
        elif opcode == 6:  # Opcode 06 -> Jump to address param2 if param1 == 0
            if param1 == 0:
                com_pos = param2
            else:
                com_pos += opcode_len
        elif opcode == 7:  # Opcode 07 -> Stores 1 if param1 < param2 else stores 0
            if param1 < param2:
                memory[param3] = 1
            else:
                memory[param3] = 0
            com_pos += opcode_len
        elif opcode == 8:  # Opcode 08 -> Stores 1 if param1 = param2 else stores 0
            if param1 == param2:
                memory[param3] = 1
            else:
                memory[param3] = 0
            com_pos += opcode_len
        elif opcode == 9:  # Opcode 09 -> Increases the relative base
            rel_bas += param1
            com_pos += opcode_len
    if output_vars:
        return memory, outputs, com_pos, rel_bas
    else:
        return memory, outputs
def intcode_day2(memory=()):
    """Run the day-2 subset of the IntCode machine (opcodes 1, 2 and 99 in
    position mode only) and return the final program state as a list."""
    program = list(memory)
    pointer = 0  # instruction pointer
    while True:
        op = program[pointer]
        if op == 99:
            break
        if op in (1, 2):
            lhs = program[program[pointer + 1]]
            rhs = program[program[pointer + 2]]
            program[program[pointer + 3]] = lhs + rhs if op == 1 else lhs * rhs
        # unknown opcodes are skipped, matching the original behaviour
        pointer += 4
    return program
def intcode_day5(memory=()):
    """Day-5 variant of the IntCode machine.

    Supports opcodes 1-8 and 99 with position (0) and immediate (1) parameter
    modes. Opcode 3 always reads interactively from stdin; opcode 4 prints.
    Returns the final program memory as a list.
    """
    # 999999 is a sentinel meaning "parameter slot not yet assigned".
    param1 = param2 = 999999
    com_pos = 0  # Command Position
    inputs = list(memory)
    while True:
        # What is the command
        instruction = str(inputs[com_pos])
        # Identify the Opcode and its increase value (instruction width).
        # param3 (the write destination / single operand) is read here before
        # mode parsing — it is assumed to be a position-mode address.
        opcode = int(instruction[-2:])
        if opcode == 99:
            param3 = 0
            opcode_len = 1
        elif opcode == 3 or opcode == 4:
            param3 = inputs[com_pos + 1]
            opcode_len = 2
        elif opcode == 5 or opcode == 6:
            param3 = 999999
            opcode_len = 3
        else:
            param3 = inputs[com_pos + 3]
            opcode_len = 4
        # Identify the parameters modes for the function parameters;
        # left-pad with '0' so each readable parameter has a mode digit.
        parameters = str(instruction[:-2])
        if len(parameters) < opcode_len-1:
            parameters = parameters.rjust(opcode_len-1, '0')
        if len(parameters) < 2:
            parameters = parameters.rjust(2, '0')
        # Identify the parameters that are going to be used by the Opcodes
        if opcode != 3 and opcode != 4 and opcode != 99:
            if parameters[-1] == '0':  # position mode
                param1 = inputs[inputs[com_pos + 1]]
            elif parameters[-1] == '1':  # immediate mode
                param1 = inputs[com_pos + 1]
            if parameters[-2] == '0':  # position mode
                param2 = inputs[inputs[com_pos + 2]]
            elif parameters[-2] == '1':  # immediate mode
                param2 = inputs[com_pos + 2]
        # Execute the Opcodes
        if opcode == 99:  # Opcode 99 -> Finish the program
            break
        elif opcode == 1:  # Opcode 01 -> Sum two numbers and store them
            result = param1 + param2
            inputs[param3] = result
            com_pos += opcode_len
        elif opcode == 2:  # Opcode 02 -> Multiply two numbers and store them
            result = param1 * param2
            inputs[param3] = result
            com_pos += opcode_len
        elif opcode == 3:  # Opcode 03 -> Get user input and store it
            result = int(input('Input value: ').strip())
            print('\n')
            inputs[param3] = result
            com_pos += opcode_len
        elif opcode == 4:  # Opcode 04 -> Print a result
            print('\nDeviation form expected value:', inputs[inputs[com_pos + 1]],
                  f' (stored in adress {inputs[com_pos + 1]})', '\n')
            com_pos += opcode_len
        elif opcode == 5:  # Opcode 05 -> Jump to adress param2 if param1 != 0
            if param1 != 0:
                com_pos = param2
            else:
                com_pos += opcode_len
        elif opcode == 6:  # Opcode 06 -> Jump to adress param2 if param1 == 0
            if param1 == 0:
                com_pos = param2
            else:
                com_pos += opcode_len
        elif opcode == 7:  # Opcode 07 -> Stores 1 if param1 < param2 else stores 0
            if param1 < param2:
                inputs[param3] = 1
            else:
                inputs[param3] = 0
            com_pos += opcode_len
        elif opcode == 8:  # Opcode 08 -> Stores 1 if param1 = param2 else stores 0
            if param1 == param2:
                inputs[param3] = 1
            else:
                inputs[param3] = 0
            com_pos += opcode_len
    return inputs
def intcode_day7(memory=(), com_pos=0, *user_inputs):
    """Day-7 amplifier variant of the IntCode machine.

    Like intcode_day5, but opcode 3 consumes values from *user_inputs and the
    machine pauses (breaks) when they run out, so amplifiers can be chained in
    a feedback loop.

    Returns:
        (inputs, output, condition, com_pos) where `inputs` is the program
        memory, `output` the last value emitted by opcode 4, `condition`
        False once the program has halted via opcode 99, and `com_pos` the
        instruction pointer to resume from.
    """
    print(user_inputs)  # debug: show the queued amplifier inputs
    # 999999 is a sentinel meaning "parameter slot not yet assigned".
    param1 = param2 = 999999
    # input_num: index of the next user input to consume; output: last emitted value
    input_num = output = 0
    condition = True
    inputs = list(memory)
    while True:
        # What is the command
        instruction = str(inputs[com_pos])
        # Identify the Opcode and its increase value (instruction width).
        # param3 (the write destination / single operand) is read here before
        # mode parsing — it is assumed to be a position-mode address.
        opcode = int(instruction[-2:])
        if opcode == 99:
            param3 = 0
            opcode_len = 1
        elif opcode == 3 or opcode == 4:
            param3 = inputs[com_pos + 1]
            opcode_len = 2
        elif opcode == 5 or opcode == 6:
            param3 = 999999
            opcode_len = 3
        else:
            param3 = inputs[com_pos + 3]
            opcode_len = 4
        # Identify the parameters modes for the function parameters
        parameters = str(instruction[:-2])
        if len(parameters) < opcode_len-1:
            parameters = parameters.rjust(opcode_len-1, '0')
        if len(parameters) < 2:
            parameters = parameters.rjust(2, '0')
        # Identify the parameters that are going to be used by the Opcodes
        if opcode != 3 and opcode != 4 and opcode != 99:
            if parameters[-1] == '0':  # position mode
                param1 = inputs[inputs[com_pos + 1]]
            elif parameters[-1] == '1':  # immediate mode
                param1 = inputs[com_pos + 1]
            if parameters[-2] == '0':  # position mode
                param2 = inputs[inputs[com_pos + 2]]
            elif parameters[-2] == '1':  # immediate mode
                param2 = inputs[com_pos + 2]
        # Execute the Opcodes
        if opcode == 99:  # Opcode 99 -> Finish the program
            condition = False
            break
        elif opcode == 1:  # Opcode 01 -> Sum two numbers and store them
            result = param1 + param2
            inputs[param3] = result
            com_pos += opcode_len
        elif opcode == 2:  # Opcode 02 -> Multiply two numbers and store them
            result = param1 * param2
            inputs[param3] = result
            com_pos += opcode_len
        elif opcode == 3:  # Opcode 03 -> Get user input and store it
            if len(user_inputs) == 0:
                # No queued inputs at all: fall back to interactive stdin.
                result = int(input('Input value: ').strip())
                print('\n')
                inputs[param3] = result
                com_pos += opcode_len
            else:
                print(input_num)  # debug: index of the input being consumed
                try:
                    result = user_inputs[input_num]
                    inputs[param3] = result
                    com_pos += opcode_len
                    input_num += 1
                except IndexError:
                    # Queued inputs exhausted: pause so the caller can resume
                    # later with more inputs (condition stays True).
                    break
        elif opcode == 4:  # Opcode 04 -> Print a result
            print('\nOutput to next amplifier:', inputs[inputs[com_pos + 1]],
                  f' (stored in adress {inputs[com_pos + 1]})', '\n')
            output = inputs[inputs[com_pos + 1]]
            condition = True
            com_pos += opcode_len
        elif opcode == 5:  # Opcode 05 -> Jump to adress param2 if param1 != 0
            if param1 != 0:
                com_pos = param2
            else:
                com_pos += opcode_len
        elif opcode == 6:  # Opcode 06 -> Jump to adress param2 if param1 == 0
            if param1 == 0:
                com_pos = param2
            else:
                com_pos += opcode_len
        elif opcode == 7:  # Opcode 07 -> Stores 1 if param1 < param2 else stores 0
            if param1 < param2:
                inputs[param3] = 1
            else:
                inputs[param3] = 0
            com_pos += opcode_len
        elif opcode == 8:  # Opcode 08 -> Stores 1 if param1 = param2 else stores 0
            if param1 == param2:
                inputs[param3] = 1
            else:
                inputs[param3] = 0
            com_pos += opcode_len
    return inputs, output, condition, com_pos
def intcode_day11(memory=(), inputs=(), com_pos=0, rel_bas=0):
    """Day-11 IntCode runner: like intcode(), but pauses after every two
    outputs (one paint value + one turn value) so the caller can move the
    painting robot and queue the next camera input.

    Args:
        memory: the program, an iterable of ints; copied and zero-padded.
        inputs: values consumed by opcode 3; stdin is used when empty.
        com_pos: instruction pointer to resume execution from.
        rel_bas: relative base to resume from (adjusted by opcode 9).

    Returns:
        (memory, outputs, com_pos, rel_bas). `outputs` ends with -1 when the
        program halted via opcode 99; otherwise it holds the two values
        emitted before the machine paused on the next input request.
    """
    # 999999 is a sentinel meaning "parameter slot not yet assigned".
    param1 = param2 = param3 = stored = 999999
    outputs = []
    will_break = False  # set once two outputs were emitted; pause at next opcode 3
    inputs = list(inputs)
    memory = list(memory)
    if len(memory) < 10000:
        # Fix: was [0] * 1000, which did not match the 10000-cell guard above
        # (and the pad used by intcode()), so relative-base writes beyond
        # program_len + 1000 raised IndexError. Pad the full 10000 cells.
        memory += [0] * 10000
    while True:
        # Decode the current instruction: the two low digits are the opcode.
        instruction = str(memory[com_pos])
        opcode = int(instruction[-2:])
        if opcode == 99:
            opcode_len = 1
        elif opcode == 3 or opcode == 4 or opcode == 9:
            opcode_len = 2
        elif opcode == 5 or opcode == 6:
            opcode_len = 3
        else:
            opcode_len = 4
        # Left-pad the mode digits so every parameter has an explicit mode.
        parameters = str(instruction[:-2])
        if len(parameters) < opcode_len:
            parameters = parameters.rjust(opcode_len, '0')
        if len(parameters) < 3:
            parameters = parameters.rjust(3, '0')
        # Resolve parameter values; `stored` is the write address for opcode 3.
        try:
            if opcode != 99:
                if parameters[-1] == '0':  # 0-> Position Mode (Stores the address of the parameter)
                    param1 = memory[memory[com_pos + 1]]
                    stored = memory[com_pos + 1]
                elif parameters[-1] == '1':  # 1-> Absolute Mode (Stores the value of the parameter)
                    param1 = memory[com_pos + 1]
                    stored = com_pos + 1
                elif parameters[-1] == '2':  # 2-> Relative Mode (Stores the address relative to the base)
                    param1 = memory[memory[com_pos + 1] + rel_bas]
                    stored = memory[com_pos + 1] + rel_bas
                if opcode != 3 and opcode != 4 and opcode != 9:
                    if parameters[-2] == '0':  # 0-> Position Mode
                        param2 = memory[memory[com_pos + 2]]
                    elif parameters[-2] == '1':  # 1-> Absolute Mode
                        param2 = memory[com_pos + 2]
                    elif parameters[-2] == '2':
                        param2 = memory[memory[com_pos + 2] + rel_bas]
                if opcode != 5 and opcode != 6:
                    if parameters[-3] == '0':  # 0-> Position Mode
                        param3 = memory[com_pos + 3]
                    elif parameters[-3] == '1':  # 1-> Absolute Mode
                        param3 = com_pos + 3
                    elif parameters[-3] == '2':
                        param3 = memory[com_pos + 3] + rel_bas
        except IndexError as error:
            print(f'ERROR: {error}, Opcode: {opcode}')
        # Execute the Opcodes
        if opcode == 99:  # Opcode 99 -> Finish the program; -1 signals halt
            outputs += [-1]
            break
        elif opcode == 1:  # Opcode 01 -> Sum two numbers and store them
            result = param1 + param2
            memory[param3] = result
            com_pos += opcode_len
        elif opcode == 2:  # Opcode 02 -> Multiply two numbers and store them
            result = param1 * param2
            memory[param3] = result
            com_pos += opcode_len
        elif opcode == 3:  # Opcode 03 -> Get user input and store it
            if will_break:
                # Two outputs were produced: pause before consuming input so
                # the caller can move the robot and supply the next camera value.
                break
            if len(inputs) == 0:
                result = int(input('Input value: ').strip())
                print('\n')
                memory[stored] = result
                com_pos += opcode_len
            else:
                result = inputs[0]
                memory[stored] = result
                inputs.pop(0)
                com_pos += opcode_len
        elif opcode == 4:  # Opcode 04 -> Print a result
            print('\nOutput:', param1,
                  f' (stored in adress {stored})', '\n')
            outputs += [param1]
            com_pos += opcode_len
            if len(outputs) == 2:
                will_break = True
        elif opcode == 5:  # Opcode 05 -> Jump to address param2 if param1 != 0
            if param1 != 0:
                com_pos = param2
            else:
                com_pos += opcode_len
        elif opcode == 6:  # Opcode 06 -> Jump to address param2 if param1 == 0
            if param1 == 0:
                com_pos = param2
            else:
                com_pos += opcode_len
        elif opcode == 7:  # Opcode 07 -> Stores 1 if param1 < param2 else stores 0
            if param1 < param2:
                memory[param3] = 1
            else:
                memory[param3] = 0
            com_pos += opcode_len
        elif opcode == 8:  # Opcode 08 -> Stores 1 if param1 = param2 else stores 0
            if param1 == param2:
                memory[param3] = 1
            else:
                memory[param3] = 0
            com_pos += opcode_len
        elif opcode == 9:  # Opcode 09 -> Increases the relative base
            rel_bas += param1
            com_pos += opcode_len
    return memory, outputs, com_pos, rel_bas
| 43.98441
| 133
| 0.45496
| 2,069
| 19,749
| 4.239246
| 0.062832
| 0.076616
| 0.050621
| 0.063277
| 0.933189
| 0.931023
| 0.929541
| 0.918709
| 0.912781
| 0.912781
| 0
| 0.05738
| 0.457289
| 19,749
| 448
| 134
| 44.082589
| 0.760963
| 0.172566
| 0
| 0.913706
| 0
| 0
| 0.023681
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.01269
| false
| 0
| 0
| 0
| 0.027919
| 0.035533
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1d21ed45a91a32c283f1e921d744236180a002a8
| 127
|
py
|
Python
|
chainermnx/__init__.py
|
ankahira/chainermnx
|
ffee217a555a5d59a6ccd5d8b054e071d1d7d09a
|
[
"MIT"
] | null | null | null |
chainermnx/__init__.py
|
ankahira/chainermnx
|
ffee217a555a5d59a6ccd5d8b054e071d1d7d09a
|
[
"MIT"
] | null | null | null |
chainermnx/__init__.py
|
ankahira/chainermnx
|
ffee217a555a5d59a6ccd5d8b054e071d1d7d09a
|
[
"MIT"
] | null | null | null |
from chainermnx import functions
from chainermnx import links
from chainermnx import training
from chainermnx import utils
| 14.111111
| 32
| 0.84252
| 16
| 127
| 6.6875
| 0.4375
| 0.523364
| 0.747664
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15748
| 127
| 8
| 33
| 15.875
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
1d5b32a42af9840ca0d681651ef5f7c82a5a6333
| 67,601
|
py
|
Python
|
rapid7vmconsole/models/shared_credential.py
|
kiblik/vm-console-client-python
|
038f6d33e8b2654a558326c6eb87f09ee23e0e22
|
[
"MIT"
] | 61
|
2018-05-17T05:57:09.000Z
|
2022-03-08T13:59:21.000Z
|
rapid7vmconsole/models/shared_credential.py
|
kiblik/vm-console-client-python
|
038f6d33e8b2654a558326c6eb87f09ee23e0e22
|
[
"MIT"
] | 33
|
2018-06-26T16:21:14.000Z
|
2022-03-03T20:55:47.000Z
|
rapid7vmconsole/models/shared_credential.py
|
kiblik/vm-console-client-python
|
038f6d33e8b2654a558326c6eb87f09ee23e0e22
|
[
"MIT"
] | 43
|
2018-02-24T05:45:53.000Z
|
2022-03-31T22:15:16.000Z
|
# coding: utf-8
"""
Python InsightVM API Client
OpenAPI spec version: 3
Contact: support@rapid7.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SharedCredential(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'account': 'SharedCredentialAccount',
'description': 'str',
'host_restriction': 'str',
'id': 'int',
'name': 'str',
'port_restriction': 'int',
'site_assignment': 'str',
'sites': 'list[int]'
}
attribute_map = {
'account': 'account',
'description': 'description',
'host_restriction': 'hostRestriction',
'id': 'id',
'name': 'name',
'port_restriction': 'portRestriction',
'site_assignment': 'siteAssignment',
'sites': 'sites'
}
    def __init__(self, account=None, description=None, host_restriction=None, id=None, name=None, port_restriction=None, site_assignment=None, sites=None):  # noqa: E501
        """SharedCredential - a model defined in Swagger.

        All parameters map to the swagger-declared attributes (see
        swagger_types / attribute_map).  `id` shadows the builtin, but the
        name is dictated by the generated API schema and cannot change.
        """  # noqa: E501
        # Backing fields for the properties declared in swagger_types.
        self._account = None
        self._description = None
        self._host_restriction = None
        self._id = None
        self._name = None
        self._port_restriction = None
        self._site_assignment = None
        self._sites = None
        self.discriminator = None
        # account, name and site_assignment are always assigned (required by
        # the schema); the optional attributes are only set when provided so
        # their property setters are not invoked with None.
        self.account = account
        if description is not None:
            self.description = description
        if host_restriction is not None:
            self.host_restriction = host_restriction
        if id is not None:
            self.id = id
        self.name = name
        if port_restriction is not None:
            self.port_restriction = port_restriction
        self.site_assignment = site_assignment
        if sites is not None:
            self.sites = sites
@property
def account(self):
"""Gets the account of this SharedCredential. # noqa: E501
Specify the type of service to authenticate as well as all of the information required by that service. <div class=\"properties\"> <div class=\"property-info\"> <span class=\"property-name\">service</span> <span class=\"param-type\">string</span> <div class=\"param-enum\"> <span class=\"param-enum-value string\">\"as400\"</span> <span class=\"param-enum-value string\">\"cifs\"</span> <span class=\"param-enum-value string\">\"cifshash\"</span> <span class=\"param-enum-value string\">\"cvs\"</span> <span class=\"param-enum-value string\">\"db2\"</span> <span class=\"param-enum-value string\">\"ftp\"</span> <span class=\"param-enum-value string\">\"http\"</span> <span class=\"param-enum-value string\">\"ms-sql\"</span> <span class=\"param-enum-value string\">\"mysql\"</span> <span class=\"param-enum-value string\">\"notes\"</span> <span class=\"param-enum-value string\">\"oracle\"</span> <span class=\"param-enum-value string\">\"pop\"</span> <span class=\"param-enum-value string\">\"postgresql\"</span> <span class=\"param-enum-value string\">\"remote-exec\"</span> <span class=\"param-enum-value string\">\"snmp\"</span> <span class=\"param-enum-value string\">\"snmpv3\"</span> <span class=\"param-enum-value string\">\"ssh\"</span> <span class=\"param-enum-value string\">\"ssh-key\"</span> <span class=\"param-enum-value string\">\"sybase\"</span> <span class=\"param-enum-value string\">\"telnet\"</span> </div> <div class=\"redoc-markdown-block\">The type of service to authenticate with.</div> </div> </div> The following are the names of the valid values for service: | Value | Service | | ------------- | ----------------------------------------------- | | `as400` | IBM AS/400 | | `cifs` | Microsoft Windows/Samba (SMB/CIFS) | | `cifshash` | Microsoft Windows/Samba LM/NTLM Hash (SMB/CIFS) | | `cvs` | Concurrent Versioning System (CVS) | | `db2` | DB2 | | `ftp` | File Transfer Protocol (FTP) | | `http` | Web Site HTTP Authentication | | `ms-sql` | Microsoft SQL Server | | 
`mysql` | MySQL Server | | `notes` | Lotus Notes/Domino | | `oracle` | Oracle | | `pop` | Post Office Protocol (POP) | | `postgresql` | PostgreSQL | | `remote-exec` | Remote Execution | | `snmp` | Simple Network Management Protocol v1/v2c | | `snmpv3` | Simple Network Management Protocol v3 | | `ssh` | Secure Shell (SSH) | | `ssh-key` | Secure Shell (SSH) Public Key | | `sybase` | Sybase SQL Server | | `telnet` | Telnet | <p>The following is a specification of supported credential properties for each type of service. These properties are to be specified within the <code>account</code> object.</p> `as400` supported properties: <div class=\"properties\"> <div class=\"property-info\"> <span class=\"property-name\">domain</span> <span class=\"param-type\">string</span> <div class=\"redoc-markdown-block\"><p>The address of the domain.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">username</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The user name for the account that will be used for authenticating.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">password</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The password for the account that will be used for authenticating. 
<strong>Note: This property is not returned in responses for security.</strong></p></div> </div> </div> `cifs` supported properties: <div class=\"properties\"> <div class=\"property-info\"> <span class=\"property-name\">domain</span> <span class=\"param-type\">string</span> <div class=\"redoc-markdown-block\"><p>The address of the domain.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">username</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The user name for the account that will be used for authenticating.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">password</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The password for the account that will be used for authenticating. <strong>Note: This property is not returned in responses for security.</strong></p></div> </div> </div> `cifshash` supported properties: <div class=\"properties\"> <div class=\"property-info\"> <span class=\"property-name\">domain</span> <span class=\"param-type\">string</span> <div class=\"redoc-markdown-block\"><p>The address of the domain.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">username</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The user name for the account that will be used for authenticating.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">ntlmHash</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The NTLM password hash. 
<strong>Note: This property is not returned in responses for security.</strong></p></div> </div> </div> `cvs` supported properties: <div class=\"properties\"> <div class=\"property-info\"> <span class=\"property-name\">domain</span> <span class=\"param-type\">string</span> <div class=\"redoc-markdown-block\"><p>The address of the domain.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">username</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The user name for the account that will be used for authenticating.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">password</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The password for the account that will be used for authenticating. <strong>Note: This property is not returned in responses for security.</strong></p></div> </div> </div> `db2` supported properties: <div class=\"properties\"> <div class=\"property-info\"> <span class=\"property-name\">database</span> <span class=\"param-type\">string</span> <div class=\"redoc-markdown-block\"><p>The name of the database.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">username</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The user name for the account that will be used for authenticating.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">password</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The password for the account that will be used for authenticating. 
<strong>Note: This property is not returned in responses for security.</strong></p></div> </div> </div> `ftp` supported properties: <div class=\"properties\"> <div class=\"property-info\"> <span class=\"property-name\">username</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The user name for the account that will be used for authenticating.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">password</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The password for the account that will be used for authenticating. <strong>Note: This property is not returned in responses for security.</strong></p></div> </div> </div> `http` supported properties: <div class=\"properties\"> <div class=\"property-info\"> <span class=\"property-name\">realm</span> <span class=\"param-type\">string</span> <div class=\"redoc-markdown-block\"><p>The realm.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">username</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The user name for the account that will be used for authenticating.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">password</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The password for the account that will be used for authenticating. 
<strong>Note: This property is not returned in responses for security.</strong></p></div> </div> </div> `ms-sql` supported properties: <div class=\"properties\"> <div class=\"property-info\"> <span class=\"property-name\">database</span> <span class=\"param-type\">string</span> <div class=\"redoc-markdown-block\"><p>The name of the database. If not specified, a default database name will be used during authentication.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">useWindowsAuthentication</span> <span class=\"param-type\">boolean</span> <div class=\"redoc-markdown-block\"> <p> Boolean flag signaling whether to connect to the database using Windows authentication. When set to <code>true</code>, Windows authentication is attempted; when set to <code>false</code>, SQL authentication is attempted.</p> </div> </div> <div class=\"property-info\"> <span class=\"property-name\">domain</span> <span class=\"param-type\">string</span> <div class=\"redoc-markdown-block\"><p>The address of the domain. This property cannot be specified unless property <code>useWindowsAuthentication</code> is set to <code>true</code>.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">username</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The user name for the account that will be used for authenticating.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">password</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The password for the account that will be used for authenticating. 
<strong>Note: This property is not returned in responses for security.</strong></p></div> </div> </div> `mysql` supported properties: <div class=\"properties\"> <div class=\"property-info\"> <span class=\"property-name\">database</span> <span class=\"param-type\">string</span> <div class=\"redoc-markdown-block\"><p>The name of the database. If not specified, a default database name will be used during authentication.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">username</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The user name for the account that will be used for authenticating.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">password</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The Notes ID password. <strong>Note: This property is not returned in responses for security.</strong></p></div> </div> </div> `notes` supported properties: <div class=\"properties\"> <div class=\"property-info\"> <span class=\"property-name\">notesIDPassword</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The password for the account that will be used for authenticating. <strong>Note: This property is not returned in responses for security.</strong></p></div> </div> </div> `oracle` supported properties: <div class=\"properties\"> <div class=\"property-info\"> <span class=\"property-name\">sid</span> <span class=\"param-type\">string</span> <div class=\"redoc-markdown-block\"><p>The name of the database. 
If not specified, a default database name will be used during authentication.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">username</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The user name for the account that will be used for authenticating.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">password</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The password for the account that will be used for authenticating. <strong>Note: This property is not returned in responses for security.</strong></p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">enumerateSids</span> <span class=\"param-type\">boolean</span> <div class=\"redoc-markdown-block\"> <p> Boolean flag instructing the scan engine to attempt to enumerate SIDs from your environment. If set to <code>true</code>, set the Oracle Net Listener password in property <code>oracleListenerPassword</code>.</p> </div> </div> <div class=\"property-info\"> <span class=\"property-name\">oracleListenerPassword</span> <span class=\"param-type\">string</span> <div class=\"redoc-markdown-block\"><p>The Oracle Net Listener password. 
Used to enumerate SIDs from your environment.</p></div> </div> </div> `pop` supported properties: <div class=\"properties\"> <div class=\"property-info\"> <span class=\"property-name\">username</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The user name for the account that will be used for authenticating.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">password</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The password for the account that will be used for authenticating. <strong>Note: This property is not returned in responses for security.</strong></p></div> </div> </div> `postgresql` supported properties: <div class=\"properties\"> <div class=\"property-info\"> <span class=\"property-name\">database</span> <span class=\"param-type\">string</span> <div class=\"redoc-markdown-block\"><p>The name of the database.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">username</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The user name for the account that will be used for authenticating.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">password</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The password for the account that will be used for authenticating. 
<strong>Note: This property is not returned in responses for security.</strong></p></div> </div> </div> `remote-exec` supported properties: <div class=\"properties\"> <div class=\"property-info\"> <span class=\"property-name\">username</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The user name for the account that will be used for authenticating.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">password</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The password for the account that will be used for authenticating. <strong>Note: This property is not returned in responses for security.</strong></p></div> </div> </div> `snmp` supported properties: <div class=\"properties\"> <div class=\"property-info\"> <span class=\"property-name\">communityName</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The community name that will be used for authenticating. 
<strong>Note: This property is not returned in responses for security.</strong></p></div> </div> </div> `snmpv3` supported properties: <div class=\"properties\"> <div class=\"property-info\"> <span class=\"property-name\">authenticationType</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"param-enum\"> <span class=\"param-enum-value string\">\"no-authentication\"</span> <span class=\"param-enum-value string\">\"md5\"</span> <span class=\"param-enum-value string\">\"sha\"</span> </div> <div class=\"redoc-markdown-block\"><p>The authentication protocols available to use in SNMP v3.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">username</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The user name for the account that will be used for authenticating.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">password</span> <span class=\"param-type\">string</span> <div class=\"redoc-markdown-block\"> <p> The password for the account that will be used for authenticating. Is required when the property <code>authenticationType</code> is set to valid value other than <code>\"no-authentication\"</code>. 
<strong>Note: This property is not returned in responses for security.</strong></p> </div> </div> <div class=\"property-info\"> <span class=\"property-name\">privacyType</span> <span class=\"param-type\">string</span> <div class=\"param-enum\"> <span class=\"param-enum-value string\">\"no-privacy\"</span> <span class=\"param-enum-value string\">\"des\"</span> <span class=\"param-enum-value string\">\"aes-128\"</span> <span class=\"param-enum-value string\">\"aes-192\"</span> <span class=\"param-enum-value string\">\"aes-192-with-3-des-key-extension\"</span> <span class=\"param-enum-value string\">\"aes-256\"</span> <span class=\"param-enum-value string\">\"aes-265-with-3-des-key-extension\"</span> </div> <div class=\"redoc-markdown-block\"><p>The privacy protocols available to use in SNMP v3.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">privacyPassword</span> <span class=\"param-type\">string</span> <div class=\"redoc-markdown-block\"> <p> The privacy password for the account that will be used for authenticating. Is required when the property <code>authenticationType</code> is set to valid value other than <code>\"no-authentication\"</code> and when the <code>privacyType</code> is set to a valid value other than code>\"no-privacy\"</code>. 
<strong>Note: This property is not returned in responses for security.</strong></p> </div> </div> </div> `ssh` supported properties: <div class=\"properties\"> <div class=\"property-info\"> <span class=\"property-name\">username</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The user name for the account that will be used for authenticating.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">password</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The password for the account that will be used for authenticating. <strong>Note: This property is not returned in responses for security.</strong></p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">permissionElevation</span> <span class=\"param-type\">string</span> <div class=\"param-enum\"> <span class=\"param-enum-value string\">\"none\"</span> <span class=\"param-enum-value string\">\"sudo\"</span> <span class=\"param-enum-value string\">\"sudosu\"</span> <span class=\"param-enum-value string\">\"su\"</span> <span class=\"param-enum-value string\">\"pbrun\"</span> <span class=\"param-enum-value string\">\"privileged-exec\"</span> </div> <div class=\"redoc-markdown-block\"> <p> Elevate scan engine permissions to administrative or root access, which is necessary to obtain certain data during the scan. Defaults to <code>\"none\"</code> if not specified. </p> </div> </div> <div class=\"property-info\"> <span class=\"property-name\">permissionElevationUsername</span> <span class=\"param-type\">string</span> <div class=\"redoc-markdown-block\"> <p> The user name for the account with elevated permissions. 
This property must not be specified when the property <code>permissionElevation</code> is set to either <code>\"none\"</code> or <code>\"pbrun\"</code>; otherwise the property is required.</p> </div> </div> <div class=\"property-info\"> <span class=\"property-name\">password</span> <span class=\"param-type\">string</span> <div class=\"redoc-markdown-block\"> <p> The password for the account with elevated permissions. This property must not be specified when the property <code>permissionElevation</code> is set to either <code>\"none\"</code> or <code>\"pbrun\"</code>; otherwise the property is required.<strong>Note: This property is not returned in responses for security.</strong></p> </div> </div> </div> `ssh-key` supported properties: <div class=\"properties\"> <div class=\"property-info\"> <span class=\"property-name\">username</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The user name for the account that will be used for authenticating.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">privateKeyPassword</span> <span class=\"param-type\">string</span> <div class=\"redoc-markdown-block\"><p>The password for private key. <strong>Note: This property is not returned in responses for security.</strong></p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">pemKey</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The PEM-format private key. 
<strong>Note: This property is not returned in responses for security.</strong></p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">permissionElevation</span> <span class=\"param-type\">string</span> <div class=\"param-enum\"> <span class=\"param-enum-value string\">\"none\"</span> <span class=\"param-enum-value string\">\"sudo\"</span> <span class=\"param-enum-value string\">\"sudosu\"</span> <span class=\"param-enum-value string\">\"su\"</span> <span class=\"param-enum-value string\">\"pbrun\"</span> <span class=\"param-enum-value string\">\"privileged-exec\"</span> </div> <div class=\"redoc-markdown-block\"> <p> Elevate scan engine permissions to administrative or root access, which is necessary to obtain certain data during the scan. Defaults to <code>\"none\"</code> if not specified. </p> </div> </div> <div class=\"property-info\"> <span class=\"property-name\">permissionElevationUsername</span> <span class=\"param-type\">string</span> <div class=\"redoc-markdown-block\"> <p> The user name for the account with elevated permissions. This property must not be specified when the property <code>permissionElevation</code> is set to either <code>\"none\"</code> or <code>\"pbrun\"</code>; otherwise the property is required.</p> </div> </div> <div class=\"property-info\"> <span class=\"property-name\">password</span> <span class=\"param-type\">string</span> <div class=\"redoc-markdown-block\"> <p> The password for the account with elevated permissions. 
This property must not be specified when the property <code>permissionElevation</code> is set to either <code>\"none\"</code> or <code>\"pbrun\"</code>; otherwise the property is required.<strong>Note: This property is not returned in responses for security.</strong></p> </div> </div> </div> `sybase` supported properties: <div class=\"properties\"> <div class=\"property-info\"> <span class=\"property-name\">database</span> <span class=\"param-type\">string</span> <div class=\"redoc-markdown-block\"><p>The name of the database. If not specified, a default database name will be used during authentication.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">useWindowsAuthentication</span> <span class=\"param-type\">boolean</span> <div class=\"redoc-markdown-block\"> <p> Boolean flag signaling whether to connect to the database using Windows authentication. When set to <code>true</code>, Windows authentication is attempted; when set to <code>false</code>, SQL authentication is attempted.</p> </div> </div> <div class=\"property-info\"> <span class=\"property-name\">domain</span> <span class=\"param-type\">string</span> <div class=\"redoc-markdown-block\"><p>The address of the domain. This property cannot be specified unless property <code>useWindowsAuthentication</code> is set to <code>true</code>.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">username</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The user name for the account that will be used for authenticating.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">password</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The password for the account that will be used for authenticating. 
<strong>Note: This property is not returned in responses for security.</strong></p></div> </div> </div> `telnet` supported properties: <div class=\"properties\"> <div class=\"property-info\"> <span class=\"property-name\">username</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The user name for the account that will be used for authenticating.</p></div> </div> <div class=\"property-info\"> <span class=\"property-name\">password</span> <span class=\"param-type\">string</span> <span _ngcontent-c21 class=\"param-required\">Required</span> <div class=\"redoc-markdown-block\"><p>The password for the account that will be used for authenticating. <strong>Note: This property is not returned in responses for security.</strong></p></div> </div> </div> # noqa: E501
:return: The account of this SharedCredential. # noqa: E501
:rtype: SharedCredentialAccount
"""
return self._account
@account.setter
def account(self, account):
    """Sets the account of this SharedCredential.

    The account object selects the service to authenticate against
    (``service`` is one of: ``as400``, ``cifs``, ``cifshash``, ``cvs``,
    ``db2``, ``ftp``, ``http``, ``ms-sql``, ``mysql``, ``notes``,
    ``oracle``, ``pop``, ``postgresql``, ``remote-exec``, ``snmp``,
    ``snmpv3``, ``ssh``, ``ssh-key``, ``sybase``, ``telnet``) and
    carries whatever credential properties that service requires
    (username/password, domain, database, SNMP community name, SSH
    permission elevation, PEM key, and so on). Secret-bearing
    properties are never echoed back in API responses; consult the
    upstream API documentation for the full per-service property list.

    :param account: The account of this SharedCredential.
    :type: SharedCredentialAccount
    :raises ValueError: if ``account`` is ``None`` — the property is
        required on a shared credential.
    """
    # Reject an explicit removal attempt up front, using the
    # generator's canonical error message so callers see a stable text.
    if account is None:
        raise ValueError("Invalid value for `account`, must not be `None`")  # noqa: E501
    self._account = account
@property
def description(self):
    """Gets the description of this SharedCredential.

    The free-form description of the credential.

    :return: The description of this SharedCredential.
    :rtype: str
    """
    # Plain accessor over the backing slot written by the setter.
    return self._description
@description.setter
def description(self, description):
    """Sets the description of this SharedCredential.

    The free-form description of the credential. Optional — no
    validation is applied, so ``None`` is accepted.

    :param description: The description of this SharedCredential.
    :type: str
    """
    self._description = description
@property
def host_restriction(self):
    """Gets the host_restriction of this SharedCredential.

    The host name or IP address that the credential is restricted to.

    :return: The host_restriction of this SharedCredential.
    :rtype: str
    """
    # Plain accessor over the backing slot written by the setter.
    return self._host_restriction
@host_restriction.setter
def host_restriction(self, host_restriction):
    """Sets the host_restriction of this SharedCredential.

    The host name or IP address that the credential should be
    restricted to. Optional — no validation is applied here.

    :param host_restriction: The host_restriction of this SharedCredential.
    :type: str
    """
    self._host_restriction = host_restriction
@property
def id(self):
    """Gets the id of this SharedCredential.

    The identifier of the credential.

    NOTE: the name shadows the ``id`` builtin, but it is part of the
    generated public interface and cannot be renamed.

    :return: The id of this SharedCredential.
    :rtype: int
    """
    # Plain accessor over the backing slot written by the setter.
    return self._id
@id.setter
def id(self, id):
    """Sets the id of this SharedCredential.

    The identifier of the credential. Optional — no validation is
    applied here (the server assigns identifiers).

    :param id: The id of this SharedCredential.
    :type: int
    """
    self._id = id
@property
def name(self):
    """Gets the name of this SharedCredential.

    The name of the credential.

    :return: The name of this SharedCredential.
    :rtype: str
    """
    # Plain accessor over the backing slot written by the setter.
    return self._name
@name.setter
def name(self, name):
    """Sets the name of this SharedCredential.

    The name of the credential.

    :param name: The name of this SharedCredential.
    :type: str
    :raises ValueError: if ``name`` is ``None`` — the property is
        required.
    """
    # `name` is mandatory; reject removal with the generator's
    # canonical error message so callers see a stable text.
    if name is None:
        raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
    self._name = name
@property
def port_restriction(self):
    """Gets the port_restriction of this SharedCredential.

    Further restricts the credential to authenticate on a specific
    port. A port restriction is only meaningful when the property
    ``hostRestriction`` is also specified.

    :return: The port_restriction of this SharedCredential.
    :rtype: int
    """
    # Plain accessor over the backing slot written by the setter.
    return self._port_restriction
@port_restriction.setter
def port_restriction(self, port_restriction):
    """Sets the port_restriction of this SharedCredential.

    Further restricts the credential to authenticate on a specific
    port (valid TCP/UDP range 1-65535). A port restriction is only
    meaningful when the property ``hostRestriction`` is also
    specified. ``None`` clears the restriction.

    :param port_restriction: The port_restriction of this SharedCredential.
    :type: int
    :raises ValueError: if the value is outside the range [1, 65535].
    """
    # Range-check only when a value is actually supplied; ``None``
    # means "no port restriction" and is always accepted.
    if port_restriction is not None:
        if port_restriction > 65535:
            raise ValueError("Invalid value for `port_restriction`, must be a value less than or equal to `65535`")  # noqa: E501
        if port_restriction < 1:
            raise ValueError("Invalid value for `port_restriction`, must be a value greater than or equal to `1`")  # noqa: E501
    self._port_restriction = port_restriction
@property
def site_assignment(self):
    """Gets the site_assignment of this SharedCredential.

    How the shared scan credential is assigned to sites:

    - ``"all-sites"``: assigned to all current and future sites.
    - ``"specific-sites"``: assigned to zero sites by default;
      administrators must explicitly assign sites to the credential.

    Shared scan credentials assigned to a site can be disabled within
    the site configuration, if needed.

    :return: The site_assignment of this SharedCredential.
    :rtype: str
    """
    # Plain accessor over the backing slot written by the setter.
    return self._site_assignment
@site_assignment.setter
def site_assignment(self, site_assignment):
    """Sets the site_assignment of this SharedCredential.

    How the shared scan credential is assigned to sites: either
    ``"all-sites"`` (available to all current and future sites) or
    ``"specific-sites"`` (administrators explicitly assign sites).
    Shared scan credentials assigned to a site can be disabled within
    the site configuration, if needed.

    :param site_assignment: The site_assignment of this SharedCredential.
    :type: str
    :raises ValueError: if ``site_assignment`` is ``None`` — the
        property is required.
    """
    # `siteAssignment` is mandatory; reject removal with the
    # generator's canonical error message so callers see a stable text.
    if site_assignment is None:
        raise ValueError("Invalid value for `site_assignment`, must not be `None`")  # noqa: E501
    self._site_assignment = site_assignment
@property
def sites(self):
    """Gets the sites of this SharedCredential.

    Identifiers of the sites explicitly granted access to this shared
    scan credential.  Only populated when `siteAssignment` is
    ``"specific-sites"``; it is ``None`` under ``"all-sites"``.

    :return: The sites of this SharedCredential.
    :rtype: list[int]
    """
    return self._sites
@sites.setter
def sites(self, sites):
    """Sets the sites of this SharedCredential.

    No validation is performed here; the server rejects site lists that
    conflict with the current `siteAssignment` mode.

    :param sites: The sites of this SharedCredential.
    :type: list[int]
    """
    self._sites = sites
def to_dict(self):
    """Serialise the model's swagger-declared attributes into a plain dict."""
    result = {}
    for attr in self.swagger_types:
        value = getattr(self, attr)
        if isinstance(value, list):
            # Convert nested models inside lists; plain items pass through.
            result[attr] = [
                item.to_dict() if hasattr(item, "to_dict") else item
                for item in value
            ]
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            # Convert model-typed dict values; plain values pass through.
            result[attr] = {
                key: val.to_dict() if hasattr(val, "to_dict") else val
                for key, val in value.items()
            }
        else:
            result[attr] = value
    # Generated models may subclass dict; merge any extra mapping entries.
    if issubclass(SharedCredential, dict):
        for key, value in self.items():
            result[key] = value
    return result
def to_str(self):
    """Return a pretty-printed string form of the serialised model."""
    # Delegate formatting to pprint so nested structures stay readable.
    return pprint.pformat(self.to_dict())
def __repr__(self):
    """Use the pretty-printed model string for `print` and `pprint`."""
    return self.to_str()
def __eq__(self, other):
    """Two SharedCredential objects are equal when all attributes match."""
    # Non-SharedCredential operands short-circuit to False.
    return isinstance(other, SharedCredential) and self.__dict__ == other.__dict__
def __ne__(self, other):
    """Logical inverse of ``__eq__`` (needed for Python 2 semantics)."""
    return not (self == other)
| 211.915361
| 27,991
| 0.677919
| 9,146
| 67,601
| 4.98841
| 0.036737
| 0.068648
| 0.066281
| 0.081273
| 0.940382
| 0.930431
| 0.92653
| 0.907834
| 0.896633
| 0.888392
| 0
| 0.006995
| 0.145693
| 67,601
| 318
| 27,992
| 212.581761
| 0.783003
| 0.88475
| 0
| 0.072464
| 1
| 0
| 0.118462
| 0.004489
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15942
| false
| 0
| 0.021739
| 0
| 0.304348
| 0.014493
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
1d7dd56b970d212604d99e45554504139f9e494b
| 170
|
py
|
Python
|
todo/backend/todos/admin.py
|
idle-solutions/vk-game
|
08aeff3fdd2a74ee1942bfe064fff988973aacdc
|
[
"MIT"
] | null | null | null |
todo/backend/todos/admin.py
|
idle-solutions/vk-game
|
08aeff3fdd2a74ee1942bfe064fff988973aacdc
|
[
"MIT"
] | 1
|
2019-10-23T15:32:53.000Z
|
2019-10-23T15:32:53.000Z
|
todo/backend/todos/admin.py
|
idle-solutions/vk-game
|
08aeff3fdd2a74ee1942bfe064fff988973aacdc
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Player, Character, Todo
# admin.site.register(Todo)
# admin.site.register(Player)
# admin.site.register(Character)
| 21.25
| 43
| 0.782353
| 23
| 170
| 5.782609
| 0.478261
| 0.203008
| 0.383459
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105882
| 170
| 7
| 44
| 24.285714
| 0.875
| 0.494118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
1d9dfac474c2088651fb9dd322e799846c0fc446
| 9,580
|
py
|
Python
|
tests/test_args_checker.py
|
EVEprosper/ProsperLint
|
28edf818bea3bc56c06c3a891be878b8d4d26534
|
[
"MIT"
] | null | null | null |
tests/test_args_checker.py
|
EVEprosper/ProsperLint
|
28edf818bea3bc56c06c3a891be878b8d4d26534
|
[
"MIT"
] | null | null | null |
tests/test_args_checker.py
|
EVEprosper/ProsperLint
|
28edf818bea3bc56c06c3a891be878b8d4d26534
|
[
"MIT"
] | null | null | null |
"""Tests for the string quote checker for class-level docstrings.
"""
from pylint_prosper.args_checker import ArgsIndentChecker
import helpers
from pylint import testutils
import astroid
class TestFuncArgsIndentChecker(helpers.ProsperCheckerTestCase):
    """Exercise ArgsIndentChecker against free-standing function definitions."""

    CHECKER_CLASS = ArgsIndentChecker

    def test_good_function(self):
        """A multi-line argument list, one arg per line, raises no message."""
        good_function = '''
def my_good_function( #@
    arg1,
    arg2,
    optional_arg=None
):
    return arg1 + arg2 + optional_arg
'''
        block = astroid.extract_node(good_function)
        with self.assertNoMessages():
            self.checker.visit_functiondef(block)

    def test_good_function_empty(self):
        """An empty argument list raises no message."""
        good_function = '''
def my_good_function(): #@
    return arg1 + arg2 + optional_arg
'''
        block = astroid.extract_node(good_function)
        with self.assertNoMessages():
            self.checker.visit_functiondef(block)

    def test_bad_function(self):
        """Args starting on the `def` line trigger invalid-function-arg-format."""
        bad_function = '''
def my_bad_function(arg1, #@
    arg2,
    optional_arg=None
):
    return arg1 + arg2 + optional_arg
'''
        block = astroid.extract_node(bad_function)
        with self.assertAddsMessages(
            testutils.Message(
                msg_id='invalid-function-arg-format',
                line=2
            )
        ):
            self.checker.visit_functiondef(block)

    @testutils.set_config(kevlin_func_args=False)
    def test_bad_function_override(self):
        """With kevlin_func_args disabled, the bad layout is NOT reported."""
        bad_function = '''
def my_bad_function(arg1, #@
    arg2,
    optional_arg=None
):
    return arg1 + arg2 + optional_arg
'''
        block = astroid.extract_node(bad_function)
        with self.assertNoMessages():
            self.checker.visit_functiondef(block)

    def test_good_oneline_function(self):
        """don't make noise if one-line args are within limit (2)"""
        good_oneline_func = '''
def my_oneliner(arg1, arg2): #@
    pass
'''
        block = astroid.extract_node(good_oneline_func)
        with self.assertNoMessages():
            self.checker.visit_functiondef(block)

    def test_too_many_oneline_function(self):
        """Three one-line args exceed the default limit of 2 and are reported."""
        bad_oneline_func = '''
def my_oneliner(arg1, arg2, arg3): #@
    pass
'''
        block = astroid.extract_node(bad_oneline_func)
        with self.assertAddsMessages(
            testutils.Message(
                msg_id='invalid-oneline-function-format',
                line=2,
                args=2
            )
        ):
            self.checker.visit_functiondef(block)

    @testutils.set_config(single_line_args_limit=3)
    def test_good_oneline_function_custom(self):
        """With the limit raised to 3, three one-line args raise no message."""
        bad_oneline_func = '''
def my_oneliner(arg1, arg2, arg3): #@
    pass
'''
        block = astroid.extract_node(bad_oneline_func)
        with self.assertNoMessages():
            self.checker.visit_functiondef(block)

    @testutils.set_config(single_line_args_limit=3)
    def test_bad_oneline_function_custom(self):
        """Four one-line args exceed a custom limit of 3 and are reported."""
        bad_oneline_func = '''
def my_oneliner(arg1, arg2, arg3, arg4): #@
    pass
'''
        block = astroid.extract_node(bad_oneline_func)
        with self.assertAddsMessages(
            testutils.Message(
                msg_id='invalid-oneline-function-format',
                line=2,
                args=3
            )
        ):
            self.checker.visit_functiondef(block)
class TestMethodArgsIndentChecker(helpers.ProsperCheckerTestCase):
    """Exercise ArgsIndentChecker against methods inside class bodies."""

    CHECKER_CLASS = ArgsIndentChecker

    def test_good_method(self):
        """validate methods get the same lint treatment"""
        good_class = '''
class FancyClass: #@
    """class docstring"""
    def foo(
        self,
        arg1,
        arg2,
        optional_arg=None
    ):
        pass
'''
        block = astroid.extract_node(good_class)
        with self.assertNoMessages():
            self.checker.visit_classdef(block)

    def test_good_method_empty(self):
        """A method with an empty argument list raises no message."""
        good_class = '''
class FancyClass: #@
    """class docstring"""
    def foo():
        pass
'''
        block = astroid.extract_node(good_class)
        with self.assertNoMessages():
            self.checker.visit_classdef(block)

    def test_good_method_many(self):
        """validate all methods are good in class"""
        good_long_class = '''
class FancierClass: #@
    def foo(
        self,
        arg1,
        arg2,
        optional_arg=None
    ):
        pass
    def bar(
        self,
        arg1,
        arg2,
        optional_arg=None
    ):
        pass
'''
        block = astroid.extract_node(good_long_class)
        with self.assertNoMessages():
            self.checker.visit_classdef(block)

    def test_bad_method(self):
        """validate expected error with invalid args format"""
        bad_class = '''
class BadClass: #@
    """class docstring"""
    def foo(self,
        arg1,
        arg2,
        optional_arg=None
    ):
        pass
'''
        block = astroid.extract_node(bad_class)
        with self.assertAddsMessages(
            testutils.Message(
                msg_id='invalid-function-arg-format',
                line=4
            )
        ):
            self.checker.visit_classdef(block)

    @testutils.set_config(kevlin_func_args=False)
    def test_bad_method_override(self):
        """validate skip behavior for class args"""
        bad_class = '''
class BadClass: #@
    """class docstring"""
    def foo(self,
        arg1,
        arg2,
        optional_arg=None
    ):
        pass
'''
        block = astroid.extract_node(bad_class)
        with self.assertNoMessages():
            self.checker.visit_classdef(block)

    def test_good_oneline_method(self):
        """validate one-line method limits (2+1)"""
        good_oneline_class = '''
class OneLineClass: #@
    def foo(self, arg1, arg2): # +1 for ``self``
        pass
'''
        block = astroid.extract_node(good_oneline_class)
        with self.assertNoMessages():
            self.checker.visit_classdef(block)

    def test_too_many_oneline_method(self):
        """validate error for too many one-line method args"""
        bad_oneline_class = '''
class OneLineClass: #@
    def foo(self, arg1, arg2, arg3): # +1 for ``self``
        pass
'''
        block = astroid.extract_node(bad_oneline_class)
        with self.assertAddsMessages(
            testutils.Message(
                msg_id='invalid-oneline-function-format',
                line=3,
                args=2
            )
        ):
            self.checker.visit_classdef(block)

    @testutils.set_config(single_line_args_limit=3)
    def test_good_oneline_method_custom(self):
        """With the limit raised to 3, three args (+self) raise no message."""
        good_oneline_class = '''
class OneLineClass: #@
    def foo(self, arg1, arg2, arg3): # +1 for ``self``
        pass
'''
        block = astroid.extract_node(good_oneline_class)
        with self.assertNoMessages():
            self.checker.visit_classdef(block)

    @testutils.set_config(single_line_args_limit=3)
    def test_bad_oneline_method_custom(self):
        """Four args (+self) exceed a custom limit of 3 and are reported."""
        bad_oneline_class = '''
class OneLineClass: #@
    def foo(self, arg1, arg2, arg3, arg4): # +1 for ``self``
        pass
'''
        block = astroid.extract_node(bad_oneline_class)
        with self.assertAddsMessages(
            testutils.Message(
                msg_id='invalid-oneline-function-format',
                line=3,
                args=3
            )
        ):
            self.checker.visit_classdef(block)
class TestCallFuncArgsIndentChecker(helpers.ProsperCheckerTestCase):
    """Exercise ArgsIndentChecker against call sites (not definitions)."""

    CHECKER_CLASS = ArgsIndentChecker

    def test_good_call_func(self):
        """A multi-line call argument list, one arg per line, is accepted."""
        good_call = '''
result = my_function(
    arg1,
    arg2,
    arg3=None
)
'''
        block = astroid.extract_node(good_call)
        with self.assertNoMessages():
            self.checker.visit_callfunc(block)

    def test_good_call_func_empty(self):
        """A call with no arguments is accepted."""
        good_call = '''
result = my_function()
'''
        block = astroid.extract_node(good_call)
        with self.assertNoMessages():
            self.checker.visit_callfunc(block)

    def test_bad_call_layout(self):
        """Call args starting on the call line trigger invalid-function-arg-format."""
        bad_call = '''
result = my_function(arg1, #@
    arg2,
    optional_arg=None
)
'''
        block = astroid.extract_node(bad_call)
        with self.assertAddsMessages(
            testutils.Message(
                msg_id='invalid-function-arg-format',
                line=2
            )
        ):
            self.checker.visit_callfunc(block)

    @testutils.set_config(kevlin_func_args=False)
    def test_bad_call_layout_override(self):
        """With kevlin_func_args disabled, the bad call layout is NOT reported."""
        bad_call = '''
result = my_function(arg1, #@
    arg2,
    optional_arg=None
)
'''
        block = astroid.extract_node(bad_call)
        with self.assertNoMessages():
            self.checker.visit_callfunc(block)
| 28.855422
| 68
| 0.595303
| 1,030
| 9,580
| 5.307767
| 0.106796
| 0.033656
| 0.072983
| 0.088348
| 0.901043
| 0.866837
| 0.860252
| 0.848729
| 0.806475
| 0.784525
| 0
| 0.011828
| 0.302818
| 9,580
| 331
| 69
| 28.942598
| 0.806708
| 0.093841
| 0
| 0.802158
| 0
| 0
| 0.271444
| 0.028825
| 0
| 0
| 0
| 0
| 0.07554
| 1
| 0.07554
| false
| 0.05036
| 0.014388
| 0
| 0.125899
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
1da0ae63ab95e05eee8accec2992cb09befb7ca5
| 3,909
|
py
|
Python
|
{{cookiecutter.repo_name}}/{{cookiecutter.project_name}}/tests/stores/test_pet.py
|
srikiran1/cookiecutter-connexion-microcosm-service
|
da1dea38e08379e8415202b6aed23f79d2d9d24d
|
[
"MIT"
] | 2
|
2019-12-10T03:08:09.000Z
|
2019-12-10T03:08:11.000Z
|
{{cookiecutter.repo_name}}/{{cookiecutter.project_name}}/tests/stores/test_pet.py
|
srikiran1/cookiecutter-connexion-microcosm-service
|
da1dea38e08379e8415202b6aed23f79d2d9d24d
|
[
"MIT"
] | null | null | null |
{{cookiecutter.repo_name}}/{{cookiecutter.project_name}}/tests/stores/test_pet.py
|
srikiran1/cookiecutter-connexion-microcosm-service
|
da1dea38e08379e8415202b6aed23f79d2d9d24d
|
[
"MIT"
] | 1
|
2019-12-10T03:08:03.000Z
|
2019-12-10T03:08:03.000Z
|
"""
Example persistence tests.
Tests cover model-specific constraints under the assumption that framework conventions
handle most boilerplate.
"""
from hamcrest import (
assert_that,
equal_to,
is_,
)
from mock import Mock, patch
from {{ cookiecutter.project_name }}.stores.pet import PetStore
class TestPetStore(object):
    """Example persistence tests for PetStore, built entirely on mocks.

    The three ``find`` tests differ only in the ``order_by`` value and the
    model column the ordering should resolve to, so they share one helper.
    """

    def setup(self):
        # Fresh mocked object graph per test (pytest-style setup hook).
        self.graph = Mock()
        self.store = PetStore(self.graph)

    def _assert_find_orders_by(self, mock_session, order_by, expected_column):
        """Run ``store.find`` with *order_by* and verify the full query chain.

        :param mock_session: patched SQLAlchemy session mock
        :param order_by: the order_by string passed to ``find``
        :param expected_column: model column the ordering should use
        """
        tags = ["a", "b"]
        limit = 100
        offset = 200
        direction = "ASCENDING"
        mock_order_by = mock_session.query.return_value.filter.return_value.order_by
        # find() must return whatever the query chain's .all() yields.
        assert_that(self.store.find(tags, limit, offset, order_by, direction), is_(equal_to(
            mock_order_by.return_value.limit.return_value.offset.return_value.all.return_value)))
        # Verify each step of the chain was invoked exactly once with the
        # expected arguments.
        mock_session.query.assert_called_once_with(self.store.model_class)
        mock_session.query.return_value.filter.assert_called_once()
        mock_order_by.assert_called_once_with(expected_column)
        mock_order_by.return_value.limit.assert_called_once_with(limit)
        mock_order_by.return_value.limit.return_value.offset.assert_called_once_with(offset)
        mock_order_by.return_value.limit.return_value.offset.return_value.all.assert_called_once()

    @patch("microcosm_postgres.store.SessionContext.session")
    def test_find(self, mock_session):
        """Ordering by "name" uses the model's ``name`` column."""
        self._assert_find_orders_by(mock_session, "name", self.store.model_class.name)

    @patch("microcosm_postgres.store.SessionContext.session")
    def test_find_created(self, mock_session):
        """Ordering by "created" maps to the ``created_at`` column."""
        self._assert_find_orders_by(mock_session, "created", self.store.model_class.created_at)

    @patch("microcosm_postgres.store.SessionContext.session")
    def test_find_updated(self, mock_session):
        """Ordering by "updated" maps to the ``updated_at`` column."""
        self._assert_find_orders_by(mock_session, "updated", self.store.model_class.updated_at)

    @patch("microcosm_postgres.store.SessionContext.session")
    def test_count(self, mock_session):
        """count() delegates to query(...).filter(...).count()."""
        tags = ["a", "b"]
        mock_count = mock_session.query.return_value.filter.return_value.count
        assert_that(self.store.count(tags), is_(equal_to(mock_count.return_value)))
        mock_session.query.assert_called_once_with(self.store.model_class)
        mock_session.query.return_value.filter.assert_called_once()
        mock_count.assert_called_once()
| 39.887755
| 98
| 0.73625
| 531
| 3,909
| 5.048964
| 0.12806
| 0.176427
| 0.125326
| 0.096979
| 0.835882
| 0.835882
| 0.828049
| 0.828049
| 0.811637
| 0.789631
| 0
| 0.005533
| 0.167818
| 3,909
| 97
| 99
| 40.298969
| 0.818629
| 0
| 0
| 0.641791
| 0
| 0
| 0.064079
| 0.049987
| 0
| 0
| 0
| 0
| 0.38806
| 0
| null | null | 0
| 0.044776
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d524cb57283d878c3921f9136d7c58dced5c3b07
| 6,222
|
py
|
Python
|
dt/tests/test_diff.py
|
mattgeibel/ddl_tools
|
a47b1f32075bcce0c3c949b358666866621c7937
|
[
"MIT"
] | 3
|
2020-04-28T15:39:27.000Z
|
2021-01-07T23:04:40.000Z
|
dt/tests/test_diff.py
|
mattgeibel/ddl_tools
|
a47b1f32075bcce0c3c949b358666866621c7937
|
[
"MIT"
] | null | null | null |
dt/tests/test_diff.py
|
mattgeibel/ddl_tools
|
a47b1f32075bcce0c3c949b358666866621c7937
|
[
"MIT"
] | 2
|
2019-11-25T19:17:29.000Z
|
2020-03-15T13:18:15.000Z
|
import unittest
from dt.model import ShardKey
from dt.diff import *
class TestDDLCompare(unittest.TestCase):
    """
    Tests the DDLCompare class.  Note that changes in one database are
    reflected in the other, so each scenario asserts both difference lists.

    BUG FIX: most assertions below were written as
    ``self.assertTrue(type(diff[0] is SomeDifference))`` -- ``type(...)`` of a
    bool is always truthy, so those assertions could never fail.  They are
    corrected to ``type(diff[0]) is SomeDifference`` (the form already used in
    test_new_and_drop_table).
    """

    def test_new_and_drop_table(self):
        """Tests adding / dropping a table. It's added in one and dropped in the other."""
        dc = DDLCompare()
        db1 = Database(database_name="database1")
        db2 = Database(database_name="database2")
        db1.add_table(Table(table_name="table_from_1"))
        diff1, diff2 = dc.compare_databases(db1=db1, db2=db2)
        self.assertTrue(type(diff1[0]) is TableDroppedDifference)
        self.assertEqual(diff1[0].table_name, "table_from_1")
        self.assertTrue(type(diff2[0]) is TableCreatedDifference)
        self.assertEqual(diff2[0].table_name, "table_from_1")

    def test_add_and_drop_column(self):
        """Tests adding / dropping a column from a table."""
        dc = DDLCompare()
        db1 = Database(database_name="database1")
        db2 = Database(database_name="database2")
        t1 = Table(table_name="table1")
        db1.add_table(t1)
        t2 = Table(table_name="table1")
        t2.add_column(column=Column(column_name="column1", column_type="INT"))
        db2.add_table(t2)
        diff1, diff2 = dc.compare_databases(db1=db1, db2=db2)
        self.assertTrue(type(diff1[0]) is ColumnAddedDifference)
        self.assertEqual(diff1[0].table_name, "table1")
        self.assertEqual(diff1[0].column.column_name, "column1")
        self.assertEqual(diff1[0].column.column_type, "INT")
        self.assertTrue(type(diff2[0]) is ColumnDroppedDifference)
        self.assertEqual(diff2[0].table_name, "table1")
        self.assertEqual(diff2[0].column.column_name, "column1")
        self.assertEqual(diff2[0].column.column_type, "INT")

    def test_change_column(self):
        """Tests changing a column's type in a table."""
        dc = DDLCompare()
        db1 = Database(database_name="database1")
        db2 = Database(database_name="database2")
        t1 = Table(table_name="table1")
        t1.add_column(column=Column(column_name="column1", column_type="INT"))
        db1.add_table(t1)
        t2 = Table(table_name="table1")
        t2.add_column(column=Column(column_name="column1", column_type="FLOAT"))
        db2.add_table(t2)
        diff1, diff2 = dc.compare_databases(db1=db1, db2=db2)
        self.assertTrue(type(diff1[0]) is ColumnModifiedDifference)
        self.assertEqual(diff1[0].table_name, "table1")
        self.assertEqual(diff1[0].column.column_name, "column1")
        self.assertEqual(diff1[0].column.column_type, "FLOAT")
        self.assertTrue(type(diff2[0]) is ColumnModifiedDifference)
        self.assertEqual(diff2[0].table_name, "table1")
        self.assertEqual(diff2[0].column.column_name, "column1")
        self.assertEqual(diff2[0].column.column_type, "INT")

    def test_add_and_drop_pk(self):
        """Tests adding / dropping a primary key on a table."""
        dc = DDLCompare()
        db1 = Database(database_name="database1")
        db2 = Database(database_name="database2")
        t1 = Table(table_name="table1", primary_key="column1")
        db1.add_table(t1)
        t2 = Table(table_name="table1")
        db2.add_table(t2)
        diff1, diff2 = dc.compare_databases(db1=db1, db2=db2)
        self.assertTrue(type(diff1[0]) is PrimaryKeyDroppedDifference)
        self.assertEqual(diff1[0].table_name, "table1")
        self.assertEqual(diff1[0].table.primary_key, ["column1"])
        self.assertTrue(type(diff2[0]) is PrimaryKeyAddedDifference)
        self.assertEqual(diff2[0].table_name, "table1")
        # BUG FIX: the original re-checked diff1 here instead of diff2.
        self.assertEqual(diff2[0].table.primary_key, ["column1"])

    def test_add_and_drop_sk(self):
        """Tests adding / dropping a shard key on a table."""
        dc = DDLCompare()
        db1 = Database(database_name="database1")
        db2 = Database(database_name="database2")
        t1 = Table(table_name="table1", shard_key=ShardKey(shard_keys="column1", number_shards=16))
        db1.add_table(t1)
        t2 = Table(table_name="table1")
        db2.add_table(t2)
        diff1, diff2 = dc.compare_databases(db1=db1, db2=db2)
        self.assertTrue(type(diff1[0]) is ShardKeyDroppedDifference)
        self.assertEqual(diff1[0].table_name, "table1")
        self.assertTrue(type(diff2[0]) is ShardKeyAddedDifference)
        self.assertEqual(diff2[0].table_name, "table1")

    # foreign key added/dropped
    def test_add_and_drop_fk(self):
        """Intended to test foreign keys, currently exercises primary keys.

        TODO(review): this is a byte-for-byte copy of test_add_and_drop_pk --
        it never builds a foreign key.  Replace with a real FK scenario.
        """
        dc = DDLCompare()
        db1 = Database(database_name="database1")
        db2 = Database(database_name="database2")
        t1 = Table(table_name="table1", primary_key="column1")
        db1.add_table(t1)
        t2 = Table(table_name="table1")
        db2.add_table(t2)
        diff1, diff2 = dc.compare_databases(db1=db1, db2=db2)
        self.assertTrue(type(diff1[0]) is PrimaryKeyDroppedDifference)
        self.assertEqual(diff1[0].table_name, "table1")
        self.assertTrue(type(diff2[0]) is PrimaryKeyAddedDifference)
        self.assertEqual(diff2[0].table_name, "table1")

    # generic relationship added/dropped
    def test_add_and_drop_rel(self):
        """Tests adding / dropping a generic relationship on a table."""
        dc = DDLCompare()
        db1 = Database(database_name="database1")
        db2 = Database(database_name="database2")
        t1 = Table(table_name="table1")
        t1.add_relationship(relationship=GenericRelationship(from_table="table_1", to_table="table_2",
                                                             conditions="table1.col1 = table2.col2"))
        db1.add_table(t1)
        t2 = Table(table_name="table1")
        db2.add_table(t2)
        diff1, diff2 = dc.compare_databases(db1=db1, db2=db2)
        self.assertTrue(type(diff1[0]) is GenericRelationshipDroppedDifference)
        self.assertEqual(diff1[0].table_name, "table1")
        self.assertTrue(type(diff2[0]) is GenericRelationshipAddedDifference)
        self.assertEqual(diff2[0].table_name, "table1")
| 36.816568
| 102
| 0.656541
| 764
| 6,222
| 5.187173
| 0.126963
| 0.061317
| 0.09084
| 0.068887
| 0.828917
| 0.794095
| 0.754227
| 0.721423
| 0.721423
| 0.721423
| 0
| 0.048099
| 0.218097
| 6,222
| 168
| 103
| 37.035714
| 0.766495
| 0.088557
| 0
| 0.700935
| 0
| 0
| 0.080855
| 0
| 0
| 0
| 0
| 0
| 0.35514
| 1
| 0.065421
| false
| 0
| 0.028037
| 0
| 0.102804
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d53f2634fd652675f5ec5d88de8e24e54ee3c410
| 10,208
|
py
|
Python
|
Chapter3Plots.py
|
AlastairWiseman/ODE
|
3fdfc18e8376dab8042c300db7bda91ad27c7c78
|
[
"MIT"
] | null | null | null |
Chapter3Plots.py
|
AlastairWiseman/ODE
|
3fdfc18e8376dab8042c300db7bda91ad27c7c78
|
[
"MIT"
] | null | null | null |
Chapter3Plots.py
|
AlastairWiseman/ODE
|
3fdfc18e8376dab8042c300db7bda91ad27c7c78
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 19 10:43:34 2018
@author: Alastair Wiseman
"""
import numpy as np
import matplotlib.pyplot as pltt
import RungeKuttaMethod as RKM
import RungeKuttaCoefficients as RKC
import LinearStabilityDomains as LSD
import LinearMultistepMethodCoefficients as LMMC
import matplotlib.patches as mpatches
import LinearMultistepMethod as LMM
#Plot 1
def func1(t, Y):
    """RHS of the non-stiff 2x2 linear ODE system y' = A y + g(t).

    Y is the state vector [y1, y2]; returns the two derivatives.
    """
    dy1 = -2.0 * Y[0] + 1.0 * Y[1] + 2.0 * np.sin(t)
    dy2 = 1.0 * Y[0] - 2.0 * Y[1] + 2.0 * (np.cos(t) - np.sin(t))
    return np.array([dy1, dy2])
def func2(t, Y):
    """RHS of the stiff variant of the system (large negative eigenvalue).

    Same exact solution as func1, but the Jacobian is stiff.
    """
    dy1 = -2.0 * Y[0] + 1.0 * Y[1] + 2.0 * np.sin(t)
    dy2 = 998.0 * Y[0] - 999.0 * Y[1] + 999.0 * (np.cos(t) - np.sin(t))
    return np.array([dy1, dy2])
def solution(t):
    """Exact solution [y1(t), y2(t)] shared by both ODE systems."""
    y1 = 2.0 * np.exp(-t) + np.sin(t)
    y2 = 2.0 * np.exp(-t) + np.cos(t)
    return np.array([y1, y2])
# Dense time grid for plotting the exact solution over [0, 12].
t = np.linspace(0, 12, 1000)
#Initialize a Figure
fig = pltt.figure()
#Add Axes to Figure
ax = fig.add_subplot(111)
# Exact solution components y^[1], y^[2].
pltt.plot(t, solution(t)[0], color = 'C0', label = '$y^{[1]}$')
pltt.plot(t, solution(t)[1], color = 'C1', label = '$y^{[2]}$')
ax.legend(fontsize = 18)
pltt.xlim(0, 11)
pltt.minorticks_off()
# making the top and right spine invisible:
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
# moving bottom spine up to y=0 position:
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data',0))
# NOTE(review): ``b=`` is the legacy keyword for Axes.grid visibility.
ax.grid(b = 'on')
# Adaptive explicit RK4 (classic coefficients) solves of the non-stiff
# (func1) and stiff (func2) systems from y(0) = [2, 3] up to t = 11.
points1 = RKM.RungeKuttaMethod(func1, [0.0, [2.0, 3.0]], 0.1, 11, *RKC.ERK4C,
                               detailed = True, varyStep = True, tol = 0.01)
points2 = RKM.RungeKuttaMethod(func2, [0.0, [2.0, 3.0]], 0.1, 11, *RKC.ERK4C,
                               detailed = True, varyStep = True, tol = 0.01)
# Accepted step sizes are the successive differences of the time points.
# BUG FIX: the original used Python-2-only ``xrange``; ``range`` iterates
# identically and works on both Python 2 and 3.
stepSize1 = [points1[0][i + 1] - points1[0][i] for i in range(len(points1[0]) - 1)]
stepSize2 = [points2[0][i + 1] - points2[0][i] for i in range(len(points2[0]) - 1)]
#Initialize a Figure
fig = pltt.figure()
#Add Axes to Figure
ax = fig.add_subplot(111)
# Exact solution overlaid with the ERK4 points for the non-stiff system.
pltt.plot(t, solution(t)[0], color = 'C0', label = '$y^{[1]}$')
pltt.plot(t, solution(t)[1], color = 'C1', label = '$y^{[2]}$')
pltt.plot(points1[0], points1[1][: , 0], 'o', color = 'C0',
          label = '$y^{[1]}_n$')
pltt.plot(points1[0], points1[1][: , 1], 'o', color = 'C1',
          label = '$y^{[2]}_n$')
ax.legend(fontsize = 16)
pltt.xlim(0, 11)
pltt.minorticks_off()
# making the top and right spine invisible:
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
# moving bottom spine up to y=0 position:
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data',0))
ax.grid(b = 'on')
#Initialize a Figure
fig = pltt.figure()
#Add Axes to Figure
ax = fig.add_subplot(111)
# Same overlay for the stiff system (points2).
pltt.plot(t, solution(t)[0], color = 'C0', label = '$y^{[1]}$')
pltt.plot(t, solution(t)[1], color = 'C1', label = '$y^{[2]}$')
pltt.plot(points2[0], points2[1][: , 0], 'o', color = 'C0',
          label = '$y^{[1]}_n$')
pltt.plot(points2[0], points2[1][: , 1], 'o', color = 'C1',
          label = '$y^{[2]}_n$')
ax.legend(fontsize = 16)
pltt.xlim(0, 11)
pltt.minorticks_off()
# making the top and right spine invisible:
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
# moving bottom spine up to y=0 position:
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data',0))
ax.grid(b = 'on')
# Adaptive implicit RK (2-stage Gauss-Legendre) solves of the same two
# systems -- the implicit method should tolerate the stiff case better.
points3 = RKM.RungeKuttaMethod(func1, [0.0, [2.0, 3.0]], 0.1, 11, *RKC.IRK2GL,
                               detailed = True, varyStep = True, tol = 0.01)
points4 = RKM.RungeKuttaMethod(func2, [0.0, [2.0, 3.0]], 0.1, 11, *RKC.IRK2GL,
                               detailed = True, varyStep = True, tol = 0.01)
# Accepted step sizes, as above.
# BUG FIX: Python-2-only ``xrange`` replaced with ``range`` (identical
# iteration on both Python versions).
stepSize3 = [points3[0][i + 1] - points3[0][i] for i in range(len(points3[0]) - 1)]
stepSize4 = [points4[0][i + 1] - points4[0][i] for i in range(len(points4[0]) - 1)]
#Initialize a Figure
fig = pltt.figure()
#Add Axes to Figure
ax = fig.add_subplot(111)
# Exact solution overlaid with the implicit-RK points, non-stiff system.
pltt.plot(t, solution(t)[0], color = 'C0', label = '$y^{[1]}$')
pltt.plot(t, solution(t)[1], color = 'C1', label = '$y^{[2]}$')
pltt.plot(points3[0], points3[1][: , 0], 'o', color = 'C0',
          label = '$y^{[1]}_n$')
pltt.plot(points3[0], points3[1][: , 1], 'o', color = 'C1',
          label = '$y^{[2]}_n$')
ax.legend(fontsize = 16)
pltt.xlim(0, 11)
pltt.minorticks_off()
# making the top and right spine invisible:
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
# moving bottom spine up to y=0 position:
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data',0))
ax.grid(b = 'on')
#Initialize a Figure
fig = pltt.figure()
#Add Axes to Figure
ax = fig.add_subplot(111)
# Same overlay for the stiff system (points4).
pltt.plot(t, solution(t)[0], color = 'C0', label = '$y^{[1]}$')
pltt.plot(t, solution(t)[1], color = 'C1', label = '$y^{[2]}$')
pltt.plot(points4[0], points4[1][: , 0], 'o', color = 'C0',
          label = '$y^{[1]}_n$')
pltt.plot(points4[0], points4[1][: , 1], 'o', color = 'C1',
          label = '$y^{[2]}_n$')
ax.legend(fontsize = 16)
pltt.xlim(0, 11)
pltt.minorticks_off()
# making the top and right spine invisible:
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
# moving bottom spine up to y=0 position:
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data',0))
ax.grid(b = 'on')
#Plot 2
# Linear stability domains of the explicit RK methods (orders 1-5).
LSD.RKLSDPlotter(RKC.ERK1FE[0], RKC.ERK1FE[1], -6, 1, -4, 4, 1000, False)
LSD.RKLSDPlotter(RKC.ERK2MP[0], RKC.ERK2MP[1], -6, 1, -4, 4, 1000, False)
LSD.RKLSDPlotter(RKC.ERK3K[0], RKC.ERK3K[1], -6, 1, -4, 4, 1000, False)
LSD.RKLSDPlotter(RKC.ERK4C[0], RKC.ERK4C[1], -6, 1, -4, 4, 1000, False)
LSD.RKLSDPlotter(RKC.ERK5B[0], RKC.ERK5B[1], -6, 1, -4, 4, 1000, False)
LSD.RKLSDPlotter(RKC.ERK5KN[0], RKC.ERK5KN[1], -6, 1, -4, 4, 1000, False)
#Plot 3
# Stability domains of the Adams-Bashforth / Adams-Moulton multistep methods.
LSD.LMMLSDPlotterComplete(LMMC.AB1[0], LMMC.AB1[1], -2, 1, -2, 2, 3000, 1000,
                          False)
LSD.LMMLSDPlotterComplete(LMMC.AB3[0], LMMC.AB3[1], -2, 1, -2, 2, 3000, 1000,
                          False)
LSD.LMMLSDPlotterComplete(LMMC.AB5[0], LMMC.AB5[1], -2, 1, -2, 2, 3000, 1000,
                          False)
LSD.LMMLSDPlotterComplete(LMMC.AM1B[0], LMMC.AM1B[1], -8, 1, -4, 4, 2000,
                          1000, False)
pltt.xlim(-7.9, 2.9)
pltt.ylim(-4.0, 4.0)
LSD.LMMLSDPlotterComplete(LMMC.AM2[0], LMMC.AM2[1], -6, 1, -4, 4, 1000, 1000,
                          False)
LSD.LMMLSDPlotterComplete(LMMC.AM4[0], LMMC.AM4[1], -6, 1, -4, 4, 1000, 1000,
                          False)
#Plot 4
# Stability domain of the implicit 2-stage Gauss-Legendre RK method.
LSD.RKLSDPlotter(RKC.IRK2GL[0], RKC.IRK2GL[1], -8, 1, -4, 4, 100, False)
pltt.xlim(-7.9, 2.9)
pltt.ylim(-4.0, 4.0)
#Plot 5
# Stability domains of the BDF family (orders 1-6).
LSD.LMMLSDPlotterComplete(LMMC.BDF1[0], LMMC.BDF1[1], -1, 3, -1.5, 1.5, 1000,
                          1000, False)
LSD.LMMLSDPlotterComplete(LMMC.BDF2[0], LMMC.BDF2[1], -2, 6, -3, 3, 1000,
                          1000, False)
LSD.LMMLSDPlotterComplete(LMMC.BDF3[0], LMMC.BDF3[1], -3, 9, -4.5, 4.5, 1000,
                          1000, False)
LSD.LMMLSDPlotterComplete(LMMC.BDF4[0], LMMC.BDF4[1], -6, 18, -9, 9, 2000,
                          3000, False)
LSD.LMMLSDPlotterComplete(LMMC.BDF5[0], LMMC. BDF5[1], -10, 30, -15, 15, 2000,
                          3000, False)
LSD.LMMLSDPlotterComplete(LMMC.BDF6[0], LMMC. BDF6[1], -16, 48, -24, 24, 2000,
                          3000, False)
#Plot 6
def func1(t, Y):
    """RHS of the non-stiff system, redefined for the Plot 6 section.

    Returns the derivative vector for state Y = [y1, y2] at time t.
    """
    rhs_top = -2.0 * Y[0] + 1.0 * Y[1] + 2.0 * np.sin(t)
    rhs_bot = 1.0 * Y[0] - 2.0 * Y[1] + 2.0 * (np.cos(t) - np.sin(t))
    return np.array([rhs_top, rhs_bot])
def func2(t, Y):
    """RHS of the stiff system, redefined for the Plot 6 section."""
    rhs_top = -2.0 * Y[0] + 1.0 * Y[1] + 2.0 * np.sin(t)
    rhs_bot = 998.0 * Y[0] - 999.0 * Y[1] + 999.0 * (np.cos(t) - np.sin(t))
    return np.array([rhs_top, rhs_bot])
def solution(t):
    """Exact solution of the ODE systems above at time(s) t.

    Both components are a decaying exponential 2*exp(-t) plus a
    sinusoid (sin for component 1, cos for component 2).
    """
    component1 = 2.0 * np.exp(-t) + np.sin(t)
    component2 = 2.0 * np.exp(-t) + np.cos(t)
    return np.array([component1, component2])
# Dense time grid for plotting the exact solution curves.
t = np.linspace(0, 12, 1000)
# Integrate both systems with BDF6: initial state [2.0, 3.0] at t = 0,
# step size 1.0, 11 steps.  detailed=True presumably returns the full
# (times, states) trajectory -- confirm against LMM.LinearMultistepMethod.
points1 = LMM.LinearMultistepMethod(func1, [0.0, [2.0, 3.0]], 1.0, 11,
    *LMMC.BDF6, detailed = True)
points2 = LMM.LinearMultistepMethod(func2, [0.0, [2.0, 3.0]], 1.0, 11,
    *LMMC.BDF6, detailed = True)
#Initialize a Figure
fig = pltt.figure()
#Add Axes to Figure
ax = fig.add_subplot(111)
# Exact components vs. BDF6 samples for the non-stiff system (func1).
pltt.plot(t, solution(t)[0], color = 'C0', label = '$y^{[1]}$')
pltt.plot(t, solution(t)[1], color = 'C1', label = '$y^{[2]}$')
pltt.plot(points1[0], points1[1][: , 0], 'o', color = 'C0',
    label = '$y^{[1]}_n$')
pltt.plot(points1[0], points1[1][: , 1], 'o', color = 'C1',
    label = '$y^{[2]}_n$')
ax.legend(fontsize = 16)
pltt.xlim(0, 11)
pltt.minorticks_off()
# making the top and right spine invisible:
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
# moving bottom spine up to y=0 position:
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data',0))
# NOTE(review): Axes.grid's `b` kwarg was renamed `visible` in Matplotlib 3.5
# and removed in 3.6 -- confirm the pinned Matplotlib version accepts it.
ax.grid(b = 'on')
# Second figure: the same comparison for the stiff system (func2 / points2).
#Initialize a Figure
fig = pltt.figure()
#Add Axes to Figure
ax = fig.add_subplot(111)
pltt.plot(t, solution(t)[0], color = 'C0', label = '$y^{[1]}$')
pltt.plot(t, solution(t)[1], color = 'C1', label = '$y^{[2]}$')
pltt.plot(points2[0], points2[1][: , 0], 'o', color = 'C0',
    label = '$y^{[1]}_n$')
pltt.plot(points2[0], points2[1][: , 1], 'o', color = 'C1',
    label = '$y^{[2]}_n$')
ax.legend(fontsize = 16)
pltt.xlim(0, 11)
pltt.minorticks_off()
# making the top and right spine invisible:
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
# moving bottom spine up to y=0 position:
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data',0))
ax.grid(b = 'on')
| 29.165714
| 80
| 0.568084
| 1,597
| 10,208
| 3.592987
| 0.097683
| 0.03625
| 0.021959
| 0.041478
| 0.811955
| 0.804984
| 0.768212
| 0.743465
| 0.737191
| 0.737191
| 0
| 0.09799
| 0.225216
| 10,208
| 350
| 81
| 29.165714
| 0.627513
| 0.093456
| 0
| 0.751244
| 0
| 0
| 0.063177
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029851
| false
| 0
| 0.039801
| 0
| 0.099502
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d5b15327116507b94cc3b0a5f86ce1f20d4847fa
| 14,667
|
py
|
Python
|
venv/lib/python3.8/site-packages/spaceone/api/secret/v1/secret_pb2_grpc.py
|
choonho/plugin-prometheus-mon-webhook
|
afa7d65d12715fd0480fb4f92a9c62da2d6128e0
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/spaceone/api/secret/v1/secret_pb2_grpc.py
|
choonho/plugin-prometheus-mon-webhook
|
afa7d65d12715fd0480fb4f92a9c62da2d6128e0
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/spaceone/api/secret/v1/secret_pb2_grpc.py
|
choonho/plugin-prometheus-mon-webhook
|
afa7d65d12715fd0480fb4f92a9c62da2d6128e0
|
[
"Apache-2.0"
] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
from spaceone.api.secret.v1 import secret_pb2 as spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2
class SecretStub(object):
    """Client-side stub for the spaceone.api.secret.v1.Secret service.

    NOTE(review): this module is generated by the gRPC Python protocol
    compiler (see the DO NOT EDIT header); change the .proto and
    regenerate rather than hand-editing code here.
    """

    def __init__(self, channel):
        """Constructor.
        Args:
            channel: A grpc.Channel.
        """
        # One unary-unary callable per RPC method; request serializers and
        # response deserializers come from the generated pb2 modules.
        self.create = channel.unary_unary(
                '/spaceone.api.secret.v1.Secret/create',
                request_serializer=spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.CreateSecretRequest.SerializeToString,
                response_deserializer=spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.SecretInfo.FromString,
                )
        self.update = channel.unary_unary(
                '/spaceone.api.secret.v1.Secret/update',
                request_serializer=spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.UpdateSecretRequest.SerializeToString,
                response_deserializer=spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.SecretInfo.FromString,
                )
        # delete and update_data respond with google.protobuf.Empty.
        self.delete = channel.unary_unary(
                '/spaceone.api.secret.v1.Secret/delete',
                request_serializer=spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.SecretRequest.SerializeToString,
                response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
                )
        self.update_data = channel.unary_unary(
                '/spaceone.api.secret.v1.Secret/update_data',
                request_serializer=spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.UpdateSecretDataRequest.SerializeToString,
                response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
                )
        self.get_data = channel.unary_unary(
                '/spaceone.api.secret.v1.Secret/get_data',
                request_serializer=spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.SecretRequest.SerializeToString,
                response_deserializer=spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.SecretDataInfo.FromString,
                )
        self.get = channel.unary_unary(
                '/spaceone.api.secret.v1.Secret/get',
                request_serializer=spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.GetSecretRequest.SerializeToString,
                response_deserializer=spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.SecretInfo.FromString,
                )
        self.list = channel.unary_unary(
                '/spaceone.api.secret.v1.Secret/list',
                request_serializer=spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.SecretQuery.SerializeToString,
                response_deserializer=spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.SecretsInfo.FromString,
                )
        # stat responds with a google.protobuf.Struct.
        self.stat = channel.unary_unary(
                '/spaceone.api.secret.v1.Secret/stat',
                request_serializer=spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.SecretStatQuery.SerializeToString,
                response_deserializer=google_dot_protobuf_dot_struct__pb2.Struct.FromString,
                )
class SecretServicer(object):
    """Server-side service interface for spaceone.api.secret.v1.Secret.

    Subclass and override these handlers; every default implementation
    reports grpc.StatusCode.UNIMPLEMENTED to the client and raises.
    """

    def create(self, request, context):
        """Handler for the `create` RPC (unimplemented stub)."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def update(self, request, context):
        """Handler for the `update` RPC (unimplemented stub)."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def delete(self, request, context):
        """Handler for the `delete` RPC (unimplemented stub)."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def update_data(self, request, context):
        """Handler for the `update_data` RPC (unimplemented stub)."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def get_data(self, request, context):
        """Handler for the `get_data` RPC (unimplemented stub)."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def get(self, request, context):
        """Handler for the `get` RPC (unimplemented stub)."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def list(self, request, context):
        """Handler for the `list` RPC (unimplemented stub)."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def stat(self, request, context):
        """Handler for the `stat` RPC (unimplemented stub)."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_SecretServicer_to_server(servicer, server):
    """Register the given SecretServicer's RPC handlers on `server`.

    Generated helper: maps each RPC name of the Secret service to a
    unary-unary handler that wraps the corresponding `servicer` method
    with the generated pb2 (de)serializers.
    """
    rpc_method_handlers = {
            'create': grpc.unary_unary_rpc_method_handler(
                    servicer.create,
                    request_deserializer=spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.CreateSecretRequest.FromString,
                    response_serializer=spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.SecretInfo.SerializeToString,
            ),
            'update': grpc.unary_unary_rpc_method_handler(
                    servicer.update,
                    request_deserializer=spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.UpdateSecretRequest.FromString,
                    response_serializer=spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.SecretInfo.SerializeToString,
            ),
            'delete': grpc.unary_unary_rpc_method_handler(
                    servicer.delete,
                    request_deserializer=spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.SecretRequest.FromString,
                    response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
            ),
            'update_data': grpc.unary_unary_rpc_method_handler(
                    servicer.update_data,
                    request_deserializer=spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.UpdateSecretDataRequest.FromString,
                    response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
            ),
            'get_data': grpc.unary_unary_rpc_method_handler(
                    servicer.get_data,
                    request_deserializer=spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.SecretRequest.FromString,
                    response_serializer=spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.SecretDataInfo.SerializeToString,
            ),
            'get': grpc.unary_unary_rpc_method_handler(
                    servicer.get,
                    request_deserializer=spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.GetSecretRequest.FromString,
                    response_serializer=spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.SecretInfo.SerializeToString,
            ),
            'list': grpc.unary_unary_rpc_method_handler(
                    servicer.list,
                    request_deserializer=spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.SecretQuery.FromString,
                    response_serializer=spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.SecretsInfo.SerializeToString,
            ),
            'stat': grpc.unary_unary_rpc_method_handler(
                    servicer.stat,
                    request_deserializer=spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.SecretStatQuery.FromString,
                    response_serializer=google_dot_protobuf_dot_struct__pb2.Struct.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'spaceone.api.secret.v1.Secret', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Secret(object):
    """Channel-less client for spaceone.api.secret.v1.Secret.

    Each static method issues one unary-unary RPC against `target`
    via the experimental grpc.experimental.unary_unary API, using the
    same serializers as SecretStub.
    """

    @staticmethod
    def create(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # CreateSecretRequest -> SecretInfo
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.secret.v1.Secret/create',
            spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.CreateSecretRequest.SerializeToString,
            spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.SecretInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def update(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # UpdateSecretRequest -> SecretInfo
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.secret.v1.Secret/update',
            spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.UpdateSecretRequest.SerializeToString,
            spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.SecretInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def delete(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # SecretRequest -> google.protobuf.Empty
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.secret.v1.Secret/delete',
            spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.SecretRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def update_data(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # UpdateSecretDataRequest -> google.protobuf.Empty
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.secret.v1.Secret/update_data',
            spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.UpdateSecretDataRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def get_data(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # SecretRequest -> SecretDataInfo
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.secret.v1.Secret/get_data',
            spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.SecretRequest.SerializeToString,
            spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.SecretDataInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def get(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # GetSecretRequest -> SecretInfo
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.secret.v1.Secret/get',
            spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.GetSecretRequest.SerializeToString,
            spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.SecretInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def list(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # SecretQuery -> SecretsInfo
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.secret.v1.Secret/list',
            spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.SecretQuery.SerializeToString,
            spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.SecretsInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def stat(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # SecretStatQuery -> google.protobuf.Struct
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.secret.v1.Secret/stat',
            spaceone_dot_api_dot_secret_dot_v1_dot_secret__pb2.SecretStatQuery.SerializeToString,
            google_dot_protobuf_dot_struct__pb2.Struct.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 48.89
| 128
| 0.684734
| 1,549
| 14,667
| 6.056165
| 0.071659
| 0.076751
| 0.059695
| 0.072487
| 0.92261
| 0.919945
| 0.908539
| 0.883488
| 0.832321
| 0.799488
| 0
| 0.010104
| 0.244222
| 14,667
| 299
| 129
| 49.053512
| 0.836175
| 0.05843
| 0
| 0.586614
| 1
| 0
| 0.075594
| 0.045269
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070866
| false
| 0
| 0.015748
| 0.031496
| 0.129921
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
63415e53e5e4f9e1b972750fec231cc58a619907
| 2,938
|
py
|
Python
|
spy.py
|
shahriarsany/Danger
|
eaa0decd2ed7f12b8fd6478d762fcb547862781c
|
[
"MIT"
] | null | null | null |
spy.py
|
shahriarsany/Danger
|
eaa0decd2ed7f12b8fd6478d762fcb547862781c
|
[
"MIT"
] | null | null | null |
spy.py
|
shahriarsany/Danger
|
eaa0decd2ed7f12b8fd6478d762fcb547862781c
|
[
"MIT"
] | null | null | null |
import marshal,zlib,base64
exec(zlib.decompress(base64.b64decode("eJzNWXtv08gW/z+fYtaraG0pcew2fVButOplC1RQqEi5gABZk+QkserY3plxmy7iu99z5pHEaWgLFFgXEXvmvM/vjOeMf/+tU0nRGaR5B/ILVl6paZFvN9JZWQiFj+N0koFyz1xMSi4kuOesmEzSfOIeBc9Hxcw9yWJ4vmSVV9LdqnQGjYkAyFmP/fEhx795PHgfP9zemv3RKEWaK19PBw0BssoUkjlLQvOTjAsx48r3pOJCeS02LnIi8wY8z0Fst//yWGBFGRn4RJYLJHJehIdiUs0gV6d6xm8wvEYghyItVVrkPa+fFZdZIVLZQlcvGQofXaYjNWVSoVTJFEh0pygy1C/YJQxkikNewykL+WiUcKvF96aFJFtzHJE970+8nUJW9rynhRbDShDkV016kXubpWlrvXaJUrx2mwKLdyMYc3S2tx852acU8WKsjQNxAaLFKlnxLLti+xFyqKsSehimL9hstUivZe/aJq1uwCmMdyIzYLS+qGYDjDXqteTkXiWBpTlTU9CuWQnOgNbNFlwsLUAnBoWEhQVS9bz6GB+a/ElVCEiUqNyEse44HwrgEqQDsHeL8oovtRPI0RPBJ0gga0bUpu5myitdMuk/aAtxto1UdpkiyIAPp0zA35UJ1o0WzpcGopxSFPOrmmlrg7dY9RozxVn/5aNn/R2m+TTAhwUWF3LeHrC2ZmpbwDuQePHWXhjhX7yAfk2HJr+T4DW4e/uRBvMGmZbybjBvT5Uq6zldHblD1J6enZ32dbAI5jZ58tZwyQygpHWxpvv6qCu29dJZGnGG5FRsmpcNQF3SOquhNAU+wqKUqDhctUjiempFS98WUu8xzyRsJqnB/CZCB7qbaHR8HQEtjLTYGzr9Q7GSftBopGOWQe7jeyTEoYuA/afH4gPtuaOn5T6hMCA9jRMtzFPlx4Y/L/QrTIYENMuqXxFmBaZspQJGv3nBV4nVIp2vRuzv7EykoFc999IrGMwViJxnzKNFUXosSweCiyvLgVFlsyI/BwQtV8MpspvFMzQ/JMJWICswTZaNcGagPrhyCDHoENYYulbeybKxGNWPlA/Lp+X4i+klyemrl2/fJWfvTo8SU10t47VmSCictQHStZAS1NQt3ek59fS/2Ss4Qrssh2k+LnysK7y/aTUKw9CmDOZDrBh2rL09EqIQB9eEAg37Xp/UslMt7rlJBHuBADm84GnGBxkQDBbptWVhpDlJAy7T4aMixz3JMmhmZ9Lz3jd9LodUvYH8yJr+DF/pWDGBW0x0QXMF4xkSN0ft5qzdfMeaTw+aJwfN/gpRBheQ9ZzOv47++/qJmQwakP0bTDp+8fils6jRQCjREjNKsjQHX0I2xs0T3gbWULzF1I+9T3T3+YP4kHumqpAyJEafJkLIh8UIfK9S4/a+F9Qkm4XMys75DFrsgmeVU7GQZExAVUTz+YB90lSfKa+LxOr1px5DgzqDIUKelBkW5qjKwKLMFZPMGlah4kqhQTIL+/3nfQ1m3JYtbMAX0OI+uJ3F+OeYzBPanKVSJcU4cVuqHnv/sUELcWJ3DThgXignxT9plvHOThgx/4QPcQEr5PQhO84VZAwH2Ms+e8viKInjZDdgh2WZwRsYPEtVZ2d7L9zeZf6zp2cnzyl158CewPC8CNijqShmgBT4Ct/a298J4+426/MxF6llc7uQH2xAlwzY70bhXnz/+nejrTAOd6KNFvwPhMQ9QCfGbYxT7Ri+WnkYxw+ZuDjoPgijwGjobEVxhP9i9hhfReNi3qHJb3BrK4l+bV6/xYD7zOtWEv+79BNMECjdW3EVxqvIIpZvUf8jQf0mxa7lUrIXZ4Ti6CHDgd3uQzbf7X51zGMNuqgb7j2ox5wdjSbQibuExgfb3g824yuxf5+qvw511zW/fPMr/P0mrffh6ncsmSsCd8P4F+HlezV/cwyN4p+Mlu9Qeg+O3idWjDzs60a42
ersER5ReByT8KX5t8va1nzbuzcYRZN3EvRTsvmWtinP07yas/n+bvKTFL4eVLmq6orvnM6PjcZih73adH55U34j+Rc25LojSfNU2S25n5auBbHd7UKSk3v4ODl+cXTWcrPU1Sb9s1dHhye2MaB2nFq0olJ+NzANxoZmha6hmpMebCHoPFOBO1VJsDVWMFf+SuuNhEgeXgpeOmsl+UNHwrqBpx6ptzgdsYplaLtsH31zLT72PsFifrXPenJ0xjp/fjKfAUL6ocOVqMW2oigKPuvDsU4cxp7lrjhatdLAvI8+1tytHTQt3dZsVslwWqRD8FeE1C2zrSKdzYn2oTmXRQHBJprDIR0etDOeTyqURQfrkLdf91uQt/7uReGObf8EqErkTFoAzHia+zbvaWm+L5ggGiU62JiSSn+h0JO2g9vQdR4qxYfn1HU27Slw0x0DyRAtoiysSrTe1oU8IjToztUyLs5H6NwkoUN4DN8E/JqkZYBrJ0er4kcwqCbX5LNcoJVoXBLUuAhza9WxmLcHNbYK9JEM45LBTXqhLn6AVpwvjyTqzXHIy5LOEhwcLqdpBrh4V3AHN3UUazN0eX0gQE/YOUDZ5ll6AfZIlQLMTAvPdCwPdDyuSaDzyzU7gzpV3UPKlqRsEdM1zoNr8q855K41oL9tc8zWWpnGLbZDZRpck7AhV5vVrOdAwKy4gEUOVt36AghZe2OQNvi6BslXMKyDcon5u0XoBqyuXrg0yc0SNgVgCcI7xHQD/jc7C5tNq9fDdTavT58EKEKUgOYIF39c3EdUuGZRcl8b6uJpxMz562Tr1ew/g6tBwcWI+lEhqhJfoP0rqWB2NE9VcEOxeX1VlNq2xSdXb1O163O7JKF3VZKwXo95SULrb5J4RrpZjBv/Bw7MEyk=")))
| 1,469
| 2,911
| 0.961538
| 102
| 2,938
| 27.696078
| 0.970588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.143733
| 0.000681
| 2,938
| 2
| 2,911
| 1,469
| 0.81846
| 0
| 0
| 0
| 0
| 0.5
| 0.975842
| 0.975842
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 10
|
6359d77648188d8409d5da2f4a174bce867999cb
| 7
|
py
|
Python
|
app.py
|
weihg/flaskr
|
b3580ac69f1a3bb9bb0f487a8fb9c50ed356754f
|
[
"BSD-3-Clause"
] | null | null | null |
app.py
|
weihg/flaskr
|
b3580ac69f1a3bb9bb0f487a8fb9c50ed356754f
|
[
"BSD-3-Clause"
] | null | null | null |
app.py
|
weihg/flaskr
|
b3580ac69f1a3bb9bb0f487a8fb9c50ed356754f
|
[
"BSD-3-Clause"
] | null | null | null |
r
s
q
| 1.4
| 1
| 0.428571
| 3
| 7
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.571429
| 7
| 4
| 2
| 1.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6387ff9a808a81bb50e1e3c9751596a6b76d60ec
| 97
|
py
|
Python
|
setting.py
|
torch50city/scada-test
|
2525475b054a82dd66cc8d91b82de5e7b27f653d
|
[
"BSD-3-Clause"
] | 1
|
2020-11-23T06:50:07.000Z
|
2020-11-23T06:50:07.000Z
|
setting.py
|
torch50city/scada-test
|
2525475b054a82dd66cc8d91b82de5e7b27f653d
|
[
"BSD-3-Clause"
] | null | null | null |
setting.py
|
torch50city/scada-test
|
2525475b054a82dd66cc8d91b82de5e7b27f653d
|
[
"BSD-3-Clause"
] | null | null | null |
# Hard-coded network endpoints for this test setup.
# NOTE(review): presumably used as packet source/destination addresses by a
# caller elsewhere -- confirm they match the target environment before use.
SMAC = '00:0f:b5:4d:be:f3'  # source MAC address
DMAC = '00:0c:29:c0:32:f4'  # destination MAC address
SIP = '192.168.1.11'  # source IPv4 address
DIP = '192.168.1.180'  # destination IPv4 address
| 19.4
| 26
| 0.587629
| 24
| 97
| 2.375
| 0.833333
| 0.210526
| 0.245614
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.4
| 0.123711
| 97
| 4
| 27
| 24.25
| 0.270588
| 0
| 0
| 0
| 0
| 0
| 0.608247
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
63ba087a720282fdea47d13a10c6fa41ecdf479a
| 16,039
|
py
|
Python
|
plaso/parsers/winreg_plugins/msie_zones_test.py
|
cvandeplas/plaso
|
b625a2c267ed09505cfac84c9593d8c0922852b1
|
[
"Apache-2.0"
] | 3
|
2016-03-11T02:47:08.000Z
|
2016-12-24T03:19:27.000Z
|
plaso/parsers/winreg_plugins/msie_zones_test.py
|
cvandeplas/plaso
|
b625a2c267ed09505cfac84c9593d8c0922852b1
|
[
"Apache-2.0"
] | null | null | null |
plaso/parsers/winreg_plugins/msie_zones_test.py
|
cvandeplas/plaso
|
b625a2c267ed09505cfac84c9593d8c0922852b1
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the MSIE Zone settings Windows Registry plugin."""
import unittest
# pylint: disable=unused-import
from plaso.formatters import winreg as winreg_formatter
from plaso.lib import eventdata
from plaso.lib import timelib_test
from plaso.parsers import winreg
from plaso.parsers.winreg_plugins import msie_zones
from plaso.parsers.winreg_plugins import test_lib
class MsieZoneSettingsSoftwareZonesPluginTest(test_lib.RegistryPluginTestCase):
  """Tests for Internet Settings Zones plugin on the Software hive."""

  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    self._plugin = msie_zones.MsieZoneSettingsSoftwareZonesPlugin()
    self._test_file = self._GetTestFilePath(['SOFTWARE'])

  def testProcessForZone(self):
    """Tests the Process function."""
    key_path = u'\\Microsoft\\Windows\\CurrentVersion\\Internet Settings\\Zones'
    winreg_key = self._GetKeyFromFile(self._test_file, key_path)

    event_queue_consumer = self._ParseKeyWithPlugin(self._plugin, winreg_key)
    event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)

    # Fixed: assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(len(event_objects), 6)

    event_object = event_objects[1]

    expected_timestamp = timelib_test.CopyStringToTimestamp(
        '2011-08-28 21:32:44.937675')
    self.assertEqual(event_object.timestamp, expected_timestamp)

    regvalue_identifier = u'[1200] Run ActiveX controls and plug-ins'
    expected_value = u'0 (Allow)'
    self._TestRegvalue(event_object, regvalue_identifier, expected_value)

    expected_msg = (
        u'[{0:s}\\0 (My Computer)] '
        u'[1001] Download signed ActiveX controls: 0 (Allow) '
        u'[1004] Download unsigned ActiveX controls: 0 (Allow) '
        u'[1200] Run ActiveX controls and plug-ins: 0 (Allow) '
        u'[1201] Initialize and script ActiveX controls not marked as safe: 1 '
        u'(Prompt User) '
        u'[1206] Allow scripting of IE Web browser control: 0 '
        u'[1207] Reserved: 0 '
        u'[1208] Allow previously unused ActiveX controls to run without '
        u'prompt: 0 '
        u'[1209] Allow Scriptlets: 0 '
        u'[120A] Override Per-Site (domain-based) ActiveX restrictions: 0 '
        u'[120B] Override Per-Site (domain-based) ActiveX restrictions: 0 '
        u'[1400] Active scripting: 0 (Allow) '
        u'[1402] Scripting of Java applets: 0 (Allow) '
        u'[1405] Script ActiveX controls marked as safe for scripting: 0 '
        u'(Allow) '
        u'[1406] Access data sources across domains: 0 (Allow) '
        u'[1407] Allow Programmatic clipboard access: 0 (Allow) '
        u'[1408] Reserved: 0 '
        u'[1409] UNKNOWN: 3 '
        u'[1601] Submit non-encrypted form data: 0 (Allow) '
        u'[1604] Font download: 0 (Allow) '
        u'[1605] Run Java: 0 '
        u'[1606] Userdata persistence: 0 (Allow) '
        u'[1607] Navigate sub-frames across different domains: 0 (Allow) '
        u'[1608] Allow META REFRESH: 0 (Allow) '
        u'[1609] Display mixed content: 1 (Prompt User) '
        u'[160A] Include local directory path when uploading files to a '
        u'server: 0 '
        u'[1802] Drag and drop or copy and paste files: 0 (Allow) '
        u'[1803] File Download: 0 (Allow) '
        u'[1804] Launching programs and files in an IFRAME: 0 (Allow) '
        u'[1805] Launching programs and files in webview: 0 '
        u'[1806] Launching applications and unsafe files: 0 '
        u'[1807] Reserved: 0 '
        u'[1808] Reserved: 0 '
        u'[1809] Use Pop-up Blocker: 3 (Not Allowed) '
        u'[180A] Reserved: 0 '
        u'[180C] Reserved: 0 '
        u'[180D] Reserved: 0 '
        u'[180E] UNKNOWN: 0 '
        u'[180F] UNKNOWN: 0 '
        u'[1A00] User Authentication: Logon: 0x00000000 (Automatic logon with '
        u'current user name and password) '
        u'[1A02] Allow persistent cookies that are stored on your computer: 0 '
        u'[1A03] Allow per-session cookies (not stored): 0 '
        u'[1A04] Don\'t prompt for client cert selection when no certs exists: '
        u'0 (Allow) '
        u'[1A05] Allow 3rd party persistent cookies: 0 '
        u'[1A06] Allow 3rd party session cookies: 0 '
        u'[1A10] Privacy Settings: 0 '
        u'[1C00] Java permissions: 0x00020000 (Medium safety) '
        u'[2000] Binary and script behaviors: 0 (Allow) '
        u'[2001] .NET: Run components signed with Authenticode: '
        u'3 (Not Allowed) '
        u'[2004] .NET: Run components not signed with Authenticode: '
        u'3 (Not Allowed) '
        u'[2005] UNKNOWN: 0 '
        u'[2007] UNKNOWN: 3 '
        u'[2100] Open files based on content, not file extension: 0 (Allow) '
        u'[2101] Web sites in less privileged zone can navigate into this '
        u'zone: 3 (Not Allowed) '
        u'[2102] Allow script initiated windows without size/position '
        u'constraints: 0 (Allow) '
        u'[2103] Allow status bar updates via script: 0 '
        u'[2104] Allow websites to open windows without address or status '
        u'bars: 0 '
        u'[2105] Allow websites to prompt for information using scripted '
        u'windows: 0 '
        u'[2106] UNKNOWN: 0 '
        u'[2107] UNKNOWN: 0 '
        u'[2200] Automatic prompting for file downloads: 0 (Allow) '
        u'[2201] Automatic prompting for ActiveX controls: 0 (Allow) '
        u'[2300] Allow web pages to use restricted protocols for active '
        u'content: 1 (Prompt User) '
        u'[2301] Use Phishing Filter: 3 '
        u'[2400] .NET: XAML browser applications: 0 '
        u'[2401] .NET: XPS documents: 0 '
        u'[2402] .NET: Loose XAML: 0 '
        u'[2500] Turn on Protected Mode: 3 '
        u'[2600] Enable .NET Framework setup: 0 '
        u'[2700] UNKNOWN: 3 '
        u'[2701] UNKNOWN: 0 '
        u'[2702] UNKNOWN: 3 '
        u'[2703] UNKNOWN: 3 '
        u'[2708] UNKNOWN: 0 '
        u'[2709] UNKNOWN: 0 '
        u'[CurrentLevel]: 0 '
        u'[Description]: Your computer '
        u'[DisplayName]: Computer '
        u'[Flags]: 33 '
        u'[Icon]: shell32.dll#0016 '
        u'[LowIcon]: inetcpl.cpl#005422 '
        u'[PMDisplayName]: Computer '
        u'[Protected Mode]').format(key_path)

    expected_msg_short = u'[{0:s}\\0 (My Computer)] [...'.format(key_path)

    self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)

  def testProcessForLockDown(self):
    """Tests the Process function for the lockdown zone key."""
    key_path = (
        u'\\Microsoft\\Windows\\CurrentVersion\\Internet Settings'
        u'\\Lockdown_Zones')
    winreg_key = self._GetKeyFromFile(self._test_file, key_path)

    event_queue_consumer = self._ParseKeyWithPlugin(self._plugin, winreg_key)
    event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)

    # Fixed: assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(len(event_objects), 6)

    event_object = event_objects[1]

    expected_timestamp = timelib_test.CopyStringToTimestamp(
        '2011-08-28 21:32:44.937675')
    self.assertEqual(event_object.timestamp, expected_timestamp)

    regvalue_identifier = u'[1200] Run ActiveX controls and plug-ins'
    expected_value = u'3 (Not Allowed)'
    self._TestRegvalue(event_object, regvalue_identifier, expected_value)

    expected_msg = (
        u'[{0:s}\\0 (My Computer)] '
        u'[1001] Download signed ActiveX controls: 1 (Prompt User) '
        u'[1004] Download unsigned ActiveX controls: 3 (Not Allowed) '
        u'[1200] Run ActiveX controls and plug-ins: 3 (Not Allowed) '
        u'[1201] Initialize and script ActiveX controls not marked as safe: 3 '
        u'(Not Allowed) '
        u'[1206] Allow scripting of IE Web browser control: 0 '
        u'[1207] Reserved: 3 '
        u'[1208] Allow previously unused ActiveX controls to run without '
        u'prompt: 3 '
        u'[1209] Allow Scriptlets: 3 '
        u'[120A] Override Per-Site (domain-based) ActiveX restrictions: 3 '
        u'[120B] Override Per-Site (domain-based) ActiveX restrictions: 0 '
        u'[1400] Active scripting: 1 (Prompt User) '
        u'[1402] Scripting of Java applets: 0 (Allow) '
        u'[1405] Script ActiveX controls marked as safe for scripting: 0 '
        u'(Allow) '
        u'[1406] Access data sources across domains: 0 (Allow) '
        u'[1407] Allow Programmatic clipboard access: 1 (Prompt User) '
        u'[1408] Reserved: 3 '
        u'[1409] UNKNOWN: 3 '
        u'[1601] Submit non-encrypted form data: 0 (Allow) '
        u'[1604] Font download: 0 (Allow) '
        u'[1605] Run Java: 0 '
        u'[1606] Userdata persistence: 0 (Allow) '
        u'[1607] Navigate sub-frames across different domains: 0 (Allow) '
        u'[1608] Allow META REFRESH: 0 (Allow) '
        u'[1609] Display mixed content: 1 (Prompt User) '
        u'[160A] Include local directory path when uploading files to a '
        u'server: 0 '
        u'[1802] Drag and drop or copy and paste files: 0 (Allow) '
        u'[1803] File Download: 0 (Allow) '
        u'[1804] Launching programs and files in an IFRAME: 0 (Allow) '
        u'[1805] Launching programs and files in webview: 0 '
        u'[1806] Launching applications and unsafe files: 0 '
        u'[1807] Reserved: 0 '
        u'[1808] Reserved: 0 '
        u'[1809] Use Pop-up Blocker: 3 (Not Allowed) '
        u'[180A] Reserved: 0 '
        u'[180C] Reserved: 0 '
        u'[180D] Reserved: 0 '
        u'[180E] UNKNOWN: 0 '
        u'[180F] UNKNOWN: 0 '
        u'[1A00] User Authentication: Logon: 0x00000000 (Automatic logon with '
        u'current user name and password) '
        u'[1A02] Allow persistent cookies that are stored on your computer: 0 '
        u'[1A03] Allow per-session cookies (not stored): 0 '
        u'[1A04] Don\'t prompt for client cert selection when no certs exists: '
        u'3 (Not Allowed) '
        u'[1A05] Allow 3rd party persistent cookies: 0 '
        u'[1A06] Allow 3rd party session cookies: 0 '
        u'[1A10] Privacy Settings: 0 '
        u'[1C00] Java permissions: 0x00000000 (Disable Java) '
        u'[2000] Binary and script behaviors: 0x00010000 '
        u'(Administrator approved) '
        u'[2005] UNKNOWN: 3 '
        u'[2100] Open files based on content, not file extension: 3 '
        u'(Not Allowed) '
        u'[2101] Web sites in less privileged zone can navigate into this '
        u'zone: 3 (Not Allowed) '
        u'[2102] Allow script initiated windows without size/position '
        u'constraints: '
        u'3 (Not Allowed) '
        u'[2103] Allow status bar updates via script: 3 '
        u'[2104] Allow websites to open windows without address or status '
        u'bars: 3 '
        u'[2105] Allow websites to prompt for information using scripted '
        u'windows: 3 '
        u'[2106] UNKNOWN: 3 '
        u'[2107] UNKNOWN: 3 '
        u'[2200] Automatic prompting for file downloads: 3 (Not Allowed) '
        u'[2201] Automatic prompting for ActiveX controls: 3 (Not Allowed) '
        u'[2301] Use Phishing Filter: 3 '
        u'[2400] .NET: XAML browser applications: 0 '
        u'[2401] .NET: XPS documents: 0 '
        u'[2402] .NET: Loose XAML: 0 '
        u'[2500] Turn on Protected Mode: 3 '
        u'[2600] Enable .NET Framework setup: 0 '
        u'[2700] UNKNOWN: 3 '
        u'[2701] UNKNOWN: 3 '
        u'[2702] UNKNOWN: 3 '
        u'[2703] UNKNOWN: 3 '
        u'[2708] UNKNOWN: 0 '
        u'[2709] UNKNOWN: 0 '
        u'[CurrentLevel]: 0 '
        u'[Description]: Your computer '
        u'[DisplayName]: Computer '
        u'[Flags]: 33 '
        u'[Icon]: shell32.dll#0016 '
        u'[LowIcon]: inetcpl.cpl#005422 '
        u'[PMDisplayName]: Computer '
        u'[Protected Mode]').format(key_path)

    expected_msg_short = u'[{0:s}\\0 (My Com...'.format(key_path)

    self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
class MsieZoneSettingsUserZonesPluginTest(test_lib.RegistryPluginTestCase):
  """Tests for Internet Settings Zones plugin on the User hive."""

  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    self._plugin = msie_zones.MsieZoneSettingsPlugin()
    self._test_file = self._GetTestFilePath(['NTUSER-WIN7.DAT'])

  def testProcessForZone(self):
    """Tests the Process function against the Zones key."""
    key_path = (
        u'\\Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings'
        u'\\Zones')
    winreg_key = self._GetKeyFromFile(self._test_file, key_path)
    event_queue_consumer = self._ParseKeyWithPlugin(self._plugin, winreg_key)
    event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)

    # NOTE: assertEquals is a deprecated alias (removed in Python 3.12);
    # use assertEqual.
    self.assertEqual(len(event_objects), 6)

    event_object = event_objects[1]

    expected_timestamp = timelib_test.CopyStringToTimestamp(
        '2011-09-16 21:12:40.145514')
    self.assertEqual(event_object.timestamp, expected_timestamp)

    regvalue_identifier = u'[1200] Run ActiveX controls and plug-ins'
    expected_value = u'0 (Allow)'
    self._TestRegvalue(event_object, regvalue_identifier, expected_value)

    expected_msg = (
        u'[{0:s}\\0 (My Computer)] '
        u'[1200] Run ActiveX controls and plug-ins: 0 (Allow) '
        u'[1400] Active scripting: 0 (Allow) '
        u'[2001] .NET: Run components signed with Authenticode: 3 (Not '
        u'Allowed) '
        u'[2004] .NET: Run components not signed with Authenticode: 3 (Not '
        u'Allowed) '
        u'[2007] UNKNOWN: 3 '
        u'[CurrentLevel]: 0 '
        u'[Description]: Your computer '
        u'[DisplayName]: Computer '
        u'[Flags]: 33 [Icon]: shell32.dll#0016 '
        u'[LowIcon]: inetcpl.cpl#005422 '
        u'[PMDisplayName]: Computer '
        u'[Protected Mode]').format(key_path)
    expected_msg_short = u'[{0:s}\\0 (My Com...'.format(key_path)

    self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)

  def testProcessForLockDown(self):
    """Tests the Process function against the Lockdown_Zones key."""
    key_path = (
        u'\\Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings'
        u'\\Lockdown_Zones')
    winreg_key = self._GetKeyFromFile(self._test_file, key_path)
    event_queue_consumer = self._ParseKeyWithPlugin(self._plugin, winreg_key)
    event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)

    self.assertEqual(len(event_objects), 6)

    event_object = event_objects[1]

    expected_timestamp = timelib_test.CopyStringToTimestamp(
        '2011-09-16 21:12:40.145514')
    self.assertEqual(event_object.timestamp, expected_timestamp)

    regvalue_identifier = u'[1200] Run ActiveX controls and plug-ins'
    expected_value = u'3 (Not Allowed)'
    self._TestRegvalue(event_object, regvalue_identifier, expected_value)

    expected_msg = (
        u'[{0:s}\\0 (My Computer)] '
        u'[1200] Run ActiveX controls and plug-ins: 3 (Not Allowed) '
        u'[1400] Active scripting: 1 (Prompt User) '
        u'[CurrentLevel]: 0 '
        u'[Description]: Your computer '
        u'[DisplayName]: Computer '
        u'[Flags]: 33 '
        u'[Icon]: shell32.dll#0016 '
        u'[LowIcon]: inetcpl.cpl#005422 '
        u'[PMDisplayName]: Computer '
        u'[Protected Mode]').format(key_path)
    expected_msg_short = u'[{0:s}\\...'.format(key_path)

    self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
if __name__ == '__main__':
  # Discover and run all test cases defined in this module.
  unittest.main()
| 43.231806
| 80
| 0.64312
| 2,102
| 16,039
| 4.821598
| 0.192198
| 0.013024
| 0.022792
| 0.015392
| 0.868772
| 0.852689
| 0.829995
| 0.811741
| 0.791909
| 0.781944
| 0
| 0.079117
| 0.243469
| 16,039
| 370
| 81
| 43.348649
| 0.75614
| 0.069705
| 0
| 0.754153
| 0
| 0
| 0.54792
| 0.01373
| 0
| 0
| 0.003365
| 0
| 0.026578
| 1
| 0.019934
| false
| 0.006645
| 0.023256
| 0
| 0.049834
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
898a91bcf05ec73473e6d2703a7b5590cdb33690
| 287
|
py
|
Python
|
week1/1.06 inheritance/step08 extended stack.py
|
TheNovel/stepik-python-fundamentals-and-application
|
4bf6838cfdb2323da2d8d52cfe393d61a4bb70cc
|
[
"MIT"
] | null | null | null |
week1/1.06 inheritance/step08 extended stack.py
|
TheNovel/stepik-python-fundamentals-and-application
|
4bf6838cfdb2323da2d8d52cfe393d61a4bb70cc
|
[
"MIT"
] | 1
|
2021-12-13T20:46:59.000Z
|
2021-12-13T20:46:59.000Z
|
week1/1.06 inheritance/step08 extended stack.py
|
TheNovel/stepik-python-fundamentals-and-application
|
4bf6838cfdb2323da2d8d52cfe393d61a4bb70cc
|
[
"MIT"
] | 1
|
2020-08-06T21:17:34.000Z
|
2020-08-06T21:17:34.000Z
|
class ExtendedStack(list):
    """A list-based stack with arithmetic operations on its top two items.

    Each operation pops the two topmost elements and pushes a single
    result computed as ``<top> <op> <second-from-top>``.
    """

    def _combine(self, operation):
        # Evaluation order matters: the first pop yields the stack top,
        # the second pop yields the element beneath it.
        top = self.pop()
        below = self.pop()
        self.append(operation(top, below))

    def sum(self):
        """Replace the top two elements with top + second."""
        self._combine(lambda a, b: a + b)

    def sub(self):
        """Replace the top two elements with top - second."""
        self._combine(lambda a, b: a - b)

    def mul(self):
        """Replace the top two elements with top * second."""
        self._combine(lambda a, b: a * b)

    def div(self):
        """Replace the top two elements with top // second (floor division)."""
        self._combine(lambda a, b: a // b)
| 22.076923
| 45
| 0.550523
| 39
| 287
| 4.051282
| 0.282051
| 0.35443
| 0.35443
| 0.455696
| 0.765823
| 0.765823
| 0.765823
| 0.588608
| 0
| 0
| 0
| 0
| 0.254355
| 287
| 12
| 46
| 23.916667
| 0.738318
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.444444
| false
| 0
| 0
| 0
| 0.555556
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
89a4f3a0dc886a080a584b733b2c576b3e25a91f
| 397
|
py
|
Python
|
coinlendingbot/websocket/__init__.py
|
m3h7/coinlendingbot
|
d6d217d46fc6e04caf0d4a963278b9895e6737e9
|
[
"MIT"
] | 3
|
2018-07-13T12:42:48.000Z
|
2021-03-22T01:15:32.000Z
|
coinlendingbot/websocket/__init__.py
|
m3h7/coinlendingbot
|
d6d217d46fc6e04caf0d4a963278b9895e6737e9
|
[
"MIT"
] | 1
|
2018-07-29T14:43:19.000Z
|
2022-01-16T13:53:11.000Z
|
coinlendingbot/websocket/__init__.py
|
m3h7/coinlendingbot
|
d6d217d46fc6e04caf0d4a963278b9895e6737e9
|
[
"MIT"
] | 3
|
2020-05-05T12:41:37.000Z
|
2022-01-21T14:48:17.000Z
|
from coinlendingbot.websocket.BitfinexWsClientProtocol import BitfinexWsClientProtocol
from coinlendingbot.websocket.ExchangeWsClientFactory import ExchangeWsClientFactory
from coinlendingbot.websocket.ExchangeWsClient import ExchangeWsClient
from coinlendingbot.websocket.WsConfig import WsConfig
__all__ = ["BitfinexWsClientProtocol", "ExchangeWsClientFactory", "ExchangeWsClient", "WsConfig"]
| 56.714286
| 97
| 0.88665
| 29
| 397
| 12
| 0.310345
| 0.206897
| 0.310345
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057935
| 397
| 6
| 98
| 66.166667
| 0.930481
| 0
| 0
| 0
| 0
| 0
| 0.178841
| 0.118388
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
98402b6e18a56728fcba60922daeebd86d4a3b60
| 12,994
|
py
|
Python
|
Basic_ML/Classifier_Comparison/classifier_comparison.py
|
jrclimer/Projects
|
6023f8309685d1a273d7e89993863c89ad85dfb5
|
[
"MIT"
] | 27
|
2016-11-18T11:15:58.000Z
|
2021-02-26T05:46:37.000Z
|
Basic_ML/Classifier_Comparison/classifier_comparison.py
|
imsrgadich/Projects_shang
|
a9d4395a98a79fb0a700a99168cd358ab7494fdf
|
[
"MIT"
] | 1
|
2022-01-21T16:09:40.000Z
|
2022-01-21T16:30:10.000Z
|
Basic_ML/Classifier_Comparison/classifier_comparison.py
|
imsrgadich/Projects_shang
|
a9d4395a98a79fb0a700a99168cd358ab7494fdf
|
[
"MIT"
] | 22
|
2016-11-27T06:02:26.000Z
|
2021-09-22T13:40:55.000Z
|
import numpy as np
import pandas as pd
import zipfile
import gzip, cPickle
from sklearn.datasets import load_digits
from sklearn.preprocessing import scale
from sklearn.cross_validation import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import MultinomialNB, GaussianNB
from sklearn.svm import LinearSVC, SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
import time
# Load the titanic dataset
titanic = pd.read_csv('titanic.csv', sep=',', header=0, usecols=(1,2,4,5,6,7))
for i in range(titanic.shape[0]):
titanic.iloc[i,2] = 1 if titanic.iloc[i,2] == "male" else 0
titanic = titanic.dropna(0)
X = titanic.iloc[:,1:]
X = scale(X)
y = titanic.iloc[:,0]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
#naive bayes on titanic
start = time.clock()
clf = GaussianNB()
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "naive bayes accuracy on titanic dataset: %.2f%%" % accuracy
print "time to train naive bayes: %.2f seconds\n" % (end - start)
#logistic regression on titanic
start = time.clock()
clf = LogisticRegression('l2', C=1)
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "logistic regression accuracy on titanic dataset: %.2f%%" % accuracy
print "time to train logistic regression: %.2f seconds\n" % (end - start)
#support vector machine w/ linear kernel on titanic
start = time.clock()
clf = LinearSVC(C=0.1)
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "linear support vector machine accuracy on titanic dataset: %.2f%%" % accuracy
print "time to train linear support vector machine: %.2f seconds\n" % (end - start)
#support vector machine w/ rbf kernel on titanic
start = time.clock()
clf = SVC(C=1, kernel='rbf', gamma = 0.1)
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "rbf support vector machine accuracy on titanic dataset: %.2f%%" % accuracy
print "time to train rbf support vector machine: %.2f seconds\n" % (end - start)
#random forest on titanic
start = time.clock()
clf = RandomForestClassifier(100)
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "random forest accuracy on titanic dataset: %.2f%%" % accuracy
print "time to train random forest: %.2f seconds\n" % (end - start)
#adaboost on titanic
start = time.clock()
clf = AdaBoostClassifier()
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "adaboost accuracy on titanic dataset: %.2f%%" % accuracy
print "time to train adaboost: %.2f seconds\n" % (end - start)
#k nearest neighbors w/ euclidean distance on titanic
start = time.clock()
clf = KNeighborsClassifier(n_neighbors=5, algorithm='auto')
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "euclidean k nearest neighbors accuracy on titanic dataset: %.2f%%" % accuracy
print "time to train euclidean k nearest neighbors: %.2f seconds\n" % (end - start)
#k nearest neighbors w/ cosine distance on titanic
start = time.clock()
clf = KNeighborsClassifier(n_neighbors=5, algorithm='brute', metric='cosine')
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "cosine k nearest neighbors accuracy on titanic dataset: %.2f%%" % accuracy
print "time to train cosine k nearest neighbors: %.2f seconds\n" % (end - start)
'''
-----------------------------------------
'''
print "\n-----------------------------------------\n\n"
# Load the MAGIC dataset
zf = zipfile.ZipFile('magic.zip')
data = zf.open('magic.dat')
magic = pd.read_csv(data, sep=',', skiprows=15)
X = magic.iloc[:,:-1]
y = magic.iloc[:,-1]
X = scale(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
#naive bayes on MAGIC
start = time.clock()
clf = GaussianNB()
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "naive bayes accuracy on MAGIC dataset: %.2f%%" % accuracy
print "time to train naive bayes: %.2f seconds\n" % (end - start)
#logistic regression on MAGIC
start = time.clock()
clf = LogisticRegression('l2', C=1)
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "logistic regression accuracy on MAGIC dataset: %.2f%%" % accuracy
print "time to train logistic regression: %.2f seconds\n" % (end - start)
#support vector machine w/ linear kernel on MAGIC
start = time.clock()
clf = LinearSVC(C=0.1)
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "linear support vector machine accuracy on MAGIC dataset: %.2f%%" % accuracy
print "time to train linear support vector machine: %.2f seconds\n" % (end - start)
#support vector machine w/ rbf kernel on MAGIC
start = time.clock()
clf = SVC(C=1, kernel='rbf', gamma = 0.1)
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "rbf support vector machine accuracy on MAGIC dataset: %.2f%%" % accuracy
print "time to train rbf support vector machine: %.2f seconds\n" % (end - start)
#random forest on MAGIC
start = time.clock()
clf = RandomForestClassifier(100)
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "random forest accuracy on MAGIC dataset: %.2f%%" % accuracy
print "time to train random forest: %.2f seconds\n" % (end - start)
#adaboost on MAGIC
start = time.clock()
clf = AdaBoostClassifier()
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "adaboost accuracy on MAGIC dataset: %.2f%%" % accuracy
print "time to train adaboost: %.2f seconds\n" % (end - start)
#k nearest neighbors w/ euclidean distance on MAGIC
start = time.clock()
clf = KNeighborsClassifier(n_neighbors=5, algorithm='auto')
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "euclidean k nearest neighbors accuracy on MAGIC dataset: %.2f%%" % accuracy
print "time to train euclidean k nearest neighbors: %.2f seconds\n" % (end - start)
#k nearest neighbors w/ cosine distance on MAGIC
start = time.clock()
clf = KNeighborsClassifier(n_neighbors=5, algorithm='brute', metric='cosine')
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "cosine k nearest neighbors accuracy on MAGIC dataset: %.2f%%" % accuracy
print "time to train cosine k nearest neighbors: %.2f seconds\n" % (end - start)
'''
-----------------------------------------
'''
print "\n-----------------------------------------\n\n"
# Load the digits dataset
digits = load_digits()
X = digits.data
y = digits.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
#naive bayes on digits
start = time.clock()
clf = OneVsRestClassifier(MultinomialNB())
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "naive bayes accuracy on digits (small dataset): %.2f%%" % accuracy
print "time to train naive bayes: %.2f seconds\n" % (end - start)
#logistic regression on digits
start = time.clock()
clf = LogisticRegression('l1', C=0.1)
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "logistic regression accuracy on digits (small dataset): %.2f%%" % accuracy
print "time to train logistic regression: %.2f seconds\n" % (end - start)
#support vector machine w/ linear kernel on digits
start = time.clock()
clf = LinearSVC(C=0.1)
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "linear support vector machine accuracy on digits (small dataset): %.2f%%" % accuracy
print "time to train linear support vector machine: %.2f seconds\n" % (end - start)
#support vector machine w/ rbf kernel on digits
start = time.clock()
clf = OneVsRestClassifier(SVC(C=1, kernel='rbf', gamma=0.001))
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "rbf support vector machine accuracy on digits (small dataset): %.2f%%" % accuracy
print "time to train rbf support vector machine: %.2f seconds\n" % (end - start)
#random forest on digits
start = time.clock()
clf = RandomForestClassifier(100)
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "random forest accuracy on digits (small dataset): %.2f%%" % accuracy
print "time to train random forest: %.2f seconds\n" % (end - start)
#adaboost on digits
start = time.clock()
clf = OneVsRestClassifier(AdaBoostClassifier())
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "adaboost accuracy on digits (small dataset): %.2f%%" % accuracy
print "time to train adaboost: %.2f seconds\n" % (end - start)
#k nearest neighbors w/ euclidean distance on digits
start = time.clock()
clf = KNeighborsClassifier(n_neighbors=5, algorithm='auto')
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "euclidean k nearest neighbors accuracy on digits (small dataset): %.2f%%" % accuracy
print "time to train euclidean k nearest neighbors: %.2f seconds\n" % (end - start)
#k nearest neighbors w/ cosine distance on digits
start = time.clock()
clf = KNeighborsClassifier(n_neighbors=5, algorithm='brute', metric='cosine')
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "cosine k nearest neighbors accuracy on digits (small dataset): %.2f%%" % accuracy
print "time to train cosine k nearest neighbors: %.2f seconds\n" % (end - start)
'''
-----------------------------------------
'''
print "\n-----------------------------------------\n\n"
# Load the mnist dataset
f = gzip.open('mnist.pkl.gz', 'rb')
train_set, valid_set, test_set = cPickle.load(f)
f.close()
X_train = np.array(train_set[0])
y_train = np.array(train_set[1])
X_test = np.array(test_set[0])
y_test = np.array(test_set[1])
#naive bayes on mnist
start = time.clock()
clf = OneVsRestClassifier(MultinomialNB())
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "naive bayes accuracy on mnist (large dataset): %.2f%%" % accuracy
print "time to train naive bayes: %.2f seconds\n" % (end - start)
#logistic regression on mnist
start = time.clock()
clf = LogisticRegression()
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "logistic regression accuracy on mnist (large dataset): %.2f%%" % accuracy
print "time to train logistic regression: %.2f seconds\n" % (end - start)
#support vector machine w/ linear kernel on mnist
start = time.clock()
clf = LinearSVC()
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "linear support vector machine accuracy on mnist (large dataset): %.2f%%" % accuracy
print "time to train linear support vector machine: %.2f seconds\n" % (end - start)
#support vector machine w/ rbf kernel on mnist
start = time.clock()
clf = OneVsRestClassifier(SVC(C=1, kernel='rbf', gamma=0.001))
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "rbf support vector machine accuracy on mnist (large dataset): %.2f%%" % accuracy
print "time to train rbf support vector machine: %.2f seconds\n" % (end - start)
#random forest on mnist
start = time.clock()
clf = RandomForestClassifier(100)
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "random forest accuracy on mnist (large dataset): %.2f%%" % accuracy
print "time to train random forest: %.2f seconds\n" % (end - start)
#adaboost on mnist
start = time.clock()
clf = OneVsRestClassifier(AdaBoostClassifier())
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "adaboost accuracy on mnist (large dataset): %.2f%%" % accuracy
print "time to train adaboost: %.2f seconds\n" % (end - start)
#k nearest neighbors w/ euclidean distance on mnist
start = time.clock()
clf = KNeighborsClassifier(n_neighbors=5, algorithm='auto')
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "euclidean k nearest neighbors accuracy on mnist (large dataset): %.2f%%" % accuracy
print "time to train k nearest neighbors: %.2f seconds\n" % (end - start)
#k nearest neighbors w/ cosine distance on mnist
start = time.clock()
clf = KNeighborsClassifier(n_neighbors=5, algorithm='brute', metric='cosine')
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test) * 100.0
end = time.clock()
print "cosine k nearest neighbors accuracy on mnist (large dataset): %.2f%%" % accuracy
print "time to train cosine k nearest neighbors: %.2f seconds\n" % (end - start)
| 36.810198
| 91
| 0.714945
| 2,027
| 12,994
| 4.495807
| 0.062654
| 0.063206
| 0.023044
| 0.059695
| 0.899045
| 0.883134
| 0.871722
| 0.862175
| 0.862175
| 0.862175
| 0
| 0.024265
| 0.134139
| 12,994
| 353
| 92
| 36.810198
| 0.785708
| 0.094351
| 0
| 0.731343
| 0
| 0
| 0.323349
| 0.012171
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.05597
| null | null | 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
98640bdd7928f4d33a92d9b416d7293a91541c62
| 5,788
|
py
|
Python
|
tests/unit_tests/test_rm/test_slurm.py
|
lsawade/radical.pilot
|
b430f5c53a7cfdeb124ef81a8c0272d4dbe4987e
|
[
"MIT"
] | null | null | null |
tests/unit_tests/test_rm/test_slurm.py
|
lsawade/radical.pilot
|
b430f5c53a7cfdeb124ef81a8c0272d4dbe4987e
|
[
"MIT"
] | null | null | null |
tests/unit_tests/test_rm/test_slurm.py
|
lsawade/radical.pilot
|
b430f5c53a7cfdeb124ef81a8c0272d4dbe4987e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# pylint: disable=protected-access, unused-argument, no-value-for-parameter
import os
from unittest import mock, TestCase
import radical.utils as ru
from radical.pilot.agent.resource_manager.slurm import Slurm
class TestSlurm(TestCase):
    """Unit tests for the Slurm resource manager's `_configure`."""

    # --------------------------------------------------------------------------
    #
    # NOTE: stacked `mock.patch` decorators inject mocks bottom-up -- the
    # decorator closest to the function supplies the first mock argument.
    # The parameters below are named in that (correct) order; the original
    # code listed them reversed, mislabeling every mock.
    @mock.patch.object(Slurm, '__init__', return_value=None)
    @mock.patch('radical.utils.raise_on')
    @mock.patch('hostlist.expand_hostlist', return_value=['nodes1', 'nodes2'])
    def test_configure(self, mocked_expand_hostlist, mocked_raise_on,
                       mocked_init):
        """`_configure` derives node list and per-node resources from the
        SLURM_* environment variables and the optional agent config."""

        # Test 1: no config -- per-node values come from the environment.
        os.environ['SLURM_NODELIST'] = 'nodes-[1-2]'
        os.environ['SLURM_NPROCS'] = '48'
        os.environ['SLURM_NNODES'] = '2'
        os.environ['SLURM_CPUS_ON_NODE'] = '24'

        component = Slurm(cfg=None, session=None)
        component._log = ru.Logger('dummy')
        component._cfg = {}
        component.lm_info = {'cores_per_node': None}
        component._configure()

        self.assertEqual(component.node_list,
                         [['nodes1', 'nodes1'], ['nodes2', 'nodes2']])
        self.assertEqual(component.cores_per_node, 24)
        self.assertEqual(component.gpus_per_node, 0)
        self.assertEqual(component.lfs_per_node, {'path': None, 'size': 0})

        # Test 2: config file supplies gpu and lfs values.
        os.environ['SLURM_NODELIST'] = 'nodes-[1-2]'
        os.environ['SLURM_NPROCS'] = '48'
        os.environ['SLURM_NNODES'] = '2'
        os.environ['SLURM_CPUS_ON_NODE'] = '24'

        component = Slurm(cfg=None, session=None)
        component._log = ru.Logger('dummy')
        component._cfg = {'cores_per_node': 24,
                          'gpus_per_node': 1,
                          'lfs_path_per_node': 'test/',
                          'lfs_size_per_node': 100}
        component.lm_info = {'cores_per_node': None}
        component._configure()

        self.assertEqual(component.node_list,
                         [['nodes1', 'nodes1'], ['nodes2', 'nodes2']])
        self.assertEqual(component.cores_per_node, 24)
        self.assertEqual(component.gpus_per_node, 1)
        self.assertEqual(component.lfs_per_node, {'path': 'test/', 'size': 100})

        # Test 3: environment variables in the lfs path are expanded.
        os.environ['SLURM_NODELIST'] = 'nodes-[1-2]'
        os.environ['SLURM_NPROCS'] = '48'
        os.environ['SLURM_NNODES'] = '2'
        os.environ['SLURM_CPUS_ON_NODE'] = '24'
        os.environ['LOCAL'] = '/local_folder/'

        component = Slurm(cfg=None, session=None)
        component._log = ru.Logger('dummy')
        component._cfg = {'cores_per_node': 24,
                          'gpus_per_node': 1,
                          'lfs_path_per_node': '${LOCAL}',
                          'lfs_size_per_node': 100}
        component.lm_info = {'cores_per_node': None}
        component._configure()

        self.assertEqual(component.node_list,
                         [['nodes1', 'nodes1'], ['nodes2', 'nodes2']])
        self.assertEqual(component.cores_per_node, 24)
        self.assertEqual(component.gpus_per_node, 1)
        self.assertEqual(component.lfs_per_node,
                         {'path': '/local_folder/', 'size': 100})

    # --------------------------------------------------------------------------
    #
    @mock.patch.object(Slurm, '__init__', return_value=None)
    @mock.patch('radical.utils.raise_on')
    @mock.patch('hostlist.expand_hostlist', return_value=['nodes1', 'nodes2'])
    def test_configure_error(self, mocked_expand_hostlist, mocked_raise_on,
                             mocked_init):
        """`_configure` raises RuntimeError when any required SLURM_*
        environment variable is missing."""

        # Test 1: SLURM_NODELIST missing.
        if 'SLURM_NODELIST' in os.environ:
            del os.environ['SLURM_NODELIST']
        os.environ['SLURM_NPROCS'] = '48'
        os.environ['SLURM_NNODES'] = '2'
        os.environ['SLURM_CPUS_ON_NODE'] = '24'

        component = Slurm(cfg=None, session=None)
        component._log = ru.Logger('dummy')
        component._cfg = {}
        component.lm_info = {}
        with self.assertRaises(RuntimeError):
            component._configure()

        # Test 2: SLURM_NPROCS missing.
        os.environ['SLURM_NODELIST'] = 'nodes-[1-2]'
        del os.environ['SLURM_NPROCS']
        os.environ['SLURM_NNODES'] = '2'
        os.environ['SLURM_CPUS_ON_NODE'] = '24'

        component = Slurm(cfg=None, session=None)
        component._log = ru.Logger('dummy')
        component._cfg = {}
        component.lm_info = {}
        with self.assertRaises(RuntimeError):
            component._configure()

        # Test 3: SLURM_NNODES missing.
        os.environ['SLURM_NODELIST'] = 'nodes-[1-2]'
        os.environ['SLURM_NPROCS'] = '48'
        del os.environ['SLURM_NNODES']
        os.environ['SLURM_CPUS_ON_NODE'] = '24'

        component = Slurm(cfg=None, session=None)
        component._log = ru.Logger('dummy')
        component._cfg = {}
        component.lm_info = {}
        with self.assertRaises(RuntimeError):
            component._configure()

        # Test 4: SLURM_CPUS_ON_NODE missing.
        os.environ['SLURM_NODELIST'] = 'nodes-[1-2]'
        os.environ['SLURM_NPROCS'] = '48'
        os.environ['SLURM_NNODES'] = '2'
        del os.environ['SLURM_CPUS_ON_NODE']

        component = Slurm(cfg=None, session=None)
        component._log = ru.Logger('dummy')
        component._cfg = {}
        component.lm_info = {}
        with self.assertRaises(RuntimeError):
            component._configure()
if __name__ == '__main__':
    # Run the checks directly without a test runner; the mock.patch
    # decorators on the methods still apply when they are called this way.
    tc = TestSlurm()
    tc.test_configure()
    tc.test_configure_error()
# ------------------------------------------------------------------------------
# pylint: enable=protected-access, unused-argument, no-value-for-parameter
| 36.402516
| 89
| 0.574637
| 642
| 5,788
| 4.928349
| 0.152648
| 0.08818
| 0.123894
| 0.047408
| 0.855563
| 0.849241
| 0.841656
| 0.824589
| 0.794248
| 0.794248
| 0
| 0.021253
| 0.235833
| 5,788
| 158
| 90
| 36.632911
| 0.694099
| 0.107636
| 0
| 0.757009
| 0
| 0
| 0.194989
| 0.017868
| 0
| 0
| 0
| 0
| 0.149533
| 1
| 0.018692
| false
| 0
| 0.046729
| 0
| 0.074766
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
98a89b75ebb7551fc6934ff6df4b99176c0b9317
| 3,794
|
py
|
Python
|
Horror Trees/Predicate/tests/test_task.py
|
jetbrains-academy/Machine-Learning-101
|
7b583dbff1e90115296dcaeac78ca88363c158c9
|
[
"MIT"
] | null | null | null |
Horror Trees/Predicate/tests/test_task.py
|
jetbrains-academy/Machine-Learning-101
|
7b583dbff1e90115296dcaeac78ca88363c158c9
|
[
"MIT"
] | 10
|
2021-11-22T16:51:52.000Z
|
2022-02-14T12:57:57.000Z
|
Horror Trees/Predicate/tests/test_task.py
|
jetbrains-academy/Machine-Learning-101
|
7b583dbff1e90115296dcaeac78ca88363c158c9
|
[
"MIT"
] | null | null | null |
import numpy as np
import unittest
from numpy import array_equal
from numpy.ma.testutils import assert_array_equal
from divide import Predicate
class TestCase(unittest.TestCase):
    """Checks Predicate.divide on int, float and string feature values.

    divide() may return the two halves of a split in either order, so every
    test accepts both orderings.
    """

    def _check_division(self, division, half_a, half_b, err_msg):
        """Assert `division` == (half_a, half_b), accepting either order."""
        X1, y1, X2, y2 = division
        Xa, ya = half_a
        Xb, yb = half_b
        if array_equal(Xa, X1):
            assert_array_equal(ya, y1, err_msg=err_msg)
            assert_array_equal(Xb, X2, err_msg=err_msg)
            assert_array_equal(yb, y2, err_msg=err_msg)
        else:
            assert_array_equal(Xb, X1, err_msg=err_msg)
            assert_array_equal(yb, y1, err_msg=err_msg)
            assert_array_equal(Xa, X2, err_msg=err_msg)
            assert_array_equal(ya, y2, err_msg=err_msg)

    def test_nominal_int(self):
        X = np.array([[1, 1, 1],
                      [2, 2, 2],
                      [3, 3, 3],
                      [1, 2, 3]])
        y = np.array([1, 2, 3, 4])
        division = Predicate(0, 2).divide(X, y)
        self._check_division(
            division,
            (np.array([[2, 2, 2], [3, 3, 3]]), np.array([2, 3])),
            (np.array([[1, 1, 1], [1, 2, 3]]), np.array([1, 4])),
            "Incorrect split for int feature")

    def test_nominal_float(self):
        X = np.array([[1., 1., 1.],
                      [2., 2., 2.],
                      [3., 3., 3.],
                      [1., 2., 3.]])
        y = np.array([1, 2, 3, 4])
        division = Predicate(0, 2.1).divide(X, y)
        self._check_division(
            division,
            (np.array([[3, 3, 3]]), np.array([3])),
            (np.array([[1, 1, 1], [2, 2, 2], [1, 2, 3]]), np.array([1, 2, 4])),
            "Incorrect split for float feature")

    def test_quantitative(self):
        X = np.array([[1, 1, 1, 'clear'],
                      [2, 2, 2, 'clear'],
                      [3, 3, 3, 'green'],
                      [1, 2, 3, 'black']])
        y = np.array([1, 2, 3, 4])
        division = Predicate(3, 'clear').divide(X, y)
        self._check_division(
            division,
            (np.array([[1, 1, 1, 'clear'], [2, 2, 2, 'clear']]),
             np.array([1, 2])),
            (np.array([[3, 3, 3, 'green'], [1, 2, 3, 'black']]),
             np.array([3, 4])),
            "Incorrect split for quantitative feature")
| 50.586667
| 111
| 0.541118
| 546
| 3,794
| 3.624542
| 0.080586
| 0.106114
| 0.145528
| 0.206165
| 0.881758
| 0.857504
| 0.854977
| 0.838302
| 0.719555
| 0.690248
| 0
| 0.069951
| 0.299157
| 3,794
| 74
| 112
| 51.27027
| 0.674314
| 0
| 0
| 0.225806
| 0
| 0
| 0.209014
| 0
| 0
| 0
| 0
| 0
| 0.354839
| 1
| 0.048387
| false
| 0
| 0.080645
| 0
| 0.145161
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7f579072dc715d287b7026e3d086c86b70690207
| 8,365
|
py
|
Python
|
tests/python/auto_tensorize/test_manual_mapping_params_conv2d_tensorcore.py
|
QinHan-Erin/AMOS
|
634bf48edf4015e4a69a8c32d49b96bce2b5f16f
|
[
"Apache-2.0"
] | 22
|
2022-03-18T07:29:31.000Z
|
2022-03-23T14:54:32.000Z
|
tests/python/auto_tensorize/test_manual_mapping_params_conv2d_tensorcore.py
|
QinHan-Erin/AMOS
|
634bf48edf4015e4a69a8c32d49b96bce2b5f16f
|
[
"Apache-2.0"
] | null | null | null |
tests/python/auto_tensorize/test_manual_mapping_params_conv2d_tensorcore.py
|
QinHan-Erin/AMOS
|
634bf48edf4015e4a69a8c32d49b96bce2b5f16f
|
[
"Apache-2.0"
] | 2
|
2022-03-18T08:26:34.000Z
|
2022-03-20T06:02:48.000Z
|
import tvm
import time
import numpy as np
from tvm import auto_tensorize as at
def conv2d(N, C, H, W, K, R, S, stride, padding, dilation, layout, in_dtype, out_dtype):
    """Build a TVM tensor-expression 2D convolution in one of three layouts.

    Args:
        N, C, H, W: batch size, input channels, input height, input width.
        K, R, S: output channels, kernel height, kernel width.
        stride, padding, dilation: spatial parameters (single ints applied
            symmetrically to both spatial dimensions).
        layout: "nchw", "nhwc" or "hwnc"; selects the axis order of the
            input placeholder A, the weight placeholder B and the output.
        in_dtype: dtype of the placeholders.
        out_dtype: dtype the products are accumulated in.

    Returns:
        [A, B, Conv]: the two input placeholders and the convolution tensor.

    Raises:
        RuntimeError: if `layout` is not one of the supported strings.
    """
    # Effective (dilated) kernel extents and padded input extents.
    kH = (R - 1) * dilation + 1
    kW = (S - 1) * dilation + 1
    pH = H + 2 * padding
    pW = W + 2 * padding
    if layout == "nchw":
        A = tvm.te.placeholder([N, C, H, W], dtype=in_dtype, name="A")
        B = tvm.te.placeholder([K, C, R, S], dtype=in_dtype, name="B")
        # Zero padding expressed as a select over the shifted coordinates.
        Pad = tvm.te.compute(
            [N, C, pH, pW],
            lambda n, c, h, w: tvm.tir.if_then_else(
                tvm.tir.all(h >= padding, h - padding < H, w >= padding, w - padding < W),
                A[n, c, h - padding, w - padding],
                tvm.tir.const(0.0, A.dtype),
            ),
            name="Pad",
        )
        rc = tvm.te.reduce_axis([0, C], name="rc")
        rr = tvm.te.reduce_axis([0, kH], name="rr")
        rs = tvm.te.reduce_axis([0, kW], name="rs")
        P = (pH - kH) // stride + 1
        Q = (pW - kW) // stride + 1
        Conv = tvm.te.compute(
            [N, K, P, Q],
            lambda n, k, p, q: tvm.te.sum(
                (
                    Pad[n, rc, p * stride + rr * dilation, q * stride + rs * dilation]
                    * B[k, rc, rr, rs]
                ).astype(out_dtype),
                axis=[rc, rr, rs],
            ),
            name="Conv",
        )
    elif layout == "nhwc":
        A = tvm.te.placeholder([N, H, W, C], dtype=in_dtype, name="A")
        B = tvm.te.placeholder([R, S, C, K], dtype=in_dtype, name="B")
        Pad = tvm.te.compute(
            [N, pH, pW, C],
            lambda n, h, w, c: tvm.tir.if_then_else(
                tvm.tir.all(h >= padding, h - padding < H, w >= padding, w - padding < W),
                A[n, h - padding, w - padding, c],
                tvm.tir.const(0.0, A.dtype),
            ),
            name="Pad",
        )
        rc = tvm.te.reduce_axis([0, C], name="rc")
        rr = tvm.te.reduce_axis([0, kH], name="rr")
        rs = tvm.te.reduce_axis([0, kW], name="rs")
        P = (pH - kH) // stride + 1
        Q = (pW - kW) // stride + 1
        Conv = tvm.te.compute(
            [N, P, Q, K],
            lambda n, p, q, k: tvm.te.sum(
                (
                    Pad[n, p * stride + rr * dilation, q * stride + rs * dilation, rc]
                    * B[rr, rs, rc, k]
                ).astype(out_dtype),
                axis=[rr, rs, rc],
            ),
            name="Conv",
        )
    elif layout == "hwnc":
        A = tvm.te.placeholder([H, W, N, C], dtype=in_dtype, name="A")
        B = tvm.te.placeholder([R, S, C, K], dtype=in_dtype, name="B")
        Pad = tvm.te.compute(
            [pH, pW, N, C],
            lambda h, w, n, c: tvm.tir.if_then_else(
                tvm.tir.all(h >= padding, h - padding < H, w >= padding, w - padding < W),
                A[h - padding, w - padding, n, c],
                tvm.tir.const(0.0, A.dtype),
            ),
            name="Pad",
        )
        rc = tvm.te.reduce_axis([0, C], name="rc")
        rr = tvm.te.reduce_axis([0, kH], name="rr")
        rs = tvm.te.reduce_axis([0, kW], name="rs")
        P = (pH - kH) // stride + 1
        Q = (pW - kW) // stride + 1
        Conv = tvm.te.compute(
            [P, Q, N, K],
            lambda p, q, n, k: tvm.te.sum(
                (
                    Pad[p * stride + rr * dilation, q * stride + rs * dilation, n, rc]
                    * B[rr, rs, rc, k]
                ).astype(out_dtype),
                axis=[rr, rs, rc],
            ),
            name="Conv",
        )
    else:
        # Fixed typo in the original message ("Unkonwn").
        raise RuntimeError(f"Unknown layout for conv2d: {layout}")
    return [A, B, Conv]
def mapping0000010():
    """Evaluate one fixed manual mapping of conv2d onto WMMA fp16 tensor
    cores (virtual-mapping choice [0, 0, 0, 0, 0, 1, 0]) with hand-tuned
    schedule parameters, printing the measured cost."""
    # Hardware abstraction: 8x32x16 WMMA fp16 MMA with compute key "nnn".
    hw_abs_dag = at.WMMAFp16Fp16()
    compute_key = "nnn"
    shape_key = "8x32x16"
    intrin_dag, _ = hw_abs_dag.get_effective_compute_dag(compute_key, shape_key)
    # Workload: N=1, C=128, 28x28 input, K=128, 3x3 kernel, stride/pad/dilation=1.
    A, B, Conv = conv2d(1, 128, 28, 28, 128, 3, 3, 1, 1, 1, "nchw", "float16", "float16")
    target_dag = at.compute_dag_from_tensors([Conv])
    # Map the intrinsic's main op to op_lst[1] of the target DAG —
    # presumably the Conv op (op_lst[0] being Pad); TODO confirm ordering.
    main_op_map = {
        intrin_dag.op_lst[0]: target_dag.op_lst[1]
    }
    elem_op_map = {
    }
    ii, jj = intrin_dag.op_lst[0].axis
    kk, = intrin_dag.op_lst[0].reduce_axis
    n, k, p, q = target_dag.op_lst[1].axis
    rc, rr, rs = target_dag.op_lst[1].reduce_axis
    # Candidate pairings of intrinsic axes (ii, jj, kk) with target axes;
    # one column per virtual-mapping candidate selected below.
    axis_map = {
        ii: [n, n, n, p, p, q, q],
        jj: [k, k, k, k, k, k, k],
        kk: [rc, rr, rs, rc, rs, rc, rr]
    }
    match_result = at.IntrinMatchResult(
        hw_abs_dag, compute_key, shape_key,
        main_op_map, elem_op_map,
        axis_map, target_dag, intrin_dag
    )
    gen = at.MappingGenerator(match_result)
    record = gen.get(policy="random")
    # Override the random draw with the fixed candidate under test.
    record.vmap_choice = ([0, 0, 0, 0, 0, 1, 0], record.vmap_choice[1])
    print("mapping decision:")
    for k, v in record.to_json().items():
        print(k, "=", v)
    app = at.MappingApplier(match_result)
    new_state = app.apply(record)
    schedule_gen = at.CUDAScheduleGeneratorV2(match_result, new_state)
    sc_info = schedule_gen.get_schedule_compute_info()
    schedule_app = at.CUDAScheduleApplierV2(match_result, sc_info)
    params = schedule_gen.get(policy="random")
    # Hand-tuned schedule parameters replacing the random draw above.
    my_params = {
        'inline': (0, -1),
        'vectorize': (2, -1),
        'spatial_factors': [
            ([2, 1, 2, 1], (1, 0, -1)),
            ([4, 1, 1, 1], (1, 0, -1)),
            ([1, 1, 1, 1], (0, 0, 0)),
            ([7, 1, 1, 4], (1, 0, 0))],
        'reduce_factors': [
            ([8, 1, 1], (0, 0)),
            ([3, 1, 1], (1, -1)),
            ([1, 3, 1], (0, 0))],
        'last_factors': [([392, 4, 2], (0, 1))],
        'output_unroll_step': (16, -1),
        'last_unroll_step': (64, 1)
    }
    params.from_json(my_params)
    target = "cuda"
    measure_opt = at.MeasureOptions(target=target, timeout=100, number=200, min_repeat_ms=500)
    cost = at.evaluate_params(schedule_app, params, measure_opt, dump=True)
    print("Cost is %f ms" % (cost))
def mapping0001000():
    """Same experiment as mapping0000010 but with virtual-mapping choice
    [0, 0, 0, 1, 0, 0, 0]; schedule parameters are identical."""
    hw_abs_dag = at.WMMAFp16Fp16()
    compute_key = "nnn"
    shape_key = "8x32x16"
    intrin_dag, _ = hw_abs_dag.get_effective_compute_dag(compute_key, shape_key)
    A, B, Conv = conv2d(1, 128, 28, 28, 128, 3, 3, 1, 1, 1, "nchw", "float16", "float16")
    target_dag = at.compute_dag_from_tensors([Conv])
    # op_lst[1] — presumably the Conv op (op_lst[0] being Pad); TODO confirm.
    main_op_map = {
        intrin_dag.op_lst[0]: target_dag.op_lst[1]
    }
    elem_op_map = {
    }
    ii, jj = intrin_dag.op_lst[0].axis
    kk, = intrin_dag.op_lst[0].reduce_axis
    n, k, p, q = target_dag.op_lst[1].axis
    rc, rr, rs = target_dag.op_lst[1].reduce_axis
    # Candidate axis pairings; one column per virtual-mapping candidate.
    axis_map = {
        ii: [n, n, n, p, p, q, q],
        jj: [k, k, k, k, k, k, k],
        kk: [rc, rr, rs, rc, rs, rc, rr]
    }
    match_result = at.IntrinMatchResult(
        hw_abs_dag, compute_key, shape_key,
        main_op_map, elem_op_map,
        axis_map, target_dag, intrin_dag
    )
    gen = at.MappingGenerator(match_result)
    record = gen.get(policy="random")
    # Override the random draw with the fixed candidate under test.
    record.vmap_choice = ([0, 0, 0, 1, 0, 0, 0], record.vmap_choice[1])
    print("mapping decision:")
    for k, v in record.to_json().items():
        print(k, "=", v)
    app = at.MappingApplier(match_result)
    new_state = app.apply(record)
    schedule_gen = at.CUDAScheduleGeneratorV2(match_result, new_state)
    sc_info = schedule_gen.get_schedule_compute_info()
    schedule_app = at.CUDAScheduleApplierV2(match_result, sc_info)
    params = schedule_gen.get(policy="random")
    # Hand-tuned schedule parameters replacing the random draw above.
    my_params = {
        'inline': (0, -1),
        'vectorize': (2, -1),
        'spatial_factors': [
            ([2, 1, 2, 1], (1, 0, -1)),
            ([4, 1, 1, 1], (1, 0, -1)),
            ([1, 1, 1, 1], (0, 0, 0)),
            ([7, 1, 1, 4], (1, 0, 0))],
        'reduce_factors': [
            ([8, 1, 1], (0, 0)),
            ([3, 1, 1], (1, -1)),
            ([1, 3, 1], (0, 0))],
        'last_factors': [([392, 4, 2], (0, 1))],
        'output_unroll_step': (16, -1),
        'last_unroll_step': (64, 1)
    }
    params.from_json(my_params)
    target = "cuda"
    measure_opt = at.MeasureOptions(target=target, timeout=100, number=200, min_repeat_ms=500)
    cost = at.evaluate_params(schedule_app, params, measure_opt, dump=True)
    print("Cost is %f ms" % (cost))
if __name__ == "__main__":
    # Only the first mapping variant is run by default; the second is
    # kept for manual experimentation.
    mapping0000010()
    # mapping0001000()
| 32.933071
| 94
| 0.502809
| 1,211
| 8,365
| 3.308836
| 0.127993
| 0.015972
| 0.013476
| 0.009983
| 0.861742
| 0.842276
| 0.842276
| 0.842276
| 0.816821
| 0.808335
| 0
| 0.051031
| 0.327675
| 8,365
| 253
| 95
| 33.063241
| 0.661451
| 0.00275
| 0
| 0.703704
| 0
| 0
| 0.051565
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013889
| false
| 0
| 0.018519
| 0
| 0.037037
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f69b04e696cf1ddf7f980e11b22b6b2c38fc895f
| 854
|
py
|
Python
|
octicons16px/mortar_board.py
|
andrewp-as-is/octicons16px.py
|
1272dc9f290619d83bd881e87dbd723b0c48844c
|
[
"Unlicense"
] | 1
|
2021-01-28T06:47:39.000Z
|
2021-01-28T06:47:39.000Z
|
octicons16px/mortar_board.py
|
andrewp-as-is/octicons16px.py
|
1272dc9f290619d83bd881e87dbd723b0c48844c
|
[
"Unlicense"
] | null | null | null |
octicons16px/mortar_board.py
|
andrewp-as-is/octicons16px.py
|
1272dc9f290619d83bd881e87dbd723b0c48844c
|
[
"Unlicense"
] | null | null | null |
# Raw 16x16 "mortar-board" SVG markup from GitHub's Octicons icon set,
# kept as a module-level constant so callers can embed it verbatim in HTML.
# Note the leading/trailing newlines inside the triple-quoted string.
OCTICON_MORTAR_BOARD = """
<svg class="octicon octicon-mortar-board" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16" width="16" height="16"><path fill-rule="evenodd" d="M7.693 1.066a.75.75 0 01.614 0l7.25 3.25a.75.75 0 010 1.368L13 6.831v2.794c0 1.024-.81 1.749-1.66 2.173-.893.447-2.075.702-3.34.702-.278 0-.55-.012-.816-.036a.75.75 0 01.133-1.494c.22.02.45.03.683.03 1.082 0 2.025-.221 2.67-.543.69-.345.83-.682.83-.832V7.503L8.307 8.934a.75.75 0 01-.614 0L4 7.28v1.663c.296.105.575.275.812.512.438.438.688 1.059.688 1.796v3a.75.75 0 01-.75.75h-3a.75.75 0 01-.75-.75v-3c0-.737.25-1.358.688-1.796.237-.237.516-.407.812-.512V6.606L.443 5.684a.75.75 0 010-1.368l7.25-3.25zM2.583 5L8 7.428 13.416 5 8 2.572 2.583 5zM2.5 11.25c0-.388.125-.611.25-.735a.704.704 0 01.5-.203c.19 0 .37.071.5.203.125.124.25.347.25.735v2.25H2.5v-2.25z"></path></svg>
"""
| 170.8
| 821
| 0.681499
| 215
| 854
| 2.697674
| 0.604651
| 0.048276
| 0.060345
| 0.060345
| 0.096552
| 0
| 0
| 0
| 0
| 0
| 0
| 0.541087
| 0.07377
| 854
| 4
| 822
| 213.5
| 0.192162
| 0
| 0
| 0
| 0
| 0.333333
| 0.96483
| 0.488863
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
63e864597ef319dfb697502d6477210d310071bf
| 108
|
py
|
Python
|
api/ansible_api/urls/__init__.py
|
240325184/KubeOperator
|
777774050b236abf938a5a9ef505124c26e4916e
|
[
"Apache-2.0"
] | 3
|
2019-11-29T03:49:08.000Z
|
2020-07-29T02:52:51.000Z
|
api/ansible_api/urls/__init__.py
|
240325184/KubeOperator
|
777774050b236abf938a5a9ef505124c26e4916e
|
[
"Apache-2.0"
] | 27
|
2021-05-05T02:51:26.000Z
|
2022-01-04T21:30:21.000Z
|
api/ansible_api/urls/__init__.py
|
240325184/KubeOperator
|
777774050b236abf938a5a9ef505124c26e4916e
|
[
"Apache-2.0"
] | 1
|
2020-11-22T01:15:05.000Z
|
2020-11-22T01:15:05.000Z
|
from .api_urls import urlpatterns as api_urlpatterns
from .view_urls import urlpatterns as view_urlpatterns
| 36
| 54
| 0.87037
| 16
| 108
| 5.625
| 0.4375
| 0.222222
| 0.466667
| 0.511111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 108
| 2
| 55
| 54
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
63ff24dbf2502b2595a7ce3bcf7406a285b35a92
| 224
|
py
|
Python
|
pytest_course/a_overview/Company.py
|
JanoBourian/automatization-course
|
780e8ca6d1ed5f97efc36d823f7eb76fa1198338
|
[
"MIT"
] | null | null | null |
pytest_course/a_overview/Company.py
|
JanoBourian/automatization-course
|
780e8ca6d1ed5f97efc36d823f7eb76fa1198338
|
[
"MIT"
] | null | null | null |
pytest_course/a_overview/Company.py
|
JanoBourian/automatization-course
|
780e8ca6d1ed5f97efc36d823f7eb76fa1198338
|
[
"MIT"
] | null | null | null |
class Company:
    """A company identified by a display name and a stock ticker symbol."""

    def __init__(self, name: str, stock_symbol: str) -> None:
        """Store the company's display name and its stock ticker symbol."""
        self.name = name
        self.stock_symbol = stock_symbol

    def __str__(self) -> str:
        """Return the 'name:symbol' display form, e.g. 'Apple:AAPL'."""
        return f"{self.name}:{self.stock_symbol}"

    def __repr__(self) -> str:
        """Return an unambiguous, eval-style debug representation."""
        return f"{type(self).__name__}(name={self.name!r}, stock_symbol={self.stock_symbol!r})"
| 28
| 59
| 0.616071
| 29
| 224
| 4.344828
| 0.413793
| 0.349206
| 0.206349
| 0.301587
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.267857
| 224
| 8
| 60
| 28
| 0.768293
| 0
| 0
| 0
| 0
| 0
| 0.137778
| 0.137778
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.166667
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 7
|
121ac669a93519e31993f07eb9914764a2f6c62a
| 1,016
|
py
|
Python
|
SuperSafeRSA2/p1.py
|
Mitsububunu/picoCTF_Code
|
69bc2fda655f68d619d559d8ebcac3f3002e1e9b
|
[
"MIT"
] | null | null | null |
SuperSafeRSA2/p1.py
|
Mitsububunu/picoCTF_Code
|
69bc2fda655f68d619d559d8ebcac3f3002e1e9b
|
[
"MIT"
] | null | null | null |
SuperSafeRSA2/p1.py
|
Mitsububunu/picoCTF_Code
|
69bc2fda655f68d619d559d8ebcac3f3002e1e9b
|
[
"MIT"
] | null | null | null |
# picoCTF "Super Safe RSA 2" solver.
# NOTE: this is Python 2 code (statement-form `print`); `unhex` comes from
# pwntools' star import.
from pwn import *
# Challenge-supplied ciphertext, modulus, and (unusually large) public exponent.
c = 2406630770774067002969488973721471931018204466252985338778157023054313700122577969674801095968914138115159683835249515291036273321027004343571790131936084125655906043408782674885449464579968641081302429157166130115565352598598821570066283635305122709922390037730372591602868730589481713392049764648061801524
n = 147273688793934261024181248195230675783546488649508215206712909610823309020315167219784009002992362309068755668749150030649630484626726808282970862335298874686962338279846001531881628828732296269712243526593454314785171173549519521483321293695860652122785966138520696376542198407518431742168507833956480984961
e = 63898673129003779730878645535062396293775186608309292451636199166635042678069819794841971649603854650935655697405772023765540730127423849024616269748450841660052311890954340601218380974301418080097016300493123077080154006552987883044292330365369861886535969787230416626276257087720735443256331831473849394433
# NOTE(review): the exponent used here is 65537, not the published `e` above —
# presumably 65537 is the recovered private exponent d for this challenge
# (large e often implies a small d); TODO confirm how it was derived.
m = pow(c, 65537, n)
print hex(m)
# Strip the "0x" prefix and decode the hex digits back to the flag bytes.
print unhex(hex(m)[2:])
| 112.888889
| 313
| 0.959646
| 22
| 1,016
| 44.318182
| 0.727273
| 0.008205
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.936556
| 0.022638
| 1,016
| 9
| 314
| 112.888889
| 0.045317
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0.142857
| null | null | 0.285714
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
123a34d43b045fd99b73605d6f3e94e85147f966
| 187,749
|
py
|
Python
|
other_models/kcnet.py
|
ZJUCAGD/GTS-CNN
|
a329f314b795f0dea0f46db623ac955a47619e7d
|
[
"MIT"
] | null | null | null |
other_models/kcnet.py
|
ZJUCAGD/GTS-CNN
|
a329f314b795f0dea0f46db623ac955a47619e7d
|
[
"MIT"
] | null | null | null |
other_models/kcnet.py
|
ZJUCAGD/GTS-CNN
|
a329f314b795f0dea0f46db623ac955a47619e7d
|
[
"MIT"
] | null | null | null |
import os, sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, "../utils"))
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from ops.layers import LoaclGeometricStructure, batch_knn, graph_max_pooling, group_points
from ops.layers import Fc, Perceptron, Geoconv, SphericalGeoconv,furthest_point_sample,three_nn,three_interpolate
from utils.misc import debugPrint
import json
import scipy.io as scio
# global writer
# Module-level batch counter shared by every model's fit() loop; used as the
# x-axis value for TensorBoard scalar logging across epochs and models.
global_step=0
def gather_nd(input_tensor, indices):
    """Batched row gather.

    input_tensor: (b,n,c), float32
    indices: (b,m), int

    Returns a (b,m,c) tensor whose i-th slice is input_tensor[i] restricted
    to the rows named by indices[i].
    """
    # indices=indices.long()
    # Iterating a tensor yields its dim-0 slices, so zip pairs each batch
    # element with its own index row.
    per_batch = [
        torch.index_select(rows, 0, idx)
        for rows, idx in zip(input_tensor, indices)
    ]
    return torch.stack(per_batch)  # keep dim as xyz
class KCNetClassify(nn.Module):
    """KCNet point-cloud classifier: kernel-correlation local features and
    graph max pooling over a kNN graph, then a global max pool feeding an
    MLP classifier head.

    Fix vs. original: `initialize_weights()` is now called *after* the
    submodules are constructed. The original called it first, when
    `self.modules()` contained no layers, so the custom Gaussian/BN init
    silently never ran. This also matches KCNetSegment's construction order.
    """

    def __init__(self, class_nums, device_id=0, initial_weights=True):
        super(KCNetClassify, self).__init__()
        self.class_nums = class_nums
        self.knn_points = 16
        self.device_id = device_id
        # Kernel correlation layer: 32 kernels of 16 points, sigma 0.005.
        self.kc = LoaclGeometricStructure(32, 16, 0.005)
        self.mlp1 = nn.Sequential(
            nn.Conv1d(32 + 3, 64, 1),
            nn.BatchNorm1d(64),
            nn.ReLU(True),
            nn.Conv1d(64, 64, 1),
            nn.BatchNorm1d(64),
            nn.ReLU(True)
        )
        self.mlp2 = nn.Sequential(
            nn.Conv1d(64, 64, 1),
            nn.BatchNorm1d(64),
            nn.ReLU(True),
            nn.Conv1d(64, 128, 1),
            nn.BatchNorm1d(128),
            nn.ReLU(True)
        )
        self.mlp3 = nn.Sequential(
            nn.Conv1d(192, 1024, 1),
            nn.BatchNorm1d(1024),
            nn.ReLU(True)
        )
        self.classify = nn.Sequential(
            nn.Linear(1024, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(True),
            nn.Linear(512, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Linear(256, class_nums)
        )
        # Initialize AFTER the layers exist so self.modules() can see them.
        if initial_weights:
            self.initialize_weights()
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.Adam(self.parameters(), weight_decay=1e-5)
        self.schedule = optim.lr_scheduler.StepLR(self.optimizer, 20, 0.5)
        self.cuda(device_id)

    def forward(self, points):
        """points -> class logits. Builds a fresh kNN graph per call; +1
        neighbours are requested because each point's nearest neighbour is
        itself (column 0, dropped before kernel correlation)."""
        knn_graph, _ = batch_knn(points, points.clone(), self.knn_points + 1)
        # knn_graph = adptive_knn(points,self.knn_points+1) # knn_points=16
        x = self.kc(points, knn_graph[:, :, 1:].contiguous())
        x = torch.cat([points, x], dim=1)
        x = self.mlp1(x)
        y = graph_max_pooling(x, knn_graph)
        x = self.mlp2(x)
        x = torch.cat([x, y], dim=1)
        x = self.mlp3(x)
        # Global max pool over the point dimension, then classify.
        x = F.max_pool1d(x, x.size(2), stride=1).squeeze(2)
        x = self.classify(x)
        return x

    def initialize_weights(self):
        """Gaussian(0, 0.01) init for conv/linear weights, unit/zero for BN."""
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                m.weight.data.normal_(0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def loss(self, outputs, targets):
        """Cross-entropy between logits and integer class targets."""
        return self.criterion(outputs, targets)

    def fit(self, dataloader, epoch, writer):
        """Train for one epoch; logs every 4 batches and returns mean loss."""
        global global_step
        self.train()
        batch_loss = 0.
        epoch_loss = 0.
        batch_nums = 0
        if self.schedule is not None:
            self.schedule.step()
        print('----------epoch %d start train----------' % epoch)
        # The loader yields (points, graph, label); the precomputed graph is
        # ignored here because forward() rebuilds its own kNN graph.
        for batch_idx, (inputs, _, targets) in enumerate(dataloader):  # zhuyijie
            inputs = inputs.cuda(self.device_id)
            targets = targets.cuda(self.device_id)
            self.optimizer.zero_grad()
            outputs = self(inputs)
            losses = self.loss(outputs, targets)
            losses.backward()
            self.optimizer.step()
            batch_loss += losses.item()
            epoch_loss += losses.item()
            batch_nums += 1
            if (batch_idx + 1) % 4 == 0:
                print('[%d, %5d] loss %.3f' % (epoch, batch_idx, batch_loss / 4))
                global_step += 1
                # Guard the writer like KCNetSegment.fit does, so training
                # also works without a TensorBoard writer.
                if writer is not None:
                    writer.add_scalar('scalar/batch_loss_every4', batch_loss / 4, global_step)
                batch_loss = 0.
        print('-----------epoch %d end train-----------' % epoch)
        print('epoch %d loss %.3f' % (epoch, epoch_loss / batch_nums))
        return epoch_loss / batch_nums

    def score(self, dataloader):
        """Top-1 accuracy over `dataloader`; returns a fraction in [0, 1]."""
        self.eval()
        correct = 0.
        total = 0
        with torch.no_grad():
            for batch_idx, (inputs, _, targets) in enumerate(dataloader):  # zhuyijie
                inputs = inputs.cuda(self.device_id)
                targets = targets.cuda(self.device_id)
                outputs = self(inputs)
                _, predicted = torch.max(outputs.data, 1)
                total += targets.size(0)
                correct += (predicted == targets).sum().item()
        print('Accuracy of the network on the test images: %.2f %%' % (100.0 * correct / total))
        return correct / total
class AdaptiveKCNetClassify(nn.Module):
    """Variant of KCNetClassify that takes a precomputed kNN graph in
    forward() instead of rebuilding one per call; the dataloader supplies
    the graph alongside the points.

    Fix vs. original: `initialize_weights()` is now called *after* the
    submodules are constructed. The original called it first, when
    `self.modules()` contained no layers, so the custom Gaussian/BN init
    silently never ran. This also matches KCNetSegment's construction order.
    """

    def __init__(self, class_nums, device_id=0, initial_weights=True):
        super(AdaptiveKCNetClassify, self).__init__()
        self.class_nums = class_nums
        self.knn_points = 16
        self.device_id = device_id
        # Kernel correlation layer: 32 kernels of 16 points, sigma 0.005.
        self.kc = LoaclGeometricStructure(32, 16, 0.005)
        self.mlp1 = nn.Sequential(
            nn.Conv1d(32 + 3, 64, 1),
            nn.BatchNorm1d(64),
            nn.ReLU(True),
            nn.Conv1d(64, 64, 1),
            nn.BatchNorm1d(64),
            nn.ReLU(True)
        )
        self.mlp2 = nn.Sequential(
            nn.Conv1d(64, 64, 1),
            nn.BatchNorm1d(64),
            nn.ReLU(True),
            nn.Conv1d(64, 128, 1),
            nn.BatchNorm1d(128),
            nn.ReLU(True)
        )
        self.mlp3 = nn.Sequential(
            nn.Conv1d(192, 1024, 1),
            nn.BatchNorm1d(1024),
            nn.ReLU(True)
        )
        self.classify = nn.Sequential(
            nn.Linear(1024, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(True),
            nn.Linear(512, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Linear(256, class_nums)
        )
        # Initialize AFTER the layers exist so self.modules() can see them.
        if initial_weights:
            self.initialize_weights()
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.Adam(self.parameters(), weight_decay=1e-5)
        self.schedule = optim.lr_scheduler.StepLR(self.optimizer, 20, 0.5)
        self.cuda(device_id)

    def forward(self, points, knn_graph):
        """points + externally supplied kNN graph -> class logits. Column 0
        of the graph (each point's self-neighbour) is dropped before kernel
        correlation."""
        # knn_graph, _ = batch_knn(points, points.clone(), self.knn_points + 1)
        # knn_graph = adptive_knn(points,self.knn_points+1) # knn_points=16
        x = self.kc(points, knn_graph[:, :, 1:].contiguous())
        x = torch.cat([points, x], dim=1)
        x = self.mlp1(x)
        y = graph_max_pooling(x, knn_graph)
        x = self.mlp2(x)
        x = torch.cat([x, y], dim=1)
        x = self.mlp3(x)
        # Global max pool over the point dimension, then classify.
        x = F.max_pool1d(x, x.size(2), stride=1).squeeze(2)
        x = self.classify(x)
        return x

    def initialize_weights(self):
        """Gaussian(0, 0.01) init for conv/linear weights, unit/zero for BN."""
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                m.weight.data.normal_(0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def loss(self, outputs, targets):
        """Cross-entropy between logits and integer class targets."""
        return self.criterion(outputs, targets)

    def fit(self, dataloader, epoch, writer):
        """Train for one epoch; logs every 4 batches and returns mean loss."""
        # global writer
        global global_step
        self.train()
        batch_loss = 0.
        epoch_loss = 0.
        batch_nums = 0
        if self.schedule is not None:
            self.schedule.step()
        print('----------epoch %d start train----------' % epoch)
        for batch_idx, (inputs, graphs, targets) in enumerate(dataloader):  # zhuyijie
            inputs = inputs.cuda(self.device_id)
            # graphs = graphs.cuda(self.device_id) #zhuyijie
            # The CUDA kernels expect int32 graph indices on the GPU.
            graphs = graphs.to(torch.device('cuda:0'), dtype=torch.int)
            targets = targets.cuda(self.device_id)
            self.optimizer.zero_grad()
            outputs = self(inputs, graphs)  # zhuyijie
            losses = self.loss(outputs, targets)
            losses.backward()
            self.optimizer.step()
            batch_loss += losses.item()
            epoch_loss += losses.item()
            batch_nums += 1
            if (batch_idx + 1) % 4 == 0:
                print('[%d, %5d] loss %.3f' % (epoch, batch_idx, batch_loss / 4))
                global_step += 1
                # Guard the writer like KCNetSegment.fit does, so training
                # also works without a TensorBoard writer.
                if writer is not None:
                    writer.add_scalar('scalar/batch_loss_every4', batch_loss / 4, global_step)
                batch_loss = 0.
        print('-----------epoch %d end train-----------' % epoch)
        print('epoch %d loss %.3f' % (epoch, epoch_loss / batch_nums))
        return epoch_loss / batch_nums

    def score(self, dataloader):
        """Top-1 accuracy over `dataloader`; returns a fraction in [0, 1]."""
        self.eval()
        correct = 0.
        total = 0
        with torch.no_grad():
            for batch_idx, (inputs, graphs, targets) in enumerate(dataloader):
                inputs = inputs.cuda(self.device_id)
                # graphs = graphs.cuda(self.device_id) #zhuyijie
                graphs = graphs.to(torch.device('cuda:0'), dtype=torch.int)
                targets = targets.cuda(self.device_id)
                outputs = self(inputs, graphs)
                _, predicted = torch.max(outputs.data, 1)
                total += targets.size(0)
                correct += (predicted == targets).sum().item()
        print('Accuracy of the network on the test images: %.2f %%' % (100.0 * correct / total))
        return correct / total
class KCNetSegment(nn.Module):
    """KCNet part-segmentation network for ShapeNet-Part: per-point labels
    over 50 part classes across 16 object categories. Concatenates features
    from every stage plus a one-hot category code before the final MLP, and
    reports both accuracy and per-category/instance mIoU."""

    def __init__(self, class_nums, category_nums, device_id=0, initial_weights=True):
        super(KCNetSegment, self).__init__()
        print('use KCNetSegment!!!')
        self.class_nums = class_nums
        self.category_nums = category_nums
        self.knn_points = 18
        self.device_id = device_id
        # ShapeNet-Part label ranges: which of the 50 part labels belong to
        # each of the 16 object categories.
        self.seg_classes = {
            'Airplane': [0, 1, 2, 3],
            'Bag': [4, 5],
            'Cap': [6, 7],
            'Car': [8, 9, 10, 11],
            'Chair': [12, 13, 14, 15],
            'Earphone': [16, 17, 18],
            'Guitar': [19, 20, 21],
            'Knife': [22, 23],
            'Lamp': [24, 25, 26, 27],
            'Laptop': [28, 29],
            'Motorbike': [30, 31, 32, 33, 34, 35],
            'Mug': [36, 37],
            'Pistol': [38, 39, 40],
            'Rocket': [41, 42, 43],
            'Skateboard': [44, 45, 46],
            'Table': [47, 48, 49]
        }
        self.seg_label_to_cat = {}  # {0:Airplane, 1:Airplane, ...49:Table}
        for cat in self.seg_classes.keys():
            for label in self.seg_classes[cat]:
                self.seg_label_to_cat[label] = cat
        # Kernel correlation layer: 16 kernels of 18 points, sigma 0.005.
        self.kc = LoaclGeometricStructure(16, 18, 0.005)
        self.mlp1 = nn.Sequential(
            nn.Conv1d(3 + 16, 64, 1),
            nn.BatchNorm1d(64),
            nn.ReLU(True)
        )
        self.mlp2 = nn.Sequential(
            nn.Conv1d(64, 64, 1),
            nn.BatchNorm1d(64),
            nn.ReLU(True)
        )
        self.mlp3 = nn.Sequential(
            nn.Conv1d(64, 128, 1),
            nn.BatchNorm1d(128),
            nn.ReLU(True)
        )
        self.mlp4 = nn.Sequential(
            nn.Conv1d(128, 128, 1),
            nn.BatchNorm1d(128),
            nn.ReLU(True)
        )
        self.mlp5 = nn.Sequential(
            nn.Conv1d(128, 512, 1),
            nn.BatchNorm1d(512),
            nn.ReLU(True)
        )
        self.mlp6 = nn.Sequential(
            nn.Conv1d(512, 1024, 1),
            nn.BatchNorm1d(1024),
            nn.ReLU(True)
        )
        # Final head input = concat of all stage features + one-hot category.
        self.mlp7 = nn.Sequential(
            nn.Conv1d(3 + 16 + 64 + 64 + 128 + 128 + 512 + 1024 + category_nums, 512, 1),
            nn.BatchNorm1d(512),
            nn.ReLU(True),
            nn.Conv1d(512, 256, 1),
            nn.BatchNorm1d(256),
            nn.ReLU(True),
            nn.Dropout(0.3),
            nn.Conv1d(256, class_nums, 1)
        )
        if initial_weights:
            self.initialize_weights()
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.Adam(self.parameters(), weight_decay=1e-5)
        self.schedule = optim.lr_scheduler.StepLR(self.optimizer, 20, 0.5)
        self.cuda(device_id)

    def forward(self, points, labels):
        """points + per-cloud category labels -> per-point logits.
        The kNN graph is rebuilt per call; +1 neighbours because each
        point's nearest neighbour is itself (column 0, dropped)."""
        knn_graph, _ = batch_knn(points, points.clone(), self.knn_points + 1)
        x1 = self.kc(points, knn_graph[:, :, 1:].contiguous())
        x1 = torch.cat([points, x1], dim=1)
        x2 = self.mlp1(x1)
        x3 = self.mlp2(x2)
        x4 = self.mlp3(x3)
        x5 = graph_max_pooling(x4, knn_graph)
        x5 = self.mlp4(x5)
        x6 = self.mlp5(x5)
        x7 = graph_max_pooling(x6, knn_graph)
        x7 = self.mlp6(x7)
        # Global feature, broadcast back to every point.
        x7 = F.max_pool1d(x7, x7.size(2), stride=1)
        x7 = x7.repeat([1, 1, knn_graph.size(1)])
        # One-hot encode the object category and tile it across points.
        index = labels.unsqueeze(1).repeat([1, knn_graph.size(1)]).unsqueeze(1)
        one_hot = torch.zeros([knn_graph.size(0), self.category_nums, knn_graph.size(1)])
        one_hot = one_hot.cuda(self.device_id)
        one_hot = one_hot.scatter_(1, index, 1)
        # Skip-concatenate every intermediate feature for the final head.
        x = torch.cat([x1, x2, x3, x4, x5, x6, x7, one_hot], dim=1)
        x = self.mlp7(x)
        return x

    def initialize_weights(self):
        """Gaussian(0, 0.01) init for conv/linear weights, unit/zero for BN."""
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                m.weight.data.normal_(0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def loss(self, outputs, targets):
        """Cross-entropy between per-point logits and per-point targets."""
        return self.criterion(outputs, targets)

    def fit(self, dataloader, epoch, writer):  # zhuyijie
        """Train for one epoch; logs every 8 batches and returns mean loss."""
        # global writer
        global global_step
        self.train()
        batch_loss = 0.
        epoch_loss = 0.
        batch_nums = 0
        if self.schedule is not None:
            self.schedule.step()
        print('----------epoch %d start train----------' % epoch)
        # Loader yields (points, graph, part-targets, category-labels); the
        # precomputed graph is ignored since forward() rebuilds its own.
        for batch_idx, (inputs, _, targets, labels) in enumerate(dataloader):
            inputs = inputs.cuda(self.device_id)
            targets = targets.cuda(self.device_id)
            labels = labels.cuda(self.device_id)
            self.optimizer.zero_grad()
            outputs = self(inputs, labels)
            losses = self.loss(outputs, targets)
            losses.backward()
            self.optimizer.step()
            batch_loss += losses.item()
            epoch_loss += losses.item()
            batch_nums += 1
            if (batch_idx + 1) % 8 == 0:
                print('[%d, %5d] loss %.3f' % (epoch, batch_idx, batch_loss / 8))
                global_step += 1
                # print('global_step={}'.format(global_step))
                if writer is not None:
                    writer.add_scalar('scalar/batch_loss_every8', batch_loss / 8, global_step)
                batch_loss = 0.
        print('-----------epoch %d end train-----------' % epoch)
        print('epoch %d loss %.3f' % (epoch, epoch_loss / batch_nums))
        return epoch_loss / batch_nums

    def score(self, dataloader):
        """Evaluate per-point accuracy and mIoU; returns the accuracy."""
        self.eval()
        correct = 0.
        total = 0
        # Per-category lists of per-shape IoUs, filled by compute_miou().
        shape_ious = {cat: [] for cat in self.seg_classes.keys()}
        with torch.no_grad():
            for batch_idx, (inputs, _, targets, labels) in enumerate(dataloader):
                inputs = inputs.cuda(self.device_id)
                targets = targets.cuda(self.device_id)
                labels = labels.cuda(self.device_id)
                outputs = self(inputs, labels)
                _, predicted = torch.max(outputs.data, 1)
                total += targets.size(0) * targets.size(1)
                correct += (predicted == targets).sum().item()
                self.compute_miou(predicted.cpu().numpy(), targets.cpu().numpy(), shape_ious)
                # debugPrint(shape_ious['Airplane'])
        ret = self.get_miou(shape_ious)  # {'cls':value,'ins':value}
        # NOTE(review): the format string labels appear swapped — ret['ins']
        # is printed under "cls_miou" and vice versa; verify against usage.
        print('cls_miou = {:4f}, ins_miou = {:4f}'.format(ret['ins'], ret['cls']))
        print('Accuracy of the network: %.2f %%' % (100.0 * correct / total))
        return correct / total

    def compute_miou(self, pred_label, true_label, shape_ious):
        """Accumulate per-shape mean part IoUs into `shape_ious`.

        pred_label: numpy array, (b,n), int
        true_label: numpy array, (b,n), int
        """
        batch_size = true_label.shape[0]
        for bi in range(batch_size):
            segp = pred_label[bi, :]
            segl = true_label[bi, :]
            # The shape's category is determined by its first ground-truth label.
            cat = self.seg_label_to_cat[segl[0]]
            part_ious = [0.0 for _ in range(len(self.seg_classes[cat]))]
            for l in self.seg_classes[cat]:
                if (np.sum(segl == l) == 0) and (np.sum(segp == l) == 0):  # part is not present, no prediction as well
                    iou = 1.0
                else:
                    iou = np.sum((segl == l) & (segp == l)) / float(np.sum((segl == l) | (segp == l)))
                part_ious[l - self.seg_classes[cat][0]] = iou
            shape_ious[cat].append(np.mean(part_ious))

    def get_miou(self, shape_ious):
        """Reduce accumulated IoUs to per-category means plus 'cls' (mean of
        category means) and 'ins' (mean over all shapes). Mutates and copies
        `shape_ious` into the returned dict."""
        all_shape_ious = []
        for cat in shape_ious.keys():
            for iou in shape_ious[cat]:
                all_shape_ious.append(iou)
            shape_ious[cat] = np.mean(shape_ious[cat]) if len(shape_ious[cat]) > 0 else 0
        cls_miou = np.mean(list(shape_ious.values()))
        ins_miou = np.mean(all_shape_ious)
        ret = dict(shape_ious)
        ret['cls'] = cls_miou
        ret['ins'] = ins_miou
        return ret
class AdaptiveKCNetSegment(nn.Module):
    """Variant of KCNetSegment that consumes a precomputed kNN graph from
    the dataloader instead of rebuilding one in forward(). Unlike
    KCNetSegment, score() reports only accuracy (no mIoU)."""

    def __init__(self, class_nums, category_nums, device_id=0, initial_weights=True):
        super(AdaptiveKCNetSegment, self).__init__()
        self.class_nums = class_nums
        self.category_nums = category_nums
        self.knn_points = 18
        self.device_id = device_id
        # Kernel correlation layer: 16 kernels of 18 points, sigma 0.005.
        self.kc = LoaclGeometricStructure(16, 18, 0.005)
        self.mlp1 = nn.Sequential(
            nn.Conv1d(3 + 16, 64, 1),
            nn.BatchNorm1d(64),
            nn.ReLU(True)
        )
        self.mlp2 = nn.Sequential(
            nn.Conv1d(64, 64, 1),
            nn.BatchNorm1d(64),
            nn.ReLU(True)
        )
        self.mlp3 = nn.Sequential(
            nn.Conv1d(64, 128, 1),
            nn.BatchNorm1d(128),
            nn.ReLU(True)
        )
        self.mlp4 = nn.Sequential(
            nn.Conv1d(128, 128, 1),
            nn.BatchNorm1d(128),
            nn.ReLU(True)
        )
        self.mlp5 = nn.Sequential(
            nn.Conv1d(128, 512, 1),
            nn.BatchNorm1d(512),
            nn.ReLU(True)
        )
        self.mlp6 = nn.Sequential(
            nn.Conv1d(512, 1024, 1),
            nn.BatchNorm1d(1024),
            nn.ReLU(True)
        )
        # Final head input = concat of all stage features + one-hot category.
        self.mlp7 = nn.Sequential(
            nn.Conv1d(3 + 16 + 64 + 64 + 128 + 128 + 512 + 1024 + category_nums, 512, 1),
            nn.BatchNorm1d(512),
            nn.ReLU(True),
            nn.Conv1d(512, 256, 1),
            nn.BatchNorm1d(256),
            nn.ReLU(True),
            nn.Dropout(0.3),
            nn.Conv1d(256, class_nums, 1)
        )
        if initial_weights:
            self.initialize_weights()
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.Adam(self.parameters(), weight_decay=1e-5)
        self.schedule = optim.lr_scheduler.StepLR(self.optimizer, 20, 0.5)
        self.cuda(device_id)

    def forward(self, points, knn_graph, labels):
        """points + externally supplied kNN graph + category labels ->
        per-point logits. Column 0 of the graph (self-neighbour) is dropped
        before kernel correlation."""
        # knn_graph, _ = batch_knn(points, points.clone(), self.knn_points + 1)
        x1 = self.kc(points, knn_graph[:, :, 1:].contiguous())
        x1 = torch.cat([points, x1], dim=1)
        x2 = self.mlp1(x1)
        x3 = self.mlp2(x2)
        x4 = self.mlp3(x3)
        x5 = graph_max_pooling(x4, knn_graph)
        x5 = self.mlp4(x5)
        x6 = self.mlp5(x5)
        x7 = graph_max_pooling(x6, knn_graph)
        x7 = self.mlp6(x7)
        # Global feature, broadcast back to every point.
        x7 = F.max_pool1d(x7, x7.size(2), stride=1)
        x7 = x7.repeat([1, 1, knn_graph.size(1)])
        # One-hot encode the object category and tile it across points.
        index = labels.unsqueeze(1).repeat([1, knn_graph.size(1)]).unsqueeze(1)
        one_hot = torch.zeros([knn_graph.size(0), self.category_nums, knn_graph.size(1)])
        one_hot = one_hot.cuda(self.device_id)
        one_hot = one_hot.scatter_(1, index, 1)
        # Skip-concatenate every intermediate feature for the final head.
        x = torch.cat([x1, x2, x3, x4, x5, x6, x7, one_hot], dim=1)
        x = self.mlp7(x)
        return x

    def initialize_weights(self):
        """Gaussian(0, 0.01) init for conv/linear weights, unit/zero for BN."""
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                m.weight.data.normal_(0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def loss(self, outputs, targets):
        """Cross-entropy between per-point logits and per-point targets."""
        return self.criterion(outputs, targets)

    def fit(self, dataloader, epoch, writer):  # zhuyijie
        """Train for one epoch; logs every 4 batches and returns mean loss."""
        global global_step
        self.train()
        batch_loss = 0.
        epoch_loss = 0.
        batch_nums = 0
        if self.schedule is not None:
            self.schedule.step()
        print('----------epoch %d start train----------' % epoch)
        for batch_idx, (inputs, graphs, targets, labels) in enumerate(dataloader):
            inputs = inputs.cuda(self.device_id)
            # The CUDA kernels expect int32 graph indices on the GPU.
            graphs = graphs.to(torch.device('cuda:0'), dtype=torch.int)  # zhuyijie
            targets = targets.cuda(self.device_id)
            labels = labels.cuda(self.device_id)
            self.optimizer.zero_grad()
            outputs = self(inputs, graphs, labels)
            losses = self.loss(outputs, targets)
            losses.backward()
            self.optimizer.step()
            batch_loss += losses.item()
            epoch_loss += losses.item()
            batch_nums += 1
            if (batch_idx + 1) % 4 == 0:
                print('[%d, %5d] loss %.3f' % (epoch, batch_idx, batch_loss / 4))
                global_step += 1
                # print('global_step={}'.format(global_step))
                writer.add_scalar('scalar/batch_loss_every4', batch_loss / 4, global_step)
                batch_loss = 0.
        print('-----------epoch %d end train-----------' % epoch)
        print('epoch %d loss %.3f' % (epoch, epoch_loss / batch_nums))
        return epoch_loss / batch_nums

    def score(self, dataloader):
        """Per-point accuracy over `dataloader`; returns a fraction in [0, 1]."""
        self.eval()
        correct = 0.
        total = 0
        with torch.no_grad():
            for batch_idx, (inputs, graphs, targets, labels) in enumerate(dataloader):
                inputs = inputs.cuda(self.device_id)
                graphs = graphs.to(torch.device('cuda:0'), dtype=torch.int)  # zhuyijie
                targets = targets.cuda(self.device_id)
                labels = labels.cuda(self.device_id)
                outputs = self(inputs, graphs, labels)
                _, predicted = torch.max(outputs.data, 1)
                total += targets.size(0) * targets.size(1)
                correct += (predicted == targets).sum().item()
        print('Accuracy of the network on the test images: %.2f %%' % (100.0 * correct / total))
        return correct / total
# 2019.10.30
class GeoNetSegment(nn.Module):
    """Geo-CNN part-segmentation network for ShapeNet-style point clouds.

    Predicts one of `class_nums` part labels for every point, conditioned on
    the shape's category label (expanded to a one-hot vector of width
    `category_nums` and concatenated to the global feature).
    """
    def __init__(self, input_channels, class_nums=50, category_nums=16, device_id=0, initial_weights=True):
        """
        input_channels: per-point input channel count (xyz -> 3).
        class_nums: total number of part labels across all categories.
        category_nums: number of shape categories (one-hot width).
        device_id: CUDA device index the model and batches are moved to.
        initial_weights: apply the custom normal(0, 0.01) initialization.
        """
        super(GeoNetSegment, self).__init__()
        self.input_channels = input_channels
        self.class_nums = class_nums
        self.category_nums = category_nums
        self.knn_points = 18
        self.device_id = device_id
        # ShapeNet-Part convention: each category owns a fixed contiguous
        # range of the 50 part labels.
        self.seg_classes = {
            'Airplane': [0, 1, 2, 3],
            'Bag': [4, 5],
            'Cap': [6, 7],
            'Car': [8, 9, 10, 11],
            'Chair': [12, 13, 14, 15],
            'Earphone': [16, 17, 18],
            'Guitar': [19, 20, 21],
            'Knife': [22, 23],
            'Lamp': [24, 25, 26, 27],
            'Laptop': [28, 29],
            'Motorbike': [30, 31, 32, 33, 34, 35],
            'Mug': [36, 37],
            'Pistol': [38, 39, 40],
            'Rocket': [41, 42, 43],
            'Skateboard': [44, 45, 46],
            'Table': [47, 48, 49]
        }
        self.seg_label_to_cat = {}  # {0:Airplane, 1:Airplane, ...49:Table}
        for cat in self.seg_classes.keys():
            for label in self.seg_classes[cat]:
                self.seg_label_to_cat[label] = cat
        # Encoder: widening stack of Geoconv layers over the raw xyz.
        self.geo1 = Geoconv(3, 64, 32, 0.05, 0.15, bn=True)
        self.geo2 = Geoconv(64, 64, 32, 0.1, 0.2, bn=True)
        self.geo3 = Geoconv(64, 64, 32, 0.15, 0.3, bn=True)
        self.geo4 = Geoconv(64, 128, 64, 0.2, 0.4, bn=True)
        self.geo5 = Geoconv(128, 1024, 128, 0.2, 0.4, bn=True)
        # Decoder: consumes [point_feat(64) | global(1024) | one-hot category].
        self.geo6 = Geoconv(1024 + category_nums + 64, 512, 32, 0.15, 0.3, bn=True)
        self.geo7 = Geoconv(512, 256, 32, 0.1, 0.2, bn=True)
        self.geo8 = Geoconv(256, 128, 32, 0.05, 0.1, bn=True)
        self.geo9 = Geoconv(128, 128, 32, 0.05, 0.1, bn=True)
        self.classify = nn.Sequential(
            nn.Dropout(0.5),
            nn.Conv1d(128, class_nums, 1)
        )
        # BUGFIX: initialize_weights() used to run *before* any submodule was
        # created, so it never touched a single layer; run it after they exist.
        if initial_weights:
            self.initialize_weights()
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.Adam(self.parameters(), weight_decay=1e-5)
        self.schedule = optim.lr_scheduler.StepLR(self.optimizer, 10, 0.6)
        self.cuda(device_id)
    def forward(self, point_cloud, labels):  # point_cloud: (B,C,N), labels: (B,)
        """Return per-point part logits of shape (b, class_nums, n)."""
        point_cloud = point_cloud.transpose(1, 2)  # (b,n,c)
        xyz = point_cloud
        b, n, c = xyz.size()
        y = self.geo1(point_cloud, xyz)  # ---->(b,n,64)
        y = self.geo2(y, xyz)  # ---->(b,n,64)
        y = self.geo3(y, xyz)  # ---->(b,n,64)
        point_feat = y  # per-point feature kept for the skip connection
        y = self.geo4(y, xyz)  # ---->(b,n,128)
        y = self.geo5(y, xyz)  # ---->(b,n,1024)
        y = torch.max(y, 1, keepdim=True)[0]  # global feature (b,1,1024)
        # One-hot encode the shape category and append it to the global feature.
        one_hot = torch.zeros([b, self.category_nums], device=self.device_id).scatter_(1, labels.view(-1, 1), 1)  # (b,c)
        y = torch.cat([y, one_hot.unsqueeze(1)], dim=2)  # -->(b,1,1024+category_nums)
        y = torch.cat([point_feat, y.repeat([1, n, 1])], dim=2)  # -->(b,n,1024+category_nums+64)
        y = self.geo6(y, xyz)  # ---->(b,n,512)
        y = self.geo7(y, xyz)  # ---->(b,n,256)
        y = self.geo8(y, xyz)  # ---->(b,n,128)
        y = self.geo9(y, xyz)  # ---->(b,n,128)
        y = self.classify(y.transpose(1, 2))  # -->(b,class_nums,n)
        return y  # (b,50,2048)
    def loss(self, outputs, targets):
        """Delegate to the configured criterion (CrossEntropyLoss)."""
        return self.criterion(outputs, targets)
    def fit(self, dataloader, epoch, writer):
        """Train for one epoch; log mean loss every 8 batches; return epoch mean loss."""
        global global_step
        self.train()
        batch_loss = 0.
        epoch_loss = 0.
        batch_nums = 0
        # Pre-1.1 PyTorch convention: scheduler stepped once per epoch, up front.
        if self.schedule is not None:
            self.schedule.step()
        print('----------epoch %d start train----------' % epoch)
        for batch_idx, (inputs, _, targets, labels) in enumerate(dataloader):
            inputs = inputs.cuda(self.device_id)  # (b,3,2048)
            targets = targets.cuda(self.device_id)  # (b,2048) per-point labels
            labels = labels.cuda(self.device_id)  # (b,) category labels
            self.optimizer.zero_grad()
            outputs = self(inputs, labels)
            losses = self.loss(outputs, targets)
            losses.backward()
            self.optimizer.step()
            batch_loss += losses.item()
            epoch_loss += losses.item()
            batch_nums += 1
            if (batch_idx + 1) % 8 == 0:
                print('[%d, %5d] loss %.3f' % (epoch, batch_idx, batch_loss / 8))
                global_step += 1
                if writer is not None:
                    writer.add_scalar('scalar/batch_loss_every8', batch_loss / 8, global_step)
                batch_loss = 0.
        print('-----------epoch %d end train-----------' % epoch)
        print('epoch %d loss %.3f' % (epoch, epoch_loss / batch_nums))
        return epoch_loss / batch_nums
    def score(self, dataloader):
        """Evaluate point-wise accuracy and class/instance mIoU; return accuracy in [0, 1]."""
        self.eval()
        correct = 0.
        total = 0
        shape_ious = {cat: [] for cat in self.seg_classes.keys()}
        with torch.no_grad():
            for batch_idx, (inputs, _, targets, labels) in enumerate(dataloader):
                inputs = inputs.cuda(self.device_id)
                targets = targets.cuda(self.device_id)
                labels = labels.cuda(self.device_id)
                outputs = self(inputs, labels)
                _, predicted = torch.max(outputs.data, 1)
                total += targets.size(0) * targets.size(1)
                correct += (predicted == targets).sum().item()
                # BUGFIX: was `.cup()` (AttributeError) instead of `.cpu()`.
                self.compute_miou(predicted.cpu().numpy(), targets.cpu().numpy(), shape_ious)
        ret = self.get_miou(shape_ious)  # {'cls':value, 'ins':value}
        # BUGFIX: printed undefined name `res` instead of `ret`.
        print('cls_miou = {}, ins_miou = {}'.format(ret['cls'], ret['ins']))
        print('Accuracy of the network: %.2f %%' % (100.0 * correct / total))
        return correct / total
    def initialize_weights(self):
        """Normal(0, 0.01) init for conv/linear weights; BN to identity."""
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                m.weight.data.normal_(0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def compute_miou(self, predicted, targets, shape_ious):
        """
        predicted: numpy array, (b,n), int
        targets: numpy array, (b,n), int
        Appends one mean part-IoU per shape to shape_ious[category].
        """
        batch_size = targets.shape[0]
        for bi in range(batch_size):
            # BUGFIX: body referenced undefined names `pred_label`/`true_label`
            # (the parameters are `predicted`/`targets`) -> NameError.
            segp = predicted[bi, :]
            segl = targets[bi, :]
            cat = self.seg_label_to_cat[segl[0]]
            part_ious = [0.0 for _ in range(len(self.seg_classes[cat]))]
            for l in self.seg_classes[cat]:
                if (np.sum(segl == l) == 0) and (np.sum(segp == l) == 0):  # part is not present, no prediction as well
                    iou = 1.0
                else:
                    iou = np.sum((segl == l) & (segp == l)) / float(np.sum((segl == l) | (segp == l)))
                part_ious[l - self.seg_classes[cat][0]] = iou
            shape_ious[cat].append(np.mean(part_ious))
    def get_miou(self, shape_ious):
        """Collapse per-shape IoUs into {'cls': category mIoU, 'ins': instance mIoU} plus per-category means."""
        all_shape_ious = []
        for cat in shape_ious.keys():
            for iou in shape_ious[cat]:
                all_shape_ious.append(iou)
            # Guard against categories never seen in the evaluation set
            # (consistent with the other segmentation classes in this file).
            shape_ious[cat] = np.mean(shape_ious[cat]) if len(shape_ious[cat]) > 0 else 0
        # BUGFIX: np.mean over a dict_values view fails on Python 3; listify.
        cls_miou = np.mean(list(shape_ious.values()))
        ins_miou = np.mean(all_shape_ious)
        ret = dict(shape_ious)
        ret['cls'] = cls_miou
        ret['ins'] = ins_miou
        return ret
class TestGeoNetSegment(nn.Module):
    """Experimental Geo-CNN variant for part segmentation.

    Interleaves Fc and Geoconv layers and feeds the classifier a
    concatenation of skip features [xyz | geo1 | geo3 | global | category
    one-hot] instead of a pure decoder stack.
    """
    def __init__(self, input_channels, class_nums=50, category_nums=16, device_id=0, initial_weights=True):
        """
        input_channels: per-point input channel count (xyz -> 3).
        class_nums: total number of part labels across all categories.
        category_nums: number of shape categories (one-hot width).
        device_id: CUDA device index the model and batches are moved to.
        initial_weights: apply the custom normal(0, 0.01) initialization.
        """
        super(TestGeoNetSegment, self).__init__()
        self.name = 'TestGeoNetSegment'
        print(self.name)
        self.input_channels = input_channels
        self.class_nums = class_nums
        self.category_nums = category_nums
        self.knn_points = 18
        self.device_id = device_id
        # ShapeNet-Part convention: each category owns a fixed contiguous
        # range of the 50 part labels.
        self.seg_classes = {
            'Airplane': [0, 1, 2, 3],
            'Bag': [4, 5],
            'Cap': [6, 7],
            'Car': [8, 9, 10, 11],
            'Chair': [12, 13, 14, 15],
            'Earphone': [16, 17, 18],
            'Guitar': [19, 20, 21],
            'Knife': [22, 23],
            'Lamp': [24, 25, 26, 27],
            'Laptop': [28, 29],
            'Motorbike': [30, 31, 32, 33, 34, 35],
            'Mug': [36, 37],
            'Pistol': [38, 39, 40],
            'Rocket': [41, 42, 43],
            'Skateboard': [44, 45, 46],
            'Table': [47, 48, 49]
        }
        self.seg_label_to_cat = {}  # {0:Airplane, 1:Airplane, ...49:Table}
        for cat in self.seg_classes.keys():
            for label in self.seg_classes[cat]:
                self.seg_label_to_cat[label] = cat
        self.FC2 = Fc(input_channels, [64], bn='BN', activation_fn='relu')
        self.FC3 = Fc(128, [256], bn='BN', activation_fn='relu')
        self.FC4 = Fc(512, [1024], bn='BN', activation_fn='relu')
        self.geo1 = Geoconv(64, 128, 64, 0.1, 0.2, bn=True)
        self.geo3 = Geoconv(256, 512, 64, 0.2, 0.3, bn=True)
        self.classify = nn.Sequential(
            nn.Conv1d(3 + 128 + 512 + 1024 + category_nums, 512, 1, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Conv1d(512, class_nums, 1, bias=True)
        )
        # BUGFIX: initialize_weights() used to run *before* any submodule was
        # created, so it never touched a single layer; run it after they exist.
        if initial_weights:
            self.initialize_weights()
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.Adam(self.parameters(), weight_decay=1e-5)
        self.schedule = optim.lr_scheduler.StepLR(self.optimizer, 10, 0.6)
        self.cuda(device_id)
    def forward(self, point_cloud, labels):  # point_cloud: (B,C,N), labels: (B,)
        """Return per-point part logits of shape (b, class_nums, n)."""
        point_cloud = point_cloud.transpose(1, 2)  # (b,n,c)
        xyz = point_cloud
        b, n, c = xyz.size()
        point_cloud = self.FC2(point_cloud)  # (b,n,64)
        y1 = self.geo1(point_cloud, xyz)  # ---->(b,n,128)  (fixed comment: geo1 outputs 128, not 64)
        y2 = self.FC3(y1)  # (b,n,256)
        y3 = self.geo3(y2, xyz)  # ---->(b,n,512)
        y4 = self.FC4(y3)  # (b,n,1024)
        y = torch.max(y4, 1, keepdim=True)[0]  # global feature (b,1,1024)
        # One-hot encode the shape category for the classifier input.
        one_hot = torch.zeros([b, self.category_nums], device=self.device_id).scatter_(1, labels.view(-1, 1), 1)  # (b,c)
        # Concatenate all skip features per point.
        y = torch.cat([xyz, y1, y3, y.repeat([1, n, 1]), one_hot.unsqueeze(1).repeat([1, n, 1])], dim=2)  # -->(b,n,3+128+512+1024+16)
        y = self.classify(y.transpose(1, 2))  # -->(b,50,n)
        return y  # (b,50,2048)
    def loss(self, outputs, targets):
        """Delegate to the configured criterion (CrossEntropyLoss)."""
        return self.criterion(outputs, targets)
    def fit(self, dataloader, epoch, writer):
        """Train for one epoch; log mean loss every 8 batches; return epoch mean loss."""
        global global_step
        self.train()
        batch_loss = 0.
        epoch_loss = 0.
        batch_nums = 0
        # Pre-1.1 PyTorch convention: scheduler stepped once per epoch, up front.
        if self.schedule is not None:
            self.schedule.step()
        print('----------epoch %d start train----------' % epoch)
        for batch_idx, (inputs, _, targets, labels) in enumerate(dataloader):
            inputs = inputs.cuda(self.device_id)  # (b,3,2048)
            targets = targets.cuda(self.device_id)  # (b,2048) per-point labels
            labels = labels.cuda(self.device_id)  # (b,) category labels
            self.optimizer.zero_grad()
            outputs = self(inputs, labels)
            losses = self.loss(outputs, targets)
            losses.backward()
            self.optimizer.step()
            batch_loss += losses.item()
            epoch_loss += losses.item()
            batch_nums += 1
            if (batch_idx + 1) % 8 == 0:
                print('[%d, %5d] loss %.3f' % (epoch, batch_idx, batch_loss / 8))
                global_step += 1
                if writer is not None:
                    writer.add_scalar('scalar/batch_loss_every8', batch_loss / 8, global_step)
                batch_loss = 0.
        print('-----------epoch %d end train-----------' % epoch)
        print('epoch %d loss %.3f' % (epoch, epoch_loss / batch_nums))
        return epoch_loss / batch_nums
    def score(self, dataloader, is_save=False):
        """Evaluate accuracy and mIoU; optionally append the mIoU dict to a
        per-model text file when `is_save` is True. Returns accuracy in [0, 1]."""
        self.eval()
        correct = 0.
        total = 0
        shape_ious = {cat: [] for cat in self.seg_classes.keys()}
        with torch.no_grad():
            for batch_idx, (inputs, _, targets, labels) in enumerate(dataloader):
                inputs = inputs.cuda(self.device_id)
                targets = targets.cuda(self.device_id)
                labels = labels.cuda(self.device_id)
                outputs = self(inputs, labels)
                _, predicted = torch.max(outputs.data, 1)
                total += targets.size(0) * targets.size(1)
                correct += (predicted == targets).sum().item()
                self.compute_miou(predicted.cpu().numpy(), targets.cpu().numpy(), shape_ious)
        ret = self.get_miou(shape_ious)  # {'cls':value, 'ins':value}
        print('cls_miou = {:4f}, ins_miou = {:4f}'.format(ret['cls'], ret['ins']))
        print('Accuracy of the network: %.2f %%' % (100.0 * correct / total))
        if is_save:
            with open('./{}_miou.txt'.format(self.name), 'a+') as file:
                file.writelines(json.dumps(ret) + '\n')
        return correct / total
    def initialize_weights(self):
        """Normal(0, 0.01) init for conv/linear weights; BN to identity."""
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                m.weight.data.normal_(0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def compute_miou(self, pred_label, true_label, shape_ious):
        """
        pred_label: numpy array, (b,n), int
        true_label: numpy array, (b,n), int
        Appends one mean part-IoU per shape to shape_ious[category].
        """
        batch_size = true_label.shape[0]
        for bi in range(batch_size):
            segp = pred_label[bi, :]
            segl = true_label[bi, :]
            # The shape's category is determined by any of its ground-truth labels.
            cat = self.seg_label_to_cat[segl[0]]
            part_ious = [0.0 for _ in range(len(self.seg_classes[cat]))]
            for l in self.seg_classes[cat]:
                if (np.sum(segl == l) == 0) and (np.sum(segp == l) == 0):  # part is not present, no prediction as well
                    iou = 1.0
                else:
                    iou = np.sum((segl == l) & (segp == l)) / float(np.sum((segl == l) | (segp == l)))
                part_ious[l - self.seg_classes[cat][0]] = iou
            shape_ious[cat].append(np.mean(part_ious))
    def get_miou(self, shape_ious):
        """Collapse per-shape IoUs into {'cls': category mIoU, 'ins': instance mIoU} plus per-category means."""
        all_shape_ious = []
        for cat in shape_ious.keys():
            for iou in shape_ious[cat]:
                all_shape_ious.append(iou)
            # Categories never seen in the evaluation set contribute 0.
            shape_ious[cat] = np.mean(shape_ious[cat]) if len(shape_ious[cat]) > 0 else 0
        cls_miou = np.mean(list(shape_ious.values()))
        ins_miou = np.mean(all_shape_ious)
        ret = dict(shape_ious)
        ret['cls'] = cls_miou
        ret['ins'] = ins_miou
        return ret
# 11.2
class TestKNNGeoNetSegment(nn.Module):
    """Geo-CNN segmentation variant with a kNN feature branch and an
    ARPE-style (absolute + relative position) point-encoding branch.

    Tracks the best instance-mIoU seen in `score` and checkpoints the whole
    model when it improves.
    """
    def __init__(self, input_channels, class_nums=50, category_nums=16, device_id=0, initial_weights=True):
        """
        input_channels: per-point input channel count (xyz -> 3).
        class_nums: total number of part labels across all categories.
        category_nums: number of shape categories (one-hot width).
        device_id: CUDA device index the model and batches are moved to.
        initial_weights: apply the custom normal(0, 0.01) initialization.
        """
        super(TestKNNGeoNetSegment, self).__init__()
        self.name = 'TestKNNGeoNetSegment'
        print(self.name)
        self.input_channels = input_channels
        self.class_nums = class_nums
        self.category_nums = category_nums
        self.knn_points = 16
        self.device_id = device_id
        self.best_score = 0.0  # best instance-mIoU seen so far (see score)
        # ShapeNet-Part convention: each category owns a fixed contiguous
        # range of the 50 part labels.
        self.seg_classes = {
            'Airplane': [0, 1, 2, 3],
            'Bag': [4, 5],
            'Cap': [6, 7],
            'Car': [8, 9, 10, 11],
            'Chair': [12, 13, 14, 15],
            'Earphone': [16, 17, 18],
            'Guitar': [19, 20, 21],
            'Knife': [22, 23],
            'Lamp': [24, 25, 26, 27],
            'Laptop': [28, 29],
            'Motorbike': [30, 31, 32, 33, 34, 35],
            'Mug': [36, 37],
            'Pistol': [38, 39, 40],
            'Rocket': [41, 42, 43],
            'Skateboard': [44, 45, 46],
            'Table': [47, 48, 49]
        }
        self.seg_label_to_cat = {}  # {0:Airplane, 1:Airplane, ...49:Table}
        for cat in self.seg_classes.keys():
            for label in self.seg_classes[cat]:
                self.seg_label_to_cat[label] = cat
        self.FC1 = Fc(input_channels, [32, 128], input_dim=4, bn='GN', activation_fn='relu')
        self.FC2_1 = Fc(input_channels, [32], bn='GN', activation_fn='relu')
        self.FC2_2 = Fc(32, [64], bn='GN', activation_fn='relu')
        self.FC3 = Fc(128, [128, 256], bn='GN', activation_fn='relu')
        self.FC4 = Fc(256, [256, 512], bn='GN', activation_fn='relu')
        self.geo1 = Geoconv(64, 128, 32, 0.1, 0.2, bn=True)
        self.geo2 = Geoconv(256, 256, 64, 0.15, 0.3, bn=True)
        self.classify = nn.Sequential(
            nn.Conv1d(3 + 128 + 64 + 128 + 256 + 512 + category_nums, 512, 1, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Conv1d(512, class_nums, 1, bias=True)
        )
        # BUGFIX: initialize_weights() used to run *before* any submodule was
        # created, so it never touched a single layer; run it after they exist.
        if initial_weights:
            self.initialize_weights()
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.Adam(self.parameters(), weight_decay=1e-5)
        self.schedule = optim.lr_scheduler.StepLR(self.optimizer, 10, 0.6)
        self.cuda(device_id)
    def forward(self, point_cloud, labels):  # point_cloud: (B,C,N), labels: (B,)
        """Return per-point part logits of shape (b, class_nums, n)."""
        # 2*knn_points neighbours: the x-branch uses the nearest knn_points,
        # the ARPE branch drops the self-match and uses the rest.
        knn_graph, _ = batch_knn(point_cloud, point_cloud.clone(), self.knn_points * 2)
        # BUGFIX: `knn_graph[:16]` sliced the *batch* dimension (a silent
        # no-op for batches <= 16); the intent is the nearest knn_points
        # neighbours along the last (neighbour) dimension.
        x = group_points(point_cloud, knn_graph[:, :, :self.knn_points].contiguous())  # ---> (B,c,N,npoints)
        x = x.permute(0, 2, 3, 1)  # ---->(B,N,npoints,c)
        x = self.FC1(x)  # ------->(B,N,npoints,128)
        x = torch.max(x, 2, keepdim=False)[0]  # (b,n,128)
        xyz = point_cloud.transpose(1, 2)
        b, n, c = xyz.size()
        ### begin ARPE
        y = group_points(point_cloud, knn_graph[:, :, 1:].contiguous())
        y = y - point_cloud.unsqueeze(3)  # neighbour offsets relative to centre point
        y = torch.cat([point_cloud.unsqueeze(3), y], 3)  # ---->(b,c,n,1+npoints)
        y = y.transpose(1, 3).contiguous().view(b, -1, 3)
        y = self.FC2_1(y)  # --->(b,(1+npoints)*n,32)
        y = y.view(b, -1, n, 32)
        y = torch.max(y, 1, keepdim=False)[0]  # --->(b,n,32)
        y = self.FC2_2(y)  # --->(b,n,64)
        ### end ARPE
        y0 = y
        y1 = self.geo1(y, xyz)  # ---->(b,n,128)
        y2 = self.FC3(y1)  # (b,n,256)
        y3 = self.geo2(y2, xyz)  # ---->(b,n,256)
        y4 = self.FC4(y3)  # (b,n,512)
        y = torch.max(y4, 1, keepdim=True)[0]  # global feature (b,1,512)
        one_hot = torch.zeros([b, self.category_nums], device=self.device_id).scatter_(1, labels.view(-1, 1), 1)  # (b,c)
        # Concatenate all skip features per point.
        y = torch.cat([xyz, x, y0, y1, y3, y.repeat([1, n, 1]), one_hot.unsqueeze(1).repeat([1, n, 1])], dim=2)  # (b,n,3+128+64+128+256+512+16)
        y = self.classify(y.transpose(1, 2))  # -->(b,50,n)
        return y  # (b,50,2048)
    def loss(self, outputs, targets):
        """Delegate to the configured criterion (CrossEntropyLoss)."""
        return self.criterion(outputs, targets)
    def fit(self, dataloader, epoch, writer):
        """Train for one epoch; log mean loss every 8 batches; return epoch mean loss."""
        global global_step
        self.train()
        batch_loss = 0.
        epoch_loss = 0.
        batch_nums = 0
        # Pre-1.1 PyTorch convention: scheduler stepped once per epoch, up front.
        if self.schedule is not None:
            self.schedule.step()
        print('----------epoch %d start train----------' % epoch)
        for batch_idx, (inputs, _, targets, labels) in enumerate(dataloader):
            inputs = inputs.cuda(self.device_id)  # (b,3,2048)
            targets = targets.cuda(self.device_id)  # (b,2048) per-point labels
            labels = labels.cuda(self.device_id)  # (b,) category labels
            self.optimizer.zero_grad()
            outputs = self(inputs, labels)
            losses = self.loss(outputs, targets)
            losses.backward()
            self.optimizer.step()
            batch_loss += losses.item()
            epoch_loss += losses.item()
            batch_nums += 1
            if (batch_idx + 1) % 8 == 0:
                print('[%d, %5d] loss %.3f' % (epoch, batch_idx, batch_loss / 8))
                global_step += 1
                if writer is not None:
                    writer.add_scalar('scalar/batch_loss_every8', batch_loss / 8, global_step)
                batch_loss = 0.
        print('-----------epoch %d end train-----------' % epoch)
        print('epoch %d loss %.3f' % (epoch, epoch_loss / batch_nums))
        return epoch_loss / batch_nums
    def score(self, dataloader, is_save=False):
        """Evaluate accuracy and mIoU. When `is_save` is True, append the mIoU
        dict to a text file and checkpoint the model on a new best ins-mIoU.
        Returns accuracy in [0, 1]."""
        self.eval()
        correct = 0.
        total = 0
        shape_ious = {cat: [] for cat in self.seg_classes.keys()}
        with torch.no_grad():
            for batch_idx, (inputs, _, targets, labels) in enumerate(dataloader):
                inputs = inputs.cuda(self.device_id)
                targets = targets.cuda(self.device_id)
                labels = labels.cuda(self.device_id)
                outputs = self(inputs, labels)
                _, predicted = torch.max(outputs.data, 1)
                total += targets.size(0) * targets.size(1)
                correct += (predicted == targets).sum().item()
                self.compute_miou(predicted.cpu().numpy(), targets.cpu().numpy(), shape_ious)
        ret = self.get_miou(shape_ious)  # {'cls':value, 'ins':value}
        print('cls_miou = {:4f}, ins_miou = {:4f}'.format(ret['cls'], ret['ins']))
        print('Accuracy of the network: %.2f %%' % (100.0 * correct / total))
        if is_save:
            with open('./{}_miou.txt'.format(self.name), 'a+') as file:
                file.writelines(json.dumps(ret) + '\n')
            if self.best_score < ret['ins']:
                self.best_score = ret['ins']
                torch.save(self, './model_param/{}_best_weight_1103.ckpt'.format(self.name))
        return correct / total
    def initialize_weights(self):
        """Normal(0, 0.01) init for conv/linear weights; BN to identity."""
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                m.weight.data.normal_(0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def compute_miou(self, pred_label, true_label, shape_ious):
        """
        pred_label: numpy array, (b,n), int
        true_label: numpy array, (b,n), int
        Appends one mean part-IoU per shape to shape_ious[category].
        """
        batch_size = true_label.shape[0]
        for bi in range(batch_size):
            segp = pred_label[bi, :]
            segl = true_label[bi, :]
            # The shape's category is determined by any of its ground-truth labels.
            cat = self.seg_label_to_cat[segl[0]]
            part_ious = [0.0 for _ in range(len(self.seg_classes[cat]))]
            for l in self.seg_classes[cat]:
                if (np.sum(segl == l) == 0) and (np.sum(segp == l) == 0):  # part is not present, no prediction as well
                    iou = 1.0
                else:
                    iou = np.sum((segl == l) & (segp == l)) / float(np.sum((segl == l) | (segp == l)))
                part_ious[l - self.seg_classes[cat][0]] = iou
            shape_ious[cat].append(np.mean(part_ious))
    def get_miou(self, shape_ious):
        """Collapse per-shape IoUs into {'cls': category mIoU, 'ins': instance mIoU} plus per-category means."""
        all_shape_ious = []
        for cat in shape_ious.keys():
            for iou in shape_ious[cat]:
                all_shape_ious.append(iou)
            # Categories never seen in the evaluation set contribute 0.
            shape_ious[cat] = np.mean(shape_ious[cat]) if len(shape_ious[cat]) > 0 else 0
        cls_miou = np.mean(list(shape_ious.values()))
        ins_miou = np.mean(all_shape_ious)
        ret = dict(shape_ious)
        ret['cls'] = cls_miou
        ret['ins'] = ins_miou
        return ret
# 11.3
class TestKNNPIGeoNetSegment(nn.Module):
    """Geo-CNN segmentation variant that additionally consumes a 50x50
    persistence image (PI) per shape, embedded by a small Conv2d stack and
    concatenated to every point's feature before classification.
    """
    def __init__(self, input_channels, class_nums=50, category_nums=16, device_id=0, initial_weights=True):
        """
        input_channels: per-point input channel count (xyz -> 3).
        class_nums: total number of part labels across all categories.
        category_nums: number of shape categories (one-hot width).
        device_id: CUDA device index the model and batches are moved to.
        initial_weights: apply the custom normal(0, 0.01) initialization.
        """
        super(TestKNNPIGeoNetSegment, self).__init__()
        self.name = 'TestKNNPIGeoNetSegment'
        print(self.name)
        self.input_channels = input_channels
        self.class_nums = class_nums
        self.category_nums = category_nums
        self.knn_points = 16
        self.device_id = device_id
        self.best_score = 0.0  # best instance-mIoU seen so far (see score)
        # ShapeNet-Part convention: each category owns a fixed contiguous
        # range of the 50 part labels.
        self.seg_classes = {
            'Airplane': [0, 1, 2, 3],
            'Bag': [4, 5],
            'Cap': [6, 7],
            'Car': [8, 9, 10, 11],
            'Chair': [12, 13, 14, 15],
            'Earphone': [16, 17, 18],
            'Guitar': [19, 20, 21],
            'Knife': [22, 23],
            'Lamp': [24, 25, 26, 27],
            'Laptop': [28, 29],
            'Motorbike': [30, 31, 32, 33, 34, 35],
            'Mug': [36, 37],
            'Pistol': [38, 39, 40],
            'Rocket': [41, 42, 43],
            'Skateboard': [44, 45, 46],
            'Table': [47, 48, 49]
        }
        self.seg_label_to_cat = {}  # {0:Airplane, 1:Airplane, ...49:Table}
        for cat in self.seg_classes.keys():
            for label in self.seg_classes[cat]:
                self.seg_label_to_cat[label] = cat
        self.FC1 = Fc(input_channels, [32, 128], input_dim=4, bn='GN', activation_fn='relu')
        self.FC2_1 = Fc(input_channels, [32], bn='GN', activation_fn='relu')
        self.FC2_2 = Fc(32, [64], bn='GN', activation_fn='relu')
        self.FC3 = Fc(128, [128, 256], bn='GN', activation_fn='relu')
        self.FC4 = Fc(256, [256, 512], bn='GN', activation_fn='relu')
        self.geo1 = Geoconv(64, 128, 32, 0.1, 0.2, bn=True)
        self.geo2 = Geoconv(256, 256, 64, 0.15, 0.3, bn=True)
        # Persistence-image encoder: 50x50 input down to a (b,128,2,2) map.
        self.pi_conv = nn.Sequential(
            nn.Conv2d(1, out_channels=4, kernel_size=4, stride=2, bias=False),  # --->(b,4,24,24)
            nn.BatchNorm2d(4),
            nn.ReLU(),
            nn.Conv2d(4, out_channels=16, kernel_size=3, stride=2, bias=False),  # --->(b,16,11,11)
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.Conv2d(16, out_channels=64, kernel_size=3, stride=2, bias=False),  # --->(b,64,5,5)
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, out_channels=128, kernel_size=3, stride=2, bias=False),  # --->(b,128,2,2)
            nn.BatchNorm2d(128),
            nn.ReLU()
        )
        self.classify_pi = Fc(512, [256, 128], bn='GN', activation_fn='relu')
        self.classify = nn.Sequential(
            nn.Conv1d(3 + 128 + 64 + 128 + 256 + 512 + 128 + category_nums, 512, 1, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Conv1d(512, class_nums, 1, bias=True)
        )
        # BUGFIX: initialize_weights() used to run *before* any submodule was
        # created, so it never touched a single layer; run it after they exist.
        if initial_weights:
            self.initialize_weights()
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.Adam(self.parameters(), weight_decay=1e-5)
        self.schedule = optim.lr_scheduler.StepLR(self.optimizer, 10, 0.6)
        self.cuda(device_id)
    def forward(self, point_cloud, pi, labels):  # point_cloud: (B,C,N), pi: (B,50,50)
        """Return per-point part logits of shape (b, class_nums, n)."""
        # 2*knn_points neighbours: the x-branch uses the nearest knn_points,
        # the ARPE branch drops the self-match and uses the rest.
        knn_graph, _ = batch_knn(point_cloud, point_cloud.clone(), self.knn_points * 2)
        # BUGFIX: `knn_graph[:16]` sliced the *batch* dimension (a silent
        # no-op for batches <= 16); the intent is the nearest knn_points
        # neighbours along the last (neighbour) dimension.
        x = group_points(point_cloud, knn_graph[:, :, :self.knn_points].contiguous())  # ---> (B,c,N,npoints)
        x = x.permute(0, 2, 3, 1)  # ---->(B,N,npoints,c)
        x = self.FC1(x)  # ------->(B,N,npoints,128)
        x = torch.max(x, 2, keepdim=False)[0]  # (b,n,128)
        xyz = point_cloud.transpose(1, 2)
        b, n, c = xyz.size()
        ### begin ARPE
        y = group_points(point_cloud, knn_graph[:, :, 1:].contiguous())
        y = y - point_cloud.unsqueeze(3)  # neighbour offsets relative to centre point
        y = torch.cat([point_cloud.unsqueeze(3), y], 3)  # ---->(b,c,n,1+npoints)
        y = y.transpose(1, 3).contiguous().view(b, -1, 3)
        y = self.FC2_1(y)  # --->(b,(1+npoints)*n,32)
        y = y.view(b, -1, n, 32)
        y = torch.max(y, 1, keepdim=False)[0]  # --->(b,n,32)
        y = self.FC2_2(y)  # --->(b,n,64)
        ### end ARPE
        y0 = y
        y1 = self.geo1(y, xyz)  # ---->(b,n,128)
        y2 = self.FC3(y1)  # (b,n,256)
        y3 = self.geo2(y2, xyz)  # ---->(b,n,256)
        y4 = self.FC4(y3)  # (b,n,512)
        y = torch.max(y4, 1, keepdim=True)[0]  # global feature (b,1,512)
        one_hot = torch.zeros([b, self.category_nums], device=self.device_id).scatter_(1, labels.view(-1, 1), 1)  # (b,c)
        # Embed the persistence image to a flat 128-d vector.
        pi = self.pi_conv(pi.unsqueeze(1)).view(b, -1)
        pi = self.classify_pi(pi)  # --->(b,128)
        # Concatenate all skip features (plus PI embedding) per point.
        y = torch.cat([xyz, x, y0, y1, y3, y.repeat([1, n, 1]), pi.unsqueeze(1).repeat([1, n, 1]), one_hot.unsqueeze(1).repeat([1, n, 1])], dim=2)  # (b,n,3+128+64+128+256+512+128+16)
        y = self.classify(y.transpose(1, 2))  # -->(b,50,n)
        return y  # (b,50,2048)
    def loss(self, outputs, targets):
        """Delegate to the configured criterion (CrossEntropyLoss)."""
        return self.criterion(outputs, targets)
    def fit(self, dataloader, epoch, writer):
        """Train for one epoch; log mean loss every 8 batches; return epoch mean loss."""
        global global_step
        self.train()
        batch_loss = 0.
        epoch_loss = 0.
        batch_nums = 0
        # Pre-1.1 PyTorch convention: scheduler stepped once per epoch, up front.
        if self.schedule is not None:
            self.schedule.step()
        print('----------epoch %d start train----------' % epoch)
        for batch_idx, (inputs, _, pi, targets, labels) in enumerate(dataloader):
            inputs = inputs.cuda(self.device_id)  # (b,3,2048)
            targets = targets.cuda(self.device_id)  # (b,2048) per-point labels
            labels = labels.cuda(self.device_id)  # (b,) category labels
            pi = pi.to(device=self.device_id, dtype=torch.float32)  # (b,50,50)
            self.optimizer.zero_grad()
            outputs = self(inputs, pi, labels)
            losses = self.loss(outputs, targets)
            losses.backward()
            self.optimizer.step()
            batch_loss += losses.item()
            epoch_loss += losses.item()
            batch_nums += 1
            if (batch_idx + 1) % 8 == 0:
                print('[%d, %5d] loss %.3f' % (epoch, batch_idx, batch_loss / 8))
                global_step += 1
                if writer is not None:
                    writer.add_scalar('scalar/batch_loss_every8', batch_loss / 8, global_step)
                batch_loss = 0.
        print('-----------epoch %d end train-----------' % epoch)
        print('epoch %d loss %.3f' % (epoch, epoch_loss / batch_nums))
        return epoch_loss / batch_nums
    def score(self, dataloader, is_save=False):
        """Evaluate accuracy and mIoU. When `is_save` is True, append the mIoU
        dict to a text file and checkpoint the model on a new best ins-mIoU.
        Returns accuracy in [0, 1]."""
        self.eval()
        correct = 0.
        total = 0
        shape_ious = {cat: [] for cat in self.seg_classes.keys()}
        with torch.no_grad():
            for batch_idx, (inputs, _, pi, targets, labels) in enumerate(dataloader):
                inputs = inputs.cuda(self.device_id)
                targets = targets.cuda(self.device_id)
                labels = labels.cuda(self.device_id)
                pi = pi.to(device=self.device_id, dtype=torch.float32)
                outputs = self(inputs, pi, labels)
                _, predicted = torch.max(outputs.data, 1)
                total += targets.size(0) * targets.size(1)
                correct += (predicted == targets).sum().item()
                self.compute_miou(predicted.cpu().numpy(), targets.cpu().numpy(), shape_ious)
        ret = self.get_miou(shape_ious)  # {'cls':value, 'ins':value}
        print('cls_miou = {:4f}, ins_miou = {:4f}'.format(ret['cls'], ret['ins']))
        print('Accuracy of the network: %.2f %%' % (100.0 * correct / total))
        # BUGFIX: a leftover `is_save = False` here silently overrode the
        # `is_save` parameter, making saving impossible; it has been removed.
        if is_save:
            with open('./{}_miou.txt'.format(self.name), 'a+') as file:
                file.writelines(json.dumps(ret) + '\n')
            if self.best_score < ret['ins']:
                self.best_score = ret['ins']
                torch.save(self, './model_param/{}_best_weight_1103.ckpt'.format(self.name))
        return correct / total
    def initialize_weights(self):
        """Normal(0, 0.01) init for conv/linear weights; BN to identity."""
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                m.weight.data.normal_(0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def compute_miou(self, pred_label, true_label, shape_ious):
        """
        pred_label: numpy array, (b,n), int
        true_label: numpy array, (b,n), int
        Appends one mean part-IoU per shape to shape_ious[category].
        """
        batch_size = true_label.shape[0]
        for bi in range(batch_size):
            segp = pred_label[bi, :]
            segl = true_label[bi, :]
            # The shape's category is determined by any of its ground-truth labels.
            cat = self.seg_label_to_cat[segl[0]]
            part_ious = [0.0 for _ in range(len(self.seg_classes[cat]))]
            for l in self.seg_classes[cat]:
                if (np.sum(segl == l) == 0) and (np.sum(segp == l) == 0):  # part is not present, no prediction as well
                    iou = 1.0
                else:
                    iou = np.sum((segl == l) & (segp == l)) / float(np.sum((segl == l) | (segp == l)))
                part_ious[l - self.seg_classes[cat][0]] = iou
            shape_ious[cat].append(np.mean(part_ious))
    def get_miou(self, shape_ious):
        """Collapse per-shape IoUs into {'cls': category mIoU, 'ins': instance mIoU} plus per-category means."""
        all_shape_ious = []
        for cat in shape_ious.keys():
            for iou in shape_ious[cat]:
                all_shape_ious.append(iou)
            # Categories never seen in the evaluation set contribute 0.
            shape_ious[cat] = np.mean(shape_ious[cat]) if len(shape_ious[cat]) > 0 else 0
        cls_miou = np.mean(list(shape_ious.values()))
        ins_miou = np.mean(all_shape_ious)
        ret = dict(shape_ious)
        ret['cls'] = cls_miou
        ret['ins'] = ins_miou
        return ret
# 11.4
class TestKNNPIGeoNetSegment_fps(nn.Module):
def __init__(self, input_channels, class_nums=50,category_nums=16, device_id=0, initial_weights=True):
super(TestKNNPIGeoNetSegment_fps,self).__init__()
# self.knn_points = 16 # {8,16,32}
self.name='TestKNNPIGeoNetSegment_fps'
print(self.name)
self.input_channels = input_channels
self.class_nums = class_nums
self.category_nums = category_nums
self.knn_points = 16
self.device_id = device_id
self.best_score = 0.0
self.seg_classes = {
'Airplane': [0, 1, 2, 3],
'Bag': [4, 5],
'Cap': [6, 7],
'Car': [8, 9, 10, 11],
'Chair': [12, 13, 14, 15],
'Earphone': [16, 17, 18],
'Guitar': [19, 20, 21],
'Knife': [22, 23],
'Lamp': [24, 25, 26, 27],
'Laptop': [28, 29],
'Motorbike': [30, 31, 32, 33, 34, 35],
'Mug': [36, 37],
'Pistol': [38, 39, 40],
'Rocket': [41, 42, 43],
'Skateboard': [44, 45, 46],
'Table': [47, 48, 49]
}
self.seg_label_to_cat = {} # {0:Airplane, 1:Airplane, ...49:Table}
for cat in self.seg_classes.keys():
for label in self.seg_classes[cat]:
self.seg_label_to_cat[label] = cat
if initial_weights:
self.initialize_weights()
self.FC1 = Fc(input_channels,[32,128],input_dim=4, bn='GN', activation_fn='relu')
# self.FC2 = Fc(input_channels,[64], bn='BN', activation_fn='relu')
self.FC2_1 = Fc(input_channels,[32], bn='GN', activation_fn='relu')
self.FC2_2 = Fc(32,[64], bn='GN', activation_fn='relu')
self.FC3 = Fc(128,[128,256], bn='GN', activation_fn='relu')
self.FC4 = Fc(256,[256,512], bn='GN', activation_fn='relu')
# self.geo1 = Fc(64,[128], bn='BN', activation_fn='relu')
self.geo1 = Geoconv(64, 128, 32, 0.1, 0.2, bn=True)
self.geo2 = Geoconv(256, 256, 64, 0.15, 0.3, bn=True)
##### 50*50
self.pi_conv=nn.Sequential(
nn.Conv2d(1,out_channels=4,kernel_size=4,stride=2,bias=False),#--->(b,4,23,23)
nn.BatchNorm2d(4),
nn.ReLU(),
# nn.MaxPool1d(3, stride=2), #--->(b,4,23)
nn.Conv2d(4,out_channels=16,kernel_size=3,stride=2,bias=False),#--->(b,16,11,11)
nn.BatchNorm2d(16),
# nn.GroupNorm(4, 8),
nn.ReLU(),
# nn.MaxPool2d(3, stride=2), #--->(b,16,11,11)
nn.Conv2d(16,out_channels=64,kernel_size=3,stride=2,bias=False),#--->(b,64,5,5)
nn.BatchNorm2d(64),
# nn.GroupNorm(4, 16),
nn.ReLU(),
# nn.MaxPool1d(3, stride=2) #--->(b,16,12)
nn.Conv2d(64,out_channels=128,kernel_size=3,stride=2,bias=False),#--->(b,128,2,2)
nn.BatchNorm2d(128),
# nn.GroupNorm(4, 16),
nn.ReLU()
)
self.classify_pi=Fc(512,[256,128], bn='GN', activation_fn='relu')
self.classify = nn.Sequential(
nn.Conv1d(3 + 128 + 64 + 128 + 256 + 512 + 128 + category_nums, 512, 1, bias=False),
nn.BatchNorm1d(512),
nn.ReLU(True),
nn.Dropout(0.5),
nn.Conv1d(512, class_nums, 1, bias=True)
)
self.criterion = nn.CrossEntropyLoss()
self.optimizer = optim.Adam(self.parameters(), weight_decay=1e-5)
self.schedule = optim.lr_scheduler.StepLR(self.optimizer, 10, 0.6)
self.cuda(device_id)
def forward(self, point_cloud, pi, labels): #(B,C,N), (B,50,50)
knn_graph, _ = batch_knn(point_cloud, point_cloud.clone(), self.knn_points*2)
# debugPrint(knn_graph.size())
x = group_points(point_cloud, knn_graph[:16].contiguous()) # ---> (B,c,N,npoints)
x=x.permute(0,2,3,1) #---->(B,N,npoints,c)
x=self.FC1(x) #------->(B,N,npoints,128)
x=torch.max(x,2,keepdim=False)[0] # b,n,128
xyz=point_cloud.transpose(1,2).contiguous()
b,n,c=xyz.size()
# point_cloud=self.FC2(point_cloud)
### begin ARPE
# knn_graph, _ = batch_knn(point_cloud, point_cloud.clone(), self.knn_points*2)
y = group_points(point_cloud, knn_graph[:, :, 1:].contiguous())
y = y - point_cloud.unsqueeze(3)
y=torch.cat([point_cloud.unsqueeze(3),y],3) #---->(b,c,n,1+npoints)
y=y.transpose(1,3).contiguous().view(b,-1,3)
y=self.FC2_1(y) #--->(b,(1+npoints)*n,32)
y=y.view(b,-1,n,32)
y=torch.max(y, 1, keepdim=False)[0] #--->(b,n,32)
y=self.FC2_2(y) #--->(b,n,64)
### end ARPE
y0=y
y1=self.geo1(y,xyz) #---->(b,n,128)
sample_index = furthest_point_sample(xyz,1024).long()
sample_xyz = gather_nd(xyz, sample_index)
sample_y1 = gather_nd(y1, sample_index)
# debugPrint(sample_y1)
y2=self.FC3(sample_y1) #(b,n,256)
y3=self.geo2(y2,sample_xyz) #---->(b,n,256)
# y3=y2
upsample=True
if upsample:
dist, idx = three_nn(xyz, sample_xyz)
dist_recip = 1.0 / (dist + 1e-8)
norm = torch.sum(dist_recip, dim=2, keepdim=True)
weight = dist_recip / norm
interpolated_y3 = three_interpolate(y3.transpose(1,2).contiguous(), idx, weight).transpose(1,2) # (b,1024,c)--->(b,2048,c)
# debugPrint(interpolated_y3.size())
y4=self.FC4(y3) #(B,N,512)
y=torch.max(y4, 1, keepdim=True)[0] # (b,1,512)
one_hot = torch.zeros([b, self.category_nums],device=self.device_id).scatter_(1, labels.view(-1, 1), 1) # (b,c)
pi=self.pi_conv(pi.unsqueeze(1)).view(b,-1)
pi=self.classify_pi(pi) ##--->(b,128)
y=torch.cat([xyz,x,y0,y1,interpolated_y3,y.repeat([1,n,1]), pi.unsqueeze(1).repeat([1,n,1]),one_hot.unsqueeze(1).repeat([1,n,1])],dim=2) #(b,n,3+128+64+128+512+128+16)
y= self.classify(y.transpose(1,2)) # -->(b,50,n)
return y #(b,50,2048)
def loss(self, outputs, targets):
return self.criterion(outputs, targets)
    def fit(self, dataloader, epoch, writer):
        """Train for one epoch; returns the mean per-batch loss.

        dataloader yields (inputs, _, pi, targets, labels); the second field
        is ignored here.  Increments the module-level `global_step` counter
        every 8 batches and optionally logs to the TensorBoard `writer`.
        """
        global global_step
        self.train()
        batch_loss = 0.
        epoch_loss = 0.
        batch_nums = 0
        if self.schedule is not None:
            # NOTE(review): LR scheduler is stepped at the *start* of every
            # epoch, before any optimizer.step() — the legacy PyTorch ordering
            # that newer versions warn about.  Confirm the intended schedule.
            self.schedule.step()
        print('----------epoch %d start train----------' % epoch)
        for batch_idx, (inputs, _, pi, targets, labels) in enumerate(dataloader):
            inputs = inputs.cuda(self.device_id) # (b,3,2048)
            targets = targets.cuda(self.device_id) # (b,2048) per-point part labels
            labels = labels.cuda(self.device_id) # (b,) category labels
            pi = pi.to(device=self.device_id,dtype=torch.float32)
            self.optimizer.zero_grad()
            outputs = self(inputs,pi,labels)
            losses = self.loss(outputs, targets)
            losses.backward()
            self.optimizer.step()
            batch_loss += losses.item()
            epoch_loss += losses.item()
            batch_nums += 1
            if (batch_idx + 1) % 8 == 0:
                # Report (and optionally log) the mean loss of the last 8 batches.
                print('[%d, %5d] loss %.3f' % (epoch, batch_idx, batch_loss / 8))
                global_step += 1
                # print('global_step={}'.format(global_step))
                if writer is not None:
                    writer.add_scalar('scalar/batch_loss_every8',batch_loss / 8, global_step)
                batch_loss = 0.
            # raise Exception
        print('-----------epoch %d end train-----------' % epoch)
        print('epoch %d loss %.3f' % (epoch, epoch_loss / batch_nums))
        return epoch_loss / batch_nums
    def score(self, dataloader, is_save=False):
        """Evaluate part segmentation on `dataloader`.

        Prints class/instance mIoU and point-wise accuracy.  When `is_save`
        is True, appends the mIoU record to a text file and checkpoints the
        whole model whenever instance mIoU improves.  Returns point accuracy.
        """
        self.eval()
        correct = 0.
        total = 0
        # Per-category lists of per-shape mean IoU, filled by compute_miou.
        shape_ious = {cat:[] for cat in self.seg_classes.keys()}
        with torch.no_grad():
            for batch_idx, (inputs, _, pi, targets, labels) in enumerate(dataloader):
                inputs = inputs.cuda(self.device_id)
                targets = targets.cuda(self.device_id)
                labels = labels.cuda(self.device_id)
                pi = pi.to(device=self.device_id,dtype=torch.float32)
                outputs = self(inputs,pi,labels)
                _, predicted = torch.max(outputs.data, 1)
                total += targets.size(0) * targets.size(1)
                correct += (predicted == targets).sum().item()
                self.compute_miou(predicted.cpu().numpy(),targets.cpu().numpy(),shape_ious)
                # if batch_idx>10:
                #     break
        # debugPrint(shape_ious['Airplane'])
        ret=self.get_miou(shape_ious) #{'cls':value,'ins':value}
        print('cls_miou = {:4f}, ins_miou = {:4f}'.format(ret['cls'],ret['ins']))
        print('Accuracy of the network: %.2f %%' % (100.0 * correct / total))
        # is_save=False
        if is_save:
            with open('./{}__upsample_miou.txt'.format(self.name),'a+') as file:
                file.writelines(json.dumps(ret)+'\n')
            if self.best_score<ret['ins']:
                self.best_score=ret['ins']
                # Saves the entire module object (not just a state_dict).
                torch.save(self, './model_param/{}_best_weight_1104_upsample.ckpt'.format(self.name))
        return correct / total
def initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv1d):
m.weight.data.normal_(0, 0.01)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm1d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def compute_miou(self,pred_label,true_label, shape_ious):
"""
pred_label: numpy array, (b,n), int
true_label: numpy array, (b,n), int
"""
batch_size=true_label.shape[0]
for bi in range(batch_size):
segp = pred_label[bi, :]
segl = true_label[bi, :]
cat = self.seg_label_to_cat[segl[0]]
part_ious = [0.0 for _ in range(len(self.seg_classes[cat]))]
for l in self.seg_classes[cat]:
if (np.sum(segl == l) == 0) and (np.sum(segp == l) == 0): # part is not present, no prediction as well
iou = 1.0
else:
iou = np.sum((segl == l) & (segp == l)) / float(np.sum((segl == l) | (segp == l)))
part_ious[l - self.seg_classes[cat][0]] = iou
shape_ious[cat].append(np.mean(part_ious))
def get_miou(self,shape_ious):
all_shape_ious = []
for cat in shape_ious.keys():
for iou in shape_ious[cat]:
all_shape_ious.append(iou)
shape_ious[cat] = np.mean(shape_ious[cat]) if len(shape_ious[cat])>0 else 0
cls_miou = np.mean(list(shape_ious.values()))
ins_miou = np.mean(all_shape_ious)
ret = dict(shape_ious)
ret['cls'] = cls_miou
ret['ins'] = ins_miou
return ret
    def test(self, dataloader, is_save=False):
        """Run inference and collect (points, predicted parts, category labels).

        NOTE(review): the loop breaks after `batch_idx > 10`, so at most ~12
        batches are collected — looks like leftover debugging; confirm before
        relying on this for a full-set export.  When `is_save` is True the
        concatenated arrays are written to a .mat file.  Returns a list
        [positions, predicted_segments, labels] of concatenated numpy arrays.
        """
        self.eval()
        correct = 0.
        total = 0
        ret=[]
        with torch.no_grad():
            for batch_idx, (inputs, _, pi, targets, labels) in enumerate(dataloader):
                inputs = inputs.cuda(self.device_id)
                targets = targets.cuda(self.device_id)
                labels = labels.cuda(self.device_id)
                pi = pi.to(device=self.device_id,dtype=torch.float32)
                outputs = self(inputs,pi,labels)
                _, predicted = torch.max(outputs.data, 1) # (b,2048)
                ret.append((inputs.cpu().numpy(),predicted.cpu().numpy(),labels.cpu().numpy()))
                if batch_idx>10:
                    debugPrint(ret)
                    break
        # Transpose the list of per-batch tuples into per-field tuples, then
        # concatenate each field across batches.
        ret=zip(*ret)
        ret=[np.concatenate(items) for items in ret]
        debugPrint(ret)
        for i in ret:
            print(i.shape)
        if is_save:
            # with open('./{}__upsample_pred.txt'.format(self.name),'a+') as file:
            #     file.writelines(json.dumps(ret)+'\n')
            scio.savemat('./{}__upsample_pred.mat'.format(self.name), {'pos':ret[0],
                'pred_seg':ret[1],'label':ret[2]})
        return ret
# 11.4
class TestKNNPISphereGeoNetSegment_fps(nn.Module):
    """Part-segmentation network for ShapeNet-part style data.

    Combines (1) KNN-grouped local features, (2) an ARPE-style relative
    position embedding, (3) SphericalGeoconv layers with furthest-point
    downsampling and 3-NN inverse-distance upsampling, and (4) a
    persistence-image (PI) CNN branch plus a category one-hot, all
    concatenated for per-point part classification.
    """

    def __init__(self, input_channels, class_nums=50, category_nums=16, device_id=0, initial_weights=True):
        super(TestKNNPISphereGeoNetSegment_fps, self).__init__()
        self.name = 'TestMeshPISphereGeoNetSegment_fps_2split_1120'
        print(self.name)
        self.input_channels = input_channels
        self.class_nums = class_nums
        self.category_nums = category_nums
        self.knn_points = 16  # {8,16,32}
        self.device_id = device_id
        self.best_score = 0.0
        # Contiguous part-label ranges per object category (ShapeNet-part).
        self.seg_classes = {
            'Airplane': [0, 1, 2, 3],
            'Bag': [4, 5],
            'Cap': [6, 7],
            'Car': [8, 9, 10, 11],
            'Chair': [12, 13, 14, 15],
            'Earphone': [16, 17, 18],
            'Guitar': [19, 20, 21],
            'Knife': [22, 23],
            'Lamp': [24, 25, 26, 27],
            'Laptop': [28, 29],
            'Motorbike': [30, 31, 32, 33, 34, 35],
            'Mug': [36, 37],
            'Pistol': [38, 39, 40],
            'Rocket': [41, 42, 43],
            'Skateboard': [44, 45, 46],
            'Table': [47, 48, 49]
        }
        self.seg_label_to_cat = {}  # {0:Airplane, 1:Airplane, ...49:Table}
        for cat in self.seg_classes.keys():
            for label in self.seg_classes[cat]:
                self.seg_label_to_cat[label] = cat
        self.FC1 = Fc(input_channels, [32, 64, 128], input_dim=4, bn='GN', activation_fn='relu')
        self.FC2_1 = Fc(input_channels, [32], bn='GN', activation_fn='relu')
        self.FC2_2 = Fc(32, [64], bn='GN', activation_fn='relu')
        self.FC3 = Fc(128, [128, 256], bn='GN', activation_fn='relu')
        self.FC4 = Fc(256, [256, 512], bn='GN', activation_fn='relu')
        self.geo1 = SphericalGeoconv(64, 128, 32, 0.1, 0.2, bn=True)
        self.geo2 = SphericalGeoconv(256, 256, 64, 0.15, 0.3, bn=True)
        # Persistence-image branch: 50x50 single-channel image -> 512-d vector.
        self.pi_conv = nn.Sequential(
            nn.Conv2d(1, out_channels=4, kernel_size=4, stride=2, bias=False),    # --->(b,4,23,23)
            nn.BatchNorm2d(4),
            nn.ReLU(),
            nn.Conv2d(4, out_channels=16, kernel_size=3, stride=2, bias=False),   # --->(b,16,11,11)
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.Conv2d(16, out_channels=64, kernel_size=3, stride=2, bias=False),  # --->(b,64,5,5)
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, out_channels=128, kernel_size=3, stride=2, bias=False), # --->(b,128,2,2)
            nn.BatchNorm2d(128),
            nn.ReLU()
        )
        self.classify_pi = Fc(512, [256, 128], bn='GN', activation_fn='relu')
        self.classify = nn.Sequential(
            nn.Conv1d(3 + 128 + 64 + 128 + 256 + 512 + 128 + category_nums, 512, 1, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Conv1d(512, class_nums, 1, bias=True)
        )
        # BUGFIX: initialize_weights() must run *after* the submodules exist.
        # Previously it was called before any layer was constructed, so it
        # initialized nothing.  Note it only covers Conv1d/Linear/BatchNorm1d;
        # the Conv2d/BatchNorm2d layers in pi_conv keep their default init.
        if initial_weights:
            self.initialize_weights()
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.Adam(self.parameters(), weight_decay=1e-5)
        self.schedule = optim.lr_scheduler.StepLR(self.optimizer, 10, 0.6)
        self.cuda(device_id)

    def forward(self, point_cloud, pi, labels, knn_graph=None):  # (B,C,N), (B,50,50), long, (B,N,32+1)
        """Return per-point part logits of shape (b, class_nums, n)."""
        if knn_graph is None:
            # No precomputed graph supplied: build KNN on the fly.
            knn_graph, _ = batch_knn(point_cloud, point_cloud.clone(), self.knn_points * 2)
        x = group_points(point_cloud, knn_graph[:, :, :16].contiguous())  # ---> (B,c,N,npoints)
        x = x.permute(0, 2, 3, 1)   # ----> (B,N,npoints,c)
        x = self.FC1(x)             # ----> (B,N,npoints,128)
        x = torch.max(x, 2, keepdim=False)[0]  # (b,n,128)
        xyz = point_cloud.transpose(1, 2).contiguous()
        b, n, c = xyz.size()
        ### begin ARPE (relative-position embedding over neighbours)
        y = group_points(point_cloud, knn_graph[:, :, 1:].contiguous())
        y = y - point_cloud.unsqueeze(3)                  # offsets to each neighbour
        y = torch.cat([point_cloud.unsqueeze(3), y], 3)   # ----> (b,c,n,1+npoints)
        y = y.transpose(1, 3).contiguous().view(b, -1, 3)
        y = self.FC2_1(y)                                  # ---> (b,(1+npoints)*n,32)
        y = y.view(b, -1, n, 32)
        y = torch.max(y, 1, keepdim=False)[0]              # ---> (b,n,32)
        y = self.FC2_2(y)                                  # ---> (b,n,64)
        ### end ARPE
        y0 = y
        y1 = self.geo1(y, xyz)  # ----> (b,n,128)
        # Furthest-point downsample to 1024 points for the deeper geo layer.
        sample_index = furthest_point_sample(xyz, 1024).long()
        sample_xyz = gather_nd(xyz, sample_index)
        sample_y1 = gather_nd(y1, sample_index)
        y2 = self.FC3(sample_y1)         # (b,1024,256)
        y3 = self.geo2(y2, sample_xyz)   # ----> (b,1024,256)
        # Inverse-distance-weighted 3-NN interpolation back to the full cloud.
        dist, idx = three_nn(xyz, sample_xyz)
        dist_recip = 1.0 / (dist + 1e-8)
        norm = torch.sum(dist_recip, dim=2, keepdim=True)
        weight = dist_recip / norm
        interpolated_y3 = three_interpolate(y3.transpose(1, 2).contiguous(), idx, weight).transpose(1, 2)  # (b,1024,c)--->(b,n,c)
        y4 = self.FC4(y3)                       # (B,1024,512)
        y = torch.max(y4, 1, keepdim=True)[0]   # (b,1,512) global feature
        one_hot = torch.zeros([b, self.category_nums], device=self.device_id).scatter_(1, labels.view(-1, 1), 1)  # (b,c)
        pi = self.pi_conv(pi.unsqueeze(1)).view(b, -1)
        pi = self.classify_pi(pi)               # ---> (b,128)
        # Concatenate every feature stream per point: (b,n,3+128+64+128+256+512+128+category_nums)
        y = torch.cat([xyz, x, y0, y1, interpolated_y3, y.repeat([1, n, 1]),
                       pi.unsqueeze(1).repeat([1, n, 1]),
                       one_hot.unsqueeze(1).repeat([1, n, 1])], dim=2)
        y = self.classify(y.transpose(1, 2))  # --> (b,class_nums,n)
        return y  # (b,50,2048)

    def loss(self, outputs, targets):
        """Cross-entropy loss over per-point part logits."""
        return self.criterion(outputs, targets)

    def fit(self, dataloader, epoch, writer):
        """Train for one epoch; returns the mean per-batch loss."""
        global global_step
        self.train()
        batch_loss = 0.
        epoch_loss = 0.
        batch_nums = 0
        if self.schedule is not None:
            # NOTE(review): scheduler stepped at epoch start (legacy ordering).
            self.schedule.step()
        print('----------epoch %d start train----------' % epoch)
        for batch_idx, (inputs, knn_graph, pi, targets, labels) in enumerate(dataloader):
            inputs = inputs.cuda(self.device_id)       # (b,3,2048)
            knn_graph = knn_graph.cuda(self.device_id) # (b,n,32+1)
            targets = targets.cuda(self.device_id)     # (b,2048)
            labels = labels.cuda(self.device_id)       # (b,)
            pi = pi.to(device=self.device_id, dtype=torch.float32)
            self.optimizer.zero_grad()
            outputs = self(inputs, pi, labels, knn_graph)
            losses = self.loss(outputs, targets)
            losses.backward()
            self.optimizer.step()
            batch_loss += losses.item()
            epoch_loss += losses.item()
            batch_nums += 1
            if (batch_idx + 1) % 8 == 0:
                print('[%d, %5d] loss %.3f' % (epoch, batch_idx, batch_loss / 8))
                global_step += 1
                if writer is not None:
                    writer.add_scalar('scalar/batch_loss_every8', batch_loss / 8, global_step)
                batch_loss = 0.
        print('-----------epoch %d end train-----------' % epoch)
        print('epoch %d loss %.3f' % (epoch, epoch_loss / batch_nums))
        return epoch_loss / batch_nums

    def score(self, dataloader, is_save=False):
        """Evaluate segmentation: prints mIoU/accuracy, optionally saves a
        record and checkpoints on improved instance mIoU.  Returns accuracy."""
        self.eval()
        correct = 0.
        total = 0
        shape_ious = {cat: [] for cat in self.seg_classes.keys()}
        with torch.no_grad():
            for batch_idx, (inputs, knn_graph, pi, targets, labels) in enumerate(dataloader):
                inputs = inputs.cuda(self.device_id)
                knn_graph = knn_graph.cuda(self.device_id)  # (b,n,32+1)
                targets = targets.cuda(self.device_id)
                labels = labels.cuda(self.device_id)
                pi = pi.to(device=self.device_id, dtype=torch.float32)
                outputs = self(inputs, pi, labels, knn_graph)
                _, predicted = torch.max(outputs.data, 1)
                total += targets.size(0) * targets.size(1)
                correct += (predicted == targets).sum().item()
                self.compute_miou(predicted.cpu().numpy(), targets.cpu().numpy(), shape_ious)
        ret = self.get_miou(shape_ious)  # {'cls':value,'ins':value}
        print('cls_miou = {:4f}, ins_miou = {:4f}'.format(ret['cls'], ret['ins']))
        print('Accuracy of the network: %.2f %%' % (100.0 * correct / total))
        if is_save:
            with open('./{}_upsample_miou_1120.txt'.format(self.name), 'a+') as file:
                file.writelines(json.dumps(ret) + '\n')
            if self.best_score < ret['ins']:
                self.best_score = ret['ins']
                # Saves the entire module object (not just a state_dict).
                torch.save(self, './model_param/{}_best_weight_1120_upsample.ckpt'.format(self.name))
        return correct / total

    def initialize_weights(self):
        """Normal(0, 0.01) init for Conv1d/Linear weights with zero biases;
        BatchNorm1d reset to weight=1, bias=0."""
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                m.weight.data.normal_(0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def compute_miou(self, pred_label, true_label, shape_ious):
        """Accumulate each shape's mean part-IoU into `shape_ious`.

        pred_label: numpy array, (b,n), int
        true_label: numpy array, (b,n), int
        A part absent from both prediction and ground truth scores IoU 1.0.
        """
        batch_size = true_label.shape[0]
        for bi in range(batch_size):
            segp = pred_label[bi, :]
            segl = true_label[bi, :]
            cat = self.seg_label_to_cat[segl[0]]
            part_ious = [0.0 for _ in range(len(self.seg_classes[cat]))]
            for l in self.seg_classes[cat]:
                if (np.sum(segl == l) == 0) and (np.sum(segp == l) == 0):
                    # Part is not present and not predicted either.
                    iou = 1.0
                else:
                    iou = np.sum((segl == l) & (segp == l)) / float(np.sum((segl == l) | (segp == l)))
                part_ious[l - self.seg_classes[cat][0]] = iou
            shape_ious[cat].append(np.mean(part_ious))

    def get_miou(self, shape_ious):
        """Reduce accumulated per-shape IoUs to category/instance mIoU.

        Mutates `shape_ious` in place (each list becomes its mean, 0 if empty)
        and returns per-category means plus 'cls' and 'ins' aggregates.
        """
        all_shape_ious = []
        for cat in shape_ious.keys():
            for iou in shape_ious[cat]:
                all_shape_ious.append(iou)
            shape_ious[cat] = np.mean(shape_ious[cat]) if len(shape_ious[cat]) > 0 else 0
        cls_miou = np.mean(list(shape_ious.values()))
        ins_miou = np.mean(all_shape_ious)
        ret = dict(shape_ious)
        ret['cls'] = cls_miou
        ret['ins'] = ins_miou
        return ret
class GeoNet(nn.Module):
    """Point-cloud classification network: a KNN-grouped local-feature branch
    fused with a stack of Geoconv layers, followed by a global max-pool and
    an MLP classifier."""

    def __init__(self, input_channels, class_nums=1, device_id=0, initial_weights=True):
        super(GeoNet, self).__init__()
        self.knn_points = 32  # {8,16,32}
        self.input_channels = input_channels
        self.class_nums = class_nums
        self.device_id = device_id
        self.FC1 = Fc(input_channels, [64, 128, 384], input_dim=4, bn='BN', activation_fn='relu')
        self.FC2 = Fc(input_channels, [64], bn='BN', activation_fn='relu')
        self.FC3 = Fc(128, [256], bn='BN', activation_fn='relu')
        self.FC4 = Fc(768, [2048], bn='BN', activation_fn='relu')
        self.geo1 = Geoconv(64, 128, 64, 0.05, 0.15, bn=True)
        self.geo2 = Geoconv(256, 512, 64, 0.15, 0.3, bn=True)
        self.geo3 = Geoconv(896, 768, 64, 0.3, 0.6, bn=True)
        self.classify = nn.Sequential(
            nn.Linear(2048, 512, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Linear(512, class_nums)
        )
        # BUGFIX: weight init must happen after the layers are built; it used
        # to be called before any submodule existed, which made it a no-op.
        if initial_weights:
            self.initialize_weights()
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.Adam(self.parameters(), weight_decay=1e-5)
        self.schedule = optim.lr_scheduler.StepLR(self.optimizer, 10, 0.6)
        self.cuda(device_id)

    def forward(self, point_cloud):  # B,C,N
        """Return class logits of shape (b, class_nums)."""
        knn_graph, _ = batch_knn(point_cloud, point_cloud.clone(), self.knn_points)
        x = group_points(point_cloud, knn_graph)  # ---> (B,c,N,npoints)
        x = x.permute(0, 2, 3, 1)                 # ----> (B,N,npoints,c)
        x = self.FC1(x)                           # ----> (B,N,npoints,384)
        b, n, npoints, c = x.size()
        x = torch.max(x, 2, keepdim=False)[0]     # ---> (b,n,384)
        point_cloud = point_cloud.transpose(1, 2)
        xyz = point_cloud
        y = self.FC2(point_cloud)                 # ----> (b,n,64)
        y = self.geo1(y, xyz)                     # ----> (b,n,128)
        y = self.FC3(y)                           # ----> (b,n,256)
        y = self.geo2(y, xyz)                     # ----> (b,n,512)
        y = torch.cat([y, x], 2)                  # ----> (b,n,896)
        y = self.geo3(y, xyz)                     # ----> (b,n,768)
        y = self.FC4(y)                           # ----> (b,n,2048)
        y = torch.max(y, 1, keepdim=False)[0]     # ---> (b,2048)
        y = self.classify(y)
        return y

    def loss(self, outputs, targets):
        """Cross-entropy classification loss."""
        return self.criterion(outputs, targets)

    def fit(self, dataloader, epoch, writer):
        """Train for one epoch; returns the mean per-batch loss."""
        global global_step
        self.train()
        batch_loss = 0.
        epoch_loss = 0.
        batch_nums = 0
        if self.schedule is not None:
            # NOTE(review): scheduler stepped at epoch start (legacy ordering).
            self.schedule.step()
        print('----------epoch %d start train----------' % epoch)
        for batch_idx, (inputs, _, targets) in enumerate(dataloader):
            inputs = inputs.cuda(self.device_id)
            targets = targets.cuda(self.device_id)
            self.optimizer.zero_grad()
            outputs = self(inputs)
            losses = self.loss(outputs, targets)
            losses.backward()
            self.optimizer.step()
            batch_loss += losses.item()
            epoch_loss += losses.item()
            batch_nums += 1
            if (batch_idx + 1) % 8 == 0:
                print('[%d, %5d] loss %.3f' % (epoch, batch_idx, batch_loss / 8))
                global_step += 1
                if writer is not None:
                    writer.add_scalar('scalar/batch_loss_every8', batch_loss / 8, global_step)
                batch_loss = 0.
        print('-----------epoch %d end train-----------' % epoch)
        print('epoch %d loss %.3f' % (epoch, epoch_loss / batch_nums))
        return epoch_loss / batch_nums

    def score(self, dataloader):
        """Return top-1 classification accuracy on `dataloader`."""
        self.eval()
        correct = 0.
        total = 0
        with torch.no_grad():
            for batch_idx, (inputs, _, targets) in enumerate(dataloader):
                inputs = inputs.cuda(self.device_id)
                targets = targets.cuda(self.device_id)
                outputs = self(inputs)
                _, predicted = torch.max(outputs.data, 1)
                total += targets.size(0)
                correct += (predicted == targets).sum().item()
        print('Accuracy of the network on the test images: %.2f %%' % (100.0 * correct / total))
        return correct / total

    def initialize_weights(self):
        """Normal(0, 0.01) init for Conv1d/Linear weights with zero biases;
        BatchNorm1d reset to weight=1, bias=0."""
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                m.weight.data.normal_(0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
class AdaptiveGeoNet(nn.Module):
    """GeoNet variant that consumes a precomputed KNN graph from the
    dataloader (using the 16 nearest neighbours) instead of computing KNN
    on the fly."""

    def __init__(self, input_channels, class_nums=1, device_id=0, initial_weights=True):
        super(AdaptiveGeoNet, self).__init__()
        self.knn_points = 16
        self.input_channels = input_channels
        self.class_nums = class_nums
        self.device_id = device_id
        self.FC1 = Fc(input_channels, [64, 128, 384], input_dim=4, bn=True, activation_fn='relu')
        self.FC2 = Fc(input_channels, [64], bn=True, activation_fn='relu')
        self.FC3 = Fc(128, [256], bn=True, activation_fn='relu')
        self.FC4 = Fc(768, [2048], bn=True, activation_fn='relu')
        self.geo1 = Geoconv(64, 128, 64, 0.05, 0.15, bn=True)
        self.geo2 = Geoconv(256, 512, 64, 0.15, 0.3, bn=True)
        self.geo3 = Geoconv(896, 768, 64, 0.3, 0.6, bn=True)
        self.classify = nn.Sequential(
            nn.Linear(2048, 512, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Linear(512, class_nums)
        )
        # BUGFIX: weight init moved after layer construction (previously the
        # call preceded every submodule, so nothing was initialized).
        if initial_weights:
            self.initialize_weights()
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.Adam(self.parameters(), weight_decay=1e-5)
        self.schedule = optim.lr_scheduler.StepLR(self.optimizer, 20, 0.5)
        self.cuda(device_id)

    def forward(self, point_cloud, knn_graph):  # B,C,N
        """Return class logits (b, class_nums) from a point cloud and its
        precomputed KNN graph."""
        # Use only the 16 nearest neighbours from the precomputed graph.
        x = group_points(point_cloud, knn_graph[:, :, :16].contiguous())  # ---> (B,c,N,npoints)
        x = x.permute(0, 2, 3, 1)              # ----> (B,N,npoints,c)
        x = self.FC1(x)                        # ----> (B,N,npoints,384)
        b, n, npoints, c = x.size()
        x = torch.max(x, 2, keepdim=False)[0]  # ---> (b,n,384)
        point_cloud = point_cloud.transpose(1, 2)
        xyz = point_cloud
        y = self.FC2(point_cloud)              # ----> (b,n,64)
        y = self.geo1(y, xyz)                  # ----> (b,n,128)
        y = self.FC3(y)                        # ----> (b,n,256)
        y = self.geo2(y, xyz)                  # ----> (b,n,512)
        y = torch.cat([y, x], 2)               # ----> (b,n,896)
        y = self.geo3(y, xyz)                  # ----> (b,n,768)
        y = self.FC4(y)                        # ----> (b,n,2048)
        y = torch.max(y, 1, keepdim=False)[0]  # ---> (b,2048)
        y = self.classify(y)
        return y

    def loss(self, outputs, targets):
        """Cross-entropy classification loss."""
        return self.criterion(outputs, targets)

    def fit(self, dataloader, epoch, writer):
        """Train for one epoch; returns the mean per-batch loss."""
        global global_step
        self.train()
        batch_loss = 0.
        epoch_loss = 0.
        batch_nums = 0
        if self.schedule is not None:
            # NOTE(review): scheduler stepped at epoch start (legacy ordering).
            self.schedule.step()
        print('----------epoch %d start train----------' % epoch)
        for batch_idx, (inputs, knn_graph, targets) in enumerate(dataloader):
            inputs = inputs.cuda(self.device_id)
            targets = targets.cuda(self.device_id)
            # BUGFIX: honour self.device_id instead of hard-coded 'cuda:0'.
            knn_graph = knn_graph.to(device=self.device_id, dtype=torch.int)
            self.optimizer.zero_grad()
            outputs = self(inputs, knn_graph)
            losses = self.loss(outputs, targets)
            losses.backward()
            self.optimizer.step()
            batch_loss += losses.item()
            epoch_loss += losses.item()
            batch_nums += 1
            if (batch_idx + 1) % 8 == 0:  # batch_size=16, 16*8=128 samples
                print('[%d, %5d] loss %.3f' % (epoch, batch_idx, batch_loss / 8))
                global_step += 1
                # Guard like the sibling networks so writer=None is accepted.
                if writer is not None:
                    writer.add_scalar('scalar/batch_loss_every8', batch_loss / 8, global_step)
                batch_loss = 0.
        print('-----------epoch %d end train-----------' % epoch)
        print('epoch %d loss %.3f' % (epoch, epoch_loss / batch_nums))
        return epoch_loss / batch_nums

    def score(self, dataloader):
        """Return top-1 classification accuracy on `dataloader`."""
        self.eval()
        correct = 0.
        total = 0
        with torch.no_grad():
            for batch_idx, (inputs, knn_graph, targets) in enumerate(dataloader):
                inputs = inputs.cuda(self.device_id)
                targets = targets.cuda(self.device_id)
                # BUGFIX: honour self.device_id instead of hard-coded 'cuda:0'.
                knn_graph = knn_graph.to(device=self.device_id, dtype=torch.int)
                outputs = self(inputs, knn_graph)
                _, predicted = torch.max(outputs.data, 1)
                total += targets.size(0)
                correct += (predicted == targets).sum().item()
        print('Accuracy of the network on the test images: %.2f %%' % (100.0 * correct / total))
        return correct / total

    def initialize_weights(self):
        """Normal(0, 0.01) init for Conv1d/Linear weights with zero biases;
        BatchNorm1d reset to weight=1, bias=0."""
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                m.weight.data.normal_(0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
class parall_GeoNet(nn.Module):
    """GeoNet variant with three parallel Geoconv branches at two stages
    (different radius bands), concatenated at each stage."""

    def __init__(self, input_channels, class_nums=1, device_id=0, initial_weights=True):
        super(parall_GeoNet, self).__init__()
        self.knn_points = 16
        self.input_channels = input_channels
        self.class_nums = class_nums
        self.device_id = device_id
        self.FC2 = Fc(input_channels, [64], bn=True, activation_fn='relu')
        self.FC3 = Fc(384, [512], bn=True, activation_fn='relu')
        self.FC4 = Fc(1536, [1024], bn=True, activation_fn='relu')
        self.geo1_1 = Geoconv(64, 128, 64, 0.05, 0.15, bn=True)
        self.geo1_2 = Geoconv(64, 128, 64, 0.15, 0.3, bn=True)
        self.geo1_3 = Geoconv(64, 128, 64, 0.3, 0.6, bn=True)
        self.geo2_1 = Geoconv(512, 512, 128, 0.05, 0.15, bn=True)
        self.geo2_2 = Geoconv(512, 512, 128, 0.15, 0.3, bn=True)
        self.geo2_3 = Geoconv(512, 512, 128, 0.3, 0.6, bn=True)
        self.classify = nn.Sequential(
            nn.Linear(1024, 256, bias=False),
            nn.BatchNorm1d(256),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Linear(256, class_nums)
        )
        # BUGFIX: weight init moved after layer construction (previously the
        # call preceded every submodule, so nothing was initialized).
        if initial_weights:
            self.initialize_weights()
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.Adam(self.parameters(), weight_decay=1e-5)
        self.schedule = optim.lr_scheduler.StepLR(self.optimizer, 20, 0.5)
        self.cuda(device_id)

    def forward(self, point_cloud):  # B,C,N
        """Return class logits of shape (b, class_nums)."""
        point_cloud = point_cloud.transpose(1, 2)
        xyz = point_cloud
        y = self.FC2(point_cloud)                 # ----> (b,n,64)
        # Stage 1: three radius bands in parallel.
        y_1 = self.geo1_1(y, xyz)                 # ----> (b,n,128)
        y_2 = self.geo1_2(y, xyz)
        y_3 = self.geo1_3(y, xyz)
        y = torch.cat([y_1, y_2, y_3], 2)         # ---> (b,n,384)
        y = self.FC3(y)                           # ----> (b,n,512)
        # Stage 2: three radius bands in parallel.
        y_1 = self.geo2_1(y, xyz)                 # ----> (b,n,512)
        y_2 = self.geo2_2(y, xyz)                 # ----> (b,n,512)
        y_3 = self.geo2_3(y, xyz)                 # ----> (b,n,512)
        y = torch.cat([y_1, y_2, y_3], 2)         # ----> (b,n,1536)
        y = self.FC4(y)                           # ----> (b,n,1024)
        y = torch.max(y, 1, keepdim=False)[0]     # ---> (b,1024)
        y = self.classify(y)
        return y

    def loss(self, outputs, targets):
        """Cross-entropy classification loss."""
        return self.criterion(outputs, targets)

    def fit(self, dataloader, epoch, writer):
        """Train for one epoch; returns the mean per-batch loss."""
        global global_step
        self.train()
        batch_loss = 0.
        epoch_loss = 0.
        batch_nums = 0
        if self.schedule is not None:
            # NOTE(review): scheduler stepped at epoch start (legacy ordering).
            self.schedule.step()
        print('----------epoch %d start train----------' % epoch)
        for batch_idx, (inputs, _, targets) in enumerate(dataloader):
            inputs = inputs.cuda(self.device_id)
            targets = targets.cuda(self.device_id)
            self.optimizer.zero_grad()
            outputs = self(inputs)
            losses = self.loss(outputs, targets)
            losses.backward()
            self.optimizer.step()
            batch_loss += losses.item()
            epoch_loss += losses.item()
            batch_nums += 1
            if (batch_idx + 1) % 8 == 0:  # batch_size=16, 16*8=128 samples
                print('[%d, %5d] loss %.3f' % (epoch, batch_idx, batch_loss / 8))
                global_step += 1
                # Guard like the sibling networks so writer=None is accepted.
                if writer is not None:
                    writer.add_scalar('scalar/batch_loss_every8', batch_loss / 8, global_step)
                batch_loss = 0.
        print('-----------epoch %d end train-----------' % epoch)
        print('epoch %d loss %.3f' % (epoch, epoch_loss / batch_nums))
        return epoch_loss / batch_nums

    def score(self, dataloader):
        """Return top-1 classification accuracy on `dataloader`."""
        self.eval()
        correct = 0.
        total = 0
        with torch.no_grad():
            for batch_idx, (inputs, _, targets) in enumerate(dataloader):
                inputs = inputs.cuda(self.device_id)
                targets = targets.cuda(self.device_id)
                outputs = self(inputs)
                _, predicted = torch.max(outputs.data, 1)
                total += targets.size(0)
                correct += (predicted == targets).sum().item()
        print('Accuracy of the network on the test images: %.2f %%' % (100.0 * correct / total))
        return correct / total

    def initialize_weights(self):
        """Normal(0, 0.01) init for Conv1d/Linear weights with zero biases;
        BatchNorm1d reset to weight=1, bias=0."""
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                m.weight.data.normal_(0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
## GeoNet variant with spherical layers: the Geoconv blocks below are replaced by SphericalGeoconv.
class SphericalGeoNet(nn.Module):
    """GeoNet variant whose Geoconv layers are replaced by SphericalGeoconv
    (spherical-linear) layers; otherwise the same architecture and training
    loop as GeoNet."""

    def __init__(self, input_channels, class_nums=1, device_id=0, initial_weights=True):
        super(SphericalGeoNet, self).__init__()
        self.knn_points = 16
        self.input_channels = input_channels
        self.class_nums = class_nums
        self.device_id = device_id
        self.FC1 = Fc(input_channels, [64, 128, 384], input_dim=4, bn='BN', activation_fn='relu')
        self.FC2 = Fc(input_channels, [64], bn='BN', activation_fn='relu')
        self.FC3 = Fc(128, [256], bn='BN', activation_fn='relu')
        self.FC4 = Fc(768, [2048], bn='BN', activation_fn='relu')
        self.geo1 = SphericalGeoconv(64, 128, 64, 0.05, 0.15, bn=True)
        self.geo2 = SphericalGeoconv(256, 512, 64, 0.15, 0.3, bn=True)
        self.geo3 = SphericalGeoconv(896, 768, 64, 0.3, 0.6, bn=True)
        self.classify = nn.Sequential(
            nn.Linear(2048, 512, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Linear(512, class_nums)
        )
        # BUGFIX: weight init moved after layer construction (previously the
        # call preceded every submodule, so nothing was initialized).
        if initial_weights:
            self.initialize_weights()
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.Adam(self.parameters(), weight_decay=1e-5)
        self.schedule = optim.lr_scheduler.StepLR(self.optimizer, 10, 0.6)
        self.cuda(device_id)

    def forward(self, point_cloud):  # B,C,N
        """Return class logits of shape (b, class_nums)."""
        knn_graph, _ = batch_knn(point_cloud, point_cloud.clone(), self.knn_points)
        x = group_points(point_cloud, knn_graph)  # ---> (B,c,N,npoints)
        x = x.permute(0, 2, 3, 1)                 # ----> (B,N,npoints,c)
        x = self.FC1(x)                           # ----> (B,N,npoints,384)
        b, n, npoints, c = x.size()
        x = torch.max(x, 2, keepdim=False)[0]     # ---> (b,n,384)
        point_cloud = point_cloud.transpose(1, 2)
        xyz = point_cloud
        y = self.FC2(point_cloud)                 # ----> (b,n,64)
        y = self.geo1(y, xyz)                     # ----> (b,n,128)
        y = self.FC3(y)                           # ----> (b,n,256)
        y = self.geo2(y, xyz)                     # ----> (b,n,512)
        y = torch.cat([y, x], 2)                  # ----> (b,n,896)
        y = self.geo3(y, xyz)                     # ----> (b,n,768)
        y = self.FC4(y)                           # ----> (b,n,2048)
        y = torch.max(y, 1, keepdim=False)[0]     # ---> (b,2048)
        y = self.classify(y)
        return y

    def loss(self, outputs, targets):
        """Cross-entropy classification loss."""
        return self.criterion(outputs, targets)

    def fit(self, dataloader, epoch, writer):
        """Train for one epoch; returns the mean per-batch loss."""
        global global_step
        self.train()
        batch_loss = 0.
        epoch_loss = 0.
        batch_nums = 0
        if self.schedule is not None:
            # NOTE(review): scheduler stepped at epoch start (legacy ordering).
            self.schedule.step()
        print('----------epoch %d start train----------' % epoch)
        for batch_idx, (inputs, _, targets) in enumerate(dataloader):
            inputs = inputs.cuda(self.device_id)
            targets = targets.cuda(self.device_id)
            self.optimizer.zero_grad()
            outputs = self(inputs)
            losses = self.loss(outputs, targets)
            losses.backward()
            self.optimizer.step()
            batch_loss += losses.item()
            epoch_loss += losses.item()
            batch_nums += 1
            if (batch_idx + 1) % 8 == 0:  # batch_size=16, 16*8=128 samples
                print('[%d, %5d] loss %.3f' % (epoch, batch_idx, batch_loss / 8))
                global_step += 1
                if writer is not None:
                    writer.add_scalar('scalar/batch_loss_every8', batch_loss / 8, global_step)
                batch_loss = 0.
        print('-----------epoch %d end train-----------' % epoch)
        print('epoch %d loss %.3f' % (epoch, epoch_loss / batch_nums))
        return epoch_loss / batch_nums

    def score(self, dataloader):
        """Return top-1 classification accuracy on `dataloader`."""
        self.eval()
        correct = 0.
        total = 0
        with torch.no_grad():
            for batch_idx, (inputs, _, targets) in enumerate(dataloader):
                inputs = inputs.cuda(self.device_id)
                targets = targets.cuda(self.device_id)
                outputs = self(inputs)
                _, predicted = torch.max(outputs.data, 1)
                total += targets.size(0)
                correct += (predicted == targets).sum().item()
        print('Accuracy of the network on the test images: %.2f %%' % (100.0 * correct / total))
        return correct / total

    def initialize_weights(self):
        """Normal(0, 0.01) init for Conv1d/Linear weights with zero biases;
        BatchNorm1d reset to weight=1, bias=0."""
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                m.weight.data.normal_(0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
class TestKNNGeoNet(nn.Module):
    """Point-cloud classifier fusing per-neighbourhood MLP features with
    Geoconv features, followed by a small MLP classifier head.

    forward() expects the point cloud as (B, C, N) plus a precomputed knn
    graph (B, N, K); column 0 of the graph is assumed to be the point
    itself and is dropped before grouping.
    """

    def __init__(self, input_channels, class_nums=1, device_id=0, initial_weights=True):
        super(TestKNNGeoNet, self).__init__()
        self.knn_points = 32
        self.input_channels = input_channels
        self.class_nums = class_nums
        self.device_id = device_id
        self.FC1 = Fc(input_channels, [32, 128], input_dim=4, bn='BN', activation_fn='relu')
        self.FC2 = Fc(input_channels, [64], bn='BN', activation_fn='relu')
        self.FC3 = Fc(128, [256], bn='BN', activation_fn='relu')
        self.FC4 = Fc(640, [1024], bn='BN', activation_fn='relu')
        self.geo1 = Geoconv(64, 128, 64, 0.1, 0.2, bn=True)
        self.geo2 = Geoconv(256, 512, 64, 0.15, 0.3, bn=True)
        self.classify = nn.Sequential(
            nn.Linear(1024, 256, bias=False),
            nn.BatchNorm1d(256),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Linear(256, class_nums)
        )
        # BUG FIX: initialize_weights() was previously invoked before any
        # submodule existed, making it a silent no-op. It must run after
        # the layers are constructed for the custom init to take effect.
        if initial_weights:
            self.initialize_weights()
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.Adam(self.parameters(), weight_decay=1e-5)
        self.schedule = optim.lr_scheduler.StepLR(self.optimizer, 10, 0.6)
        debugPrint(self.knn_points)
        self.cuda(device_id)

    def forward(self, point_cloud, knn_graph):
        """point_cloud: (B, C, N); knn_graph: (B, N, K) int neighbour indices.
        Returns class logits of shape (B, class_nums)."""
        # Drop column 0 (the point itself) and gather neighbour features.
        x = group_points(point_cloud, knn_graph[:, :, 1:].contiguous())  # (B, C, N, k)
        x = x.permute(0, 2, 3, 1)                   # (B, N, k, C)
        x = self.FC1(x)                             # (B, N, k, 128)
        b, n, npoints, c = x.size()
        x = torch.max(x, 2, keepdim=False)[0]       # max over neighbours -> (B, N, 128)
        point_cloud = point_cloud.transpose(1, 2)   # (B, N, C)
        xyz = point_cloud
        y = self.FC2(point_cloud)                   # (B, N, 64)
        y = self.geo1(y, xyz)                       # (B, N, 128)
        y = self.FC3(y)                             # (B, N, 256)
        y = self.geo2(y, xyz)                       # (B, N, 512)
        y = torch.cat([y, x], 2)                    # (B, N, 640)
        y = self.FC4(y)                             # (B, N, 1024)
        y = torch.max(y, 1, keepdim=False)[0]       # global max pool -> (B, 1024)
        return self.classify(y)

    def loss(self, outputs, targets):
        """Cross-entropy between logits and integer class targets."""
        return self.criterion(outputs, targets)

    def fit(self, dataloader, epoch, writer):
        """Train one epoch; batches are (inputs, knn_graphs, targets).
        Returns the mean loss over the epoch."""
        global global_step
        self.train()
        batch_loss = 0.
        epoch_loss = 0.
        batch_nums = 0
        if self.schedule is not None:
            # Pre-1.1 PyTorch convention used in this file: step at epoch start.
            self.schedule.step()
        print('----------epoch %d start train----------' % epoch)
        for batch_idx, (inputs, knn_graphs, targets) in enumerate(dataloader):
            inputs = inputs.cuda(self.device_id)
            knn_graphs = knn_graphs.to(self.device_id, dtype=torch.int)
            targets = targets.cuda(self.device_id)
            self.optimizer.zero_grad()
            outputs = self(inputs, knn_graphs)
            losses = self.loss(outputs, targets)
            losses.backward()
            self.optimizer.step()
            batch_loss += losses.item()
            epoch_loss += losses.item()
            batch_nums += 1
            if (batch_idx + 1) % 8 == 0:  # log every 8 batches
                print('[%d, %5d] loss %.3f' % (epoch, batch_idx, batch_loss / 8))
                global_step += 1
                if writer is not None:
                    writer.add_scalar('scalar/batch_loss_every8', batch_loss / 8, global_step)
                batch_loss = 0.
        print('-----------epoch %d end train-----------' % epoch)
        print('epoch %d loss %.3f' % (epoch, epoch_loss / batch_nums))
        return epoch_loss / batch_nums

    def score(self, dataloader):
        """Return classification accuracy over *dataloader* (fraction in [0, 1])."""
        self.eval()
        correct = 0.
        total = 0
        with torch.no_grad():
            for batch_idx, (inputs, knn_graphs, targets) in enumerate(dataloader):
                inputs = inputs.cuda(self.device_id)
                knn_graphs = knn_graphs.to(self.device_id, dtype=torch.int)
                targets = targets.cuda(self.device_id)
                outputs = self(inputs, knn_graphs)
                _, predicted = torch.max(outputs.data, 1)
                total += targets.size(0)
                correct += (predicted == targets).sum().item()
        print('Accuracy of the network on the test images: %.2f %%' % (100.0 * correct / total))
        return correct / total

    def initialize_weights(self):
        """N(0, 0.01) init for conv/linear weights; identity init for batch norms."""
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                m.weight.data.normal_(0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                # BUG FIX: guard needed — the classifier uses Linear(bias=False).
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
class TestBallSplitGeoNet(nn.Module):
    """Point-cloud classifier using SphericalGeoconv layers.

    Unlike TestKNNGeoNet, the knn graph is computed on the fly inside
    forward(), so forward() takes only the (B, C, N) point cloud.
    """

    def __init__(self, input_channels, class_nums=1, device_id=0, initial_weights=True):
        super(TestBallSplitGeoNet, self).__init__()
        self.knn_points = 16
        self.input_channels = input_channels
        self.class_nums = class_nums
        self.device_id = device_id
        self.FC1 = Fc(input_channels, [32, 128], input_dim=4, bn='GN', activation_fn='relu')
        self.FC2 = Fc(input_channels, [64], bn='GN', activation_fn='relu')
        self.FC3 = Fc(128, [256], bn='GN', activation_fn='relu')
        self.FC4 = Fc(640, [1024], bn='BN', activation_fn='relu')
        self.geo1 = SphericalGeoconv(64, 128, 64, 0.1, 0.2, bn=True)
        self.geo2 = SphericalGeoconv(256, 512, 64, 0.15, 0.3, bn=True)
        self.classify = nn.Sequential(
            nn.Linear(1024, 256, bias=False),
            nn.BatchNorm1d(256),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Linear(256, class_nums)
        )
        # BUG FIX: initialize_weights() used to run before the submodules
        # above were created, so the custom initialization never touched
        # any layer. It now runs after construction.
        if initial_weights:
            self.initialize_weights()
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.Adam(self.parameters(), weight_decay=1e-5)
        self.schedule = optim.lr_scheduler.StepLR(self.optimizer, 10, 0.6)
        self.cuda(device_id)

    def forward(self, point_cloud):
        """point_cloud: (B, C, N). Returns class logits (B, class_nums)."""
        # Neighbourhood graph computed on the fly (self-neighbour included).
        knn_graph, _ = batch_knn(point_cloud, point_cloud.clone(), self.knn_points)
        x = group_points(point_cloud, knn_graph)    # (B, C, N, k)
        x = x.permute(0, 2, 3, 1)                   # (B, N, k, C)
        x = self.FC1(x)                             # (B, N, k, 128)
        b, n, npoints, c = x.size()
        x = torch.max(x, 2, keepdim=False)[0]       # (B, N, 128)
        point_cloud = point_cloud.transpose(1, 2)   # (B, N, C)
        xyz = point_cloud
        y = self.FC2(point_cloud)                   # (B, N, 64)
        y = self.geo1(y, xyz)                       # (B, N, 128)
        y = self.FC3(y)                             # (B, N, 256)
        y = self.geo2(y, xyz)                       # (B, N, 512)
        y = torch.cat([y, x], 2)                    # (B, N, 640)
        y = self.FC4(y)                             # (B, N, 1024)
        y = torch.max(y, 1, keepdim=False)[0]       # (B, 1024)
        return self.classify(y)

    def loss(self, outputs, targets):
        """Cross-entropy between logits and integer class targets."""
        return self.criterion(outputs, targets)

    def fit(self, dataloader, epoch, writer):
        """Train one epoch; batches are (inputs, _, targets) — the middle
        element (precomputed knn graph) is unused here.
        Returns the mean loss over the epoch."""
        global global_step
        self.train()
        batch_loss = 0.
        epoch_loss = 0.
        batch_nums = 0
        if self.schedule is not None:
            # Pre-1.1 PyTorch convention used in this file: step at epoch start.
            self.schedule.step()
        print('----------epoch %d start train----------' % epoch)
        for batch_idx, (inputs, _, targets) in enumerate(dataloader):
            inputs = inputs.cuda(self.device_id)
            targets = targets.cuda(self.device_id)
            self.optimizer.zero_grad()
            outputs = self(inputs)
            losses = self.loss(outputs, targets)
            losses.backward()
            self.optimizer.step()
            batch_loss += losses.item()
            epoch_loss += losses.item()
            batch_nums += 1
            if (batch_idx + 1) % 8 == 0:  # log every 8 batches
                print('[%d, %5d] loss %.3f' % (epoch, batch_idx, batch_loss / 8))
                global_step += 1
                if writer is not None:
                    writer.add_scalar('scalar/batch_loss_every8', batch_loss / 8, global_step)
                batch_loss = 0.
        print('-----------epoch %d end train-----------' % epoch)
        print('epoch %d loss %.3f' % (epoch, epoch_loss / batch_nums))
        return epoch_loss / batch_nums

    def score(self, dataloader):
        """Return classification accuracy over *dataloader* (fraction in [0, 1])."""
        self.eval()
        correct = 0.
        total = 0
        with torch.no_grad():
            for batch_idx, (inputs, _, targets) in enumerate(dataloader):
                inputs = inputs.cuda(self.device_id)
                targets = targets.cuda(self.device_id)
                outputs = self(inputs)
                _, predicted = torch.max(outputs.data, 1)
                total += targets.size(0)
                correct += (predicted == targets).sum().item()
        print('Accuracy of the network on the test images: %.2f %%' % (100.0 * correct / total))
        return correct / total

    def initialize_weights(self):
        """N(0, 0.01) init for conv/linear weights; identity init for batch norms."""
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                m.weight.data.normal_(0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                # BUG FIX: guard needed — the classifier uses Linear(bias=False).
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
class knnSphericalGeoNet(nn.Module):
    """Point-cloud classifier: grouped-neighbourhood MLP branch plus an
    ARPE-style relative-coordinate branch feeding SphericalGeoconv layers.

    forward() takes the point cloud (B, C, N) and a precomputed knn graph
    (B, N, K); column 0 of the graph is assumed to be the point itself.
    """

    def __init__(self, input_channels, class_nums=1, device_id=0, initial_weights=True):
        super(knnSphericalGeoNet, self).__init__()
        self.knn_points = 16
        self.input_channels = input_channels
        self.class_nums = class_nums
        self.device_id = device_id
        self.FC1 = Fc(input_channels, [32, 128], input_dim=4, bn='GN', activation_fn='relu')
        # ARPE-style per-point MLPs over relative neighbour coordinates.
        self.FC2_1 = Fc(input_channels, [32], bn='GN', activation_fn='relu')
        self.FC2_2 = Fc(32, [128], bn='GN', activation_fn='relu')
        self.FC3 = Fc(128, [256], bn='GN', activation_fn='relu')
        self.FC4 = Fc(640, [1024], bn='GN', activation_fn='relu')
        self.geo1 = SphericalGeoconv(128, 128, 64, 0.1, 0.2, bn=True)
        self.geo2 = SphericalGeoconv(256, 512, 64, 0.2, 0.3, bn=True)
        self.classify = nn.Sequential(
            nn.Linear(1024, 256, bias=False),
            nn.BatchNorm1d(256),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Linear(256, class_nums)
        )
        # BUG FIX: initialize_weights() used to run before the submodules
        # above were created, so the custom initialization never touched
        # any layer. It now runs after construction.
        if initial_weights:
            self.initialize_weights()
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.Adam(self.parameters(), weight_decay=1e-5)
        self.schedule = optim.lr_scheduler.StepLR(self.optimizer, 10, 0.6)
        self.cuda(device_id)

    def forward(self, point_cloud, knn_graph):
        """point_cloud: (B, C, N); knn_graph: (B, N, K) int neighbour indices.
        Returns class logits of shape (B, class_nums)."""
        x = group_points(point_cloud, knn_graph[:, :, 1:].contiguous())  # (B, C, N, k)
        x = x.permute(0, 2, 3, 1)                   # (B, N, k, C)
        x = self.FC1(x)                             # (B, N, k, 128)
        b, n, npoints, c = x.size()
        x = torch.max(x, 2, keepdim=False)[0]       # (B, N, 128)
        # ARPE branch: larger neighbourhood, relative coordinates.
        knn_graph, _ = batch_knn(point_cloud, point_cloud.clone(), self.knn_points * 2)
        y = group_points(point_cloud, knn_graph[:, :, 1:].contiguous())
        y = y - point_cloud.unsqueeze(3)            # neighbour offsets
        y = torch.cat([point_cloud.unsqueeze(3), y], 3)     # (B, C, N, 1+k)
        y = y.transpose(1, 3).contiguous().view(b, -1, 3)
        y = self.FC2_1(y)                           # (B, (1+k)*N, 32)
        y = y.view(b, -1, n, 32)
        y = torch.max(y, 1, keepdim=False)[0]       # (B, N, 32)
        y = self.FC2_2(y)                           # (B, N, 128)
        xyz = point_cloud.transpose(1, 2)           # (B, N, C)
        y = self.geo1(y, xyz)                       # (B, N, 128)
        y = self.FC3(y)                             # (B, N, 256)
        y = self.geo2(y, xyz)                       # (B, N, 512)
        y = torch.cat([y, x], 2)                    # (B, N, 640)
        y = self.FC4(y)                             # (B, N, 1024)
        y = torch.max(y, 1, keepdim=False)[0]       # (B, 1024)
        return self.classify(y)

    def loss(self, outputs, targets):
        """Cross-entropy between logits and integer class targets."""
        return self.criterion(outputs, targets)

    def fit(self, dataloader, epoch, writer):
        """Train one epoch; batches are (inputs, knn_graph, targets).
        Returns the mean loss over the epoch."""
        global global_step
        self.train()
        batch_loss = 0.
        epoch_loss = 0.
        batch_nums = 0
        if self.schedule is not None:
            # Pre-1.1 PyTorch convention used in this file: step at epoch start.
            self.schedule.step()
        print('----------epoch %d start train----------' % epoch)
        for batch_idx, (inputs, knn_graph, targets) in enumerate(dataloader):
            inputs = inputs.cuda(self.device_id)
            targets = targets.cuda(self.device_id)
            # BUG FIX: was hard-coded to cuda:0, breaking runs on any other GPU.
            knn_graph = knn_graph.to(self.device_id, dtype=torch.int)
            self.optimizer.zero_grad()
            outputs = self(inputs, knn_graph)
            losses = self.loss(outputs, targets)
            losses.backward()
            self.optimizer.step()
            batch_loss += losses.item()
            epoch_loss += losses.item()
            batch_nums += 1
            if (batch_idx + 1) % 8 == 0:  # log every 8 batches
                print('[%d, %5d] loss %.3f' % (epoch, batch_idx, batch_loss / 8))
                global_step += 1
                # BUG FIX: guard added for consistency with the sibling
                # classes — previously crashed when writer was None.
                if writer is not None:
                    writer.add_scalar('scalar/batch_loss_every8', batch_loss / 8, global_step)
                batch_loss = 0.
        print('-----------epoch %d end train-----------' % epoch)
        print('epoch %d loss %.3f' % (epoch, epoch_loss / batch_nums))
        return epoch_loss / batch_nums

    def score(self, dataloader):
        """Return classification accuracy over *dataloader* (fraction in [0, 1])."""
        self.eval()
        correct = 0.
        total = 0
        with torch.no_grad():
            for batch_idx, (inputs, knn_graph, targets) in enumerate(dataloader):
                inputs = inputs.cuda(self.device_id)
                targets = targets.cuda(self.device_id)
                # BUG FIX: was hard-coded to cuda:0.
                knn_graph = knn_graph.to(self.device_id, dtype=torch.int)
                outputs = self(inputs, knn_graph)
                _, predicted = torch.max(outputs.data, 1)
                total += targets.size(0)
                correct += (predicted == targets).sum().item()
        print('Accuracy of the network on the test images: %.2f %%' % (100.0 * correct / total))
        return correct / total

    def initialize_weights(self):
        """N(0, 0.01) init for conv/linear weights; identity init for batch norms."""
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                m.weight.data.normal_(0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                # BUG FIX: guard needed — the classifier uses Linear(bias=False).
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
### Added by Zhu Yijie, 2019-05-30: variant with 1-D persistence-diagram (PD1) features.
class knnPD1SphericalGeoNet(nn.Module):
    """knnSphericalGeoNet variant that additionally consumes a 1-D
    persistence-diagram feature vector (pd1, shape (b, 100)), encoded by a
    small Conv1d stack and concatenated to the global point-cloud feature
    before classification.
    """

    def __init__(self, input_channels, class_nums=1, device_id=0, initial_weights=True):
        super(knnPD1SphericalGeoNet, self).__init__()
        self.knn_points = 16
        self.input_channels = input_channels
        self.class_nums = class_nums
        self.device_id = device_id
        self.FC1 = Fc(input_channels, [32, 128], input_dim=4, bn='GN', activation_fn='relu')
        # ARPE-style per-point MLPs over relative neighbour coordinates.
        self.FC2_1 = Fc(input_channels, [32], bn='GN', activation_fn='relu')
        self.FC2_2 = Fc(32, [128], bn='GN', activation_fn='relu')
        self.FC3 = Fc(128, [256], bn='GN', activation_fn='relu')
        self.FC4 = Fc(640, [1024], bn='BN', activation_fn='relu')
        self.geo1 = SphericalGeoconv(128, 128, 64, 0.1, 0.2, bn=True)
        self.geo2 = SphericalGeoconv(256, 512, 64, 0.15, 0.3, bn=True)
        # +64 for the flattened pd1 embedding (16 channels x 4 positions).
        self.classify = nn.Sequential(
            nn.Linear(1024 + 64, 256, bias=False),
            nn.BatchNorm1d(256),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Linear(256, class_nums)
        )
        # Encoder for the (b, 100) persistence-diagram vector.
        self.pd1_conv = nn.Sequential(
            nn.Conv1d(1, out_channels=4, kernel_size=5, stride=2),            # -> (b, 4, 48)
            nn.ReLU(),
            nn.MaxPool1d(3, stride=2),                                        # -> (b, 4, 23)
            nn.Conv1d(4, out_channels=16, kernel_size=5, stride=2, bias=False),  # -> (b, 16, 10)
            nn.BatchNorm1d(16),
            nn.ReLU(),
            nn.MaxPool1d(3, stride=2)                                         # -> (b, 16, 4)
        )
        # BUG FIX: initialize_weights() used to run before the submodules
        # above were created, so the custom initialization never touched
        # any layer. It now runs after construction.
        if initial_weights:
            self.initialize_weights()
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.Adam(self.parameters(), weight_decay=1e-5)
        self.schedule = optim.lr_scheduler.StepLR(self.optimizer, 10, 0.6)
        self.cuda(device_id)

    def forward(self, point_cloud, knn_graph, pd1):
        """
        Args:
            point_cloud: float tensor (b, 3, n).
            knn_graph: int tensor (b, n, K); column 0 is the point itself.
            pd1: float tensor (b, 100) — persistence-diagram features.

        Returns class logits of shape (b, class_nums).
        """
        pd1 = pd1.unsqueeze(1)          # (b, 1, 100)
        pd1 = self.pd1_conv(pd1)        # (b, 16, 4)
        x = group_points(point_cloud, knn_graph[:, :, 1:].contiguous())  # (b, c, n, k)
        x = x.permute(0, 2, 3, 1)                   # (b, n, k, c)
        x = self.FC1(x)                             # (b, n, k, 128)
        b, n, npoints, c = x.size()
        x = torch.max(x, 2, keepdim=False)[0]       # (b, n, 128)
        # ARPE branch: larger neighbourhood, relative coordinates.
        knn_graph, _ = batch_knn(point_cloud, point_cloud.clone(), self.knn_points * 2)
        y = group_points(point_cloud, knn_graph[:, :, 1:].contiguous())
        y = y - point_cloud.unsqueeze(3)            # neighbour offsets
        y = torch.cat([point_cloud.unsqueeze(3), y], 3)     # (b, c, n, 1+k)
        y = y.transpose(1, 3).contiguous().view(b, -1, 3)
        y = self.FC2_1(y)                           # (b, (1+k)*n, 32)
        y = y.view(b, -1, n, 32)
        y = torch.max(y, 1, keepdim=False)[0]       # (b, n, 32)
        y = self.FC2_2(y)                           # (b, n, 128)
        xyz = point_cloud.transpose(1, 2)           # (b, n, c)
        y = self.geo1(y, xyz)                       # (b, n, 128)
        y = self.FC3(y)                             # (b, n, 256)
        y = self.geo2(y, xyz)                       # (b, n, 512)
        y = torch.cat([y, x], 2)                    # (b, n, 640)
        y = self.FC4(y)                             # (b, n, 1024)
        y = torch.max(y, 1, keepdim=False)[0]       # (b, 1024)
        pd1 = pd1.view(b, -1)                       # (b, 64)
        y = torch.cat([y, pd1], 1)                  # (b, 1024 + 64)
        return self.classify(y)

    def loss(self, outputs, targets):
        """Cross-entropy between logits and integer class targets."""
        return self.criterion(outputs, targets)

    def fit(self, dataloader, epoch, writer):
        """Train one epoch; batches are (inputs, knn_graph, pd1, targets).
        Returns the mean loss over the epoch."""
        global global_step
        self.train()
        batch_loss = 0.
        epoch_loss = 0.
        batch_nums = 0
        if self.schedule is not None:
            # Pre-1.1 PyTorch convention used in this file: step at epoch start.
            self.schedule.step()
        print('----------epoch %d start train----------' % epoch)
        for batch_idx, (inputs, knn_graph, pd1, targets) in enumerate(dataloader):
            inputs = inputs.cuda(self.device_id)
            targets = targets.cuda(self.device_id)
            # BUG FIX: both tensors were hard-coded to cuda:0.
            knn_graph = knn_graph.to(self.device_id, dtype=torch.int)
            pd1 = pd1.to(self.device_id, dtype=torch.float32)
            self.optimizer.zero_grad()
            outputs = self(inputs, knn_graph, pd1)
            losses = self.loss(outputs, targets)
            losses.backward()
            self.optimizer.step()
            batch_loss += losses.item()
            epoch_loss += losses.item()
            batch_nums += 1
            if (batch_idx + 1) % 8 == 0:  # log every 8 batches
                print('[%d, %5d] loss %.3f' % (epoch, batch_idx, batch_loss / 8))
                global_step += 1
                # BUG FIX: guard added for consistency with sibling classes.
                if writer is not None:
                    writer.add_scalar('scalar/batch_loss_every8', batch_loss / 8, global_step)
                batch_loss = 0.
        print('-----------epoch %d end train-----------' % epoch)
        print('epoch %d loss %.3f' % (epoch, epoch_loss / batch_nums))
        return epoch_loss / batch_nums

    def score(self, dataloader):
        """Return classification accuracy over *dataloader* (fraction in [0, 1])."""
        self.eval()
        correct = 0.
        total = 0
        with torch.no_grad():
            for batch_idx, (inputs, knn_graph, pd1, targets) in enumerate(dataloader):
                inputs = inputs.cuda(self.device_id)
                targets = targets.cuda(self.device_id)
                # BUG FIX: both tensors were hard-coded to cuda:0.
                knn_graph = knn_graph.to(self.device_id, dtype=torch.int)
                pd1 = pd1.to(self.device_id, dtype=torch.float32)
                outputs = self(inputs, knn_graph, pd1)
                _, predicted = torch.max(outputs.data, 1)
                total += targets.size(0)
                correct += (predicted == targets).sum().item()
        print('Accuracy of the network on the test images: %.2f %%' % (100.0 * correct / total))
        return correct / total

    def initialize_weights(self):
        """N(0, 0.01) init for conv/linear weights; identity init for batch norms."""
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                m.weight.data.normal_(0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                # BUG FIX: guard needed — the classifier uses Linear(bias=False).
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
### Added by Zhu Yijie, 2019-06-07: variant with persistence-image (PI) features.
class knnPISphericalGeoNet(nn.Module):
    """knnSphericalGeoNet variant that additionally consumes a persistence
    image (pi, shape (b, 50, 50)), encoded by a Conv2d stack and
    concatenated to the global point-cloud feature before classification.
    """

    def __init__(self, input_channels, class_nums=1, device_id=0, initial_weights=True):
        super(knnPISphericalGeoNet, self).__init__()
        self.knn_points = 16
        self.input_channels = input_channels
        self.class_nums = class_nums
        self.device_id = device_id
        self.FC1 = Fc(input_channels, [32, 128], input_dim=4, bn='GN', activation_fn='relu')
        # ARPE-style per-point MLPs over relative neighbour coordinates.
        self.FC2_1 = Fc(input_channels, [32], bn='GN', activation_fn='relu')
        self.FC2_2 = Fc(32, [128], bn='GN', activation_fn='relu')
        self.FC3 = Fc(128, [256], bn='GN', activation_fn='relu')
        self.FC4 = Fc(640, [1024], bn='BN', activation_fn='relu')
        self.geo1 = SphericalGeoconv(128, 128, 64, 0.1, 0.2, bn=True)
        self.geo2 = SphericalGeoconv(256, 512, 64, 0.15, 0.3, bn=True)
        # +512 for the flattened pi embedding (128 channels x 2 x 2).
        self.classify = nn.Sequential(
            nn.Linear(1024 + 512, 256, bias=False),
            nn.BatchNorm1d(256),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Linear(256, class_nums)
        )
        # Encoder for the (b, 50, 50) persistence image.
        self.pi_conv = nn.Sequential(
            nn.Conv2d(1, out_channels=4, kernel_size=4, stride=2, bias=False),    # -> (b, 4, 24, 24)
            nn.BatchNorm2d(4),
            nn.ReLU(),
            nn.Conv2d(4, out_channels=16, kernel_size=3, stride=2, bias=False),   # -> (b, 16, 11, 11)
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.Conv2d(16, out_channels=64, kernel_size=3, stride=2, bias=False),  # -> (b, 64, 5, 5)
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, out_channels=128, kernel_size=3, stride=2, bias=False), # -> (b, 128, 2, 2)
            nn.BatchNorm2d(128),
            nn.ReLU()
        )
        # BUG FIX: initialize_weights() used to run before the submodules
        # above were created, so the custom initialization never touched
        # any layer. It now runs after construction.
        if initial_weights:
            self.initialize_weights()
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.Adam(self.parameters(), weight_decay=1e-5)
        self.schedule = optim.lr_scheduler.StepLR(self.optimizer, 10, 0.6)
        self.cuda(device_id)

    def forward(self, point_cloud, knn_graph, pi):
        """
        Args:
            point_cloud: float tensor (b, 3, n).
            knn_graph: int tensor (b, n, 17); column 0 is the point itself.
            pi: float tensor (b, 50, 50) persistence image.

        Returns class logits of shape (b, class_nums).
        """
        pi = pi.unsqueeze(1)            # (b, 1, 50, 50)
        pi = self.pi_conv(pi)           # (b, 128, 2, 2)
        x = group_points(point_cloud, knn_graph[:, :, 1:].contiguous())  # (b, c, n, k)
        x = x.permute(0, 2, 3, 1)                   # (b, n, k, c)
        x = self.FC1(x)                             # (b, n, k, 128)
        b, n, npoints, c = x.size()
        x = torch.max(x, 2, keepdim=False)[0]       # (b, n, 128)
        # ARPE branch: larger neighbourhood, relative coordinates.
        knn_graph, _ = batch_knn(point_cloud, point_cloud.clone(), self.knn_points * 2)
        y = group_points(point_cloud, knn_graph[:, :, 1:].contiguous())
        y = y - point_cloud.unsqueeze(3)            # neighbour offsets
        y = torch.cat([point_cloud.unsqueeze(3), y], 3)     # (b, c, n, 1+k)
        y = y.transpose(1, 3).contiguous().view(b, -1, 3)
        y = self.FC2_1(y)                           # (b, (1+k)*n, 32)
        y = y.view(b, -1, n, 32)
        y = torch.max(y, 1, keepdim=False)[0]       # (b, n, 32)
        y = self.FC2_2(y)                           # (b, n, 128)
        xyz = point_cloud.transpose(1, 2)           # (b, n, c)
        y = self.geo1(y, xyz)                       # (b, n, 128)
        y = self.FC3(y)                             # (b, n, 256)
        y = self.geo2(y, xyz)                       # (b, n, 512)
        y = torch.cat([y, x], 2)                    # (b, n, 640)
        y = self.FC4(y)                             # (b, n, 1024)
        y = torch.max(y, 1, keepdim=False)[0]       # (b, 1024)
        pi = pi.view(b, -1)                         # (b, 512)
        y = torch.cat([y, pi], 1)                   # (b, 1024 + 512)
        return self.classify(y)

    def loss(self, outputs, targets):
        """Cross-entropy between logits and integer class targets."""
        return self.criterion(outputs, targets)

    def fit(self, dataloader, epoch, writer=None):
        """Train one epoch; batches are (inputs, knn_graph, pi, targets).
        Returns the mean loss over the epoch."""
        global global_step
        self.train()
        batch_loss = 0.
        epoch_loss = 0.
        batch_nums = 0
        if self.schedule is not None:
            # Pre-1.1 PyTorch convention used in this file: step at epoch start.
            self.schedule.step()
        print('----------epoch %d start train----------' % epoch)
        for batch_idx, (inputs, knn_graph, pi, targets) in enumerate(dataloader):
            inputs = inputs.cuda(self.device_id)
            targets = targets.cuda(self.device_id)
            # BUG FIX: both tensors were hard-coded to cuda:0.
            knn_graph = knn_graph.to(self.device_id, dtype=torch.int)
            pi = pi.to(self.device_id, dtype=torch.float32)
            self.optimizer.zero_grad()
            outputs = self(inputs, knn_graph, pi)
            losses = self.loss(outputs, targets)
            losses.backward()
            self.optimizer.step()
            batch_loss += losses.item()
            epoch_loss += losses.item()
            batch_nums += 1
            if (batch_idx + 1) % 8 == 0:  # log every 8 batches
                print('[%d, %5d] loss %.3f' % (epoch, batch_idx, batch_loss / 8))
                global_step += 1
                if writer is not None:
                    writer.add_scalar('scalar/batch_loss_every8', batch_loss / 8, global_step)
                batch_loss = 0.
        print('-----------epoch %d end train-----------' % epoch)
        print('epoch %d loss %.3f' % (epoch, epoch_loss / batch_nums))
        return epoch_loss / batch_nums

    def score(self, dataloader):
        """Return classification accuracy over *dataloader* (fraction in [0, 1])."""
        self.eval()
        correct = 0.
        total = 0
        with torch.no_grad():
            for batch_idx, (inputs, knn_graph, pi, targets) in enumerate(dataloader):
                inputs = inputs.cuda(self.device_id)
                targets = targets.cuda(self.device_id)
                # BUG FIX: both tensors were hard-coded to cuda:0.
                knn_graph = knn_graph.to(self.device_id, dtype=torch.int)
                pi = pi.to(self.device_id, dtype=torch.float32)
                outputs = self(inputs, knn_graph, pi)
                _, predicted = torch.max(outputs.data, 1)
                total += targets.size(0)
                correct += (predicted == targets).sum().item()
        print('Accuracy of the network on the test images: %.2f %%' % (100.0 * correct / total))
        return correct / total

    def initialize_weights(self):
        """N(0, 0.01) init for conv/linear weights; identity init for norms.

        Extended to Conv2d/BatchNorm2d so the pi_conv encoder owned by this
        class is initialized by the same scheme as the 1-D layers.
        """
        for m in self.modules():
            if isinstance(m, (nn.Conv1d, nn.Conv2d)):
                m.weight.data.normal_(0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                # BUG FIX: guard needed — the classifier uses Linear(bias=False).
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
### Added by Zhu Yijie, 2019-06-08: pretrained two-branch (point cloud + PI) variant.
class pretrained_knnPISphericalGeoNet(nn.Module):
    def __init__(self, input_channels, class_nums=1, device_id=0, initial_weights=True):
        """Two-branch classifier: a point-cloud ("kc") branch and a
        persistence-image ("pi") branch, fused by concatenation in forward().

        Three optimizers are built so each branch can be (pre)trained
        separately before joint fine-tuning:
          optimizer1 — pi branch only; optimizer2 — kc branch only;
          optimizer3 — joint fine-tuning with per-group learning rates;
          optimizer  — everything, uniform settings.
        """
        super(pretrained_knnPISphericalGeoNet,self).__init__()
        self.knn_points = 16
        self.input_channels = input_channels
        self.class_nums = class_nums
        self.device_id = device_id
        # best_score tracks the best validation score seen so far (set externally).
        self.best_score=0
        self.name='pretrained_knnPISphericalGeoNet_concate'
        print(self.name)
        # NOTE(review): this call runs BEFORE any submodule is created, so the
        # custom initialization is a silent no-op and all layers keep PyTorch
        # defaults. Moving it below the layer definitions would change model
        # initialization (and needs a bias-None guard for the bias=False
        # Linear layers) — left in place here to preserve current behavior.
        if initial_weights:
            self.initialize_weights()
        self.FC1 = Fc(input_channels,[32,128],input_dim=4, bn='GN', activation_fn='relu')
        # ARPE-style per-point MLPs over relative neighbour coordinates.
        self.FC2_1 = Fc(input_channels,[32], bn='GN', activation_fn='relu')
        self.FC2_2 = Fc(32,[128], bn='GN', activation_fn='relu')
        self.FC2_second = Fc(input_channels,[32,128], bn='GN', activation_fn='relu')
        self.FC3 = Fc(128,[256], bn='GN', activation_fn='relu')
        self.FC4 = Fc(640,[1024], bn='BN', activation_fn='relu')
        self.FC4_second = Fc(512,[1024], bn='BN', activation_fn='relu')
        self.geo1 = SphericalGeoconv(128, 128, 64, 0.1, 0.2, bn=True)
        self.geo2 = SphericalGeoconv(256, 512, 64, 0.15, 0.3, bn=True)
        # self.geo3 = SphericalGeoconv(896, 768, 64, 0.3, 0.6, bn=True)
        # Projects the 1024-d global kc feature to a 256-d embedding.
        self.classify_kc = nn.Sequential(
            nn.Linear(1024, 256,bias=False),
            nn.BatchNorm1d(256),
            nn.ReLU(True)
        )
        # Head used by forward_first (grouped-feature-only baseline).
        self.classify_first = nn.Sequential(
            nn.Linear(128, 256,bias=False),
            nn.BatchNorm1d(256),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Linear(256, class_nums)
        )
        self.classify_second = nn.Sequential(
            nn.Linear(1024, 256,bias=False),
            nn.BatchNorm1d(256),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Linear(256, class_nums)
        )
        # Projects the flattened pi_conv output (128*2*2 = 512) to 256-d.
        self.classify_pi=nn.Sequential(
            # nn.Dropout(0.5),
            nn.Linear(512, 256, bias=False),
            nn.BatchNorm1d(256),
            nn.ReLU(True)
        )
        # Shared 256-d -> class logits head (used by forward_pi / forward_kc).
        self.classify = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(256, class_nums)
        )
        # Head for the concatenated (kc 256 + pi 256) embedding in forward().
        self.classify_new = nn.Sequential(
            # nn.Linear(256+256, class_nums)
            nn.Dropout(0.5),
            nn.Linear(256+256, class_nums)
        )
        ##### Persistence-image encoder; input assumed (b, 1, 50, 50).
        self.pi_conv=nn.Sequential(
            nn.Conv2d(1,out_channels=4,kernel_size=4,stride=2,bias=False),#--->(b,4,24,24)
            nn.BatchNorm2d(4),
            nn.ReLU(),
            # nn.MaxPool1d(3, stride=2), #--->(b,4,23)
            nn.Conv2d(4,out_channels=16,kernel_size=3,stride=2,bias=False),#--->(b,16,11,11)
            nn.BatchNorm2d(16),
            # nn.GroupNorm(4, 8),
            nn.ReLU(),
            # nn.MaxPool2d(3, stride=2), #--->(b,16,11,11)
            nn.Conv2d(16,out_channels=64,kernel_size=3,stride=2,bias=False),#--->(b,64,5,5)
            nn.BatchNorm2d(64),
            # nn.GroupNorm(4, 16),
            nn.ReLU(),
            # nn.MaxPool1d(3, stride=2) #--->(b,16,12)
            nn.Conv2d(64,out_channels=128,kernel_size=3,stride=2,bias=False),#--->(b,128,2,2)
            nn.BatchNorm2d(128),
            # nn.GroupNorm(4, 16),
            nn.ReLU()
        )
        self.criterion = nn.CrossEntropyLoss()
        # self.optimizer = optim.Adam(self.parameters(),lr=1e-5, weight_decay=1e-5)
        # self.schedule = optim.lr_scheduler.StepLR(self.optimizer, 10, 0.6)
        #### Parameter partition (2019.6.7): kcparams = everything whose name
        #### contains neither 'pi' nor 'classify' (i.e. FC*/geo* layers).
        self.kcparams=[]
        for name, param in self.named_parameters():
            if 'pi' not in name:
                if 'classify' not in name:
                    self.kcparams.append(param)
        # classify_param = all heads (every parameter whose name has 'classify').
        self.classify_param=[]
        for name, param in self.named_parameters():
            if 'classify' in name:
                self.classify_param.append(param)
        # optimizer1: persistence-image branch + its head only.
        self.optimizer1 = optim.Adam([{'params':self.classify_pi.parameters()},
                            {'params':self.classify.parameters()},
                            {'params':self.pi_conv.parameters()}],
                            weight_decay=1e-5)
        self.schedule1 = optim.lr_scheduler.StepLR(self.optimizer1, 10, 0.6)
        # self.optimizer2 = optim.Adam(self.kcparams, weight_decay=1e-5)
        # optimizer2: point-cloud branch + its heads only.
        self.optimizer2 = optim.Adam([{'params':self.kcparams},
                            {'params':self.classify_kc.parameters()},
                            {'params': self.classify.parameters()}],
                            lr=1e-3,weight_decay=1e-5)
        self.schedule2 = optim.lr_scheduler.StepLR(self.optimizer2, 10, 0.6)
        # optimizer3: joint fine-tuning — small lr on pretrained trunks,
        # larger lr on the classification heads.
        self.optimizer3 = optim.Adam([{'params':self.kcparams, 'lr':1e-5},
                            {'params':self.classify_param, 'lr':1e-3},
                            {'params':self.pi_conv.parameters(),'lr':1e-5}],
                            lr=1e-5, weight_decay=1e-5)
        self.schedule3 = optim.lr_scheduler.StepLR(self.optimizer3, 6, 0.6)
        ## Plain whole-model optimizer (variant 2.3).
        self.optimizer = optim.Adam(self.parameters(), weight_decay=1e-5)
        self.schedule = optim.lr_scheduler.StepLR(self.optimizer, 10, 0.6)
        ##
        self.cuda(device_id)
# @profile
def forward(self, point_cloud,knn_graph, pi): #B,C,N
"""
inputs:
point_cloud: float32 tensor shape=(b, 3, n)
knn_graph: int32 tensor shape=(b, n, 17)
pi: float32 tensor shape=(b,50,50)
"""
y, _ = self.forward_kc(point_cloud, knn_graph)
b=pi.size(0)
pi=self.pi_conv(pi.unsqueeze(1))
pi=self.classify_pi(pi.view(b,-1)) ##--->(b,256)
# y=torch.div(torch.add(y,1,pi),0.5) ##pretrained_knnPISphericalGeoNet
# y=y.mul(pi)
# y=torch.add(y,1,pi)
y=torch.cat([y,pi],1) #2019.6.7 -->(b,256+128*2*2)
# y=torch.div(torch.add(pi,1,y),2)
# y=torch.max(y,pi)
# y=F.relu(y)
# y= self.classify(y)
y= self.classify_new(y)
return y
def forward_pi(self,pi):
b,h,w=pi.size()
pi=pi.unsqueeze(1)
pi=self.pi_conv(pi)
pi=pi.view(b,-1)
# y=torch.cat([y,pi],1) #2019.6.7 -->(b,1024+128*2*2)
outputs=self.classify_pi(pi)
outputs= self.classify(outputs)
return outputs
    def forward_kc(self, point_cloud, knn_graph=None):
        """Point-cloud branch: local grouping + ARPE embedding + two Geoconv layers.

        Parameters:
            point_cloud: float32 tensor (b, 3, n).
            knn_graph: int tensor (b, n, 2*knn_points); recomputed via batch_knn
                when None.
        Returns:
            (y, outputs): the pooled feature fed to classify_kc, and the logits
            from self.classify applied to it.
        """
        if knn_graph is None:
            knn_graph, _ = batch_knn(point_cloud, point_cloud.clone(), self.knn_points*2)
        # x = group_points(point_cloud, knn_graph)
        # Local branch: group the 16 nearest neighbours of every point.
        x = group_points(point_cloud, knn_graph[:, :, :16].contiguous()) #---> (B,c,N,npoints)
        x=x.permute(0,2,3,1) #---->(B,N,npoints,c)
        x=self.FC1(x) #------->(B,N,npoints,384)
        b,n,npoints,c=x.size()
        x=torch.max(x, 2, keepdim=False)[0] #--->(b,n,384)
        ### begin ARPE
        # knn_graph, _ = batch_knn(point_cloud, point_cloud.clone(), self.knn_points*2)
        # Neighbour offsets relative to each centre point; the centre itself is
        # prepended so the shared MLP sees absolute + relative coordinates.
        y = group_points(point_cloud, knn_graph[:, :, 1:].contiguous())
        y = y - point_cloud.unsqueeze(3)
        y=torch.cat([point_cloud.unsqueeze(3),y],3) #---->(b,c,n,1+npoints)
        y=y.transpose(1,3).contiguous().view(b,-1,3)
        y=self.FC2_1(y) #--->(b,(1+npoints)*n,32)
        y=y.view(b,-1,n,32)
        y=torch.max(y, 1, keepdim=False)[0] #--->(b,n,32)
        y=self.FC2_2(y) #--->(b,n,64)
        ##end ARPE
        xyz=point_cloud.transpose(1,2)
        y=self.geo1(y,xyz) #---->(b,n,128)
        y=self.FC3(y) #----->(b,n,256)
        y=self.geo2(y,xyz) #---->(b,n,512)
        y=torch.cat([y,x],2) #---->(b,n,896)
        # y=self.geo3(y,xyz) #---->(b,n,768)
        y=self.FC4(y) #----->(b,n,2048)
        # Global max-pool over points, then the KC classification head.
        y=torch.max(y, 1, keepdim=False)[0] #--->(b,1024)
        y=self.classify_kc(y) #2019.6.7 --->(b,256)
        outputs= self.classify(y)
        return y, outputs
def forward_first(self, point_cloud, knn_graph=None):
if knn_graph is None:
knn_graph, _ = batch_knn(point_cloud, point_cloud.clone(), self.knn_points*2)
# x = group_points(point_cloud, knn_graph)
x = group_points(point_cloud, knn_graph[:, :, :16].contiguous()) #---> (B,c,N,npoints)
x=x.permute(0,2,3,1) #---->(B,N,npoints,c)
x=self.FC1(x) #------->(B,N,npoints,384)
b,n,npoints,c=x.size()
x=torch.max(x, 2, keepdim=False)[0] #--->(b,n,384)
# y=torch.cat([y,x],2) #---->(b,n,896)
# y=self.FC4(y) #----->(b,n,1024)
y=x
y=torch.max(y, 1, keepdim=False)[0] #--->(b,128)
y=self.classify_first(y) #2019.6.7 --->(b,256)
return y
    def forward_second(self, point_cloud, knn_graph=None):
        """Second-stage branch: ARPE embedding + Geoconv stack, without the
        local-grouping (FC1) path used by forward_kc.

        Parameters:
            point_cloud: float32 tensor (b, 3, n).
            knn_graph: int tensor (b, n, 2*knn_points); recomputed when None.
        Returns:
            Output of classify_second on the pooled global feature.
        """
        if knn_graph is None:
            knn_graph, _ = batch_knn(point_cloud, point_cloud.clone(), self.knn_points*2)
        # x = group_points(point_cloud, knn_graph)
        b,c,n=point_cloud.size()
        # debugPrint(point_cloud.size())
        ### begin ARPE
        # knn_graph, _ = batch_knn(point_cloud, point_cloud.clone(), self.knn_points*2)
        # Centre point plus neighbour offsets, embedded by a shared MLP.
        y = group_points(point_cloud, knn_graph[:, :, 1:].contiguous())
        y = y - point_cloud.unsqueeze(3)
        y=torch.cat([point_cloud.unsqueeze(3),y],3) #---->(b,c,n,1+npoints)
        y=y.transpose(1,3).contiguous().view(b,-1,3)
        y=self.FC2_1(y) #--->(b,(1+npoints)*n,32)
        y=y.view(b,-1,n,32)
        y=torch.max(y, 1, keepdim=False)[0] #--->(b,n,32)
        y=self.FC2_2(y) #--->(b,n,128)
        ##end ARPE
        xyz=point_cloud.transpose(1,2)
        # y = self.FC2_second(xyz) # 2.4, 3--->32---->128
        y=self.geo1(y,xyz) #---->(b,n,128)
        y=self.FC3(y) #----->(b,n,256)
        y=self.geo2(y,xyz) #---->(b,n,512)
        y=self.FC4_second(y) #(x,x,512)----->(b,n,1024)
        y=torch.max(y, 1, keepdim=False)[0] #--->(b,1024)
        y=self.classify_second(y) #2019.6.7 --->(b,256)
        return y
def loss(self, outputs, targets):
return self.criterion(outputs, targets)
def fit_pi(self, dataloader, epoch, writer=None):
global global_step
self.train()
batch_loss = 0.
epoch_loss = 0.
batch_nums = 0
if self.schedule1 is not None:
self.schedule1.step()
# print('----------epoch %d start train pi----------' % epoch)
# for batch_idx, (inputs, targets) in enumerate(dataloader):
for batch_idx, (pi, targets) in enumerate(dataloader): #zhuyijie
# inputs = inputs.cuda(self.device_id)
targets = targets.cuda(self.device_id)
# knn_graph = knn_graph.to(torch.device('cuda:0'),dtype=torch.int)
pi = pi.to(device=self.device_id,dtype=torch.float32)
self.optimizer1.zero_grad()
# outputs = self(inputs,knn_graph,pi)
outputs= self.forward_pi(pi)
##
losses = self.loss(outputs, targets)
losses.backward()
self.optimizer1.step()
batch_loss += losses.item()
epoch_loss += losses.item()
batch_nums += 1
if (batch_idx + 1) % 8 == 0: #batch_size=16 16*8=128 samples
# print('[%d, %5d] loss %.3f' % (epoch, batch_idx, batch_loss / 8))
global_step += 1
# print('global_step={}'.format(global_step))
if writer is not None:
writer.add_scalar('scalar/batch_loss_every8',batch_loss / 8, global_step)
batch_loss = 0.
# print('-----------epoch %d end train pi-----------' % epoch)
print('train pi epoch %d loss %.3f' % (epoch, epoch_loss / batch_nums))
def fit_kc(self, dataloader, epoch, writer=None):
global global_step
self.train()
batch_loss = 0.
epoch_loss = 0.
batch_nums = 0
if self.schedule2 is not None:
self.schedule2.step()
print('----------epoch %d start train kc----------' % epoch)
# for batch_idx, (inputs, targets) in enumerate(dataloader):
for batch_idx, (inputs, knn_graph, _, targets) in enumerate(dataloader): #zhuyijie
inputs = inputs.cuda(self.device_id)
targets = targets.cuda(self.device_id)
knn_graph = knn_graph.to(device=self.device_id,dtype=torch.int)
self.optimizer2.zero_grad()
_, outputs= self.forward_kc(inputs, knn_graph)
###
losses = self.loss(outputs, targets)
losses.backward()
self.optimizer2.step()
batch_loss += losses.item()
epoch_loss += losses.item()
batch_nums += 1
if (batch_idx + 1) % 8 == 0: #batch_size=16 16*8=128 samples
print('[%d, %5d] loss %.3f' % (epoch, batch_idx, batch_loss / 8))
global_step += 1
# print('global_step={}'.format(global_step))
if writer is not None:
writer.add_scalar('scalar/batch_loss_every8',batch_loss / 8, global_step)
batch_loss = 0.
print('-----------epoch %d end train kc-----------' % epoch)
print('epoch %d loss %.3f' % (epoch, epoch_loss / batch_nums))
def fit_second(self, dataloader, epoch, writer=None):
global global_step
self.train()
batch_loss = 0.
epoch_loss = 0.
batch_nums = 0
if self.schedule is not None:
self.schedule.step()
print('----------epoch %d start train second----------' % epoch)
# for batch_idx, (inputs, targets) in enumerate(dataloader):
for batch_idx, (inputs, knn_graph, _, targets) in enumerate(dataloader): #zhuyijie
inputs = inputs.cuda(self.device_id)
targets = targets.cuda(self.device_id)
knn_graph = knn_graph.to(device=self.device_id,dtype=torch.int)
self.optimizer.zero_grad()
outputs= self.forward_second(inputs, knn_graph)
###
losses = self.loss(outputs, targets)
losses.backward()
self.optimizer.step()
batch_loss += losses.item()
epoch_loss += losses.item()
batch_nums += 1
if (batch_idx + 1) % 8 == 0: #batch_size=16 16*8=128 samples
print('[%d, %5d] loss %.3f' % (epoch, batch_idx, batch_loss / 8))
global_step += 1
# print('global_step={}'.format(global_step))
if writer is not None:
writer.add_scalar('scalar/batch_loss_every8',batch_loss / 8, global_step)
batch_loss = 0.
print('-----------epoch %d end train second-----------' % epoch)
print('epoch %d loss %.3f' % (epoch, epoch_loss / batch_nums))
def fit(self, dataloader, epoch, writer=None):
global global_step
self.train()
batch_loss = 0.
epoch_loss = 0.
batch_nums = 0
for param_group in self.optimizer3.param_groups:
print("current learning rate={}".format(param_group['lr']))
if self.schedule3 is not None:
self.schedule3.step()
print('----------epoch %d start train----------' % epoch)
for batch_idx, (inputs, knn_graph, pi, targets) in enumerate(dataloader): #zhuyijie
inputs = inputs.cuda(self.device_id)
targets = targets.cuda(self.device_id)
knn_graph = knn_graph.to(device=self.device_id,dtype=torch.int)
pi = pi.to(device=self.device_id,dtype=torch.float32)
# inputs, knn_graph, pi, targets = dataloader.next()
# print(targets)
# batch_idx = -1
# while inputs is not None:
# batch_idx += 1
self.optimizer3.zero_grad()
outputs = self(inputs,knn_graph,pi)
losses = self.loss(outputs, targets)
losses.backward()
self.optimizer3.step()
batch_loss += losses.item()
epoch_loss += losses.item()
batch_nums += 1
if (batch_idx + 1) % 8 == 0: #batch_size=16 16*8=128 samples
print('[%d, %5d] loss %.3f' % (epoch, batch_idx, batch_loss / 8))
global_step += 1
# print('global_step={}'.format(global_step))
if writer is not None:
writer.add_scalar('scalar/batch_loss_every8',batch_loss / 8, global_step)
batch_loss = 0.
# inputs, knn_graph, pi, targets = dataloader.next()
print('-----------epoch %d end train-----------' % epoch)
print('epoch %d loss %.3f' % (epoch, epoch_loss / batch_nums))
return epoch_loss / batch_nums
def score_pi(self, dataloader):
self.eval()
correct = 0.
total = 0
with torch.no_grad():
# for batch_idx, (inputs, targets) in enumerate(dataloader):
for batch_idx, (pi, targets) in enumerate(dataloader): #zhuyijie
# inputs = inputs.cuda(self.device_id)
targets = targets.cuda(self.device_id)
# knn_graph = knn_graph.to(torch.device('cuda:0'),dtype=torch.int)
pi = pi.to(device=self.device_id,dtype=torch.float32)
outputs = self.forward_pi(pi)
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += (predicted == targets).sum().item()
print('Accuracy of the PI network: %.2f %%' % (100.0 * correct / total))
return correct / total
def score_kc(self, dataloader):
self.eval()
correct = 0.
total = 0
with torch.no_grad():
# for batch_idx, (inputs, targets) in enumerate(dataloader):
for batch_idx, (inputs, knn_graph, _, targets) in enumerate(dataloader): #zhuyijie
inputs = inputs.cuda(self.device_id)
targets = targets.cuda(self.device_id)
knn_graph = knn_graph.to(device=self.device_id,dtype=torch.int)
_, outputs = self.forward_kc(inputs,knn_graph)
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += (predicted == targets).sum().item()
# print('Accuracy of the KC network: %.2f %%' % (100.0 * correct / total))
score = 100.0 * correct / total
print('Accuracy of the KC network: %.2f %%' % score)
if score>self.best_score:
self.best_score=score
print('------- The best score is: %.2f %%' % self.best_score)
return correct / total
def score_second(self, dataloader):
self.eval()
correct = 0.
total = 0
with torch.no_grad():
# for batch_idx, (inputs, targets) in enumerate(dataloader):
for batch_idx, (inputs, knn_graph, _, targets) in enumerate(dataloader): #zhuyijie
inputs = inputs.cuda(self.device_id)
targets = targets.cuda(self.device_id)
knn_graph = knn_graph.to(device=self.device_id,dtype=torch.int)
outputs = self.forward_second(inputs,knn_graph)
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += (predicted == targets).sum().item()
score = 100.0 * correct / total
print('Accuracy of the KC network: %.2f %%' % score)
if score>self.best_score:
self.best_score=score
print('------- The best score is: %.2f %%' % self.best_score)
return correct / total
# @profile
def score(self, dataloader,is_save=False):
self.eval()
correct = 0.
total = 0
for param_group in self.optimizer3.param_groups:
print("current learning rate={}".format(param_group['lr']))
with torch.no_grad():
for batch_idx, (inputs, knn_graph, pi, targets) in enumerate(dataloader): #zhuyijie
inputs = inputs.cuda(self.device_id)
targets = targets.cuda(self.device_id)
knn_graph = knn_graph.to(device=self.device_id,dtype=torch.int)
pi = pi.to(device=self.device_id,dtype=torch.float32)
# inputs, knn_graph, pi, targets = dataloader.next()
# iteration = 0
# while inputs is not None:
# iteration += 1
# 训练代码
outputs = self(inputs,knn_graph,pi)
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += (predicted == targets).sum().item()
# inputs, knn_graph, pi, targets = dataloader.next()
score=correct / total
print('Accuracy of the total network: %.2f %%' % (100.0 * score))
if is_save:
if self.best_score<score:
self.best_score=score
torch.save(self, './model_param/{}_best_weight_1120.ckpt'.format(self.name))
return score
    def initialize_weights(self):
        """Custom init: N(0, 0.01) weights for Conv1d/Linear, identity for BatchNorm1d.

        NOTE(review): Conv2d/BatchNorm2d modules (e.g. pi_conv) are not handled
        here and keep PyTorch's default initialisation — confirm this is
        intentional.
        """
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                m.weight.data.normal_(0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
###zhuyijie 2019.11.14
class first_two_knnPISphericalGeoNet(nn.Module):
    """Point-cloud classifier: local neighbour grouping (FC1) fused with an
    ARPE embedding refined by two Geoconv layers, trained end-to-end with Adam.
    """
    def __init__(self, input_channels, class_nums=1, device_id=0, initial_weights=True):
        super(first_two_knnPISphericalGeoNet, self).__init__()
        self.knn_points = 16
        self.input_channels = input_channels
        self.class_nums = class_nums
        self.device_id = device_id
        self.best_score = 0
        self.name = 'first_two_knnPISphericalGeoNet'
        print(self.name)
        self.FC1 = Fc(input_channels, [32, 64, 128], input_dim=4, bn='GN', activation_fn='relu')
        # ARPE branch
        self.FC2_1 = Fc(input_channels, [32], bn='GN', activation_fn='relu')
        self.FC2_2 = Fc(32, [128], bn='GN', activation_fn='relu')
        self.FC3 = Fc(128, [256], bn='GN', activation_fn='relu')
        self.FC4 = Fc(640, [1024], bn='BN', activation_fn='relu')
        self.geo1 = Geoconv(128, 128, 64, 0.1, 0.2, bn=True)
        self.geo2 = Geoconv(256, 512, 64, 0.15, 0.3, bn=True)
        self.classify_kc = nn.Sequential(
            nn.Linear(1024, 256, bias=False),
            nn.BatchNorm1d(256),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Linear(256, class_nums)
        )
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.Adam(self.parameters(), weight_decay=1e-5)
        self.schedule = optim.lr_scheduler.StepLR(self.optimizer, 10, 0.6)
        # BUG FIX: initialize_weights() was previously called at the very top of
        # __init__, before any submodule existed, so the custom initialisation
        # never touched the layers. Run it after construction instead.
        if initial_weights:
            self.initialize_weights()
        self.cuda(device_id)

    def forward_kc(self, point_cloud, knn_graph=None):
        """Full forward pass: returns class logits of shape (b, class_nums).

        NOTE: the supplied knn_graph is deliberately ignored and recomputed from
        the point cloud (the original code overwrote the argument with None).
        """
        knn_graph, _ = batch_knn(point_cloud, point_cloud.clone(), self.knn_points*2)
        # Local branch: group the 16 nearest neighbours of every point.
        x = group_points(point_cloud, knn_graph[:, :, :16].contiguous())  # (B, c, N, npoints)
        x = x.permute(0, 2, 3, 1)  # (B, N, npoints, c)
        x = self.FC1(x)  # (B, N, npoints, 128)
        b, n, npoints, c = x.size()
        x = torch.max(x, 2, keepdim=False)[0]  # (b, n, 128)
        # ARPE: centre point plus neighbour offsets through a shared MLP.
        y = group_points(point_cloud, knn_graph[:, :, 1:].contiguous())
        y = y - point_cloud.unsqueeze(3)
        y = torch.cat([point_cloud.unsqueeze(3), y], 3)  # (b, c, n, 1+npoints)
        y = y.transpose(1, 3).contiguous().view(b, -1, 3)
        y = self.FC2_1(y)  # (b, (1+npoints)*n, 32)
        y = y.view(b, -1, n, 32)
        y = torch.max(y, 1, keepdim=False)[0]  # (b, n, 32)
        y = self.FC2_2(y)  # (b, n, 128)
        xyz = point_cloud.transpose(1, 2)
        y = self.geo1(y, xyz)  # (b, n, 128)
        y = self.FC3(y)  # (b, n, 256)
        y = self.geo2(y, xyz)  # (b, n, 512)
        y = torch.cat([y, x], 2)  # (b, n, 640)
        y = self.FC4(y)  # (b, n, 1024)
        y = torch.max(y, 1, keepdim=False)[0]  # (b, 1024)
        y = self.classify_kc(y)  # (b, class_nums)
        return y

    def loss(self, outputs, targets):
        """Cross-entropy between predicted logits and integer class targets."""
        return self.criterion(outputs, targets)

    def fit_kc(self, dataloader, epoch, writer=None):
        """One training epoch; dataloader yields (inputs, knn_graph, targets)."""
        global global_step
        self.train()
        batch_loss = 0.
        epoch_loss = 0.
        batch_nums = 0
        if self.schedule is not None:
            self.schedule.step()
        print('----------epoch %d start train kc----------' % epoch)
        for batch_idx, (inputs, knn_graph, targets) in enumerate(dataloader):
            inputs = inputs.cuda(self.device_id)
            targets = targets.cuda(self.device_id)
            knn_graph = knn_graph.to(device=self.device_id, dtype=torch.int)
            self.optimizer.zero_grad()
            outputs = self.forward_kc(inputs, knn_graph)
            losses = self.loss(outputs, targets)
            losses.backward()
            self.optimizer.step()
            batch_loss += losses.item()
            epoch_loss += losses.item()
            batch_nums += 1
            if (batch_idx + 1) % 8 == 0:  # report every 8 mini-batches
                print('[%d, %5d] loss %.3f' % (epoch, batch_idx, batch_loss / 8))
                global_step += 1
                if writer is not None:
                    writer.add_scalar('scalar/batch_loss_every8', batch_loss / 8, global_step)
                batch_loss = 0.
        print('-----------epoch %d end train kc-----------' % epoch)
        print('epoch %d loss %.3f' % (epoch, epoch_loss / batch_nums))

    def score_kc(self, dataloader):
        """Evaluate accuracy; returns a fraction in [0, 1]."""
        self.eval()
        correct = 0.
        total = 0
        with torch.no_grad():
            for batch_idx, (inputs, knn_graph, targets) in enumerate(dataloader):
                inputs = inputs.cuda(self.device_id)
                targets = targets.cuda(self.device_id)
                knn_graph = knn_graph.to(device=self.device_id, dtype=torch.int)
                outputs = self.forward_kc(inputs, knn_graph)
                _, predicted = torch.max(outputs.data, 1)
                total += targets.size(0)
                correct += (predicted == targets).sum().item()
        print('Accuracy of the KC network: %.2f %%' % (100.0 * correct / total))
        return correct / total

    def initialize_weights(self):
        """Custom init: N(0, 0.01) for Conv1d/Linear weights, identity for BatchNorm1d."""
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                m.weight.data.normal_(0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
###ZHUYIJIE 2019.5.30
class PD1Net(nn.Module):
    """Classify 1-D persistence diagrams with a small 1-D CNN.

    forward input: float32 tensor (b, L) — assumes L=400 so the flattened conv
    output is 16*50 = 800 (the old docstring said (b, 100); confirm against the
    dataloader). Output: (b, class_nums) logits.
    """
    def __init__(self, input_channels, class_nums=1, device_id=0, initial_weights=True):
        super(PD1Net, self).__init__()
        self.knn_points = 16
        self.input_channels = input_channels
        self.class_nums = class_nums
        self.device_id = device_id
        self.classify = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(800, class_nums)
        )
        # Each stride-2 conv halves the length: L -> L/2 -> L/4 -> L/8.
        self.pd1_conv = nn.Sequential(
            nn.Conv1d(1, out_channels=4, kernel_size=2, stride=2),                 # (b, 4, L/2)
            nn.ReLU(),
            nn.Conv1d(4, out_channels=8, kernel_size=2, stride=2, bias=False),     # (b, 8, L/4)
            nn.GroupNorm(4, 8),
            nn.ReLU(),
            nn.Conv1d(8, out_channels=16, kernel_size=2, stride=2, bias=False),    # (b, 16, L/8)
            nn.GroupNorm(4, 16),
            nn.ReLU()
        )
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.Adam(self.parameters(), weight_decay=1e-5)
        self.schedule = optim.lr_scheduler.StepLR(self.optimizer, 10, 0.6)
        # BUG FIX: initialize_weights() used to run before any layer was
        # created, so the custom initialisation was a silent no-op.
        if initial_weights:
            self.initialize_weights()
        self.cuda(device_id)

    def forward(self, pd1):
        """pd1: float32 tensor (b, L); returns (b, class_nums) logits."""
        b, c = pd1.size()
        pd1 = pd1.unsqueeze(1)          # (b, 1, L)
        pd1 = self.pd1_conv(pd1)
        pd1 = pd1.view(b, -1)           # flatten to (b, 800)
        return self.classify(pd1)

    def loss(self, outputs, targets):
        """Cross-entropy between predicted logits and integer class targets."""
        return self.criterion(outputs, targets)

    def fit(self, dataloader, epoch, writer=None):
        """Train one epoch; dataloader yields (pd1, targets). Returns mean loss."""
        global global_step
        self.train()
        batch_loss = 0.
        epoch_loss = 0.
        batch_nums = 0
        if self.schedule is not None:
            self.schedule.step()
        for batch_idx, (pd1, targets) in enumerate(dataloader):
            targets = targets.cuda(self.device_id)
            # CONSISTENCY FIX: use self.device_id (was hard-coded 'cuda:0').
            pd1 = pd1.to(device=self.device_id, dtype=torch.float32)
            self.optimizer.zero_grad()
            outputs = self(pd1)
            losses = self.loss(outputs, targets)
            losses.backward()
            self.optimizer.step()
            batch_loss += losses.item()
            epoch_loss += losses.item()
            batch_nums += 1
            if (batch_idx + 1) % 8 == 0:  # log every 8 mini-batches
                global_step += 1
                if writer is not None:
                    writer.add_scalar('scalar/batch_loss_every8', batch_loss / 8, global_step)
                batch_loss = 0.
        print('epoch %d loss %.3f' % (epoch, epoch_loss / batch_nums))
        return epoch_loss / batch_nums

    def score(self, dataloader):
        """Evaluate accuracy; returns a fraction in [0, 1]."""
        self.eval()
        correct = 0.
        total = 0
        with torch.no_grad():
            for batch_idx, (pd1, targets) in enumerate(dataloader):
                targets = targets.cuda(self.device_id)
                # CONSISTENCY FIX: use self.device_id (was hard-coded 'cuda:0').
                pd1 = pd1.to(device=self.device_id, dtype=torch.float32)
                outputs = self(pd1)
                _, predicted = torch.max(outputs.data, 1)
                total += targets.size(0)
                correct += (predicted == targets).sum().item()
        print('Accuracy of the network on the test images: %.2f %%' % (100.0 * correct / total))
        return correct / total

    def initialize_weights(self):
        """Custom init: N(0, 0.01) for Conv1d/Linear weights, identity for BatchNorm1d.

        NOTE(review): GroupNorm layers keep their default init — confirm intentional.
        """
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                m.weight.data.normal_(0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
###ZHUYIJIE 2019.5.30
class PINet(nn.Module):
    """Classify 50x50 persistence images with a small 2-D CNN.

    forward input: float32 tensor (b, 50, 50); output: (b, class_nums) logits.
    """
    def __init__(self, input_channels, class_nums=1, device_id=0, initial_weights=True):
        super(PINet, self).__init__()
        self.knn_points = 16
        self.input_channels = input_channels
        self.class_nums = class_nums
        self.device_id = device_id
        self.classify = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(512, class_nums)
        )
        # 50x50 image -> (b, 128, 2, 2); flattened feature size 128*2*2 = 512.
        self.pi_conv = nn.Sequential(
            nn.Conv2d(1, out_channels=4, kernel_size=4, stride=2, bias=False),    # (b, 4, 24, 24)
            nn.BatchNorm2d(4),
            nn.ReLU(),
            nn.Conv2d(4, out_channels=16, kernel_size=3, stride=2, bias=False),   # (b, 16, 11, 11)
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.Conv2d(16, out_channels=64, kernel_size=3, stride=2, bias=False),  # (b, 64, 5, 5)
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, out_channels=128, kernel_size=3, stride=2, bias=False), # (b, 128, 2, 2)
            nn.BatchNorm2d(128),
            nn.ReLU(),
        )
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.Adam(self.parameters(), weight_decay=1e-5)
        self.schedule = optim.lr_scheduler.StepLR(self.optimizer, 10, 0.6)
        # BUG FIX: initialize_weights() used to run before any layer was
        # created, so the custom initialisation was a silent no-op.
        if initial_weights:
            self.initialize_weights()
        self.cuda(device_id)

    def forward(self, pi):  # B,H,W
        """pi: float32 tensor (b, h, w) persistence image; returns (b, class_nums)."""
        pi = pi.unsqueeze(1)            # (b, 1, h, w)
        b, c, h, w = pi.size()
        pi = self.pi_conv(pi)
        pi = pi.view(b, -1)             # flatten to (b, 512)
        return self.classify(pi)

    def loss(self, outputs, targets):
        """Cross-entropy between predicted logits and integer class targets."""
        return self.criterion(outputs, targets)

    def fit(self, dataloader, epoch, writer=None):
        """Train one epoch; dataloader yields (pi, targets). Returns mean loss."""
        global global_step
        self.train()
        batch_loss = 0.
        epoch_loss = 0.
        batch_nums = 0
        if self.schedule is not None:
            self.schedule.step()
        for batch_idx, (pi, targets) in enumerate(dataloader):
            targets = targets.cuda(self.device_id)
            # CONSISTENCY FIX: use self.device_id (was hard-coded 'cuda:0').
            pi = pi.to(device=self.device_id, dtype=torch.float32)
            self.optimizer.zero_grad()
            outputs = self(pi)
            losses = self.loss(outputs, targets)
            losses.backward()
            self.optimizer.step()
            batch_loss += losses.item()
            epoch_loss += losses.item()
            batch_nums += 1
            if (batch_idx + 1) % 8 == 0:  # log every 8 mini-batches
                global_step += 1
                if writer is not None:
                    writer.add_scalar('scalar/batch_loss_every8', batch_loss / 8, global_step)
                batch_loss = 0.
        print('epoch %d loss %.3f' % (epoch, epoch_loss / batch_nums))
        return epoch_loss / batch_nums

    def score(self, dataloader):
        """Evaluate accuracy; returns a fraction in [0, 1]."""
        self.eval()
        correct = 0.
        total = 0
        with torch.no_grad():
            for batch_idx, (pi, targets) in enumerate(dataloader):
                targets = targets.cuda(self.device_id)
                # CONSISTENCY FIX: use self.device_id (was hard-coded 'cuda:0').
                pi = pi.to(device=self.device_id, dtype=torch.float32)
                outputs = self(pi)
                _, predicted = torch.max(outputs.data, 1)
                total += targets.size(0)
                correct += (predicted == targets).sum().item()
        print('Accuracy of the network on the test images: %.2f %%' % (100.0 * correct / total))
        return correct / total

    def initialize_weights(self):
        """Custom init: N(0, 0.01) for Conv1d/Linear weights, identity for BatchNorm1d.

        NOTE(review): Conv2d/BatchNorm2d layers in pi_conv keep PyTorch's
        default initialisation — confirm intentional.
        """
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                m.weight.data.normal_(0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
| 39.693235
| 177
| 0.531177
| 24,786
| 187,749
| 3.886952
| 0.018357
| 0.005314
| 0.022794
| 0.021756
| 0.963754
| 0.957194
| 0.952804
| 0.949783
| 0.944676
| 0.939652
| 0
| 0.064102
| 0.309269
| 187,749
| 4,729
| 178
| 39.701628
| 0.678788
| 0.153466
| 0
| 0.904418
| 0
| 0
| 0.04413
| 0.006892
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.003306
| null | null | 0.042381
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c383badf18708123857622d7fb860c6173aada5c
| 190,767
|
py
|
Python
|
IATI2LOD/src/conversion scripts/IatiElements.py
|
KasperBrandt/IATI2LOD
|
3a4fbcbf59d324e948b14509f74c50633d36a497
|
[
"MIT"
] | 1
|
2019-08-03T00:52:44.000Z
|
2019-08-03T00:52:44.000Z
|
IATI2LOD/src/conversion scripts/IatiElements.py
|
KasperBrandt/IATI2LOD
|
3a4fbcbf59d324e948b14509f74c50633d36a497
|
[
"MIT"
] | 1
|
2015-10-11T09:47:25.000Z
|
2015-10-16T12:58:43.000Z
|
IATI2LOD/src/conversion scripts/IatiElements.py
|
KasperBrandt/IATI2LOD
|
3a4fbcbf59d324e948b14509f74c50633d36a497
|
[
"MIT"
] | 1
|
2021-05-29T03:43:01.000Z
|
2021-05-29T03:43:01.000Z
|
## By Kasper Brandt
## Last updated on 02-06-2013
from rdflib import RDF, RDFS, Literal, URIRef, Namespace, OWL
from rdflib.graph import Graph
import AttributeHelper, hashlib
class ActivityElements :
'''Class for converting XML elements of self.iati activities to a RDFLib self.graph.'''
def __init__(self, defaults):
'''Initializes class.
Parameters
@defaults: A dictionary of defaults.'''
self.id = defaults['id'].replace(" ", "%20")
self.default_language = defaults['language']
self.default_currency = defaults['currency']
self.default_finance_type = defaults['finance_type']
self.default_flow_type = defaults['flow_type']
self.default_aid_type = defaults['aid_type']
self.default_tied_status = defaults['tied_status']
self.hierarchy = defaults['hierarchy']
self.linked_data_uri = defaults['linked_data_uri']
self.iati = defaults['namespace']
self.iati_custom = Namespace(defaults['namespace'] + "custom/")
self.graph = Graph()
self.graph.bind('iati', self.iati)
self.graph.bind('iati-custom', self.iati_custom)
self.graph.bind('activity', self.iati['activity/'])
self.graph.bind('related-activity', self.iati['related-activity/'])
self.graph.add((self.iati['activity/' + self.id],
RDF.type,
self.iati['activity']))
if not self.hierarchy == None:
self.graph.add((self.iati['activity/' + self.id],
self.iati['activity-hierarchy'],
Literal(self.hierarchy)))
if not self.linked_data_uri == None:
self.linked_data_uri = self.linked_data_uri.replace(" ", "%20")
self.graph.add((self.iati['activity/' + self.id],
OWL.sameAs,
URIRef(self.linked_data_uri)))
def get_result(self):
    '''Returns the RDFLib graph built up for this activity so far.
    
    Returns
    @graph: The RDFLib graph containing all statements added by the
    element-conversion methods.'''
    return self.graph
def process_unknown_tag(self, tag):
    '''Returns the namespace and name to use for a non-IATI element.
    
    Parameters
    @tag: The original tag, possibly containing a brace-wrapped XML
    namespace or a "prefix:" qualifier.
    Returns
    @namespace: The RDFLib Namespace to be used.
    @name: The name of the tag.'''
    tag = tag.replace("{", "").replace("}", "")
    if ":" in tag:
        if tag[:4] == "http":
            # Fully qualified URI: the last path segment is the name.
            return Namespace(tag.replace(" ", "-")), tag.rsplit('/',1)[1].replace(" ", "%20")
        # Prefixed name (e.g. "ns:tag"): drop the prefix and fall through.
        tag = tag.split(":")[1]
    # The original duplicated the following logic in both the ":"-containing
    # and plain branches; it is unified here with identical behavior.
    if tag[:9] == "activity-":
        return Namespace(self.iati[tag.replace(" ", "-")]), tag.replace(" ", "%20")
    return Namespace(self.iati["activity-" + tag.replace(" ", "-")]), str("activity-" + tag.replace(" ", "%20"))
def convert_unknown(self, xml):
'''Converts non-IATI standard elements up to 2 levels to a RDFLib self.graph.
Parameters:
@xml: The XML of this element.'''
if not "ignore" in xml.tag:
namespace, name = self.process_unknown_tag(xml.tag)
children_elements = xml.findall("./")
if children_elements == []:
# No children
if (not xml.text == None) and (not xml.text == ""):
if len(xml.text) > 1:
self.graph.add((self.iati['activity/' + self.id],
namespace,
Literal(xml.text)))
for key in xml.attrib:
key_text = xml.attrib[key]
if "}" in key:
key = key.rsplit('}',1)[1]
if (not key_text == None) and (not key_text == ""):
self.graph.add((self.iati['activity/' + self.id],
URIRef(namespace + '-' + str(key).replace(" ", "-")),
Literal(key_text)))
else:
# Does have children
self.graph.add((self.iati['activity/' + self.id],
namespace,
self.iati['activity/' + self.id + '/' + str(name)]))
for key in xml.attrib:
key_text = xml.attrib[key]
if "}" in key:
key = key.rsplit('}',1)[1]
if (not key_text == None) and (not key_text == ""):
self.graph.add((self.iati['activity/' + self.id + '/' + str(name)],
self.iati_custom[str(key).replace(" ", "-")],
Literal(key_text)))
for child in xml:
children_elements = child.findall("./")
child_namespace, child_name = self.process_unknown_tag(child.tag)
if children_elements == []:
# No grand-children
if not child.text == None:
if len(child.text) > 1:
self.graph.add((self.iati['activity/' + self.id + '/' + str(name)],
child_namespace,
Literal(child.text)))
for key in child.attrib:
key_text = child.attrib[key]
if "}" in key:
key = key.rsplit('}',1)[1]
if (not key_text == None) and (not key_text == ""):
self.graph.add((self.iati['activity/' + self.id + '/' + str(name)],
URIRef(child_namespace + '-' + str(key).replace(" ", "-")),
Literal(key_text)))
else:
# Has grand-children
self.graph.add((self.iati['activity/' + self.id + '/' + str(name)],
URIRef(namespace + '-' + str(child_name)),
self.iati['activity/' + self.id + '/' + str(name) + '/' + str(child_name)]))
for key in child.attrib:
key_text = child.attrib[key]
if "}" in key:
key = key.rsplit('}',1)[1]
if (not key_text == None) and (not key_text == ""):
self.graph.add((self.iati['activity/' + self.id + '/' + str(name) + '/' + str(child_name)],
self.iati_custom[str(key).replace(" ", "-")],
Literal(key_text)))
for grandchild in child:
grandchildren_elements = grandchild.findall("./")
grandchild_namespace, grandchild_name = self.process_unknown_tag(grandchild.tag)
if grandchildren_elements == []:
# No grand-grand-children
if not grandchild == None:
if len(grandchild.text) > 1:
self.graph.add((self.iati['activity/' + self.id + '/' + str(name) + '/' + str(child_name)],
grandchild_namespace,
Literal(grandchild.text)))
for key in grandchild.attrib:
key_text = grandchild.attrib[key]
if "}" in key:
key = key.rsplit('}',1)[1]
if (not key_text == None) and (not key_text == ""):
self.graph.add((self.iati['activity/' + self.id + '/' + str(name) + '/' + str(child_name)],
URIRef(grandchild_namespace + '-' + str(key).replace(" ", "-")),
Literal(key_text)))
else:
# Three levels
print "Three levels for a non-IATI element (" + str(name) + ") is not supported..."
def reporting_org(self, xml):
    '''Converts the XML of the reporting-org element to a RDFLib self.graph.
    
    Parameters
    @xml: The XML of this element.'''
    # Keys
    ref = AttributeHelper.attribute_key(xml, 'ref')
    type = AttributeHelper.attribute_key(xml, 'type')
    # Text
    name = AttributeHelper.attribute_language(xml, self.default_language)
    if not ref == None:
        # Identify the organisation by its reference code.
        ref = ref.replace(" ", "%20")
        organisation = self.iati['activity/' + self.id + '/reporting-org/' + str(ref)]
        self.graph.add((self.iati['activity/' + self.id],
                        self.iati['activity-reporting-org'],
                        organisation))
        self.graph.add((organisation, RDF.type, self.iati['organisation']))
        self.graph.add((organisation,
                        self.iati['organisation-code'],
                        self.iati['codelist/OrganisationIdentifier/' + str(ref)]))
        if not name == None:
            self.graph.add((organisation, RDFS.label, name))
    elif not name == None:
        # No reference available: identify the organisation by an MD5 hash
        # of its name instead.
        hash = hashlib.md5()
        hash.update(name)
        organisation = self.iati['activity/' + self.id + '/reporting-org/' + hash.hexdigest()]
        self.graph.add((self.iati['activity/' + self.id],
                        self.iati['activity-reporting-org'],
                        organisation))
        self.graph.add((organisation, RDF.type, self.iati['organisation']))
        self.graph.add((organisation, RDFS.label, name))
    else:
        # Neither a reference nor a name: nothing to record.
        return
    if not type == None:
        type = type.replace(" ", "%20")
        self.graph.add((organisation,
                        self.iati['organisation-type'],
                        self.iati['codelist/OrganisationType/' + str(type)]))
def iati_identifier(self, xml):
    '''Converts the XML of the self.iati-identifier element to a RDFLib self.graph.
    
    Parameters
    @xml: The XML of this element.'''
    identifier = xml.text
    if identifier is not None:
        # Collapse every whitespace run into a single space.
        identifier = " ".join(identifier.split())
        self.graph.add((self.iati['activity/' + self.id],
                        self.iati['activity-id'],
                        Literal(identifier)))
def other_identifier(self, xml):
    '''Converts the XML of the other-identifier element to a RDFLib self.graph.
    
    Parameters
    @xml: The XML of this element.'''
    # Keys
    owner_ref = AttributeHelper.attribute_key(xml, 'owner-ref')
    owner_name = AttributeHelper.attribute_key(xml, 'owner-name')
    # Text
    name = xml.text
    if name == None:
        return
    # The hash is taken over the raw text, before whitespace normalisation.
    digest = hashlib.md5()
    digest.update(name)
    identifier_node = self.iati['activity/' + self.id + '/other-identifier/' + digest.hexdigest()]
    name = " ".join(name.split())
    self.graph.add((self.iati['activity/' + self.id],
                    self.iati['activity-other-identifier'],
                    identifier_node))
    self.graph.add((identifier_node, RDFS.label, Literal(name)))
    if not owner_ref == None:
        owner_ref = owner_ref.replace(" ", "%20")
        self.graph.add((identifier_node,
                        self.iati['other-identifier-owner-ref'],
                        self.iati['codelist/OrganisationIdentifier/' + str(owner_ref)]))
    if not owner_name == None:
        self.graph.add((identifier_node,
                        self.iati['other-identifier-owner-name'],
                        Literal(owner_name)))
def activity_website(self, xml):
    '''Converts the XML of the activity-website element to a RDFLib self.graph.
    
    Parameters
    @xml: The XML of this element.'''
    # Text; may be None for an empty element, so guard before touching it.
    # BUG FIX: the original called xml.text.replace() before the None check,
    # raising AttributeError for empty elements and making the check dead code.
    website = xml.text
    if not website == None:
        # Escape spaces, then any remaining whitespace, so the result is a
        # valid URI (same order of operations as the original).
        website = website.replace(" ", "%20")
        website = "%20".join(website.split())
        self.graph.add((self.iati['activity/' + self.id],
                        self.iati['activity-website'],
                        URIRef(website)))
def title(self, xml):
    '''Converts the XML of the title element to a RDFLib self.graph.
    
    Parameters
    @xml: The XML of this element.'''
    title = AttributeHelper.attribute_language(xml, self.default_language)
    if title is not None:
        # The title becomes the RDFS label of the activity itself.
        self.graph.add((self.iati['activity/' + self.id],
                        RDFS.label,
                        title))
def description(self, xml):
    '''Converts the XML of the description element to a RDFLib self.graph.
    
    Parameters
    @xml: The XML of this element.'''
    # Keys
    type = AttributeHelper.attribute_key(xml, 'type')
    # Text
    description = AttributeHelper.attribute_language(xml, self.default_language)
    if description == None:
        return
    # The description text is the only required part, so it seeds the hash
    # that identifies the description node.
    digest = hashlib.md5()
    digest.update(description)
    description_node = self.iati['activity/' + self.id + '/description/' + digest.hexdigest()]
    self.graph.add((self.iati['activity/' + self.id],
                    self.iati['activity-description'],
                    description_node))
    self.graph.add((description_node, RDF.type, self.iati['description']))
    self.graph.add((description_node,
                    self.iati['description-text'],
                    description))
    if not type == None:
        type = type.replace(" ", "%20")
        self.graph.add((description_node,
                        self.iati['description-type'],
                        self.iati['codelist/DescriptionType/' + str(type)]))
def activity_status(self, xml):
    '''Converts the XML of the activity-status element to a RDFLib self.graph.
    
    Parameters
    @xml: The XML of this element.'''
    status_code = AttributeHelper.attribute_key(xml, 'code')
    if status_code is not None:
        status_code = status_code.replace(" ", "%20")
        self.graph.add((self.iati['activity/' + self.id],
                        self.iati['activity-status'],
                        self.iati['codelist/ActivityStatus/' + str(status_code)]))
def activity_date(self, xml):
    '''Converts the XML of the activity-date element to a RDFLib self.graph.
    
    Parameters
    @xml: The XML of this element.'''
    # Keys
    type = AttributeHelper.attribute_key(xml, 'type')
    iso_date = AttributeHelper.attribute_key(xml, 'iso-date')
    # Text
    name = AttributeHelper.attribute_language(xml, self.default_language)
    if not type == None:
        # CONSISTENCY FIX: escape spaces before embedding the type in the
        # predicate URI, matching how every other method in this class
        # treats attribute values used in URIs.
        type = type.replace(" ", "%20")
        if not iso_date == None:
            self.graph.add((self.iati['activity/' + self.id],
                            self.iati[type + '-date'],
                            Literal(iso_date)))
        if not name == None:
            self.graph.add((self.iati['activity/' + self.id],
                            self.iati[type + '-text'],
                            Literal(name)))
def contact_info(self, xml):
    '''Converts the XML of the contact-info element to a RDFLib self.graph.
    
    Parameters
    @xml: The XML of this element.'''
    # The node hash requires at least one of the contact detail fields;
    # fields are folded into the hash in this fixed order.
    digest = hashlib.md5()
    has_content = False
    for field in ('organisation', 'person-name', 'telephone', 'email', 'mailing-address'):
        field_text = AttributeHelper.attribute_text(xml, field)
        if not field_text == None:
            digest.update(field_text[0])
            has_content = True
    if has_content:
        contact_node = self.iati['activity/' + self.id + '/contact-info/' + digest.hexdigest()]
        self.graph.add((self.iati['activity/' + self.id],
                        self.iati['activity-contact-info'],
                        contact_node))
        self.graph.add((contact_node, RDF.type, self.iati['contact-info']))
        # Every child element becomes a contact-info-<tag> literal.
        for element in xml:
            info = element.text
            if not info == None:
                info = " ".join(info.split())
                predicate = "contact-info-" + str(element.tag).replace(" ", "-")
                self.graph.add((contact_node,
                                self.iati[predicate],
                                Literal(info)))
def participating_org(self, xml):
    '''Converts the XML of the participating-org element to a RDFLib self.graph.
    
    Parameters
    @xml: The XML of this element.'''
    # Keys
    ref = AttributeHelper.attribute_key(xml, 'ref')
    type = AttributeHelper.attribute_key(xml, 'type')
    role = AttributeHelper.attribute_key(xml, 'role')
    # Text
    name = AttributeHelper.attribute_language(xml, self.default_language)
    if not ref == None:
        # Identify the organisation by its reference code.
        ref = ref.replace(" ", "%20")
        organisation = self.iati['activity/' + self.id + '/participating-org/' + str(ref)]
        self.graph.add((self.iati['activity/' + self.id],
                        self.iati['activity-participating-org'],
                        organisation))
        self.graph.add((organisation, RDF.type, self.iati['organisation']))
        self.graph.add((organisation,
                        self.iati['organisation-code'],
                        self.iati['codelist/OrganisationIdentifier/' + str(ref)]))
        if not name == None:
            self.graph.add((organisation, RDFS.label, name))
    elif not name == None:
        # No reference available: identify the organisation by an MD5 hash
        # of its name instead.
        digest = hashlib.md5()
        digest.update(name)
        organisation = self.iati['activity/' + self.id + '/participating-org/' + digest.hexdigest()]
        self.graph.add((self.iati['activity/' + self.id],
                        self.iati['activity-participating-org'],
                        organisation))
        self.graph.add((organisation, RDF.type, self.iati['organisation']))
        self.graph.add((organisation, RDFS.label, name))
    else:
        # Neither a reference nor a name: nothing to record.
        return
    if not type == None:
        type = type.replace(" ", "%20")
        self.graph.add((organisation,
                        self.iati['organisation-type'],
                        self.iati['codelist/OrganisationType/' + str(type)]))
    if not role == None:
        role = role.replace(" ", "%20")
        self.graph.add((organisation,
                        self.iati['organisation-role'],
                        self.iati['codelist/OrganisationRole/' + str(role)]))
def recipient_country(self, xml):
    '''Converts the XML of the recipient-country element to a RDFLib self.graph.
    
    Parameters
    @xml: The XML of this element.'''
    # Keys
    code = AttributeHelper.attribute_key(xml, 'code')
    percentage = AttributeHelper.attribute_key(xml, 'percentage')
    # Text
    country_name = AttributeHelper.attribute_language(xml, self.default_language)
    if code == None:
        return
    code = code.replace(" ", "%20")
    country_node = self.iati['activity/' + self.id + '/recipient-country/' + str(code)]
    self.graph.add((self.iati['activity/' + self.id],
                    self.iati['activity-recipient-country'],
                    country_node))
    self.graph.add((country_node, RDF.type, self.iati['country']))
    self.graph.add((country_node,
                    self.iati['country-code'],
                    self.iati['codelist/Country/' + str(code)]))
    if not country_name == None:
        self.graph.add((country_node, RDFS.label, country_name))
    if not percentage == None:
        self.graph.add((country_node,
                        self.iati['percentage'],
                        Literal(percentage)))
def recipient_region(self, xml):
    '''Converts the XML of the recipient-region element to a RDFLib self.graph.
    
    Parameters
    @xml: The XML of this element.'''
    # Keys
    code = AttributeHelper.attribute_key(xml, 'code')
    percentage = AttributeHelper.attribute_key(xml, 'percentage')
    # Text
    region_name = AttributeHelper.attribute_language(xml, self.default_language)
    if code == None:
        return
    code = code.replace(" ", "%20")
    region_node = self.iati['activity/' + self.id + '/recipient-region/' + str(code)]
    self.graph.add((self.iati['activity/' + self.id],
                    self.iati['activity-recipient-region'],
                    region_node))
    self.graph.add((region_node, RDF.type, self.iati['region']))
    self.graph.add((region_node,
                    self.iati['region-code'],
                    self.iati['codelist/Region/' + str(code)]))
    if not region_name == None:
        self.graph.add((region_node, RDFS.label, region_name))
    if not percentage == None:
        self.graph.add((region_node,
                        self.iati['percentage'],
                        Literal(percentage)))
def location(self, xml):
    '''Converts the XML of the location element to a RDFLib self.graph.
    
    A location node is only created when at least one identifying part
    (name, description, administrative info, coordinates or gazetteer
    entry) is present; the node URI contains an MD5 hash over those parts.
    Parameters
    @xml: The XML of this element.'''
    # Keys
    # NOTE(review): percentage is read here but never added to the graph
    # below -- confirm whether that is intentional.
    percentage = AttributeHelper.attribute_key(xml, 'percentage')
    # Elements
    name = xml.find('name')
    descriptions = xml.findall('description')
    location_type = xml.find('location-type')
    administrative = xml.find('administrative')
    coordinates = xml.find('coordinates')
    gazetteer_entry = xml.find('gazetteer-entry')
    # Create hash
    # Required: one of name, description, administrative (text / country / adm1 / adm2),
    # coordinates (lat / long), gazetteer entry
    hash = hashlib.md5()
    hash_created = False
    name_text = AttributeHelper.attribute_text(xml, 'name')
    if not name_text == None:
        hash.update(name_text[0])
        hash_created = True
    description_text = AttributeHelper.attribute_text(xml, 'description')
    if not description_text == None:
        hash.update(description_text[0])
        hash_created = True
    administrative_text = AttributeHelper.attribute_text(xml, 'administrative')
    if not administrative_text == None:
        hash.update(administrative_text[0])
        hash_created = True
    gazetteer_entry_text = AttributeHelper.attribute_text(xml, 'gazetteer-entry')
    if not gazetteer_entry_text == None:
        hash.update(gazetteer_entry_text[0])
        hash_created = True
    if not administrative == None:
        # Keys
        administrative_country = AttributeHelper.attribute_key(administrative, 'country')
        administrative_adm1 = AttributeHelper.attribute_key(administrative, 'adm1')
        administrative_adm2 = AttributeHelper.attribute_key(administrative, 'adm2')
        if not administrative_country == None:
            hash.update(administrative_country)
            hash_created = True
        if not administrative_adm1 == None:
            hash.update(administrative_adm1)
            hash_created = True
        if not administrative_adm2 == None:
            hash.update(administrative_adm2)
            hash_created = True
    if not coordinates == None:
        # Keys
        latitude = AttributeHelper.attribute_key(coordinates, 'latitude')
        longitude = AttributeHelper.attribute_key(coordinates, 'longitude')
        if not latitude == None:
            hash.update(latitude)
            hash_created = True
        if not longitude == None:
            hash.update(longitude)
            hash_created = True
    if hash_created:
        # At least one identifying part was found: create the location node.
        hash_location = hash.hexdigest()
        self.graph.add((self.iati['activity/' + self.id],
                        self.iati['activity-location'],
                        self.iati['activity/' + self.id + '/location/' + str(hash_location)]))
        self.graph.add((self.iati['activity/' + self.id + '/location/' + str(hash_location)],
                        RDF.type,
                        self.iati['location']))
        if not name == None:
            # Text
            name_text = AttributeHelper.attribute_language(name, self.default_language)
            if not name_text == None:
                self.graph.add((self.iati['activity/' + self.id + '/location/' + str(hash_location)],
                                RDFS.label,
                                name_text))
        if not descriptions == []:
            # Each description gets its own node, keyed by a hash of its text.
            for description in descriptions:
                # Keys
                type = AttributeHelper.attribute_key(description, 'type')
                # Text
                description_text = AttributeHelper.attribute_language(description, self.default_language)
                if not description_text == None:
                    # Create hash
                    # Required: description
                    # NOTE(review): the hash uses the raw element text, not the
                    # language-resolved text; may raise if description.text is
                    # None while attribute_language returned a value -- confirm.
                    hash_description = hashlib.md5()
                    description_nolanguage = description.text
                    hash_description.update(description_nolanguage)
                    hash_location_description = hash_description.hexdigest()
                    self.graph.add((self.iati['activity/' + self.id + '/location/' + str(hash_location)],
                                    self.iati['location-description'],
                                    self.iati['activity/' + self.id + '/location/' + str(hash_location) +
                                              '/description/' + str(hash_location_description)]))
                    self.graph.add((self.iati['activity/' + self.id + '/location/' + str(hash_location) +
                                              '/description/' + str(hash_location_description)],
                                    RDF.type,
                                    self.iati['description']))
                    self.graph.add((self.iati['activity/' + self.id + '/location/' + str(hash_location) +
                                              '/description/' + str(hash_location_description)],
                                    self.iati['description-text'],
                                    description_text))
                    if not type == None:
                        type = type.replace(" ", "%20")
                        self.graph.add((self.iati['activity/' + self.id + '/location/' + str(hash_location) +
                                                  '/description/' + str(hash_location_description)],
                                        self.iati['description-type'],
                                        self.iati['codelist/DescriptionType/' + str(type)]))
        if not location_type == None:
            # Keys
            location_type_code = AttributeHelper.attribute_key(location_type, 'code')
            if not location_type_code == None:
                location_type_code = location_type_code.replace(" ", "%20")
                self.graph.add((self.iati['activity/' + self.id + '/location/' + str(hash_location)],
                                self.iati['location-type'],
                                self.iati['codelist/LocationType/' + str(location_type_code)]))
        if not administrative == None:
            # Keys (re-read; also read above for the location hash)
            administrative_country = AttributeHelper.attribute_key(administrative, 'country')
            administrative_adm1 = AttributeHelper.attribute_key(administrative, 'adm1')
            administrative_adm2 = AttributeHelper.attribute_key(administrative, 'adm2')
            # Text
            administrative_text = AttributeHelper.attribute_language(administrative, self.default_language)
            # Create hash
            # Required: one of administrative country / adm1 / adm2 / text
            hash_administrative = hashlib.md5()
            hash_administrative_created= False
            administrative_hash_text = AttributeHelper.attribute_text(xml, 'administrative')
            if not administrative_hash_text == None:
                hash_administrative.update(administrative_hash_text[0])
                hash_administrative_created= True
            if not administrative_country == None:
                hash_administrative.update(administrative_country)
                hash_administrative_created= True
            if not administrative_adm1 == None:
                hash_administrative.update(administrative_adm1)
                hash_administrative_created= True
            if not administrative_adm2 == None:
                hash_administrative.update(administrative_adm2)
                hash_administrative_created= True
            if hash_administrative_created:
                hash_location_administrative = hash_administrative.hexdigest()
                self.graph.add((self.iati['activity/' + self.id + '/location/' + str(hash_location)],
                                self.iati['location-administrative'],
                                self.iati['activity/' + self.id + '/location/' + str(hash_location)
                                          + '/administrative/' + str(hash_location_administrative)]))
                if not administrative_country == None:
                    administrative_country = administrative_country.replace(" ", "%20")
                    self.graph.add((self.iati['activity/' + self.id + '/location/' + str(hash_location)
                                              + '/administrative/' + str(hash_location_administrative)],
                                    self.iati['administrative-country'],
                                    self.iati['codelist/Country/' + str(administrative_country)]))
                if not administrative_adm1 == None:
                    self.graph.add((self.iati['activity/' + self.id + '/location/' + str(hash_location)
                                              + '/administrative/' + str(hash_location_administrative)],
                                    self.iati['administrative-adm1'],
                                    Literal(administrative_adm1)))
                if not administrative_adm2 == None:
                    self.graph.add((self.iati['activity/' + self.id + '/location/' + str(hash_location)
                                              + '/administrative/' + str(hash_location_administrative)],
                                    self.iati['administrative-adm2'],
                                    Literal(administrative_adm2)))
                if not administrative_text == None:
                    self.graph.add((self.iati['activity/' + self.id + '/location/' + str(hash_location)
                                              + '/administrative/' + str(hash_location_administrative)],
                                    self.iati['administrative-country-text'],
                                    administrative_text))
        if not coordinates == None:
            # Keys (latitude/longitude re-read; precision only needed here)
            latitude = AttributeHelper.attribute_key(coordinates, 'latitude')
            longitude = AttributeHelper.attribute_key(coordinates, 'longitude')
            precision = AttributeHelper.attribute_key(coordinates, 'precision')
            if not latitude == None:
                self.graph.add((self.iati['activity/' + self.id + '/location/' + str(hash_location)],
                                self.iati['latitude'],
                                Literal(latitude)))
            if not longitude == None:
                self.graph.add((self.iati['activity/' + self.id + '/location/' + str(hash_location)],
                                self.iati['longitude'],
                                Literal(longitude)))
            if not precision == None:
                precision = precision.replace(" ", "%20")
                self.graph.add((self.iati['activity/' + self.id + '/location/' + str(hash_location)],
                                self.iati['coordinates-precision'],
                                self.iati['codelist/GeographicalPrecision/' + str(precision)]))
        if not gazetteer_entry == None:
            # Keys
            gazetteer_ref = AttributeHelper.attribute_key(gazetteer_entry, 'gazetteer-ref')
            # Text
            gazetteer_entry_text = gazetteer_entry.text
            if (not gazetteer_ref == None) and (not gazetteer_entry_text == None):
                gazetteer_ref = gazetteer_ref.replace(" ", "%20")
                gazetteer_entry_text = " ".join(gazetteer_entry_text.split())
                self.graph.add((self.iati['activity/' + self.id + '/location/' + str(hash_location)],
                                self.iati['location-gazetteer-entry'],
                                self.iati['activity/' + self.id + '/location/' + str(hash_location) +
                                          '/gazetteer-entry/' + str(gazetteer_ref)]))
                self.graph.add((self.iati['activity/' + self.id + '/location/' + str(hash_location) +
                                          '/gazetteer-entry/' + str(gazetteer_ref)],
                                RDF.type,
                                self.iati['gazetteer-entry']))
                self.graph.add((self.iati['activity/' + self.id + '/location/' + str(hash_location) +
                                          '/gazetteer-entry/' + str(gazetteer_ref)],
                                self.iati['gazetteer-ref'],
                                self.iati['codelist/GazetteerAgency/' + str(gazetteer_ref)]))
                self.graph.add((self.iati['activity/' + self.id + '/location/' + str(hash_location) +
                                          '/gazetteer-entry/' + str(gazetteer_ref)],
                                self.iati['gazetteer-entry'],
                                Literal(gazetteer_entry_text)))
                if gazetteer_ref == "GEO":
                    # GEOnames entries additionally get an owl:sameAs link to
                    # the corresponding sws.geonames.org resource.
                    gazetteer_entry_text = gazetteer_entry_text.replace(" ", "%20")
                    self.graph.add((self.iati['activity/' + self.id + '/location/' + str(hash_location)],
                                    OWL.sameAs,
                                    URIRef("http://sws.geonames.org/" + gazetteer_entry_text)))
def sector(self, xml):
    '''Converts the XML of the sector element to a RDFLib self.graph.
    
    Parameters
    @xml: The XML of this element.'''
    # Keys
    code = AttributeHelper.attribute_key(xml, 'code')
    vocabulary = AttributeHelper.attribute_key(xml, 'vocabulary')
    percentage = AttributeHelper.attribute_key(xml, 'percentage')
    # Text
    name = AttributeHelper.attribute_language(xml, self.default_language)
    if (code == None) or (vocabulary == None):
        # Both the code and its vocabulary are required.
        return
    code = code.replace(" ", "%20")
    vocabulary = vocabulary.replace(" ", "%20")
    sector_node = self.iati['activity/' + self.id + '/sector/' + str(vocabulary) +
                            '/' + str(code)]
    self.graph.add((self.iati['activity/' + self.id],
                    self.iati['activity-sector'],
                    sector_node))
    self.graph.add((sector_node, RDF.type, self.iati['sector']))
    self.graph.add((sector_node,
                    self.iati['sector-code'],
                    self.iati['codelist/Sector/' + str(code)]))
    self.graph.add((sector_node,
                    self.iati['sector-vocabulary'],
                    self.iati['codelist/Vocabulary/' + str(vocabulary)]))
    if not percentage == None:
        self.graph.add((sector_node,
                        self.iati['percentage'],
                        Literal(percentage)))
    if not name == None:
        self.graph.add((sector_node, RDFS.label, name))
def policy_marker(self, xml):
    '''Converts the XML of the policy-marker element to a RDFLib self.graph.
    
    Parameters
    @xml: The XML of this element.'''
    # Keys
    code = AttributeHelper.attribute_key(xml, 'code')
    vocabulary = AttributeHelper.attribute_key(xml, 'vocabulary')
    significance = AttributeHelper.attribute_key(xml, 'significance')
    # Text
    name = AttributeHelper.attribute_language(xml, self.default_language)
    if (code == None) or (vocabulary == None):
        # Both the code and its vocabulary are required.
        return
    code = code.replace(" ", "%20")
    vocabulary = vocabulary.replace(" ", "%20")
    marker_node = self.iati['activity/' + self.id + '/policy-marker/' + str(vocabulary) +
                            '/' + str(code)]
    self.graph.add((self.iati['activity/' + self.id],
                    self.iati['activity-policy-marker'],
                    marker_node))
    self.graph.add((marker_node, RDF.type, self.iati['policy-marker']))
    self.graph.add((marker_node,
                    self.iati['policy-marker-code'],
                    self.iati['codelist/PolicyMarker/' + str(code)]))
    self.graph.add((marker_node,
                    self.iati['policy-marker-vocabulary'],
                    self.iati['codelist/Vocabulary/' + str(vocabulary)]))
    if not significance == None:
        significance = significance.replace(" ", "%20")
        self.graph.add((marker_node,
                        self.iati['significance-code'],
                        self.iati['codelist/PolicySignificance/' + str(significance)]))
    if not name == None:
        self.graph.add((marker_node, RDFS.label, name))
def collaboration_type(self, xml):
    '''Converts the XML of the collaboration-type element to a RDFLib self.graph.
    
    Parameters
    @xml: The XML of this element.'''
    code = AttributeHelper.attribute_key(xml, 'code')
    if code is not None:
        code = code.replace(" ", "%20")
        self.graph.add((self.iati['activity/' + self.id],
                        self.iati['activity-collaboration-type'],
                        self.iati['codelist/CollaborationType/' + str(code)]))
def finance_type(self, xml):
    '''Converts the XML of the default-finance-type element to a RDFLib self.graph.
    
    Parameters
    @xml: The XML of this element.'''
    code = AttributeHelper.attribute_key(xml, 'code')
    if code is not None:
        code = code.replace(" ", "%20")
        self.graph.add((self.iati['activity/' + self.id],
                        self.iati['activity-default-finance-type'],
                        self.iati['codelist/FinanceType/' + str(code)]))
def flow_type(self, xml):
    '''Converts the XML of the default-flow-type element to a RDFLib self.graph.
    
    Parameters
    @xml: The XML of this element.'''
    code = AttributeHelper.attribute_key(xml, 'code')
    if code is not None:
        code = code.replace(" ", "%20")
        self.graph.add((self.iati['activity/' + self.id],
                        self.iati['activity-default-flow-type'],
                        self.iati['codelist/FlowType/' + str(code)]))
def aid_type(self, xml):
    '''Converts the XML of the default-aid-type element to a RDFLib self.graph.
    
    Parameters
    @xml: The XML of this element.'''
    code = AttributeHelper.attribute_key(xml, 'code')
    if code is not None:
        code = code.replace(" ", "%20")
        self.graph.add((self.iati['activity/' + self.id],
                        self.iati['activity-default-aid-type'],
                        self.iati['codelist/AidType/' + str(code)]))
def tied_status(self, xml):
    '''Converts the XML of the default-tied-status element to a RDFLib self.graph.
    
    Parameters
    @xml: The XML of this element.'''
    code = AttributeHelper.attribute_key(xml, 'code')
    if code is not None:
        code = code.replace(" ", "%20")
        self.graph.add((self.iati['activity/' + self.id],
                        self.iati['activity-default-tied-status'],
                        self.iati['codelist/TiedStatus/' + str(code)]))
def budget(self, xml):
'''Converts the XML of the budget element to a RDFLib self.graph.
Parameters
@xml: The XML of this element.'''
# Keys
type = AttributeHelper.attribute_key(xml, 'type')
# Elements
period_start = xml.find('period-start')
period_end = xml.find('period-end')
value = xml.find('value')
# Create hash
# Required: one of value, period-start (iso-date / text), period-end (iso-date / text)
hash = hashlib.md5()
hash_created = False
if not period_start == None:
# Keys
period_start_date = AttributeHelper.attribute_key(period_start, 'iso-date')
if not period_start_date == None:
hash.update(period_start_date)
hash_created = True
period_start_text = period_start.text
if not period_start_text == None:
hash.update(period_start_text)
hash_created = True
if not period_end == None:
# Keys
period_end_date = AttributeHelper.attribute_key(period_end, 'iso-date')
if not period_end_date == None:
hash.update(period_end_date)
hash_created = True
period_end_text = period_end.text
if not period_end_text == None:
hash.update(period_end_text)
hash_created = True
if not value == None:
value_text = value.text
if not value_text == None:
hash.update(value_text)
hash_created = True
if hash_created:
hash_budget = hash.hexdigest()
self.graph.add((self.iati['activity/' + self.id],
self.iati['activity-budget'],
self.iati['activity/' + self.id + '/budget/' + str(hash_budget)]))
self.graph.add((self.iati['activity/' + self.id + '/budget/' + str(hash_budget)],
RDF.type,
self.iati['budget']))
if not type == None:
type = type.replace(" ", "%20")
self.graph.add((self.iati['activity/' + self.id + '/budget/' + str(hash_budget)],
self.iati['budget-type'],
self.iati['codelist/BudgetType/' + str(type)]))
if not period_start == None:
# Keys
date = AttributeHelper.attribute_key(period_start, 'iso-date')
# Text
period_start_text = AttributeHelper.attribute_language(period_start, self.default_language)
if not date == None:
self.graph.add((self.iati['activity/' + self.id + '/budget/' + str(hash_budget)],
self.iati['start-date'],
Literal(date)))
if not period_start_text == None:
self.graph.add((self.iati['activity/' + self.id + '/budget/' + str(hash_budget)],
self.iati['start-date-text'],
period_start_text))
if not period_end == None:
# Keys
date = AttributeHelper.attribute_key(period_end, 'iso-date')
# Text
period_end_text = AttributeHelper.attribute_language(period_end, self.default_language)
if not date == None:
self.graph.add((self.iati['activity/' + self.id + '/budget/' + str(hash_budget)],
self.iati['end-date'],
Literal(date)))
if not period_end_text == None:
self.graph.add((self.iati['activity/' + self.id + '/budget/' + str(hash_budget)],
self.iati['end-date-text'],
period_end_text))
if not value == None:
# Keys
currency = AttributeHelper.attribute_key(value, 'currency')
value_date = AttributeHelper.attribute_key(value, 'value-date')
# Text
value_text = value.text
if not value_text == None:
value_text = " ".join(value_text.split())
self.graph.add((self.iati['activity/' + self.id + '/budget/' + str(hash_budget)],
self.iati['value'],
Literal(value_text)))
if not currency == None:
currency = currency.replace(" ", "%20")
self.graph.add((self.iati['activity/' + self.id + '/budget/' + str(hash_budget)],
self.iati['value-currency'],
self.iati['codelist/Currency/' + str(currency)]))
elif not self.default_currency == None:
self.default_currency = self.default_currency.replace(" ", "%20")
self.graph.add((self.iati['activity/' + self.id + '/budget/' + str(hash_budget)],
self.iati['value-currency'],
self.iati['codelist/Currency/' + str(self.default_currency)]))
if not value_date == None:
self.graph.add((self.iati['activity/' + self.id + '/budget/' + str(hash_budget)],
self.iati['value-date'],
Literal(value_date)))
def planned_disbursement(self, xml):
'''Converts the XML of the planned-disbursement element to a RDFLib self.graph.
Parameters
@xml: The XML of this element.'''
# Keys
updated = AttributeHelper.attribute_key(xml, 'updated')
# Elements
period_start = xml.find('period-start')
period_end = xml.find('period-end')
value = xml.find('value')
# Create hash
# Required: one of value, period-start (iso-date / text), period-end (iso-date / text)
hash = hashlib.md5()
hash_created = False
if not period_start == None:
# Keys
period_start_date = AttributeHelper.attribute_key(period_start, 'iso-date')
if not period_start_date == None:
hash.update(period_start_date)
hash_created = True
period_start_text = period_start.text
if not period_start_text == None:
hash.update(period_start_text)
hash_created = True
if not period_end == None:
# Keys
period_end_date = AttributeHelper.attribute_key(period_end, 'iso-date')
if not period_end_date == None:
hash.update(period_end_date)
hash_created = True
period_end_text = period_end.text
if not period_end_text == None:
hash.update(period_end_text)
hash_created = True
if not value == None:
value_text = value.text
if not value_text == None:
hash.update(value_text)
hash_created = True
if hash_created:
hash_planned_disbursement = hash.hexdigest()
self.graph.add((self.iati['activity/' + self.id],
self.iati['activity-planned-disbursement'],
self.iati['activity/' + self.id + '/planned-disbursement/' + str(hash_planned_disbursement)]))
self.graph.add((self.iati['activity/' + self.id + '/planned-disbursement/' + str(hash_planned_disbursement)],
RDF.type,
self.iati['planned-disbursement']))
if not updated == None:
self.graph.add((self.iati['activity/' + self.id + '/planned-disbursement/' + str(hash_planned_disbursement)],
self.iati['updated'],
Literal(updated)))
if not period_start == None:
# Keys
date = AttributeHelper.attribute_key(period_start, 'iso-date')
# Text
period_start_text = AttributeHelper.attribute_language(period_start, self.default_language)
if not date == None:
self.graph.add((self.iati['activity/' + self.id + '/planned-disbursement/' + str(hash_planned_disbursement)],
self.iati['start-date'],
Literal(date)))
if not period_start_text == None:
self.graph.add((self.iati['activity/' + self.id + '/planned-disbursement/' + str(hash_planned_disbursement)],
self.iati['start-date-text'],
period_start_text))
if not period_end == None:
# Keys
date = AttributeHelper.attribute_key(period_end, 'iso-date')
# Text
period_end_text = AttributeHelper.attribute_language(period_end, self.default_language)
if not date == None:
self.graph.add((self.iati['activity/' + self.id + '/planned-disbursement/' + str(hash_planned_disbursement)],
self.iati['end-date'],
Literal(date)))
if not period_end_text == None:
self.graph.add((self.iati['activity/' + self.id + '/planned-disbursement/' + str(hash_planned_disbursement)],
self.iati['end-date-text'],
period_end_text))
if not value == None:
# Keys
currency = AttributeHelper.attribute_key(value, 'currency')
value_date = AttributeHelper.attribute_key(value, 'value-date')
# Text
value_text = value.text
if not value_text == None:
value_text = " ".join(value_text.split())
self.graph.add((self.iati['activity/' + self.id + '/planned-disbursement/' + str(hash_planned_disbursement)],
self.iati['value'],
Literal(value_text)))
if not currency == None:
currency = currency.replace(" ", "%20")
self.graph.add((self.iati['activity/' + self.id + '/planned-disbursement/' + str(hash_planned_disbursement)],
self.iati['value-currency'],
self.iati['codelist/Currency/' + str(currency)]))
elif not self.default_currency == None:
self.default_currency = self.default_currency.replace(" ", "%20")
self.graph.add((self.iati['activity/' + self.id + '/planned-disbursement/' + str(hash_planned_disbursement)],
self.iati['value-currency'],
self.iati['codelist/Currency/' + str(self.default_currency)]))
if not value_date == None:
self.graph.add((self.iati['activity/' + self.id + '/planned-disbursement/' + str(hash_planned_disbursement)],
self.iati['value-date'],
Literal(value_date)))
    def transaction(self, xml):
        '''Converts the XML of the transaction element to a RDFLib self.graph.
        A transaction node is identified by its 'ref' attribute when present,
        otherwise by an MD5 hash over its value, description and date. Several
        codelist attributes (aid-type, finance-type, flow-type, tied-status)
        fall back to the activity-level defaults when missing.
        Parameters
        @xml: The XML of this element.'''
        # Keys
        ref = AttributeHelper.attribute_key(xml, 'ref')
        # Elements
        aid_type = xml.find('aid-type')
        descriptions = xml.findall('description')
        disbursement_channel = xml.find('disbursement-channel')
        finance_type = xml.find('finance-type')
        flow_type = xml.find('flow-type')
        provider_org = xml.find('provider-org')
        receiver_org = xml.find('receiver-org')
        tied_status = xml.find('tied-status')
        transaction_date = xml.find('transaction-date')
        transaction_type = xml.find('transaction-type')
        value = xml.find('value')
        # Create hash
        # Required: one of value, description, transaction date
        hash = hashlib.md5()
        hash_created = False
        value_text = AttributeHelper.attribute_text(xml, 'value')
        if not value_text == None:
            hash.update(value_text[0])
            hash_created = True
        description_text = AttributeHelper.attribute_text(xml, 'description')
        if not description_text == None:
            hash.update(description_text[0])
            hash_created = True
        if not transaction_date == None:
            # Keys
            iso_date = AttributeHelper.attribute_key(transaction_date, 'iso-date')
            if not iso_date == None:
                hash.update(iso_date)
                hash_created = True
        # Proceed when either hashable content or an explicit @ref exists.
        if (hash_created) or (not ref == None):
            hash_transaction = hash.hexdigest()
            # An explicit @ref takes precedence over the content hash as identifier.
            if not ref == None:
                ref_url = ref.replace(" ", "%20")
                transaction_id = self.iati['activity/' + self.id + '/transaction/' + str(ref_url)]
                self.graph.add((transaction_id,
                                self.iati['transaction-ref'],
                                Literal(ref)))
            else:
                transaction_id = self.iati['activity/' + self.id + '/transaction/' + str(hash_transaction)]
            self.graph.add((self.iati['activity/' + self.id],
                            self.iati['activity-transaction'],
                            transaction_id))
            self.graph.add((transaction_id,
                            RDF.type,
                            self.iati['transaction']))
            if not aid_type == None:
                # Keys
                code = AttributeHelper.attribute_key(aid_type, 'code')
                if not code == None:
                    code = code.replace(" ", "%20")
                    self.graph.add((transaction_id,
                                    self.iati['aid-type'],
                                    self.iati['codelist/AidType/' + str(code)]))
                # Element present but without @code: fall back to the activity default.
                elif not self.default_aid_type == None:
                    self.default_aid_type = self.default_aid_type.replace(" ", "%20")
                    self.graph.add((transaction_id,
                                    self.iati['aid-type'],
                                    self.iati['codelist/AidType/' + str(self.default_aid_type)]))
            # Element absent entirely: also fall back to the activity default.
            elif not self.default_aid_type == None:
                self.default_aid_type = self.default_aid_type.replace(" ", "%20")
                self.graph.add((transaction_id,
                                self.iati['aid-type'],
                                self.iati['codelist/AidType/' + str(self.default_aid_type)]))
            if not descriptions == []:
                for description in descriptions:
                    # Keys
                    type = AttributeHelper.attribute_key(description, 'type')
                    # Text
                    description_text = AttributeHelper.attribute_language(description, self.default_language)
                    if not description_text == None:
                        # Create hash
                        # Required: description
                        hash_description = hashlib.md5()
                        description_nolanguage = description.text
                        hash_description.update(description_nolanguage)
                        hash_transaction_description = hash_description.hexdigest()
                        self.graph.add((transaction_id,
                                        self.iati['transaction-description'],
                                        URIRef(transaction_id + '/description/' + str(hash_transaction_description))))
                        self.graph.add((URIRef(transaction_id + '/description/' + str(hash_transaction_description)),
                                        RDF.type,
                                        self.iati['description']))
                        self.graph.add((URIRef(transaction_id + '/description/' + str(hash_transaction_description)),
                                        self.iati['description-text'],
                                        description_text))
                        if not type == None:
                            type = type.replace(" ", "%20")
                            self.graph.add((URIRef(transaction_id + '/description/' + str(hash_transaction_description)),
                                            self.iati['description-type'],
                                            self.iati['codelist/DescriptionType/' + str(type)]))
            if not disbursement_channel == None:
                # Keys
                code = AttributeHelper.attribute_key(disbursement_channel, 'code')
                if not code == None:
                    code = code.replace(" ", "%20")
                    # NOTE(review): codelist name is lowercase 'disbursementChannel' here,
                    # unlike the CamelCase used elsewhere — possibly intentional; confirm.
                    self.graph.add((transaction_id,
                                    self.iati['disbursement-channel'],
                                    self.iati['codelist/disbursementChannel/' + str(code)]))
            if not finance_type == None:
                # Keys
                code = AttributeHelper.attribute_key(finance_type, 'code')
                if not code == None:
                    code = code.replace(" ", "%20")
                    self.graph.add((transaction_id,
                                    self.iati['finance-type'],
                                    self.iati['codelist/FinanceType/' + str(code)]))
                # Element present but without @code: fall back to the activity default.
                elif not self.default_finance_type == None:
                    self.default_finance_type = self.default_finance_type.replace(" ", "%20")
                    self.graph.add((transaction_id,
                                    self.iati['finance-type'],
                                    self.iati['codelist/FinanceType/' + str(self.default_finance_type)]))
            # Element absent entirely: also fall back to the activity default.
            elif not self.default_finance_type == None:
                self.default_finance_type = self.default_finance_type.replace(" ", "%20")
                self.graph.add((transaction_id,
                                self.iati['finance-type'],
                                self.iati['codelist/FinanceType/' + str(self.default_finance_type)]))
            if not flow_type == None:
                # Keys
                code = AttributeHelper.attribute_key(flow_type, 'code')
                if not code == None:
                    code = code.replace(" ", "%20")
                    self.graph.add((transaction_id,
                                    self.iati['flow-type'],
                                    self.iati['codelist/FlowType/' + str(code)]))
                # Element present but without @code: fall back to the activity default.
                elif not self.default_flow_type == None:
                    self.default_flow_type = self.default_flow_type.replace(" ", "%20")
                    self.graph.add((transaction_id,
                                    self.iati['flow-type'],
                                    self.iati['codelist/FlowType/' + str(self.default_flow_type)]))
            # Element absent entirely: also fall back to the activity default.
            elif not self.default_flow_type == None:
                self.default_flow_type = self.default_flow_type.replace(" ", "%20")
                self.graph.add((transaction_id,
                                self.iati['flow-type'],
                                self.iati['codelist/FlowType/' + str(self.default_flow_type)]))
            if not provider_org == None:
                # Keys
                # NOTE: 'ref' is deliberately rebound here; the transaction-level ref
                # has already been consumed to build transaction_id above.
                ref = AttributeHelper.attribute_key(provider_org, 'ref')
                provider_activity_id = AttributeHelper.attribute_key(provider_org, 'provider-activity-id')
                # Text
                provider_org_text = provider_org.text
                if not provider_org_text == None:
                    provider_org_text = " ".join(provider_org_text.split())
                    self.graph.add((transaction_id,
                                    self.iati['provider-org-name'],
                                    Literal(provider_org_text)))
                if not ref == None:
                    ref = ref.replace(" ", "%20")
                    self.graph.add((transaction_id,
                                    self.iati['provider-org'],
                                    self.iati['codelist/OrganisationIdentifier/' + str(ref)]))
                if not provider_activity_id == None:
                    provider_activity_id = provider_activity_id.replace(" ", "%20")
                    self.graph.add((transaction_id,
                                    self.iati['provider-org-activity-id'],
                                    self.iati['activity/' + str(provider_activity_id)]))
            if not receiver_org == None:
                # Keys
                ref = AttributeHelper.attribute_key(receiver_org, 'ref')
                receiver_activity_id = AttributeHelper.attribute_key(receiver_org, 'receiver-activity-id')
                # Text
                receiver_org_text = receiver_org.text
                if not receiver_org_text == None:
                    receiver_org_text = " ".join(receiver_org_text.split())
                    self.graph.add((transaction_id,
                                    self.iati['receiver-org-name'],
                                    Literal(receiver_org_text)))
                if not ref == None:
                    ref = ref.replace(" ", "%20")
                    self.graph.add((transaction_id,
                                    self.iati['receiver-org'],
                                    self.iati['codelist/OrganisationIdentifier/' + str(ref)]))
                if not receiver_activity_id == None:
                    receiver_activity_id = receiver_activity_id.replace(" ", "%20")
                    self.graph.add((transaction_id,
                                    self.iati['receiver-org-activity-id'],
                                    self.iati['activity/' + str(receiver_activity_id)]))
            if not tied_status == None:
                # Keys
                code = AttributeHelper.attribute_key(tied_status, 'code')
                if not code == None:
                    code = code.replace(" ", "%20")
                    self.graph.add((transaction_id,
                                    self.iati['tied-status'],
                                    self.iati['codelist/TiedStatus/' + str(code)]))
                # Element present but without @code: fall back to the activity default.
                elif not self.default_tied_status == None:
                    self.default_tied_status = self.default_tied_status.replace(" ", "%20")
                    self.graph.add((transaction_id,
                                    self.iati['tied-status'],
                                    self.iati['codelist/TiedStatus/' + str(self.default_tied_status)]))
            # Element absent entirely: also fall back to the activity default.
            elif not self.default_tied_status == None:
                self.default_tied_status = self.default_tied_status.replace(" ", "%20")
                self.graph.add((transaction_id,
                                self.iati['tied-status'],
                                self.iati['codelist/TiedStatus/' + str(self.default_tied_status)]))
            if not transaction_date == None:
                # Keys
                iso_date = AttributeHelper.attribute_key(transaction_date, 'iso-date')
                if not iso_date == None:
                    self.graph.add((transaction_id,
                                    self.iati['transaction-date'],
                                    Literal(iso_date)))
            if not transaction_type == None:
                # Keys
                code = AttributeHelper.attribute_key(transaction_type, 'code')
                if not code == None:
                    code = code.replace(" ", "%20")
                    self.graph.add((transaction_id,
                                    self.iati['transaction-type'],
                                    self.iati['codelist/TransactionType/' + str(code)]))
            if not value == None:
                # Keys
                currency = AttributeHelper.attribute_key(value, 'currency')
                value_date = AttributeHelper.attribute_key(value, 'value-date')
                # Text
                value_text = value.text
                if not value_text == None:
                    value_text = " ".join(value_text.split())
                    self.graph.add((transaction_id,
                                    self.iati['value'],
                                    Literal(value_text)))
                    if not currency == None:
                        currency = currency.replace(" ", "%20")
                        self.graph.add((transaction_id,
                                        self.iati['value-currency'],
                                        self.iati['codelist/Currency/' + str(currency)]))
                    # No explicit currency: fall back to the activity default.
                    elif not self.default_currency == None:
                        self.default_currency = self.default_currency.replace(" ", "%20")
                        self.graph.add((transaction_id,
                                        self.iati['value-currency'],
                                        self.iati['codelist/Currency/' + str(self.default_currency)]))
                    if not value_date == None:
                        self.graph.add((transaction_id,
                                        self.iati['value-date'],
                                        Literal(value_date)))
def document_link(self, xml):
'''Converts the XML of the document-link element to a RDFLib self.graph.
Parameters
@xml: The XML of this element.'''
# Keys
url = AttributeHelper.attribute_key(xml, 'url')
format = AttributeHelper.attribute_key(xml, 'format')
# Elements
titles = xml.findall('title')
category = xml.find('category')
languages = xml.findall('language')
if not url == None:
# Create hash
# Required: url
hash = hashlib.md5()
hash.update(url)
hash_document_link = hash.hexdigest()
url = url.replace(" ", "%20")
self.graph.add((self.iati['activity/' + self.id],
self.iati['activity-document-link'],
self.iati['activity/' + self.id + 'document-link/' + str(hash_document_link)]))
self.graph.add((self.iati['activity/' + self.id + 'document-link/' + str(hash_document_link)],
RDF.type,
self.iati['document-link']))
self.graph.add((self.iati['activity/' + self.id + 'document-link/' + str(hash_document_link)],
self.iati['url'],
URIRef(url)))
if not format == None:
format = format.replace(" ", "%20")
self.graph.add((self.iati['activity/' + self.id + 'document-link/' + str(hash_document_link)],
self.iati['format'],
self.iati['codelist/FileFormat/' + str(format)]))
if not titles == []:
for title in titles:
# Text
name = AttributeHelper.attribute_language(title, self.default_language)
if not name == None:
self.graph.add((self.iati['activity/' + self.id + 'document-link/' + str(hash_document_link)],
RDFS.label,
name))
if not category == None:
# Keys
code = AttributeHelper.attribute_key(category, 'code')
if not code == None:
code = code.replace(" ", "%20")
self.graph.add((self.iati['activity/' + self.id + 'document-link/' + str(hash_document_link)],
self.iati['document-category'],
self.iati['codelist/DocumentCategory/' + str(code)]))
if not languages == []:
for language in languages:
# Keys
code = AttributeHelper.attribute_key(language, 'code')
# Text
name = AttributeHelper.attribute_language(language, self.default_language)
if not code == None:
self.graph.add((self.iati['activity/' + self.id + 'document-link/' + str(hash_document_link)],
self.iati['language'],
Literal(code)))
if not name == None:
self.graph.add((self.iati['activity/' + self.id + 'document-link/' + str(hash_document_link)],
self.iati['language-text'],
name))
def related_activity(self, xml):
'''Converts the XML of the related-activity element to a RDFLib self.graph.
Parameters
@xml: The XML of this element.'''
# Keys
ref = AttributeHelper.attribute_key(xml, 'ref')
type = AttributeHelper.attribute_key(xml, 'type')
# Text
name = AttributeHelper.attribute_language(xml, self.default_language)
if not ref == None:
ref = ref.replace(" ", "%20")
self.graph.add((self.iati['activity/' + self.id],
self.iati['related-activity'],
self.iati['activity/' + self.id + '/related-activity/' + str(ref)]))
self.graph.add((self.iati['activity/' + self.id + '/related-activity/' + str(ref)],
self.iati['activity'],
self.iati['activity/' + str(ref)]))
self.graph.add((self.iati['activity/' + self.id + '/related-activity/' + str(ref)],
self.iati['related-activity-id'],
Literal(ref)))
if not type == None:
type = type.replace(" ", "%20")
self.graph.add((self.iati['activity/' + self.id + '/related-activity/' + str(ref)],
self.iati['related-activity-type'],
self.iati['codelist/RelatedActivityType/' + str(type)]))
if not name == None:
self.graph.add((self.iati['activity/' + self.id + '/related-activity/' + str(ref)],
RDFS.label,
name))
def conditions(self, xml):
'''Converts the XML of the conditions element to a RDFLib self.graph.
Parameters
@xml: The XML of this element.'''
# Elements
conditions_container = xml.find('conditions')
conditions = conditions_container.findall('condition')
if not conditions == []:
for condition in conditions:
# Keys
type = AttributeHelper.attribute_key(condition, 'type')
# Text
condition_text = AttributeHelper.attribute_language(condition, self.default_language)
if not condition_text == None:
condition_text_text = condition.text
#Create hash
hash = hashlib.md5()
hash.update(condition_text_text)
hash_condition = hash.hexdigest()
self.graph.add((self.iati['activity/' + self.id],
self.iati['activity-condition'],
self.iati['activity/' + self.id + '/condition/' + str(hash_condition)]))
self.graph.add((self.iati['activity/' + self.id + '/condition/' + str(hash_condition)],
RDF.type,
self.iati['condition']))
self.graph.add((self.iati['activity/' + self.id + '/condition/' + str(hash_condition)],
RDFS.label,
condition_text))
if not type == None:
type = type.replace(" ", "%20")
self.graph.add((self.iati['activity/' + self.id + '/condition/' + str(hash_condition)],
self.iati['condition-type'],
self.iati['codelist/ConditionType/' + str(type)]))
def result(self, xml):
'''Converts the XML of the conditions element to a RDFLib self.graph.
Parameters
@xml: The XML of this element.'''
# Keys
type = AttributeHelper.attribute_key(xml, 'type')
aggregation_status = AttributeHelper.attribute_key(xml, 'aggregation-status')
# Elements
titles = xml.findall('title')
descriptions = xml.findall('description')
indicators = xml.findall('indicator')
# Create hash
# Required: one of title or description
hash = hashlib.md5()
hash_created = False
result_title = AttributeHelper.attribute_text(xml, 'title')
if not result_title == None:
hash.update(result_title[0])
hash_created = True
result_description = AttributeHelper.attribute_text(xml, 'description')
if not result_description == None:
hash.update(result_description[0])
hash_created = True
if hash_created:
hash_result = hash.hexdigest()
self.graph.add((self.iati['activity/' + self.id],
self.iati['activity-result'],
self.iati['activity/' + self.id + '/result/' + str(hash_result)]))
self.graph.add((self.iati['activity/' + self.id + '/result/' + str(hash_result)],
RDF.type,
self.iati['result']))
if not titles == []:
for title in titles:
# Text
title_text = AttributeHelper.attribute_language(title, self.default_language)
if not title_text == None:
self.graph.add((self.iati['activity/' + self.id + '/result/' + str(hash_result)],
RDFS.label,
title_text))
if not descriptions == []:
for description in descriptions:
# Keys
type = AttributeHelper.attribute_key(description, 'type')
# Text
description_text = AttributeHelper.attribute_language(description, self.default_language)
if not description_text == None:
# Create hash
# Required: description
hash_description = hashlib.md5()
description_nolanguage = description.text
hash_description.update(description_nolanguage)
hash_location_description = hash_description.hexdigest()
self.graph.add((self.iati['activity/' + self.id + '/result/' + str(hash_result)],
self.iati['result-description'],
self.iati['activity/' + self.id + '/result/' + str(hash_result) +
'/description/' + str(hash_location_description)]))
self.graph.add((self.iati['activity/' + self.id + '/result/' + str(hash_result) +
'/description/' + str(hash_location_description)],
RDF.type,
self.iati['description']))
self.graph.add((self.iati['activity/' + self.id + '/result/' + str(hash_result) +
'/description/' + str(hash_location_description)],
self.iati['description-text'],
description_text))
if not type == None:
type = type.replace(" ", "%20")
self.graph.add((self.iati['activity/' + self.id + '/result/' + str(hash_result) +
'/description/' + str(hash_location_description)],
self.iati['description-type'],
self.iati['codelist/DescriptionType/' + str(type)]))
if not indicators == []:
for indicator in indicators:
# Create hash
# Required: one of title or description
hash = hashlib.md5()
hash_created = False
indicator_title = AttributeHelper.attribute_text(indicator, 'title')
if not indicator_title == None:
hash.update(indicator_title[0])
hash_created = True
indicator_description = AttributeHelper.attribute_text(indicator, 'description')
if not indicator_description == None:
hash.update(indicator_description[0])
hash_created = True
if hash_created:
hash_result_indicator = hash.hexdigest()
# Keys
measure = AttributeHelper.attribute_key(indicator, 'measure')
ascending = AttributeHelper.attribute_key(indicator, 'ascending')
# Elements
titles = indicator.findall('title')
descriptions = indicator.findall('description')
periods = indicator.findall('indicator')
baseline = indicator.find('baseline')
self.graph.add((self.iati['activity/' + self.id + '/result/' + str(hash_result)],
self.iati['result-indicator'],
self.iati['activity/' + self.id + '/result/' + str(hash_result) +
'/indicator/' + str(hash_result_indicator)]))
self.graph.add((self.iati['activity/' + self.id + '/result/' + str(hash_result) +
'/indicator/' + str(hash_result_indicator)],
RDF.type,
self.iati['indicator']))
if not measure == None:
measure = measure.replace(" ", "%20")
self.graph.add((self.iati['activity/' + self.id + '/result/' + str(hash_result) +
'/indicator/' + str(hash_result_indicator)],
self.iati['indicator-measure'],
self.iati['codelist/IndicatorMeasure/' + str(measure)]))
if not ascending == None:
self.graph.add((self.iati['activity/' + self.id + '/result/' + str(hash_result) +
'/indicator/' + str(hash_result_indicator)],
self.iati['indicator-ascending'],
Literal(ascending)))
else:
self.graph.add((self.iati['activity/' + self.id + '/result/' + str(hash_result) +
'/indicator/' + str(hash_result_indicator)],
self.iati['indicator-ascending'],
Literal('True')))
if not titles == []:
for title in titles:
# Text
title_text = AttributeHelper.attribute_language(title, self.default_language)
if not title_text == None:
self.graph.add((self.iati['activity/' + self.id + '/result/' + str(hash_result) +
'/indicator/' + str(hash_result_indicator)],
RDFS.label,
title_text))
if not descriptions == []:
for description in descriptions:
# Keys
type = AttributeHelper.attribute_key(description, 'type')
# Text
description_text = AttributeHelper.attribute_language(description, self.default_language)
# Create hash
# Required: description
hash_indicator_description = hashlib.md5()
description_nolanguage = description.text
hash_indicator_description.update(description_nolanguage)
hash_result_indicator_description = hash_indicator_description.hexdigest()
if not description_text == None:
self.graph.add((self.iati['activity/' + self.id + '/result/' + str(hash_result) +
'/indicator/' + str(hash_result_indicator)],
self.iati['indicator-description'],
self.iati['activity/' + self.id + '/result' + str(hash_result) +
'/indicator/' + str(hash_result_indicator) + '/description/' +
str(hash_result_indicator_description)]))
self.graph.add((self.iati['activity/' + self.id + '/result' + str(hash_result) +
'/indicator/' + str(hash_result_indicator) + '/description/' +
str(hash_result_indicator_description)],
RDF.type,
self.iati['description']))
self.graph.add((self.iati['activity/' + self.id + '/result' + str(hash_result) +
'/indicator/' + str(hash_result_indicator) + '/description/' +
str(hash_result_indicator_description)],
self.iati['description-text'],
description_text))
if not type == None:
type = type.replace(" ", "%20")
self.graph.add((self.iati['activity/' + self.id + '/result' + str(hash_result) +
'/indicator/' + str(hash_result_indicator) + '/description/' +
str(hash_result_indicator_description)],
self.iati['description-type'],
self.iati['codelist/DescriptionType/' + str(type)]))
if not periods == []:
for period in periods:
# Elements
period_start = period.find('period-start')
period_end = period.find('period-end')
target = period.find('target')
actual = period.find('actual')
# Create hash
# Required: one of period-start (iso-date / text), period-end (iso-date / text)
hash_indicator_period = hashlib.md5()
hash_indicator_period_created= False
if not period_start == None:
# Keys
period_start_date = AttributeHelper.attribute_key(period_start, 'iso-date')
if not period_start_date == None:
hash_indicator_period.update(period_start_date)
hash_indicator_period_created = True
period_start_text = period_start.text
if not period_start_text == None:
hash_indicator_period.update(period_start_text)
hash_indicator_period_created = True
if not period_end == None:
# Keys
period_end_date = AttributeHelper.attribute_key(period_end, 'iso-date')
if not period_end_date == None:
hash_indicator_period.update(period_end_date)
hash_indicator_period_created = True
period_end_text = period_end.text
if not period_end_text == None:
hash_indicator_period.update(period_end_text)
hash_indicator_period_created = True
if hash_indicator_period_created:
hash_result_indicator_period = hash_indicator_period.hexdigest()
self.graph.add((self.iati['activity/' + self.id + '/result/' + str(hash_result) +
'/indicator/' + str(hash_result_indicator)],
self.iati['indicator-period'],
self.iati['activity/' + self.id + '/result/' + str(hash_result) +
'/indicator/' + str(hash_result_indicator) + '/period/' +
str(hash_result_indicator_period)]))
self.graph.add((self.iati['activity/' + self.id + '/result/' + str(hash_result) +
'/indicator/' + str(hash_result_indicator) + '/period/' +
str(hash_result_indicator_period)],
RDF.type,
self.iati['period']))
if not period_start == None:
# Keys
date = AttributeHelper.attribute_key(period_start, 'iso-date')
# Text
period_start_text = AttributeHelper.attribute_language(period_start, self.default_language)
if not date == None:
self.graph.add((self.iati['activity/' + self.id + '/result/' + str(hash_result) +
'/indicator/' + str(hash_result_indicator) + '/period/' +
str(hash_result_indicator_period)],
self.iati['start-date'],
Literal(date)))
if not period_start_text == None:
self.graph.add((self.iati['activity/' + self.id + '/result/' + str(hash_result) +
'/indicator/' + str(hash_result_indicator) + '/period/' +
str(hash_result_indicator_period)],
self.iati['start-date-text'],
period_start_text))
if not period_end == None:
# Keys
date = AttributeHelper.attribute_key(period_end, 'iso-date')
# Text
period_end_text = AttributeHelper.attribute_language(period_end, self.default_language)
if not date == None:
self.graph.add((self.iati['activity/' + self.id + '/result/' + str(hash_result) +
'/indicator/' + str(hash_result_indicator) + '/period/' +
str(hash_result_indicator_period)],
self.iati['end-date'],
Literal(date)))
if not period_end_text == None:
self.graph.add((self.iati['activity/' + self.id + '/result/' + str(hash_result) +
'/indicator/' + str(hash_result_indicator) + '/period/' +
str(hash_result_indicator_period)],
self.iati['end-date-text'],
period_end_text))
if not target == None:
# Keys
value = AttributeHelper.attribute_key(target, 'value')
if not value == None:
self.graph.add((self.iati['activity/' + self.id + '/result/' + str(hash_result) +
'/indicator/' + str(hash_result_indicator) + '/period/' +
str(hash_result_indicator_period)],
self.iati['period-target'],
Literal(value)))
if not actual == None:
# Keys
value = AttributeHelper.attribute_key(actual, 'value')
if not value == None:
self.graph.add((self.iati['activity/' + self.id + '/result/' + str(hash_result) +
'/indicator/' + str(hash_result_indicator) + '/period/' +
str(hash_result_indicator_period)],
self.iati['period-actual'],
Literal(value)))
if not baseline == None:
# Keys
year = AttributeHelper.attribute_key(baseline, 'year')
value = AttributeHelper.attribute_key(baseline, 'value')
# Elements
comment = baseline.find('comment')
if not value == None:
self.graph.add((self.iati['activity/' + self.id + '/result/' + str(hash_result) +
'/indicator/' + str(hash_result_indicator)],
self.iati['baseline-value'],
Literal(value)))
if not year == None:
self.graph.add((self.iati['activity/' + self.id + '/result/' + str(hash_result) +
'/indicator/' + str(hash_result_indicator)],
self.iati['baseline-year'],
Literal(year)))
if not comment == None:
# Text
comment_text = AttributeHelper.attribute_language(comment, self.default_language)
if not comment_text == None:
self.graph.add((self.iati['activity/' + self.id + '/result/' + str(hash_result) +
'/indicator/' + str(hash_result_indicator)],
self.iati['baseline-comment'],
comment_text))
class CodelistElements :
'''Class for converting XML elements of IATI codelists to a RDFLib self.graph.'''
    def __init__(self, defaults):
        '''Initializes class.
        Parameters
        @defaults: A dictionary of defaults, containing at least the keys
        'id' (codelist identifier), 'language' (fallback language code)
        and 'namespace' (base IATI namespace URI).'''
        # Codelist identifier; also used as the prefix bound to this codelist's namespace.
        self.id = defaults['id']
        # Language used when an element carries no explicit language of its own.
        self.default_language = defaults['language']
        # Base namespace; every generated URI hangs off of this.
        self.iati = Namespace(defaults['namespace'])
        # Namespace shared by all codelists: <iati>codelist/
        self.codelist = Namespace(self.iati['codelist/'])
        # Namespace for this particular codelist: <iati>codelist/<id>/
        self.codelist_uri = Namespace(self.codelist[str(self.id) + '/'])
        self.graph = Graph()
        self.graph.bind('iati', self.iati)
        self.graph.bind('codelist', self.codelist)
        self.graph.bind(self.id, self.codelist_uri)
    def get_result(self):
        '''Returns the resulting self.graph of the codelist conversion.
        Returns
        @graph: The RDFLib self.graph with all statements added so far.'''
        return self.graph
def code(self, xml, code, language, category_code):
'''Converts the XML of the code element to a RDFLib self.graph.
Parameters
@xml: The XML of this element.
@code: A list of codes or None.
@language: A list of languages or None.
@category_code: A list of category codes or None.'''
# Text
code = xml.text
if not code == None:
code = " ".join(code.split())
self.graph.add((self.codelist[str(self.id)],
self.iati['codelist-member'],
self.codelist_uri[code]))
self.graph.add((self.codelist_uri[code],
self.iati['member-of-codelist'],
self.codelist[str(self.id)]))
self.graph.add((self.codelist_uri[code],
self.iati['code'],
Literal(code)))
self.graph.add((self.codelist_uri[code],
RDF.type,
self.iati['codelist-code']))
def language(self, xml, code, language, category_code):
'''Converts the XML of the language element to a RDFLib self.graph.
Parameters
@xml: The XML of this element.
@code: A list of codes or None.
@language: A list of languages or None.
@category_code: A list of category codes or None.'''
# Skipped
skip = True
def name(self, xml, code, language, category_code):
'''Converts the XML of the name element to a RDFLib self.graph.
Parameters
@xml: The XML of this element.
@code: A list of codes or None.
@language: A list of languages or None.
@category_code: A list of category codes or None.'''
# Text
if not language == None:
name = AttributeHelper.attribute_language(xml, language[0])
else:
name = AttributeHelper.attribute_language(xml, self.default_language)
if (not code == None) and (not name == None):
self.graph.add((self.codelist_uri[code[0]],
RDFS.label,
name))
def description(self, xml, code, language, category_code):
'''Converts the XML of the description element to a RDFLib self.graph.
Parameters
@xml: The XML of this element.
@code: A list of codes or None.
@language: A list of languages or None.
@category_code: A list of category codes or None.'''
# Text
if not language == None:
description = AttributeHelper.attribute_language(xml, language[0])
else:
description = AttributeHelper.attribute_language(xml, self.default_language)
if (not code == None) and (not description == None):
self.graph.add((self.codelist_uri[code[0]],
RDFS.comment,
description))
def abbreviation(self, xml, code, language, category_code):
'''Converts the XML of the abbreviation element to a RDFLib self.graph.
Parameters
@xml: The XML of this element.
@code: A list of codes or None.
@language: A list of languages or None.
@category_code: A list of category codes or None.'''
# Text
if not language == None:
abbreviation = AttributeHelper.attribute_language(xml, language[0])
else:
abbreviation = AttributeHelper.attribute_language(xml, self.default_language)
if (not code == None) and (not abbreviation == None):
self.graph.add((self.codelist_uri[code[0]],
self.iati['abbreviation'],
abbreviation))
def category(self, xml, code, language, category_code):
'''Converts the XML of the category element to a RDFLib self.graph.
Parameters
@xml: The XML of this element.
@code: A list of codes or None.
@language: A list of languages or None.
@category_code: A list of category codes or None.'''
# Text
category = xml.text
if not category == None:
category = " ".join(category.split())
self.graph.add((self.codelist['category/' + category],
RDF.type,
self.iati['codelist-category']))
self.graph.add((self.codelist_uri[code[0]],
self.iati['in-category'],
self.codelist['category/' + category]))
self.graph.add((self.codelist['category/' + category],
self.iati['has-member'],
self.codelist_uri[code[0]]))
self.graph.add((self.codelist['category/' + category],
self.iati['code'],
Literal(category)))
def category_name(self, xml, code, language, category_code):
'''Converts the XML of the category-name element to a RDFLib self.graph.
Parameters
@xml: The XML of this element.
@code: A list of codes or None.
@language: A list of languages or None.
@category_code: A list of category codes or None.'''
# Text
if not language == None:
name = AttributeHelper.attribute_language(xml, language[0])
else:
name = AttributeHelper.attribute_language(xml, self.default_language)
if (not category_code == None) and (not name == None):
self.graph.add((self.codelist['category/' + category_code[0]],
RDFS.label,
name))
def category_description(self, xml, code, language, category_code):
'''Converts the XML of the category-description element to a RDFLib self.graph.
Parameters
@xml: The XML of this element.
@code: A list of codes or None.
@language: A list of languages or None.
@category_code: A list of category codes or None.'''
# Text
if not language == None:
description = AttributeHelper.attribute_language(xml, language[0])
else:
description = AttributeHelper.attribute_language(xml, self.default_language)
if (not category_code == None) and (not description == None):
self.graph.add((self.codelist['category/' + category_code[0]],
RDFS.comment,
description))
class OrganisationElements :
'''Class for converting XML elements of IATI organisations to a RDFLib self.graph.'''
def __init__(self, defaults):
'''Initializes class.
Parameters
@defaults: A dictionary of defaults.'''
self.id = defaults['id'].replace(" ", "%20")
self.default_language = defaults['language']
self.default_currency = defaults['currency']
self.iati = Namespace(defaults['namespace'])
self.iati_custom = Namespace(defaults['namespace'] + "custom/")
self.org_uri = Namespace(self.iati['organisation/' + self.id])
self.graph = Graph()
self.graph.bind('iati', self.iati)
self.graph.bind('iati-custom', self.iati_custom)
self.graph.bind('owl', 'http://www.w3.org/2002/07/owl#')
self.graph.add((self.org_uri,
RDF.type,
self.iati['organisation']))
self.graph.add((self.org_uri,
OWL.sameAs,
self.iati['codelist/OrganisationIdentifier/' + self.id]))
self.graph.add((self.iati['codelist/OrganisationIdentifier/' + self.id],
OWL.sameAs,
self.org_uri))
def __update_progress(self, element):
'''Updates the progress of the number of elements.
Parameters
@element: A string of the element name.'''
try:
self.progress[element] += 1
except KeyError:
self.progress[element] = 1
def get_result(self):
'''Returns the resulting self.graph of the activity.
Returns
@graph: The RDFLib self.graph with added statements.'''
return self.graph
def process_unknown_tag(self, tag):
'''Returns the correct tag for use in unknown elements.
Parameters
@tag: The original tag.
Returns
@namespace: The RDFLib Namespace to be used.
@name: The name of the tag.'''
tag = tag.replace("{", "").replace("}", "")
if ":" in tag:
if tag[:4] == "http":
return Namespace(tag.replace(" ", "-")), tag.rsplit('/',1)[1].replace(" ", "%20")
else:
tag = tag.split(":")[1]
if tag[:9] == "organisation-":
return Namespace(self.iati[tag.replace(" ", "-")]), tag.replace(" ", "%20")
else:
return Namespace(self.iati["organisation-" + tag.replace(" ", "-")]), str("organisation-" + tag.replace(" ", "%20"))
else:
if tag[:9] == "activity-":
return Namespace(self.iati[tag.replace(" ", "-")]), tag.replace(" ", "%20")
else:
return Namespace(self.iati["organisation-" + tag.replace(" ", "-")]), str("organisation-" + tag.replace(" ", "%20"))
def convert_unknown(self, xml):
'''Converts non-IATI standard elements up to 2 levels to a RDFLib self.graph.
Parameters:
@xml: The XML of this element.'''
if not "ignore" in xml.tag:
namespace, name = self.process_unknown_tag(xml.tag)
children_elements = xml.findall("./")
if children_elements == []:
# No children
if (not xml.text == None) and (not xml.text == ""):
if len(xml.text) > 1:
self.graph.add((self.iati['organisation/' + self.id],
namespace,
Literal(xml.text)))
for key in xml.attrib:
key_text = xml.attrib[key]
if "}" in key:
key = key.rsplit('}',1)[1]
if (not key_text == None) and (not key_text == ""):
self.graph.add((self.iati['organisation/' + self.id],
URIRef(namespace + '-' + str(key)),
Literal(key_text)))
else:
# Does have children
self.graph.add((self.iati['organisation/' + self.id],
namespace,
self.iati['organisation/' + self.id + '/' + str(name)]))
for key in xml.attrib:
key_text = xml.attrib[key]
if "}" in key:
key = key.rsplit('}',1)[1]
if (not key_text == None) and (not key_text == ""):
key = key.replace(" ", "-")
self.graph.add((self.iati['organisation/' + self.id + '/' + str(name)],
self.iati_custom[str(key)],
Literal(key_text)))
for child in xml:
children_elements = child.findall("./")
child_namespace, child_name = self.process_unknown_tag(child.tag)
if children_elements == []:
# No grand-children
if not child.text == None:
if len(child.text) > 1:
self.graph.add((self.iati['organisation/' + self.id + '/' + str(name)],
child_namespace,
Literal(child.text)))
for key in child.attrib:
key_text = child.attrib[key]
if "}" in key:
key = key.rsplit('}',1)[1]
if (not key_text == None) and (not key_text == ""):
key = key.replace(" ", "-")
self.graph.add((self.iati['organisation/' + self.id + '/' + str(name)],
URIRef(child_namespace + '-' + str(key)),
Literal(key_text)))
else:
# Has grand-children
self.graph.add((self.iati['organisation/' + self.id + '/' + str(name)],
URIRef(namespace + '-' + str(child_name)),
self.iati['organisation/' + self.id + '/' + str(name) + '/' + str(child_name)]))
for key in child.attrib:
key_text = child.attrib[key]
if "}" in key:
key = key.rsplit('}',1)[1]
if (not key_text == None) and (not key_text == ""):
key = key.replace(" ", "-")
self.graph.add((self.iati['organisation/' + self.id + '/' + str(name) + '/' + str(child_name)],
self.iati_custom[str(key)],
Literal(key_text)))
for grandchild in child:
grandchildren_elements = grandchild.findall("./")
grandchild_namespace, grandchild_name = self.process_unknown_tag(grandchild.tag)
if grandchildren_elements == []:
# No grand-grand-children
if not grandchild == None:
if len(grandchild.text) > 1:
self.graph.add((self.iati['organisation/' + self.id + '/' + str(name) + '/' + str(child_name)],
grandchild_namespace,
Literal(grandchild.text)))
for key in grandchild.attrib:
key_text = grandchild.attrib[key]
if "}" in key:
key = key.rsplit('}',1)[1]
if (not key_text == None) and (not key_text == ""):
key = key.replace(" ", "-")
self.graph.add((self.iati['organisation/' + self.id + '/' + str(name) + '/' + str(child_name)],
URIRef(grandchild_namespace + '-' + str(key)),
Literal(key_text)))
else:
# Three levels
print "Three levels for a non-IATI element (" + str(name) + ") is not supported..."
def reporting_org(self, xml):
'''Converts the XML of the reporting-org element to a RDFLib self.graph.
Parameters
@xml: The XML of this element.'''
# Keys
ref = AttributeHelper.attribute_key(xml, 'ref')
type = AttributeHelper.attribute_key(xml, 'type')
# Text
name = AttributeHelper.attribute_language(xml, self.default_language)
if not ref == None:
ref = ref.replace(" ", "%20")
self.graph.add((self.org_uri,
self.iati['organisation-reporting-org'],
self.org_uri['/reporting-org/' + str(ref)]))
self.graph.add((self.org_uri['/reporting-org/' + str(ref)],
OWL.sameAs,
self.iati['codelist/OrganisationIdentifier/' + str(ref)]))
self.graph.add((self.org_uri['/reporting-org/' + str(ref)],
RDF.type,
self.iati['organisation']))
if not name == None:
self.graph.add((self.org_uri['/reporting-org/' + str(ref)],
RDFS.label,
name))
if not type == None:
type = type.replace(" ", "%20")
self.graph.add((self.org_uri['/reporting-org/' + str(ref)],
self.iati['organisation-type'],
self.iati['codelist/OrganisationType/' + str(type)]))
elif not name == None:
# Create hash
# Required: name
hash = hashlib.md5()
hash.update(name)
hash_name = hash.hexdigest()
self.graph.add((self.org_uri['/reporting-org/' + str(hash_name)],
RDF.type,
self.iati['organisation']))
self.graph.add((self.org_uri['/reporting-org/' + str(hash_name)],
RDFS.label,
name))
if not type == None:
type = type.replace(" ", "%20")
self.graph.add((self.org_uri['/reporting-org/' + str(hash_name)],
self.iati['organisation-type'],
self.iati['codelist/OrganisationType/' + str(type)]))
def iati_identifier(self, xml):
'''Converts the XML of the iati-identifier element to a RDFLib self.graph.
Parameters
@xml: The XML of this element.'''
# Text
id = xml.text
if not id == None:
id = " ".join(id.split())
self.graph.add((self.org_uri,
self.iati['iati-identifier'],
Literal(id)))
def identifier(self, xml):
'''Converts the XML of the identifier element to a RDFLib self.graph.
Parameters
@xml: The XML of this element.'''
# Text
id = xml.text
if not id == None:
id = " ".join(id.split())
self.graph.add((self.org_uri,
self.iati['organisation-id'],
Literal(id)))
def name(self, xml):
'''Converts the XML of the name element to a RDFLib self.graph.
Parameters
@xml: The XML of this element.'''
# Text
name = AttributeHelper.attribute_language(xml, self.default_language)
if not name == None:
self.graph.add((self.org_uri,
RDFS.label,
name))
def total_budget(self, xml):
'''Converts the XML of the total-budget element to a RDFLib self.graph.
Parameters
@xml: The XML of this element.'''
# Elements
period_start = xml.find('period-start')
period_end = xml.find('period-end')
value = xml.find('value')
# Create hash
# Required: value, period_start (text / iso-date), period_end (text / iso-date)
hash = hashlib.md5()
hash_created = False
if not period_start == None:
# Keys
period_start_date = AttributeHelper.attribute_key(period_start, 'iso-date')
if not period_start_date == None:
hash.update(period_start_date)
hash_created = True
period_start_text = period_start.text
if not period_start_text == None:
hash.update(period_start_text)
hash_created = True
if not period_end == None:
# Keys
period_end_date = AttributeHelper.attribute_key(period_end, 'iso-date')
if not period_end_date == None:
hash.update(period_end_date)
hash_created = True
period_end_text = period_end.text
if not period_end_text == None:
hash.update(period_end_text)
hash_created = True
if not value == None:
value_text = value.text
if not value_text == None:
hash.update(value_text)
hash_created = True
if hash_created:
hash_total_budget = hash.hexdigest()
self.graph.add((self.org_uri,
self.iati['organisation-total-budget'],
self.iati['organisation/' + self.id + '/total-budget/' + str(hash_total_budget)]))
self.graph.add((self.iati['organisation/' + self.id + '/total-budget/' + str(hash_total_budget)],
RDF.type,
self.iati['budget']))
if not period_start == None:
# Keys
date = AttributeHelper.attribute_key(period_start, 'iso-date')
# Text
period_start_text = AttributeHelper.attribute_language(period_start, self.default_language)
if not date == None:
self.graph.add((self.iati['organisation/' + self.id + '/total-budget/' + str(hash_total_budget)],
self.iati['start-date'],
Literal(date)))
if not period_start_text == None:
self.graph.add((self.iati['organisation/' + self.id + '/total-budget/' + str(hash_total_budget)],
self.iati['start-date-text'],
period_start_text))
if not period_end == None:
# Keys
date = AttributeHelper.attribute_key(period_end, 'iso-date')
# Text
period_end_text = AttributeHelper.attribute_language(period_end, self.default_language)
if not date == None:
self.graph.add((self.iati['organisation/' + self.id + '/total-budget/' + str(hash_total_budget)],
self.iati['end-date'],
Literal(date)))
if not period_end_text == None:
self.graph.add((self.iati['organisation/' + self.id + '/total-budget/' + str(hash_total_budget)],
self.iati['end-date-text'],
period_end_text))
if not value == None:
# Keys
currency = AttributeHelper.attribute_key(value, 'currency')
value_date = AttributeHelper.attribute_key(value, 'value-date')
# Text
value_text = value.text
if not value_text == None:
value_text = " ".join(value_text.split())
self.graph.add((self.iati['organisation/' + self.id + '/total-budget/' + str(hash_total_budget)],
self.iati['value'],
Literal(value_text)))
if not currency == None:
currency = currency.replace(" ", "%20")
self.graph.add((self.iati['organisation/' + self.id + '/total-budget/' + str(hash_total_budget)],
self.iati['value-currency'],
self.iati['codelist/Currency/' + str(currency)]))
elif not self.default_currency == None:
self.default_currency = self.default_currency.replace(" ", "%20")
self.graph.add((self.iati['organisation/' + self.id + '/total-budget/' + str(hash_total_budget)],
self.iati['value-currency'],
self.iati['codelist/Currency/' + str(self.default_currency)]))
if not value_date == None:
self.graph.add((self.iati['organisation/' + self.id + '/total-budget/' + str(hash_total_budget)],
self.iati['value-date'],
Literal(value_date)))
def recipient_org_budget(self, xml):
'''Converts the XML of the recipient-org-budget element to a RDFLib self.graph.
Parameters
@xml: The XML of this element.'''
# Elements
recipient_org = xml.find('recipient-org')
period_start = xml.find('period-start')
period_end = xml.find('period-end')
value = xml.find('value')
# Create hash
# Required: value, period_start (text / iso-date), period_end (text / iso-date)
hash = hashlib.md5()
hash_created = False
if not period_start == None:
# Keys
period_start_date = AttributeHelper.attribute_key(period_start, 'iso-date')
if not period_start_date == None:
hash.update(period_start_date)
hash_created = True
period_start_text = period_start.text
if not period_start_text == None:
hash.update(period_start_text)
hash_created = True
if not period_end == None:
# Keys
period_end_date = AttributeHelper.attribute_key(period_end, 'iso-date')
if not period_end_date == None:
hash.update(period_end_date)
hash_created = True
period_end_text = period_end.text
if not period_end_text == None:
hash.update(period_end_text)
hash_created = True
if not value == None:
value_text = value.text
if not value_text == None:
hash.update(value_text)
hash_created = True
if hash_created:
hash_recipient_org_budget = hash.hexdigest()
self.graph.add((self.org_uri,
self.iati['organisation-recipient-org-budget'],
self.iati['organisation/' + self.id + '/recipient-org-budget/' + str(hash_recipient_org_budget)]))
self.graph.add((self.iati['organisation/' + self.id + '/recipient-org-budget/' + str(hash_recipient_org_budget)],
RDF.type,
self.iati['budget']))
if not recipient_org == None:
# Keys
ref = AttributeHelper.attribute_key(recipient_org, 'ref')
# Text
recipient_org_text = AttributeHelper.attribute_language(recipient_org, self.default_language)
if not ref == None:
self.graph.add((self.iati['organisation/' + self.id + '/recipient-org-budget/' + str(hash_recipient_org_budget)],
self.iati['recipient-org-ref'],
Literal(ref)))
ref = ref.replace(" ", "%20")
self.graph.add((self.iati['organisation/' + self.id + '/recipient-org-budget/' + str(hash_recipient_org_budget)],
self.iati['recipient-org'],
self.iati['codelist/OrganisationIdentifier/' + ref]))
if not recipient_org_text == None:
self.graph.add((self.iati['organisation/' + self.id + '/recipient-org-budget/' + str(hash_recipient_org_budget)],
self.iati['recipient-org-name'],
recipient_org_text))
if not period_start == None:
# Keys
date = AttributeHelper.attribute_key(period_start, 'iso-date')
# Text
period_start_text = AttributeHelper.attribute_language(period_start, self.default_language)
if not date == None:
self.graph.add((self.iati['organisation/' + self.id + '/recipient-org-budget/' + str(hash_recipient_org_budget)],
self.iati['start-date'],
Literal(date)))
if not period_start_text == None:
self.graph.add((self.iati['organisation/' + self.id + '/recipient-org-budget/' + str(hash_recipient_org_budget)],
self.iati['start-date-text'],
period_start_text))
if not period_end == None:
# Keys
date = AttributeHelper.attribute_key(period_end, 'iso-date')
# Text
period_end_text = AttributeHelper.attribute_language(period_end, self.default_language)
if not date == None:
self.graph.add((self.iati['organisation/' + self.id + '/recipient-org-budget/' + str(hash_recipient_org_budget)],
self.iati['end-date'],
Literal(date)))
if not period_end_text == None:
self.graph.add((self.iati['organisation/' + self.id + '/recipient-org-budget/' + str(hash_recipient_org_budget)],
self.iati['end-date-text'],
period_end_text))
if not value == None:
# Keys
currency = AttributeHelper.attribute_key(value, 'currency')
value_date = AttributeHelper.attribute_key(value, 'value-date')
# Text
value_text = value.text
if not value_text == None:
value_text = " ".join(value_text.split())
self.graph.add((self.iati['organisation/' + self.id + '/recipient-org-budget/' + str(hash_recipient_org_budget)],
self.iati['value'],
Literal(value_text)))
if not currency == None:
currency = currency.replace(" ", "%20")
self.graph.add((self.iati['organisation/' + self.id + '/recipient-org-budget/' + str(hash_recipient_org_budget)],
self.iati['value-currency'],
self.iati['codelist/Currency/' + str(currency)]))
elif not self.default_currency == None:
self.default_currency = self.default_currency.replace(" ", "%20")
self.graph.add((self.iati['organisation/' + self.id + '/recipient-org-budget/' + str(hash_recipient_org_budget)],
self.iati['value-currency'],
self.iati['codelist/Currency/' + str(self.default_currency)]))
if not value_date == None:
self.graph.add((self.iati['organisation/' + self.id + '/recipient-org-budget/' + str(hash_recipient_org_budget)],
self.iati['value-date'],
Literal(value_date)))
def recipient_country_budget(self, xml):
'''Converts the XML of the recipient-country-budget element to a RDFLib self.graph.
Parameters
@xml: The XML of this element.'''
# Elements
recipient_country = xml.find('recipient-country')
period_start = xml.find('period-start')
period_end = xml.find('period-end')
value = xml.find('value')
# Create hash
# Required: value, period_start (text / iso-date), period_end (text / iso-date)
hash = hashlib.md5()
hash_created = False
if not period_start == None:
# Keys
period_start_date = AttributeHelper.attribute_key(period_start, 'iso-date')
if not period_start_date == None:
hash.update(period_start_date)
hash_created = True
period_start_text = period_start.text
if not period_start_text == None:
hash.update(period_start_text)
hash_created = True
if not period_end == None:
# Keys
period_end_date = AttributeHelper.attribute_key(period_end, 'iso-date')
if not period_end_date == None:
hash.update(period_end_date)
hash_created = True
period_end_text = period_end.text
if not period_end_text == None:
hash.update(period_end_text)
hash_created = True
if not value == None:
value_text = value.text
if not value_text == None:
hash.update(value_text)
hash_created = True
if hash_created:
hash_recipient_country_budget = hash.hexdigest()
self.graph.add((self.org_uri,
self.iati['organisation-recipient-country-budget'],
self.iati['organisation/' + self.id + '/recipient-country-budget/' + str(hash_recipient_country_budget)]))
self.graph.add((self.iati['organisation/' + self.id + '/recipient-country-budget/' + str(hash_recipient_country_budget)],
RDF.type,
self.iati['budget']))
if not recipient_country == None:
# Keys
code = AttributeHelper.attribute_key(recipient_country, 'code')
# Text
recipient_country_text = AttributeHelper.attribute_language(recipient_country, self.default_language)
if not code == None:
self.graph.add((self.iati['organisation/' + self.id + '/recipient-country-budget/' + str(hash_recipient_country_budget)],
self.iati['recipient-country-ref'],
Literal(code)))
code = code.replace(" ", "%20")
self.graph.add((self.iati['organisation/' + self.id + '/recipient-country-budget/' + str(hash_recipient_country_budget)],
self.iati['recipient-country'],
self.iati['codelist/Country/' + code]))
if not recipient_country_text == None:
self.graph.add((self.iati['organisation/' + self.id + '/recipient-country-budget/' + str(hash_recipient_country_budget)],
self.iati['recipient-country-name'],
recipient_country_text))
if not period_start == None:
# Keys
date = AttributeHelper.attribute_key(period_start, 'iso-date')
# Text
period_start_text = AttributeHelper.attribute_language(period_start, self.default_language)
if not date == None:
self.graph.add((self.iati['organisation/' + self.id + '/recipient-country-budget/' + str(hash_recipient_country_budget)],
self.iati['start-date'],
Literal(date)))
if not period_start_text == None:
self.graph.add((self.iati['organisation/' + self.id + '/recipient-country-budget/' + str(hash_recipient_country_budget)],
self.iati['start-date-text'],
period_start_text))
if not period_end == None:
# Keys
date = AttributeHelper.attribute_key(period_end, 'iso-date')
# Text
period_end_text = AttributeHelper.attribute_language(period_end, self.default_language)
if not date == None:
self.graph.add((self.iati['organisation/' + self.id + '/recipient-country-budget/' + str(hash_recipient_country_budget)],
self.iati['end-date'],
Literal(date)))
if not period_end_text == None:
self.graph.add((self.iati['organisation/' + self.id + '/recipient-country-budget/' + str(hash_recipient_country_budget)],
self.iati['end-date-text'],
period_end_text))
if not value == None:
# Keys
currency = AttributeHelper.attribute_key(value, 'currency')
value_date = AttributeHelper.attribute_key(value, 'value-date')
# Text
value_text = value.text
if not value_text == None:
value_text = " ".join(value_text.split())
self.graph.add((self.iati['organisation/' + self.id + '/recipient-country-budget/' + str(hash_recipient_country_budget)],
self.iati['value'],
Literal(value_text)))
if not currency == None:
currency = currency.replace(" ", "%20")
self.graph.add((self.iati['organisation/' + self.id + '/recipient-country-budget/' + str(hash_recipient_country_budget)],
self.iati['value-currency'],
self.iati['codelist/Currency/' + str(currency)]))
elif not self.default_currency == None:
self.default_currency = self.default_currency.replace(" ", "%20")
self.graph.add((self.iati['organisation/' + self.id + '/recipient-country-budget/' + str(hash_recipient_country_budget)],
self.iati['value-currency'],
self.iati['codelist/Currency/' + str(self.default_currency)]))
if not value_date == None:
self.graph.add((self.iati['organisation/' + self.id + '/recipient-country-budget/' + str(hash_recipient_country_budget)],
self.iati['value-date'],
Literal(value_date)))
def document_link(self, xml):
'''Converts the XML of the document-link element to a RDFLib self.graph.
Parameters
@xml: The XML of this element.'''
# Keys
url = AttributeHelper.attribute_key(xml, 'url')
format = AttributeHelper.attribute_key(xml, 'format')
# Elements
titles = xml.findall('title')
category = xml.find('category')
languages = xml.findall('language')
if not url == None:
# Create hash
# Required: url
hash = hashlib.md5()
hash.update(url)
hash_document_link = hash.hexdigest()
self.graph.add((self.org_uri,
self.iati['organisation-document-link'],
self.iati['organisation/' + self.id + '/document-link/' + str(hash_document_link)]))
self.graph.add((self.iati['organisation/' + self.id + '/document-link/' + str(hash_document_link)],
RDF.type,
self.iati['document-link']))
if not url == None:
url = url.replace(" ", "%20")
self.graph.add((self.iati['organisation/' + self.id + '/document-link/' + str(hash_document_link)],
self.iati['url'],
URIRef(url)))
if not format == None:
format = format.replace(" ", "%20")
self.graph.add((self.iati['organisation/' + self.id + '/document-link/' + str(hash_document_link)],
self.iati['format'],
self.iati['codelist/FileFormat/' + str(format)]))
if not titles == []:
for title in titles:
# Text
name = AttributeHelper.attribute_language(title, self.default_language)
self.graph.add((self.iati['organisation/' + self.id + '/document-link/' + str(hash_document_link)],
RDFS.label,
name))
if not category == None:
# Keys
code = AttributeHelper.attribute_key(category, 'code')
if not code == None:
code = code.replace(" ", "%20")
self.graph.add((self.iati['organisation/' + self.id + '/document-link/' + str(hash_document_link)],
self.iati['document-category'],
self.iati['codelist/DocumentCategory/' + str(code)]))
if not languages == []:
for language in languages:
# Text
code = AttributeHelper.attribute_language(language, self.default_language)
if not code == None:
self.graph.add((self.iati['organisation/' + self.id + '/document-link/' + str(hash_document_link)],
self.iati['language'],
Literal(code)))
class ProvenanceElements :
'''Class for converting XML elements of self.iati activities to a RDFLib self.graph.'''
def __init__(self, defaults, namespace):
'''Initializes class.
Parameters
@defaults: A dictionary of default provenance items.
@namespace: The default RDFLib Namespace.'''
self.id = defaults['id'].replace(" ", "%20")
self.type = defaults['type']
self.provenance = defaults['provenance']
self.source_name = defaults['document_name']
self.version = defaults['version']
self.last_updated = defaults['last_updated']
self.iati = namespace
self.source = Namespace(self.iati['graph/' + str(self.type) + '/' + str(self.id)])
self.provenance.add((self.source,
RDF.type,
self.iati['graph']))
if not id == None:
if not self.version == None:
self.provenance.add((self.source,
self.iati['version'],
Literal(self.version)))
if not self.last_updated == None:
self.provenance.add((self.source,
self.iati['last-updated'],
Literal(self.last_updated)))
def get_result(self):
'''Returns the resulting self.graph of the activity.
Returns
@graph: The RDFLib self.graph with added statements.'''
return self.provenance
def maintainer(self, value):
'''Converts the JSON of the maintainer element to a RDFLib self.graph.
Parameters
@value: The value of the json.'''
if (not value == 'null') and (not str(value) == "") and (not value == None):
self.provenance.add((self.source,
self.iati['source-document-maintainer'],
self.source['/maintainer']))
self.provenance.add((self.source['/maintainer'],
self.iati['maintainer-name'],
Literal(value)))
self.provenance.add((self.source['/maintainer'],
RDF.type,
self.iati['maintainer']))
def maintainer_email(self, value):
'''Converts the JSON of the maintainer_email element to a RDFLib self.graph.
Parameters
@value: The value of the json.'''
if (not value == 'null') and (not str(value) == "") and (not value == None):
self.provenance.add((self.source,
self.iati['source-document-maintainer'],
self.source['/maintainer']))
self.provenance.add((self.source['/maintainer'],
self.iati['maintainer-email'],
Literal(value)))
self.provenance.add((self.source['/maintainer'],
RDF.type,
self.iati['maintainer']))
def func_id(self, value):
'''Converts the JSON of the id element to a RDFLib self.graph.
Parameters
@value: The value of the json.'''
if (not value == 'null') and (not str(value) == "") and (not value == None):
self.provenance.add((self.source,
self.iati['source-document-id'],
Literal(value)))
def metadata_created(self, value):
'''Converts the JSON of the metadata_created element to a RDFLib self.graph.
Parameters
@value: The value of the json.'''
if (not value == 'null') and (not str(value) == "") and (not value == None):
self.provenance.add((self.source,
self.iati['source-document-metadata-created'],
Literal(value)))
def metadata_modified(self, value):
'''Converts the JSON of the metadata_modified element to a RDFLib self.graph.
Parameters
@value: The value of the json.'''
if (not value == 'null') and (not str(value) == "") and (not value == None):
self.provenance.add((self.source,
self.iati['source-document-metadata-modified'],
Literal(value)))
def relationships(self, value):
'''Converts the JSON of the relationships element to a RDFLib self.graph.
Parameters
@value: The value of the json.'''
if (not value == 'null') and (not str(value) == "") and (not value == None):
for entry in value:
if (not entry == 'null') or (not entry == "") or (not entry == None):
self.provenance.add((self.source,
self.iati['source-document-relationship'],
Literal(entry)))
def license(self, value):
'''Converts the JSON of the license element to a RDFLib self.graph.
Parameters
@value: The value of the json.'''
if (not value == 'null') and (not str(value) == "") and (not value == None):
self.provenance.add((self.source,
self.iati['source-document-license'],
Literal(value)))
def author(self, value):
    '''Links the source document to an author node and records the
    author's name on that node.

    Parameters
    @value: The value of the json (the author name).'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['source-document-author'],
                             self.source['/author']))
        self.provenance.add((self.source['/author'],
                             self.iati['author-name'],
                             Literal(value)))
        self.provenance.add((self.source['/author'],
                             RDF.type,
                             self.iati['author']))

def author_email(self, value):
    '''Links the source document to an author node and records the
    author's e-mail address on that node.

    Parameters
    @value: The value of the json (the author e-mail).'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['source-document-author'],
                             self.source['/author']))
        self.provenance.add((self.source['/author'],
                             self.iati['author-email'],
                             Literal(value)))
        self.provenance.add((self.source['/author'],
                             RDF.type,
                             self.iati['author']))
def download_url(self, value):
    '''Adds the source-document-download-url triple to the provenance graph.

    Parameters
    @value: The value of the json (a URL).'''
    if value is not None and value != 'null' and str(value) != "":
        # Percent-encode spaces so the value is a legal URI.
        value = value.replace(" ", "%20")
        self.provenance.add((self.source,
                             self.iati['source-document-download-url'],
                             URIRef(value)))

def state(self, value):
    '''Adds the source-document-state triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['source-document-state'],
                             Literal(value)))
def func_version(self, value):
    '''Adds the source-document-version triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['source-document-version'],
                             Literal(value)))

def license_func_id(self, value):
    '''Adds the source-document-license-id triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['source-document-license-id'],
                             Literal(value)))
def resources(self, value):
    '''Dispatches each key of the first resources entry to the matching
    resources_<key> handler.

    Parameters
    @value: The value of the json (a list of resource dicts).'''
    if value is not None and value != 'null' and str(value) != "":
        # NOTE(review): only the first resource (value[0]) is processed,
        # as in the original -- confirm whether later entries matter.
        for entry in value[0]:
            handler = getattr(self, 'resources_' + str(entry))
            handler(value[0][entry])
def resources_cache_last_updated(self, value):
    '''Adds the resources-cache-last-updated triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    # Idiom fix throughout: test None by identity, skip 'null' / empties.
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['resources-cache-last-updated'],
                             Literal(value)))

def resources_mimetype(self, value):
    '''Adds the resources-mimetype triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['resources-mimetype'],
                             Literal(value)))

def resources_resource_group_id(self, value):
    '''Adds the resources-resource-group-id triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['resources-resource-group-id'],
                             Literal(value)))

def resources_hash(self, value):
    '''Adds the resources-hash triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['resources-hash'],
                             Literal(value)))

def resources_description(self, value):
    '''Adds the resources-description triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['resources-description'],
                             Literal(value)))

def resources_format(self, value):
    '''Adds the resources-format triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['resources-format'],
                             Literal(value)))

def resources_url(self, value):
    '''Adds the resources-url triple to the provenance graph.

    Parameters
    @value: The value of the json (a URL).'''
    if value is not None and value != 'null' and str(value) != "":
        # Percent-encode spaces so the value is a legal URI.
        value = value.replace(" ", "%20")
        self.provenance.add((self.source,
                             self.iati['resources-url'],
                             URIRef(value)))

def resources_cache_url(self, value):
    '''Adds the resources-cache-url triple to the provenance graph.

    Parameters
    @value: The value of the json (a URL).'''
    if value is not None and value != 'null' and str(value) != "":
        value = value.replace(" ", "%20")
        self.provenance.add((self.source,
                             self.iati['resources-cache-url'],
                             URIRef(value)))

def resources_webstore_url(self, value):
    '''Adds the resources-webstore-url triple to the provenance graph.

    Parameters
    @value: The value of the json (a URL).'''
    if value is not None and value != 'null' and str(value) != "":
        value = value.replace(" ", "%20")
        self.provenance.add((self.source,
                             self.iati['resources-webstore-url'],
                             URIRef(value)))
def resources_package_id(self, value):
    '''Adds the resources-package-id triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    # Idiom fix throughout: test None by identity, skip 'null' / empties.
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['resources-package-id'],
                             Literal(value)))

def resources_mimetype_inner(self, value):
    '''Adds the resources-mimetype-inner triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['resources-mimetype-inner'],
                             Literal(value)))

def resources_webstore_last_updated(self, value):
    '''Adds the resources-webstore-last-updated triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['resources-webstore-last-updated'],
                             Literal(value)))

def resources_last_modified(self, value):
    '''Adds the resources-last-modified triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['resources-last-modified'],
                             Literal(value)))

def resources_position(self, value):
    '''Adds the resources-position triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['resources-position'],
                             Literal(value)))

def resources_size(self, value):
    '''Adds the resources-size triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['resources-size'],
                             Literal(value)))

def resources_id(self, value):
    '''Adds the resources-id triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['resources-id'],
                             Literal(value)))

def resources_resource_type(self, value):
    '''Adds the resources-type triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['resources-type'],
                             Literal(value)))

def resources_name(self, value):
    '''Adds the resources-name triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['resources-name'],
                             Literal(value)))
def tags(self, value):
    '''Adds one source-document-tag triple per entry to the provenance graph.

    Parameters
    @value: The value of the json (an iterable of tags).'''
    if value is not None and value != 'null' and str(value) != "":
        for entry in value:
            # BUG FIX: the original joined the checks with 'or', which is
            # always true, so null/empty entries were never filtered.
            if entry is not None and entry != 'null' and entry != "":
                self.provenance.add((self.source,
                                     self.iati['source-document-tag'],
                                     Literal(entry)))

def groups(self, value):
    '''Adds one source-document-group triple per entry to the provenance graph.

    Parameters
    @value: The value of the json (an iterable of groups).'''
    if value is not None and value != 'null' and str(value) != "":
        for entry in value:
            # BUG FIX: same always-true 'or' condition as tags(); use 'and'.
            if entry is not None and entry != 'null' and entry != "":
                self.provenance.add((self.source,
                                     self.iati['source-document-group'],
                                     Literal(entry)))
def name(self, value):
    '''Adds the rdfs:label of the source document to the provenance graph.

    Parameters
    @value: The value of the json.'''
    # Idiom fix throughout: test None by identity, skip 'null' / empties.
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             RDFS.label,
                             Literal(value)))

def isopen(self, value):
    '''Adds the source-document-isopen triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['source-document-isopen'],
                             Literal(value)))

def notes_rendered(self, value):
    '''Adds the source-document-notes-rendered triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['source-document-notes-rendered'],
                             Literal(value)))

def url(self, value):
    '''Adds the source-document-url triple to the provenance graph.

    Parameters
    @value: The value of the json (a URL).'''
    if value is not None and value != 'null' and str(value) != "":
        # Percent-encode spaces so the value is a legal URI.
        value = value.replace(" ", "%20")
        self.provenance.add((self.source,
                             self.iati['source-document-url'],
                             URIRef(value)))

def ckan_url(self, value):
    '''Adds the source-document-ckan-url triple to the provenance graph.

    Parameters
    @value: The value of the json (a URL).'''
    if value is not None and value != 'null' and str(value) != "":
        value = value.replace(" ", "%20")
        self.provenance.add((self.source,
                             self.iati['source-document-ckan-url'],
                             URIRef(value)))

def notes(self, value):
    '''Adds the source-document-notes triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['source-document-notes'],
                             Literal(value)))

def title(self, value):
    '''Adds the source-document-title triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['source-document-title'],
                             Literal(value)))

def ratings_average(self, value):
    '''Adds the source-document-ratings-average triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['source-document-ratings-average'],
                             Literal(value)))
def extras(self, value):
    '''Dispatches every key of the extras dict to the matching
    extras_<key> handler (dashes in keys map to underscores).

    Parameters
    @value: The value of the json (a dict of extras).'''
    if value is not None and value != 'null' and str(value) != "":
        for entry in value:
            handler = getattr(self, 'extras_' + str(entry.replace('-', '_')))
            handler(value[entry])
def extras_publisher_iati_id(self, value):
    '''Adds the extras-publisher-iati-id triple, linking to the
    OrganisationIdentifier codelist, to the provenance graph.

    Parameters
    @value: The value of the json.'''
    # Idiom fix throughout: test None by identity, skip 'null' / empties.
    if value is not None and value != 'null' and str(value) != "":
        # Collapse whitespace runs to single %20 so the URI is legal.
        self.provenance.add((self.source,
                             self.iati['extras-publisher-iati-id'],
                             self.iati['codelist/OrganisationIdentifier/' + str("%20".join(value.split()))]))

def extras_activity_period_from(self, value):
    '''Adds the extras-activity-period-from triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['extras-activity-period-from'],
                             Literal(value)))

def extras_activity_period_to(self, value):
    '''Adds the extras-activity-period-to triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['extras-activity-period-to'],
                             Literal(value)))

def extras_archive_file(self, value):
    '''Adds the extras-archive-file triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['extras-archive-file'],
                             Literal(value)))

def extras_verified(self, value):
    '''Adds the extras-verified triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['extras-verified'],
                             Literal(value)))

def extras_publisher_organization_type(self, value):
    '''Adds the extras-publisher-organization-type triple, linking to the
    OrganisationType codelist, to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        value = value.replace(" ", "%20")
        self.provenance.add((self.source,
                             self.iati['extras-publisher-organization-type'],
                             self.iati['codelist/OrganisationType/' + str(value)]))

def extras_language(self, value):
    '''Adds the extras-language triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['extras-language'],
                             Literal(value)))

def extras_country(self, value):
    '''Adds the extras-country triple, linking to the Country codelist,
    to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        value = value.replace(" ", "%20")
        self.provenance.add((self.source,
                             self.iati['extras-country'],
                             self.iati['codelist/Country/' + str(value)]))
def extras_filetype(self, value):
    '''Adds the extras-filetype triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    # Idiom fix throughout: test None by identity, skip 'null' / empties.
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['extras-filetype'],
                             Literal(value)))

def extras_record_updated(self, value):
    '''Adds the extras-record-updated triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['extras-record-updated'],
                             Literal(value)))

def extras_activity_count(self, value):
    '''Adds the extras-activity-count triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['extras-activity-count'],
                             Literal(value)))

def extras_publisher_country(self, value):
    '''Adds the extras-publisher-country triple, linking to the Country
    codelist, to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        value = value.replace(" ", "%20")
        self.provenance.add((self.source,
                             self.iati['extras-publisher-country'],
                             self.iati['codelist/Country/' + str(value)]))

def extras_data_updated(self, value):
    '''Adds the extras-data-updated triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['extras-data-updated'],
                             Literal(value)))

def extras_publishertype(self, value):
    '''Adds the extras-publishertype triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['extras-publishertype'],
                             Literal(value)))

def extras_donors(self, value):
    '''Adds one extras-donor triple per entry to the provenance graph.

    Parameters
    @value: The value of the json (an iterable of donors).'''
    if value is not None and value != 'null' and str(value) != "":
        for entry in value:
            self.provenance.add((self.source,
                                 self.iati['extras-donor'],
                                 Literal(entry)))

def extras_donors_country(self, value):
    '''Adds one extras-donor-country triple per entry, linking to the
    Country codelist, to the provenance graph.

    Parameters
    @value: The value of the json (an iterable of countries).'''
    if value is not None and value != 'null' and str(value) != "":
        for entry in value:
            entry = entry.replace(" ", "%20")
            self.provenance.add((self.source,
                                 self.iati['extras-donor-country'],
                                 self.iati['codelist/Country/' + str(entry)]))

def extras_donors_type(self, value):
    '''Adds one extras-donor-type triple per entry to the provenance graph.

    Parameters
    @value: The value of the json (an iterable of donor types).'''
    if value is not None and value != 'null' and str(value) != "":
        for entry in value:
            self.provenance.add((self.source,
                                 self.iati['extras-donor-type'],
                                 Literal(entry)))

def extras_department(self, value):
    '''Adds the extras-department triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['extras-department'],
                             Literal(value)))

def ratings_count(self, value):
    '''Adds the source-document-ratings-count triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['source-document-ratings-count'],
                             Literal(value)))

def revision_func_id(self, value):
    '''Adds the source-document-revision-id triple to the provenance graph.

    Parameters
    @value: The value of the json.'''
    if value is not None and value != 'null' and str(value) != "":
        self.provenance.add((self.source,
                             self.iati['source-document-revision-id'],
                             Literal(value)))
| 46.494516
| 145
| 0.444275
| 16,617
| 190,767
| 4.985738
| 0.019077
| 0.063924
| 0.040122
| 0.047509
| 0.870848
| 0.839779
| 0.822748
| 0.798547
| 0.769856
| 0.748853
| 0
| 0.003328
| 0.450345
| 190,767
| 4,102
| 146
| 46.505851
| 0.786784
| 0.013058
| 0
| 0.724252
| 0
| 0
| 0.092712
| 0.022266
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.001246
| null | null | 0.000831
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c3eedcd4ef708caf58d14457cd87e7053b6f94ba
| 5,251
|
py
|
Python
|
optionmodels/analyticalmethods.py
|
GBERESEARCH/optionmodels
|
4f2528317eb8bf38238fcf21a0fa286758385f69
|
[
"MIT"
] | 2
|
2021-02-08T22:05:12.000Z
|
2021-09-10T04:29:58.000Z
|
optionmodels/analyticalmethods.py
|
GBERESEARCH/optionmodels
|
4f2528317eb8bf38238fcf21a0fa286758385f69
|
[
"MIT"
] | null | null | null |
optionmodels/analyticalmethods.py
|
GBERESEARCH/optionmodels
|
4f2528317eb8bf38238fcf21a0fa286758385f69
|
[
"MIT"
] | 2
|
2020-12-21T08:36:45.000Z
|
2021-09-10T04:29:59.000Z
|
"""
Analytical option pricing models
"""
import numpy as np
import scipy.stats as si
from optionmodels.utils import Utils
# pylint: disable=invalid-name
class AnalyticalMethods():
    """
    Analytical option pricing models.
    """

    @staticmethod
    def black_scholes_merton(**kwargs):
        """
        Black-Scholes-Merton Option price.

        Parameters
        ----------
        S : Float
            Stock Price. The default is 100.
        K : Float
            Strike Price. The default is 100.
        T : Float
            Time to Maturity. The default is 0.25 (3 Months).
        r : Float
            Interest Rate. The default is 0.005 (50bps)
        q : Float
            Dividend Yield. The default is 0.
        sigma : Float
            Implied Volatility. The default is 0.2 (20%).
        option : Str
            Type of option. 'put' or 'call'. The default is 'call'.

        Returns
        -------
        opt_price : Float
            Option Price.

        Raises
        ------
        ValueError
            If option is neither 'call' nor 'put'.
        """
        # Update pricing input parameters to default if not supplied.
        # NOTE(review): inputs are only initialised when refresh=True is
        # passed; callers are expected to always supply it -- confirm.
        if 'refresh' in kwargs and kwargs['refresh']:
            params = Utils.init_params(kwargs)
            S = params['S']
            K = params['K']
            T = params['T']
            r = params['r']
            q = params['q']
            sigma = params['sigma']
            option = params['option']

        b = r - q                      # cost of carry
        carry = np.exp((b - r) * T)
        d1 = ((np.log(S / K) + (b + (0.5 * sigma ** 2)) * T)
              / (sigma * np.sqrt(T)))
        d2 = ((np.log(S / K) + (b - (0.5 * sigma ** 2)) * T)
              / (sigma * np.sqrt(T)))

        # Cumulative normal distribution function
        Nd1 = si.norm.cdf(d1, 0.0, 1.0)
        minusNd1 = si.norm.cdf(-d1, 0.0, 1.0)
        Nd2 = si.norm.cdf(d2, 0.0, 1.0)
        minusNd2 = si.norm.cdf(-d2, 0.0, 1.0)

        # BUG FIX: the two branches were independent 'if' statements, so an
        # unknown option fell through to an UnboundLocalError on opt_price;
        # fail fast with a clear error instead.
        if option == "call":
            opt_price = ((S * carry * Nd1) - (K * np.exp(-r * T) * Nd2))
        elif option == 'put':
            opt_price = ((K * np.exp(-r * T) * minusNd2) -
                         (S * carry * minusNd1))
        else:
            raise ValueError("option must be 'call' or 'put'")

        return opt_price

    @staticmethod
    def black_scholes_merton_vega(**kwargs):
        """
        Black-Scholes-Merton Option Vega.

        Parameters
        ----------
        S : Float
            Stock Price. The default is 100.
        K : Float
            Strike Price. The default is 100.
        T : Float
            Time to Maturity. The default is 0.25 (3 Months).
        r : Float
            Interest Rate. The default is 0.005 (50bps)
        q : Float
            Dividend Yield. The default is 0.
        sigma : Float
            Implied Volatility. The default is 0.2 (20%).
        option : Str
            Type of option. 'put' or 'call'. The default is 'call'.

        Returns
        -------
        opt_vega : Float
            Option Vega.
        """
        # Update pricing input parameters to default if not supplied
        if 'refresh' in kwargs and kwargs['refresh']:
            params = Utils.init_params(kwargs)
            S = params['S']
            K = params['K']
            T = params['T']
            r = params['r']
            q = params['q']
            sigma = params['sigma']

        b = r - q                      # cost of carry
        carry = np.exp((b - r) * T)
        d1 = ((np.log(S / K) + (b + (0.5 * sigma ** 2)) * T)
              / (sigma * np.sqrt(T)))
        # Standard normal pdf at d1
        nd1 = (1 / np.sqrt(2 * np.pi)) * (np.exp(-d1 ** 2 * 0.5))
        opt_vega = S * carry * nd1 * np.sqrt(T)

        return opt_vega

    @staticmethod
    def black_76(**kwargs):
        """
        Black 76 Futures Option price.

        Parameters
        ----------
        F : Float
            Discounted Futures Price.
        K : Float
            Strike Price. The default is 100.
        T : Float
            Time to Maturity. The default is 0.25 (3 Months).
        r : Float
            Interest Rate. The default is 0.005 (50bps)
        sigma : Float
            Implied Volatility. The default is 0.2 (20%).
        option : Str
            Type of option. 'put' or 'call'. The default is 'call'.

        Returns
        -------
        opt_price : Float
            Option Price.

        Raises
        ------
        ValueError
            If option is neither 'call' nor 'put'.
        """
        # Update pricing input parameters to default if not supplied
        if 'refresh' in kwargs and kwargs['refresh']:
            params = Utils.init_params(kwargs)
            F = params['F']
            K = params['K']
            T = params['T']
            r = params['r']
            sigma = params['sigma']
            option = params['option']

        carry = np.exp(-r * T)
        d1 = (np.log(F / K) + (0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
        d2 = (np.log(F / K) + (-0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))

        # Cumulative normal distribution function
        Nd1 = si.norm.cdf(d1, 0.0, 1.0)
        minusNd1 = si.norm.cdf(-d1, 0.0, 1.0)
        Nd2 = si.norm.cdf(d2, 0.0, 1.0)
        minusNd2 = si.norm.cdf(-d2, 0.0, 1.0)

        # BUG FIX: same independent-'if' fall-through as black_scholes_merton.
        if option == "call":
            opt_price = ((F * carry * Nd1) - (K * np.exp(-r * T) * Nd2))
        elif option == 'put':
            opt_price = ((K * np.exp(-r * T) * minusNd2)
                         - (F * carry * minusNd1))
        else:
            raise ValueError("option must be 'call' or 'put'")

        return opt_price
| 29.01105
| 77
| 0.474195
| 657
| 5,251
| 3.759513
| 0.155251
| 0.076923
| 0.092308
| 0.057895
| 0.847368
| 0.772874
| 0.751822
| 0.751822
| 0.751822
| 0.742105
| 0
| 0.046067
| 0.392306
| 5,251
| 180
| 78
| 29.172222
| 0.727985
| 0.349267
| 0
| 0.704225
| 0
| 0
| 0.034023
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042254
| false
| 0
| 0.042254
| 0
| 0.140845
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7f1373366637fc37de663661ac76f02f695988f2
| 10,090
|
py
|
Python
|
rl_games/algos_tf14/models.py
|
cremebrule/rl_games
|
fc996a0d00438f6747fef86959c8d31ecd7880f9
|
[
"MIT"
] | 193
|
2019-05-28T01:48:56.000Z
|
2022-03-31T07:56:37.000Z
|
rl_games/algos_tf14/models.py
|
cremebrule/rl_games
|
fc996a0d00438f6747fef86959c8d31ecd7880f9
|
[
"MIT"
] | 35
|
2020-01-28T22:15:51.000Z
|
2022-03-28T22:10:54.000Z
|
rl_games/algos_tf14/models.py
|
cremebrule/rl_games
|
fc996a0d00438f6747fef86959c8d31ecd7880f9
|
[
"MIT"
] | 37
|
2019-06-28T01:09:53.000Z
|
2022-03-26T09:14:06.000Z
|
import tensorflow as tf
import numpy as np
import tensorflow_probability as tfp
from rl_games.algos_tf14 import networks
tfd = tfp.distributions
def entry_stop_gradients(target, mask):
    """Let gradients flow through target only where mask == 1.

    Entries where mask == 0 are still present in the output value but are
    wrapped in tf.stop_gradient, so no gradient reaches them.
    """
    blocked = tf.abs(mask - 1)
    passthrough = mask * target
    return tf.stop_gradient(blocked * target) + passthrough
class BaseModel(object):
    """Common base class for policy/value models.

    Subclasses override is_rnn() when they carry recurrent state.
    """

    def is_rnn(self):
        """Return True when the model is recurrent; feed-forward by default."""
        return False
class ModelA2C(BaseModel):
    """Discrete-action A2C model: builds logits/value ops and the sampling,
    entropy and negative-log-probability ops around them."""

    def __init__(self, network):
        # network: callable building the TF graph (project-defined).
        self.network = network

    def __call__(self, dict, reuse=False):
        # NOTE: the parameter shadows the builtin 'dict'; kept for
        # interface compatibility with existing callers.
        name = dict['name']
        inputs = dict['inputs']
        actions_num = dict['actions_num']
        prev_actions_ph = dict['prev_actions_ph']
        action_mask_ph = dict.get('action_mask_ph', None)
        # Training mode is signalled by the presence of previous actions.
        is_train = prev_actions_ph is not None
        logits, value = self.network(name, inputs=inputs, actions_num=actions_num, continuous=False, is_train=is_train,reuse=reuse)
        #if action_mask_ph is not None:
        #masks = tf.layers.dense(tf.to_float(action_mask_ph), actions_num, activation=tf.nn.elu)
        #logits = masks + logits
        #logits = entry_stop_gradients(logits, tf.to_float(action_mask_ph))
        probs = tf.nn.softmax(logits)
        # Gumbel Softmax
        if not is_train:
            # Sample an action via the Gumbel-max trick: argmax of
            # logits perturbed with -log(-log(U)) noise.
            u = tf.random_uniform(tf.shape(logits), dtype=logits.dtype)
            rand_logits = logits - tf.log(-tf.log(u))
            if action_mask_ph is not None:
                # Push masked-out actions to -inf so they cannot be sampled.
                inf_mask = tf.maximum(tf.log(tf.to_float(action_mask_ph)), tf.float32.min)
                rand_logits = rand_logits + inf_mask
                logits = logits + inf_mask
            action = tf.argmax(rand_logits, axis=-1)
            one_hot_actions = tf.one_hot(action, actions_num)
        # Policy entropy as cross-entropy of the distribution with itself.
        entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=probs)
        if not is_train:
            neglogp = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=tf.stop_gradient(one_hot_actions))
            return neglogp, value, action, entropy, logits
        else:
            # Training: score the previously taken (integer) actions.
            prev_neglogp = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=prev_actions_ph)
            return prev_neglogp, value, None, entropy
class ModelA2CContinuous(BaseModel):
    """Continuous-action A2C model with a Normal(mu, sigma) policy."""

    def __init__(self, network):
        # network: callable building the TF graph (project-defined).
        self.network = network

    def __call__(self, dict, reuse=False):
        # NOTE: the parameter shadows the builtin 'dict'; kept for
        # interface compatibility with existing callers.
        name = dict['name']
        inputs = dict['inputs']
        actions_num = dict['actions_num']
        prev_actions_ph = dict['prev_actions_ph']
        # Training mode is signalled by the presence of previous actions.
        is_train = prev_actions_ph is not None
        mu, sigma, value = self.network(name, inputs=inputs, actions_num=actions_num, continuous=True, is_train = is_train, reuse=reuse)
        norm_dist = tfd.Normal(mu, sigma)

        action = tf.squeeze(norm_dist.sample(1), axis=0)
        entropy = tf.reduce_mean(tf.reduce_sum(norm_dist.entropy(), axis=-1))
        # IDIOM FIX: use identity comparison instead of '== None' — on a
        # tensor, '==' may build an element-wise comparison rather than
        # testing for None.
        if prev_actions_ph is None:
            neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(action)+ 1e-6), axis=-1)
            return neglogp, value, action, entropy, mu, sigma

        prev_neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(prev_actions_ph) + 1e-6), axis=-1)
        return prev_neglogp, value, action, entropy, mu, sigma
class ModelA2CContinuousLogStd(BaseModel):
    """Continuous-action A2C model parameterised by mean and log-std;
    actions are sampled with the reparameterisation mean + std * eps."""

    def __init__(self, network):
        # network: callable building the TF graph (project-defined).
        self.network = network

    def __call__(self, dict, reuse=False):
        # NOTE: the parameter shadows the builtin 'dict'; kept for
        # interface compatibility with existing callers.
        name = dict['name']
        inputs = dict['inputs']
        actions_num = dict['actions_num']
        prev_actions_ph = dict['prev_actions_ph']
        is_train = prev_actions_ph is not None
        # NOTE(review): is_train is computed above but the network is always
        # called with is_train=True here -- confirm this is intentional.
        mean, logstd, value = self.network(name, inputs=inputs, actions_num=actions_num, continuous=True, is_train=True, reuse=reuse)
        std = tf.exp(logstd)
        norm_dist = tfd.Normal(mean, std)
        # Reparameterised sample: mean + std * standard-normal noise.
        action = mean + std * tf.random_normal(tf.shape(mean))
        #action = tf.squeeze(norm_dist.sample(1), axis=0)
        #action = tf.clip_by_value(action, -1.0, 1.0)

        entropy = tf.reduce_mean(tf.reduce_sum(norm_dist.entropy(), axis=-1))
        if prev_actions_ph is None:
            neglogp = self.neglogp(action, mean, std, logstd)
            return neglogp, value, action, entropy, mean, std

        prev_neglogp = self.neglogp(prev_actions_ph, mean, std, logstd)
        return prev_neglogp, value, action, entropy, mean, std

    def neglogp(self, x, mean, std, logstd):
        # Negative log-density of a diagonal Gaussian at x.
        return 0.5 * tf.reduce_sum(tf.square((x - mean) / std), axis=-1) \
            + 0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(x)[-1]) \
            + tf.reduce_sum(logstd, axis=-1)
class LSTMModelA2CContinuousLogStd(BaseModel):
    """Recurrent (LSTM) continuous-action A2C model parameterised by
    mean and log-std."""

    def __init__(self, network):
        # network: callable building the TF graph (project-defined).
        self.network = network

    def is_rnn(self):
        """This model carries recurrent LSTM state."""
        return True

    def is_single_batched(self):
        return False

    def neglogp(self, x, mean, std, logstd):
        # Negative log-density of a diagonal Gaussian at x.
        return 0.5 * tf.reduce_sum(tf.square((x - mean) / std), axis=-1) \
            + 0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(x)[-1]) \
            + tf.reduce_sum(logstd, axis=-1)

    def __call__(self, dict, reuse=False):
        # NOTE: the parameter shadows the builtin 'dict'; kept for
        # interface compatibility with existing callers.
        name = dict['name']
        inputs = dict['inputs']
        actions_num = dict['actions_num']
        prev_actions_ph = dict['prev_actions_ph']
        games_num = dict['games_num']
        batch_num = dict['batch_num']
        # Training mode is signalled by the presence of previous actions.
        is_train = prev_actions_ph is not None
        mu, logstd, value, states_ph, masks_ph, lstm_state, initial_state = self.network(name=name, inputs=inputs, actions_num=actions_num,
        games_num=games_num, batch_num=batch_num, continuous=True, is_train=is_train, reuse=reuse)
        std = tf.exp(logstd)
        # Reparameterised sample: mu + std * standard-normal noise.
        action = mu + std * tf.random_normal(tf.shape(mu))
        norm_dist = tfd.Normal(mu, std)

        entropy = tf.reduce_mean(tf.reduce_sum(norm_dist.entropy(), axis=-1))
        # IDIOM FIX: identity comparison instead of '== None' (element-wise
        # '==' on a tensor does not test for None).
        if prev_actions_ph is None:
            neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(action)+ 1e-6), axis=-1)
            return neglogp, value, action, entropy, mu, std, states_ph, masks_ph, lstm_state, initial_state

        prev_neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(prev_actions_ph) + 1e-6), axis=-1)
        return prev_neglogp, value, action, entropy, mu, std, states_ph, masks_ph, lstm_state, initial_state
class LSTMModelA2CContinuous(BaseModel):
    """Recurrent (LSTM) continuous-action A2C model parameterised by
    mean and variance."""

    def __init__(self, network):
        # network: callable building the TF graph (project-defined).
        self.network = network

    def is_rnn(self):
        """This model carries recurrent LSTM state."""
        return True

    def is_single_batched(self):
        return False

    def __call__(self, dict, reuse=False):
        # NOTE: the parameter shadows the builtin 'dict'; kept for
        # interface compatibility with existing callers.
        name = dict['name']
        inputs = dict['inputs']
        actions_num = dict['actions_num']
        prev_actions_ph = dict['prev_actions_ph']
        games_num = dict['games_num']
        batch_num = dict['batch_num']
        # Training mode is signalled by the presence of previous actions.
        is_train = prev_actions_ph is not None
        mu, var, value, states_ph, masks_ph, lstm_state, initial_state = self.network(name=name, inputs=inputs, actions_num=actions_num,
        games_num=games_num, batch_num=batch_num, continuous=True, is_train=is_train, reuse=reuse)
        sigma = tf.sqrt(var)
        norm_dist = tfd.Normal(mu, sigma)

        action = tf.squeeze(norm_dist.sample(1), axis=0)
        #action = tf.clip_by_value(action, -1.0, 1.0)

        entropy = tf.reduce_mean(tf.reduce_sum(norm_dist.entropy(), axis=-1))
        # IDIOM FIX: identity comparison instead of '== None' (element-wise
        # '==' on a tensor does not test for None).
        if prev_actions_ph is None:
            neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(action)+ 1e-6), axis=-1)
            return neglogp, value, action, entropy, mu, sigma, states_ph, masks_ph, lstm_state, initial_state

        prev_neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(prev_actions_ph) + 1e-6), axis=-1)
        return prev_neglogp, value, action, entropy, mu, sigma, states_ph, masks_ph, lstm_state, initial_state
class LSTMModelA2C(BaseModel):
    """Recurrent (LSTM) discrete-action A2C model."""

    def __init__(self, network):
        # network: callable building the TF graph (project-defined).
        self.network = network

    def is_rnn(self):
        # This model carries recurrent LSTM state.
        return True

    def __call__(self, dict, reuse=False):
        # NOTE: the parameter shadows the builtin 'dict'; kept for
        # interface compatibility with existing callers.
        name = dict['name']
        inputs = dict['inputs']
        actions_num = dict['actions_num']
        prev_actions_ph = dict['prev_actions_ph']
        games_num = dict['games_num']
        batch_num = dict['batch_num']
        action_mask_ph = dict.get('action_mask_ph', None)
        # Training mode is signalled by the presence of previous actions.
        is_train = prev_actions_ph is not None

        logits, value, states_ph, masks_ph, lstm_state, initial_state = self.network(name=name, inputs=inputs, actions_num=actions_num,
        games_num=games_num, batch_num=batch_num, continuous=False, is_train=is_train, reuse=reuse)

        if not is_train:
            # Sample an action via the Gumbel-max trick: argmax of
            # logits perturbed with -log(-log(U)) noise.
            u = tf.random_uniform(tf.shape(logits), dtype=logits.dtype)
            rand_logits = logits - tf.log(-tf.log(u))
            if action_mask_ph is not None:
                # Push masked-out actions to -inf so they cannot be sampled.
                inf_mask = tf.maximum(tf.log(tf.to_float(action_mask_ph)), tf.float32.min)
                rand_logits = rand_logits + inf_mask
                logits = logits + inf_mask

            action = tf.argmax(rand_logits, axis=-1)
            one_hot_actions = tf.one_hot(action, actions_num)

        # Policy entropy as cross-entropy of the distribution with itself.
        entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=tf.nn.softmax(logits))
        if not is_train:
            neglogp = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=one_hot_actions)
            return neglogp, value, action, entropy, states_ph, masks_ph, lstm_state, initial_state, logits

        # Training: score the previously taken (integer) actions.
        prev_neglogp = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=prev_actions_ph)
        return prev_neglogp, value, None, entropy, states_ph, masks_ph, lstm_state, initial_state
class AtariDQN(BaseModel):
    """DQN model wrapper for Atari: delegates all graph construction to the
    configured network-builder callable."""

    def __init__(self, network):
        # network: callable that builds the Q-network graph.
        self.network = network

    def __call__(self, dict, reuse=False):
        """Build the Q-network described by `dict`.

        `dict` must provide 'name', 'inputs' and 'actions_num'; the result of
        the wrapped network builder is returned unchanged.
        """
        # TODO: fix is_train -- currently it is inferred from the scope name,
        # which only distinguishes the 'agent' network.
        training = (dict['name'] == 'agent')
        return self.network(
            name=dict['name'],
            inputs=dict['inputs'],
            actions_num=dict['actions_num'],
            is_train=training,
            reuse=reuse,
        )
| 41.352459
| 167
| 0.637463
| 1,386
| 10,090
| 4.363636
| 0.095238
| 0.051257
| 0.060185
| 0.041336
| 0.873181
| 0.866071
| 0.828704
| 0.825231
| 0.811839
| 0.793155
| 0
| 0.009675
| 0.25223
| 10,090
| 243
| 168
| 41.522634
| 0.791915
| 0.035382
| 0
| 0.710227
| 0
| 0
| 0.033461
| 0
| 0
| 0
| 0
| 0.004115
| 0
| 1
| 0.130682
| false
| 0
| 0.022727
| 0.045455
| 0.323864
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7f1b320787a0eb96139c1bba8952c1bd2ec171d0
| 92
|
py
|
Python
|
timesheet/__init__.py
|
tsnowlan/timesheet
|
4bf852d72c358a6ad641b14f4d851a08d46d26ae
|
[
"MIT"
] | null | null | null |
timesheet/__init__.py
|
tsnowlan/timesheet
|
4bf852d72c358a6ad641b14f4d851a08d46d26ae
|
[
"MIT"
] | null | null | null |
timesheet/__init__.py
|
tsnowlan/timesheet
|
4bf852d72c358a6ad641b14f4d851a08d46d26ae
|
[
"MIT"
] | null | null | null |
from .cli import run_cli
from .version import __version__
def main():
    """Console entry point: invoke the timesheet CLI with an empty shared
    context object (presumably a Click-style `obj` -- confirm in cli module)."""
    run_cli(obj={})
| 13.142857
| 32
| 0.706522
| 14
| 92
| 4.214286
| 0.571429
| 0.20339
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184783
| 92
| 6
| 33
| 15.333333
| 0.786667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
6158bb4e85891e4dd923ec7c3a4d746da263c32c
| 51,753
|
py
|
Python
|
src/genie/libs/parser/iosxe/tests/test_show_igmp.py
|
nujo/genieparser
|
083b01efc46afc32abe1a1858729578beab50cd3
|
[
"Apache-2.0"
] | 2
|
2021-01-27T03:37:39.000Z
|
2021-01-27T03:40:50.000Z
|
src/genie/libs/parser/iosxe/tests/test_show_igmp.py
|
nujo/genieparser
|
083b01efc46afc32abe1a1858729578beab50cd3
|
[
"Apache-2.0"
] | 1
|
2020-08-01T00:23:31.000Z
|
2020-08-01T00:40:05.000Z
|
src/genie/libs/parser/iosxe/tests/test_show_igmp.py
|
nujo/genieparser
|
083b01efc46afc32abe1a1858729578beab50cd3
|
[
"Apache-2.0"
] | null | null | null |
# Python
import unittest
from unittest.mock import Mock
# ATS
from pyats.topology import Device
# Metaparset
from genie.metaparser.util.exceptions import SchemaEmptyParserError, \
SchemaMissingKeyError
# Parser
from genie.libs.parser.iosxe.show_igmp import ShowIpIgmpInterface, \
ShowIpIgmpGroupsDetail, \
ShowIpIgmpSsmMapping
# ==================================================
# Unit test for 'show ip igmp interface'
# Unit test for 'show ip igmp vrf <WORD> interface'
# ==================================================
class test_show_ip_igmp_interface(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"vrf": {
"default": {
"interface": {
"GigabitEthernet1": {
"querier_timeout": 266,
"configured_querier_timeout": 266,
"max_groups": 10,
"multicast": {
"designated_router": "10.1.2.1",
"ttl_threshold": 0,
"routing_enable": True,
"dr_this_system": True
},
"group_policy": "test2",
"interface_status": "up",
"query_max_response_time": 10,
"router_version": 3,
"counters": {
"joins": 13,
"leaves": 3,
},
"interface_address": "10.1.2.1/24",
"joined_group": {
"239.3.3.3": {
"number_of_users": 1
},
"224.0.1.40": {
"number_of_users": 1
},
"239.1.1.1": {
"number_of_users": 1
},
"239.4.4.4": {
"number_of_users": 1
},
"239.2.2.2": {
"number_of_users": 1
}
},
"oper_status": "up",
"active_groups": 1,
"last_member_query_count": 2,
"query_interval": 133,
"enable": True,
"querier": "10.1.2.1",
"query_this_system": True,
"configured_query_interval": 133,
"last_member_query_interval": 100,
"host_version": 3
}
},
"global_active_groups": 1,
"global_max_groups": 20
}
}
}
golden_output = {'execute.return_value': '''\
Global IGMP State Limit : 1 active out of 20 max
GigabitEthernet1 is up, line protocol is up
Internet address is 10.1.2.1/24
IGMP is enabled on interface
Current IGMP host version is 3
Current IGMP router version is 3
IGMP query interval is 133 seconds
IGMP configured query interval is 133 seconds
IGMP querier timeout is 266 seconds
IGMP configured querier timeout is 266 seconds
IGMP max query response time is 10 seconds
Last member query count is 2
Last member query response interval is 100 ms
Inbound IGMP access group is test2
IGMP activity: 13 joins, 3 leaves
Interface IGMP State Limit : 1 active out of 10 max
Multicast routing is enabled on interface
Multicast TTL threshold is 0
Multicast designated router (DR) is 10.1.2.1 (this system)
IGMP querying router is 10.1.2.1 (this system)
Multicast groups joined by this system (number of users):
224.0.1.40(1) 239.4.4.4(1) 239.3.3.3(1)
239.2.2.2(1) 239.1.1.1(1)
'''}
golden_parsed_output_1 = {
"vrf": {
"VRF1": {
"interface": {
"GigabitEthernet2": {
"querier_timeout": 266,
"configured_querier_timeout": 266,
"max_groups": 10,
"multicast": {
"designated_router": "10.186.2.1",
"ttl_threshold": 0,
"routing_enable": True,
"routing_table": "VRF1",
"dr_this_system": True
},
"group_policy": "test2",
"interface_status": "up",
"query_max_response_time": 10,
"router_version": 3,
"counters": {
"joins": 9,
"leaves": 0,
},
"interface_address": "10.186.2.1/24",
"joined_group": {
"224.0.1.40": {
"number_of_users": 1
},
"239.1.1.1": {
"number_of_users": 1
},
"239.2.2.2": {
"number_of_users": 1
},
"239.3.3.3": {
"number_of_users": 1
},
"239.4.4.4": {
"number_of_users": 1
}
},
"oper_status": "up",
"active_groups": 0,
"last_member_query_count": 2,
"query_interval": 133,
"enable": True,
"querier": "10.186.2.1",
"query_this_system": True,
"configured_query_interval": 133,
"last_member_query_interval": 100,
"host_version": 3
}
},
"global_active_groups": 0,
"global_max_groups": 20
}
}
}
golden_output_1 = {'execute.return_value': '''\
Global IGMP State Limit : 0 active out of 20 max
GigabitEthernet2 is up, line protocol is up
Internet address is 10.186.2.1/24
IGMP is enabled on interface
Multicast Routing table VRF1
Current IGMP host version is 3
Current IGMP router version is 3
IGMP query interval is 133 seconds
IGMP configured query interval is 133 seconds
IGMP querier timeout is 266 seconds
IGMP configured querier timeout is 266 seconds
IGMP max query response time is 10 seconds
Last member query count is 2
Last member query response interval is 100 ms
Inbound IGMP access group is test2
IGMP activity: 9 joins, 0 leaves
Interface IGMP State Limit : 0 active out of 10 max
Multicast routing is enabled on interface
Multicast TTL threshold is 0
Multicast designated router (DR) is 10.186.2.1 (this system)
IGMP querying router is 10.186.2.1 (this system)
Multicast groups joined by this system (number of users):
224.0.1.40(1) 239.1.1.1(1) 239.2.2.2(1)
239.3.3.3(1) 239.4.4.4(1)
'''}
golden_output_2 = {'execute.return_value': '''
Loopback8 is up, line protocol is up
Internet protocol processing disabled
GigabitEthernet15 is down, line protocol is down
Internet protocol processing disabled
'''}
golden_parsed_output_2 = {
'vrf': {
'default': {
'interface': {
'GigabitEthernet15': {
'interface_status': 'down',
'internet_protocol_processing': False,
'oper_status': 'down',
},
'Loopback8': {
'interface_status': 'up',
'internet_protocol_processing': False,
'oper_status': 'up',
},
},
},
},
}
    def test_empty(self):
        """Empty device output must make the parser raise SchemaEmptyParserError."""
        self.device = Mock(**self.empty_output)
        obj = ShowIpIgmpInterface(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = obj.parse()
    def test_golden_default_vrf(self):
        """Parse default-VRF 'show ip igmp interface' output and compare to the golden dict."""
        self.device = Mock(**self.golden_output)
        obj = ShowIpIgmpInterface(device=self.device)
        parsed_output = obj.parse()
        self.assertEqual(parsed_output,self.golden_parsed_output)
    def test_golden_non_default_vrf(self):
        """Parse VRF1 'show ip igmp vrf VRF1 interface' output and compare to the golden dict."""
        self.device = Mock(**self.golden_output_1)
        obj = ShowIpIgmpInterface(device=self.device)
        parsed_output = obj.parse(vrf='VRF1')
        self.assertEqual(parsed_output,self.golden_parsed_output_1)
    def test_golden_2(self):
        """Interfaces with 'Internet protocol processing disabled' still parse (golden_output_2)."""
        self.device = Mock(**self.golden_output_2)
        obj = ShowIpIgmpInterface(device=self.device)
        parsed_output = obj.parse()
        self.assertEqual(parsed_output,self.golden_parsed_output_2)
# =====================================================
# Unit test for 'show ip igmp groups detail'
# Unit test for 'show ip igmp vrf <WORD> groups detail'
# =====================================================
class test_show_ip_igmp_groups_detail(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"vrf": {
"default": {
"interface": {
"GigabitEthernet1": {
"group": {
"239.1.1.1": {
"group_mode": "include",
"up_time": "00:05:06",
"flags": "L U",
"last_reporter": "10.1.2.1"
},
"239.5.5.5": {
"group_mode": "include",
"up_time": "00:05:06",
"flags": "SG",
"last_reporter": "0.0.0.0"
},
"239.4.4.4": {
"group_mode": "include",
"up_time": "00:05:06",
"flags": "L",
"source": {
"10.4.1.2": {
"up_time": "00:05:06",
"flags": "L",
"forward": True,
"csr_exp": "stopped",
"v3_exp": "stopped",
}
},
"last_reporter": "10.1.2.1"
},
"239.8.8.8": {
"group_mode": "include",
"up_time": "00:05:06",
"flags": "SS",
"source": {
"10.16.2.1": {
"up_time": "00:05:06",
"flags": "S",
"forward": True,
"csr_exp": "stopped",
"v3_exp": "stopped",
},
"10.16.2.2": {
"up_time": "00:05:06",
"flags": "S",
"forward": True,
"csr_exp": "stopped",
"v3_exp": "stopped",
}
},
"last_reporter": "0.0.0.0"
},
"239.6.6.6": {
"group_mode": "include",
"up_time": "00:05:06",
"flags": "SG",
"last_reporter": "0.0.0.0"
},
"239.7.7.7": {
"group_mode": "include",
"up_time": "00:05:06",
"flags": "SS",
"source": {
"10.16.2.1": {
"up_time": "00:05:06",
"flags": "S",
"forward": True,
"csr_exp": "stopped",
"v3_exp": "stopped",
}
},
"last_reporter": "0.0.0.0"
},
"239.9.9.9": {
"group_mode": "exclude",
"up_time": "00:23:15",
"flags": "Ac",
"expire": "00:06:06",
"last_reporter": "10.1.2.2"
},
"239.2.2.2": {
"group_mode": "include",
"up_time": "00:05:06",
"flags": "L U",
"last_reporter": "10.1.2.1"
},
"224.0.1.40": {
"group_mode": "include",
"up_time": "00:25:33",
"flags": "L U",
"last_reporter": "10.1.2.1"
},
"239.3.3.3": {
"group_mode": "include",
"up_time": "00:05:06",
"flags": "L",
"source": {
"10.4.1.1": {
"up_time": "00:05:06",
"flags": "L",
"forward": True,
"csr_exp": "stopped",
"v3_exp": "stopped",
}
},
"last_reporter": "10.1.2.1"
}
},
"static_group": {
"239.6.6.6 *": {
"group": "239.6.6.6",
"source": "*",
"up_time": "00:05:06",
"flags": "SG",
"last_reporter": "0.0.0.0"
},
"239.5.5.5 *": {
"group": "239.5.5.5",
"source": "*",
"up_time": "00:05:06",
"flags": "SG",
"last_reporter": "0.0.0.0"
}
},
"join_group": {
"239.8.8.8 10.16.2.2": {
"group": "239.8.8.8",
"source": "10.16.2.2",
"up_time": "00:05:06",
"forward": True,
"csr_exp": "stopped",
"v3_exp": "stopped",
"flags": "SS",
"last_reporter": "0.0.0.0"
},
"239.8.8.8 10.16.2.1": {
"group": "239.8.8.8",
"source": "10.16.2.1",
"up_time": "00:05:06",
"forward": True,
"csr_exp": "stopped",
"v3_exp": "stopped",
"flags": "SS",
"last_reporter": "0.0.0.0"
},
"239.4.4.4 10.4.1.2": {
"group": "239.4.4.4",
"source": "10.4.1.2",
"up_time": "00:05:06",
"forward": True,
"csr_exp": "stopped",
"v3_exp": "stopped",
"flags": "L",
"last_reporter": "10.1.2.1"
},
"239.9.9.9 *": {
"group": "239.9.9.9",
"source": "*",
"expire": "00:06:06",
"up_time": "00:23:15",
"flags": "Ac",
"last_reporter": "10.1.2.2"
},
"224.0.1.40 *": {
"group": "224.0.1.40",
"source": "*",
"up_time": "00:25:33",
"flags": "L U",
"last_reporter": "10.1.2.1"
},
"239.7.7.7 10.16.2.1": {
"group": "239.7.7.7",
"source": "10.16.2.1",
"up_time": "00:05:06",
"forward": True,
"csr_exp": "stopped",
"v3_exp": "stopped",
"flags": "SS",
"last_reporter": "0.0.0.0"
},
"239.3.3.3 10.4.1.1": {
"group": "239.3.3.3",
"source": "10.4.1.1",
"up_time": "00:05:06",
"forward": True,
"csr_exp": "stopped",
"v3_exp": "stopped",
"flags": "L",
"last_reporter": "10.1.2.1"
},
"239.2.2.2 *": {
"group": "239.2.2.2",
"source": "*",
"up_time": "00:05:06",
"flags": "L U",
"last_reporter": "10.1.2.1"
},
"239.1.1.1 *": {
"group": "239.1.1.1",
"source": "*",
"up_time": "00:05:06",
"flags": "L U",
"last_reporter": "10.1.2.1"
}
}
}
}
}
}
}
golden_output = {'execute.return_value': '''\
Flags: L - Local, U - User, SG - Static Group, VG - Virtual Group,
SS - Static Source, VS - Virtual Source,
Ac - Group accounted towards access control limit
Interface: GigabitEthernet1
Group: 239.1.1.1
Flags: L U
Uptime: 00:05:06
Group mode: INCLUDE
Last reporter: 10.1.2.1
Source list is empty
Interface: GigabitEthernet1
Group: 239.3.3.3
Flags: L
Uptime: 00:05:06
Group mode: INCLUDE
Last reporter: 10.1.2.1
Group source list: (C - Cisco Src Report, U - URD, R - Remote, S - Static,
V - Virtual, M - SSM Mapping, L - Local,
Ac - Channel accounted towards access control limit)
Source Address Uptime v3 Exp CSR Exp Fwd Flags
10.4.1.1 00:05:06 stopped stopped Yes L
Interface: GigabitEthernet1
Group: 239.2.2.2
Flags: L U
Uptime: 00:05:06
Group mode: INCLUDE
Last reporter: 10.1.2.1
Source list is empty
Interface: GigabitEthernet1
Group: 239.5.5.5
Flags: SG
Uptime: 00:05:06
Group mode: INCLUDE
Last reporter: 0.0.0.0
Source list is empty
Interface: GigabitEthernet1
Group: 239.4.4.4
Flags: L
Uptime: 00:05:06
Group mode: INCLUDE
Last reporter: 10.1.2.1
Group source list: (C - Cisco Src Report, U - URD, R - Remote, S - Static,
V - Virtual, M - SSM Mapping, L - Local,
Ac - Channel accounted towards access control limit)
Source Address Uptime v3 Exp CSR Exp Fwd Flags
10.4.1.2 00:05:06 stopped stopped Yes L
Interface: GigabitEthernet1
Group: 239.7.7.7
Flags: SS
Uptime: 00:05:06
Group mode: INCLUDE
Last reporter: 0.0.0.0
Group source list: (C - Cisco Src Report, U - URD, R - Remote, S - Static,
V - Virtual, M - SSM Mapping, L - Local,
Ac - Channel accounted towards access control limit)
Source Address Uptime v3 Exp CSR Exp Fwd Flags
10.16.2.1 00:05:06 stopped stopped Yes S
Interface: GigabitEthernet1
Group: 239.6.6.6
Flags: SG
Uptime: 00:05:06
Group mode: INCLUDE
Last reporter: 0.0.0.0
Source list is empty
Interface: GigabitEthernet1
Group: 239.9.9.9
Flags: Ac
Uptime: 00:23:15
Group mode: EXCLUDE (Expires: 00:06:06)
Last reporter: 10.1.2.2
Source list is empty
Interface: GigabitEthernet1
Group: 239.8.8.8
Flags: SS
Uptime: 00:05:06
Group mode: INCLUDE
Last reporter: 0.0.0.0
Group source list: (C - Cisco Src Report, U - URD, R - Remote, S - Static,
V - Virtual, M - SSM Mapping, L - Local,
Ac - Channel accounted towards access control limit)
Source Address Uptime v3 Exp CSR Exp Fwd Flags
10.16.2.1 00:05:06 stopped stopped Yes S
10.16.2.2 00:05:06 stopped stopped Yes S
Interface: GigabitEthernet1
Group: 224.0.1.40
Flags: L U
Uptime: 00:25:33
Group mode: INCLUDE
Last reporter: 10.1.2.1
Source list is empty
'''}
golden_parsed_output_1 = {
"vrf": {
"VRF1": {
"interface": {
"GigabitEthernet2": {
"static_group": {
"239.5.5.5 *": {
"group": "239.5.5.5",
"source": "*",
"last_reporter": "0.0.0.0",
"up_time": "00:06:17",
"flags": "SG"
},
"239.6.6.6 *": {
"group": "239.6.6.6",
"source": "*",
"last_reporter": "0.0.0.0",
"up_time": "00:06:14",
"flags": "SG"
}
},
"join_group": {
"239.8.8.8 10.16.2.2": {
"group": "239.8.8.8",
"source": "10.16.2.2",
"last_reporter": "0.0.0.0",
"flags": "SS",
"forward": True,
"csr_exp": "stopped",
"up_time": "00:05:59",
"v3_exp": "stopped"
},
"239.3.3.3 10.4.1.1": {
"group": "239.3.3.3",
"source": "10.4.1.1",
"last_reporter": "10.186.2.1",
"flags": "L",
"forward": True,
"csr_exp": "stopped",
"up_time": "00:06:24",
"v3_exp": "stopped"
},
"239.1.1.1 *": {
"group": "239.1.1.1",
"source": "*",
"last_reporter": "10.186.2.1",
"up_time": "00:06:24",
"flags": "L U",
"expire": "never"
},
"239.4.4.4 10.4.1.2": {
"group": "239.4.4.4",
"source": "10.4.1.2",
"last_reporter": "10.186.2.1",
"flags": "L",
"forward": True,
"csr_exp": "stopped",
"up_time": "00:06:23",
"v3_exp": "stopped"
},
"239.7.7.7 10.16.2.1": {
"group": "239.7.7.7",
"source": "10.16.2.1",
"last_reporter": "0.0.0.0",
"flags": "SS",
"forward": True,
"csr_exp": "stopped",
"up_time": "00:06:06",
"v3_exp": "stopped"
},
"239.2.2.2 *": {
"group": "239.2.2.2",
"source": "*",
"last_reporter": "10.186.2.1",
"up_time": "00:06:24",
"flags": "L U",
"expire": "never"
},
"239.8.8.8 10.16.2.1": {
"group": "239.8.8.8",
"source": "10.16.2.1",
"last_reporter": "0.0.0.0",
"flags": "SS",
"forward": True,
"csr_exp": "stopped",
"up_time": "00:05:59",
"v3_exp": "stopped"
},
"224.0.1.40 *": {
"group": "224.0.1.40",
"source": "*",
"last_reporter": "10.186.2.1",
"up_time": "00:25:55",
"flags": "L U"
}
},
"group": {
"239.4.4.4": {
"group_mode": "include",
"last_reporter": "10.186.2.1",
"flags": "L",
"source": {
"10.4.1.2": {
"forward": True,
"flags": "L",
"up_time": "00:06:23",
"v3_exp": "stopped",
"csr_exp": "stopped",
}
},
"up_time": "00:06:23"
},
"239.5.5.5": {
"group_mode": "include",
"last_reporter": "0.0.0.0",
"flags": "SG",
"up_time": "00:06:17"
},
"239.1.1.1": {
"group_mode": "exclude",
"last_reporter": "10.186.2.1",
"flags": "L U",
"up_time": "00:06:24",
"expire": "never"
},
"239.3.3.3": {
"group_mode": "include",
"last_reporter": "10.186.2.1",
"flags": "L",
"source": {
"10.4.1.1": {
"forward": True,
"flags": "L",
"up_time": "00:06:24",
"v3_exp": "stopped",
"csr_exp": "stopped",
}
},
"up_time": "00:06:24"
},
"239.6.6.6": {
"group_mode": "include",
"last_reporter": "0.0.0.0",
"flags": "SG",
"up_time": "00:06:14"
},
"239.8.8.8": {
"group_mode": "include",
"last_reporter": "0.0.0.0",
"flags": "SS",
"source": {
"10.16.2.1": {
"forward": True,
"flags": "S",
"up_time": "00:03:56",
"v3_exp": "stopped",
"csr_exp": "stopped",
},
"10.16.2.2": {
"forward": True,
"flags": "S",
"up_time": "00:05:57",
"v3_exp": "stopped",
"csr_exp": "stopped",
}
},
"up_time": "00:05:59"
},
"224.0.1.40": {
"group_mode": "include",
"last_reporter": "10.186.2.1",
"flags": "L U",
"up_time": "00:25:55"
},
"239.7.7.7": {
"group_mode": "include",
"last_reporter": "0.0.0.0",
"flags": "SS",
"source": {
"10.16.2.1": {
"forward": True,
"flags": "S",
"up_time": "00:06:06",
"v3_exp": "stopped",
"csr_exp": "stopped",
}
},
"up_time": "00:06:06"
},
"239.2.2.2": {
"group_mode": "exclude",
"last_reporter": "10.186.2.1",
"flags": "L U",
"up_time": "00:06:24",
"expire": "never"
}
}
}
}
}
}
}
golden_output_1 = {'execute.return_value': '''\
Flags: L - Local, U - User, SG - Static Group, VG - Virtual Group,
SS - Static Source, VS - Virtual Source,
Ac - Group accounted towards access control limit
Interface: GigabitEthernet2
Group: 239.1.1.1
Flags: L U
Uptime: 00:06:24
Group mode: EXCLUDE (Expires: never)
Last reporter: 10.186.2.1
Source list is empty
Interface: GigabitEthernet2
Group: 239.3.3.3
Flags: L
Uptime: 00:06:24
Group mode: INCLUDE
Last reporter: 10.186.2.1
Group source list: (C - Cisco Src Report, U - URD, R - Remote, S - Static,
V - Virtual, M - SSM Mapping, L - Local,
Ac - Channel accounted towards access control limit)
Source Address Uptime v3 Exp CSR Exp Fwd Flags
10.4.1.1 00:06:24 stopped stopped Yes L
Interface: GigabitEthernet2
Group: 239.2.2.2
Flags: L U
Uptime: 00:06:24
Group mode: EXCLUDE (Expires: never)
Last reporter: 10.186.2.1
Source list is empty
Interface: GigabitEthernet2
Group: 239.5.5.5
Flags: SG
Uptime: 00:06:17
Group mode: INCLUDE
Last reporter: 0.0.0.0
Source list is empty
Interface: GigabitEthernet2
Group: 239.4.4.4
Flags: L
Uptime: 00:06:23
Group mode: INCLUDE
Last reporter: 10.186.2.1
Group source list: (C - Cisco Src Report, U - URD, R - Remote, S - Static,
V - Virtual, M - SSM Mapping, L - Local,
Ac - Channel accounted towards access control limit)
Source Address Uptime v3 Exp CSR Exp Fwd Flags
10.4.1.2 00:06:23 stopped stopped Yes L
Interface: GigabitEthernet2
Group: 239.7.7.7
Flags: SS
Uptime: 00:06:06
Group mode: INCLUDE
Last reporter: 0.0.0.0
Group source list: (C - Cisco Src Report, U - URD, R - Remote, S - Static,
V - Virtual, M - SSM Mapping, L - Local,
Ac - Channel accounted towards access control limit)
Source Address Uptime v3 Exp CSR Exp Fwd Flags
10.16.2.1 00:06:06 stopped stopped Yes S
Interface: GigabitEthernet2
Group: 239.6.6.6
Flags: SG
Uptime: 00:06:14
Group mode: INCLUDE
Last reporter: 0.0.0.0
Source list is empty
Interface: GigabitEthernet2
Group: 239.8.8.8
Flags: SS
Uptime: 00:05:59
Group mode: INCLUDE
Last reporter: 0.0.0.0
Group source list: (C - Cisco Src Report, U - URD, R - Remote, S - Static,
V - Virtual, M - SSM Mapping, L - Local,
Ac - Channel accounted towards access control limit)
Source Address Uptime v3 Exp CSR Exp Fwd Flags
10.16.2.1 00:03:56 stopped stopped Yes S
10.16.2.2 00:05:57 stopped stopped Yes S
Interface: GigabitEthernet2
Group: 224.0.1.40
Flags: L U
Uptime: 00:25:55
Group mode: INCLUDE
Last reporter: 10.186.2.1
Source list is empty
'''}
    def test_empty(self):
        """Empty device output must make the parser raise SchemaEmptyParserError."""
        self.device = Mock(**self.empty_output)
        obj = ShowIpIgmpGroupsDetail(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = obj.parse()
    def test_golden_default_vrf(self):
        """Parse default-VRF 'show ip igmp groups detail' output and compare to the golden dict."""
        self.device = Mock(**self.golden_output)
        obj = ShowIpIgmpGroupsDetail(device=self.device)
        parsed_output = obj.parse()
        self.assertEqual(parsed_output,self.golden_parsed_output)
    def test_golden_non_default_vrf(self):
        """Parse VRF1 'show ip igmp vrf VRF1 groups detail' output and compare to the golden dict."""
        self.device = Mock(**self.golden_output_1)
        obj = ShowIpIgmpGroupsDetail(device=self.device)
        parsed_output = obj.parse(vrf='VRF1')
        self.assertEqual(parsed_output,self.golden_parsed_output_1)
golden_parsed_output_3 = {
"vrf": {
"default": {
"interface": {
"Vlan210": {
"group": {
"224.0.1.39": {
"expire": "00:01:29",
"up_time": "1w0d",
"group_mode": "exclude",
"last_reporter": "192.168.135.2"
},
"227.1.1.1": {
"expire": "00:02:25",
"up_time": "1w0d",
"group_mode": "exclude",
"last_reporter": "192.168.135.4"
},
"225.1.1.1": {
"expire": "00:02:26",
"up_time": "1w0d",
"group_mode": "exclude",
"last_reporter": "192.168.135.4"
},
"226.1.1.1": {
"expire": "00:02:22",
"up_time": "1w0d",
"group_mode": "exclude",
"last_reporter": "192.168.135.4"
}
}
},
"Loopback10": {
"join_group": {
"224.0.1.40 *": {
"expire": "00:02:08",
"source": "*",
"group": "224.0.1.40",
"flags": "L U",
"up_time": "1w0d",
"last_reporter": "192.168.151.1"
}
},
"group": {
"224.0.1.40": {
"expire": "00:02:08",
"last_reporter": "192.168.151.1",
"up_time": "1w0d",
"group_mode": "exclude",
"flags": "L U"
}
}
},
"Vlan211": {
"static_group": {
"239.1.1.1 *": {
"expire": "00:02:29",
"source": "*",
"group": "239.1.1.1",
"flags": "L U SG",
"up_time": "4d11h",
"last_reporter": "192.168.76.1"
}
},
"join_group": {
"239.1.1.1 *": {
"expire": "00:02:29",
"source": "*",
"group": "239.1.1.1",
"flags": "L U SG",
"up_time": "4d11h",
"last_reporter": "192.168.76.1"
}
},
"group": {
"224.0.1.39": {
"expire": "00:02:30",
"up_time": "1w0d",
"group_mode": "exclude",
"last_reporter": "192.168.76.2"
},
"232.1.1.1": {
"last_reporter": "192.168.76.4",
"up_time": "1w0d",
"group_mode": "include",
"flags": "SSM"
},
"239.1.1.1": {
"expire": "00:02:29",
"last_reporter": "192.168.76.1",
"up_time": "4d11h",
"group_mode": "exclude",
"flags": "L U SG"
}
}
}
}
}
}
}
golden_output_3 = {'execute.return_value': '''\
Flags: L - Local, U - User, SG - Static Group, VG - Virtual Group,
SS - Static Source, VS - Virtual Source,
Ac - Group accounted towards access control limit
Interface: Vlan211
Group: 239.1.1.1
Flags: L U SG
Uptime: 4d11h
Group mode: EXCLUDE (Expires: 00:02:29)
Last reporter: 192.168.76.1
Source list is empty
Interface: Vlan211
Group: 232.1.1.1
Flags: SSM
Uptime: 1w0d
Group mode: INCLUDE
Last reporter: 192.168.76.4
Group source list: (C - Cisco Src Report, U - URD, R - Remote, S - Static,
V - Virtual, M - SSM Mapping, L - Local,
Ac - Channel accounted towards access control limit)
Source Address Uptime v3 Exp CSR Exp Fwd Flags
192.168.34.2 1w0d 00:02:30 stopped Yes R
Interface: Vlan210
Group: 227.1.1.1
Flags:
Uptime: 1w0d
Group mode: EXCLUDE (Expires: 00:02:25)
Last reporter: 192.168.135.4
Source list is empty
Interface: Vlan210
Group: 226.1.1.1
Flags:
Uptime: 1w0d
Group mode: EXCLUDE (Expires: 00:02:22)
Last reporter: 192.168.135.4
Source list is empty
Interface: Vlan210
Group: 225.1.1.1
Flags:
Uptime: 1w0d
Group mode: EXCLUDE (Expires: 00:02:26)
Last reporter: 192.168.135.4
Source list is empty
Interface: Vlan211
Group: 224.0.1.39
Flags:
Uptime: 1w0d
Group mode: EXCLUDE (Expires: 00:02:30)
Last reporter: 192.168.76.2
Source list is empty
Interface: Vlan210
Group: 224.0.1.39
Flags:
Uptime: 1w0d
Group mode: EXCLUDE (Expires: 00:01:29)
Last reporter: 192.168.135.2
Source list is empty
Interface: Loopback10
Group: 224.0.1.40
Flags: L U
Uptime: 1w0d
Group mode: EXCLUDE (Expires: 00:02:08)
Last reporter: 192.168.151.1
Source list is empty
'''}
    def test_golden_3(self):
        """Multi-interface output (Vlan210/Vlan211/Loopback10) parses into golden_parsed_output_3."""
        self.device = Mock(**self.golden_output_3)
        obj = ShowIpIgmpGroupsDetail(device=self.device)
        parsed_output = obj.parse()
        self.assertEqual(parsed_output,self.golden_parsed_output_3)
# ===========================================================
# Unit test for 'show ip igmp ssm-mapping <WORD>'
# Unit test for 'show ip igmp vrf <WORD> ssm-mapping <WORD>'
# ============================================================
class test_show_ip_igmp_ssm_mapping(unittest.TestCase):
    """Unit tests for the ShowIpIgmpSsmMapping parser, covering the default
    VRF and a named VRF ('show ip igmp [vrf <WORD>] ssm-mapping <WORD>')."""

    device = Device(name='aDevice')
    # Device mock with no CLI output at all -> the parser must raise.
    empty_output = {'execute.return_value': ''}

    # Expected parsed structure for the default VRF: two static source
    # mappings for group 239.1.1.1.
    golden_parsed_output = {
        'vrf': {
            'default': {
                'ssm_map': {
                    '10.4.1.1 239.1.1.1': {
                        'source_addr': '10.4.1.1',
                        'group_address': '239.1.1.1',
                        'database': 'static',
                    },
                    '10.16.2.2 239.1.1.1': {
                        'source_addr': '10.16.2.2',
                        'group_address': '239.1.1.1',
                        'database': 'static',
                    },
                }
            }
        }
    }
    # Raw device output fed to the parser (default VRF).
    # NOTE(review): the original leading whitespace of this CLI capture was
    # lost in this copy of the file -- confirm against the real capture.
    golden_output = {'execute.return_value': '''\
Group address: 239.1.1.1
Database : Static
Source list : 10.4.1.1
10.16.2.2
'''}

    # Same mapping but keyed under VRF 'VRF1'.
    golden_parsed_output_1 = {
        'vrf': {
            'VRF1': {
                'ssm_map': {
                    '10.4.1.1 239.1.1.1': {
                        'source_addr': '10.4.1.1',
                        'group_address': '239.1.1.1',
                        'database': 'static',
                    },
                    '10.16.2.2 239.1.1.1': {
                        'source_addr': '10.16.2.2',
                        'group_address': '239.1.1.1',
                        'database': 'static',
                    },
                }
            }
        }
    }
    # Raw device output for the VRF1 case (identical text; the VRF comes
    # from the parse() argument).
    golden_output_1 = {'execute.return_value': '''\
Group address: 239.1.1.1
Database : Static
Source list : 10.4.1.1
10.16.2.2
'''}

    def test_empty(self):
        """Empty device output must raise SchemaEmptyParserError."""
        self.device = Mock(**self.empty_output)
        obj = ShowIpIgmpSsmMapping(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = obj.parse(group='239.1.1.1')

    def test_golden_default_vrf(self):
        """Parse the default-VRF output and compare to the golden structure."""
        self.device = Mock(**self.golden_output)
        obj = ShowIpIgmpSsmMapping(device=self.device)
        parsed_output = obj.parse(group='239.1.1.1')
        self.assertEqual(parsed_output,self.golden_parsed_output)

    def test_golden_non_default_vrf(self):
        """Parse the VRF1 output and compare to the golden structure."""
        self.device = Mock(**self.golden_output_1)
        obj = ShowIpIgmpSsmMapping(device=self.device)
        parsed_output = obj.parse(vrf='VRF1', group='239.1.1.1')
        self.assertEqual(parsed_output,self.golden_parsed_output_1)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 44.96351
| 82
| 0.311808
| 4,118
| 51,753
| 3.809373
| 0.056095
| 0.011984
| 0.009945
| 0.011857
| 0.92548
| 0.904316
| 0.863135
| 0.81915
| 0.778861
| 0.733474
| 0
| 0.121033
| 0.585396
| 51,753
| 1,151
| 83
| 44.96351
| 0.610057
| 0.012598
| 0
| 0.73246
| 0
| 0.01029
| 0.403484
| 0.005912
| 0
| 0
| 0
| 0
| 0.01029
| 1
| 0.01029
| false
| 0
| 0.004677
| 0
| 0.038354
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6165b3354e0b98cf5450e01b57cacaec7ba03406
| 73
|
py
|
Python
|
micropython/main.py
|
carledwards/hello-bluetooth
|
9ec60ef6061eb97aff6b04255fe0f7facc4acdf9
|
[
"MIT"
] | null | null | null |
micropython/main.py
|
carledwards/hello-bluetooth
|
9ec60ef6061eb97aff6b04255fe0f7facc4acdf9
|
[
"MIT"
] | null | null | null |
micropython/main.py
|
carledwards/hello-bluetooth
|
9ec60ef6061eb97aff6b04255fe0f7facc4acdf9
|
[
"MIT"
] | null | null | null |
import ble_uart_peripheral
def demo():
ble_uart_peripheral.demo()
| 10.428571
| 30
| 0.753425
| 10
| 73
| 5.1
| 0.6
| 0.27451
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164384
| 73
| 6
| 31
| 12.166667
| 0.836066
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
61713a28a198a45244a8ba5fd4f62b828d4e060f
| 109
|
py
|
Python
|
clighter/core/common.py
|
bilginyuksel/clighter
|
cff08bd05b049ac44847818cdaac03197d619cc2
|
[
"Apache-2.0"
] | 8
|
2021-09-03T11:20:54.000Z
|
2021-11-08T08:59:30.000Z
|
clighter/core/common.py
|
bilginyuksel/clighter
|
cff08bd05b049ac44847818cdaac03197d619cc2
|
[
"Apache-2.0"
] | 1
|
2021-09-15T20:38:54.000Z
|
2021-09-15T20:38:54.000Z
|
clighter/core/common.py
|
bilginyuksel/clighter
|
cff08bd05b049ac44847818cdaac03197d619cc2
|
[
"Apache-2.0"
] | 1
|
2021-09-11T08:09:51.000Z
|
2021-09-11T08:09:51.000Z
|
import uuid
def generate_id() -> str:
"""
Generates unique id
"""
return uuid.uuid4().hex
| 12.111111
| 27
| 0.568807
| 13
| 109
| 4.692308
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012987
| 0.293578
| 109
| 8
| 28
| 13.625
| 0.779221
| 0.174312
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4edf9a82dc25b752929fa0ba938acac308982233
| 23,869
|
py
|
Python
|
piemmer/test/test_harvest.py
|
HWChang/emmer
|
9d1ca071bd9f8d0e1ed49910de33a865d82df4c2
|
[
"BSD-3-Clause"
] | 2
|
2021-06-11T09:51:39.000Z
|
2021-06-13T16:32:55.000Z
|
piemmer/test/test_harvest.py
|
HWChang/emmer
|
9d1ca071bd9f8d0e1ed49910de33a865d82df4c2
|
[
"BSD-3-Clause"
] | null | null | null |
piemmer/test/test_harvest.py
|
HWChang/emmer
|
9d1ca071bd9f8d0e1ed49910de33a865d82df4c2
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
## usage
# at a level above emmer/
# python3 -m emmer.test.test_harvest
from ..main.basic.math import NonDesityMatrix
from ..main.basic.read import RawDataImport, GetFiles
from ..main.advanced.iteration import MinusOneVNE, InfoRichCalling, reproducibility, reproducibility_summary, Kernal
#from ..main.advanced.iteration import MinusOneVNE, MinDataLostFilter, InfoRichCalling, reproducibility, reproducibility_summary, Kernal
from ..harvest import HarvestArgs, EMMER, mergeDataFrame
from ..troubleshoot.err.error import *
from pandas.util.testing import assert_frame_equal
import unittest
import argparse
import shutil
import pandas
import numpy
import glob
import sys
import os
class TestHarvestArgs(unittest.TestCase):
    def test_getArgsI(self):
        """getArgsI: a non-CSV input file passed via -i must raise ErrorCode1."""
        print('\ntest_TestHarvestArgs.getArgsI:')
        print('    case 1: non csv file when input a specific file')
        # Point -i at a .txt file; validation should reject it.
        sys.argv[1:] = ['-i', 'piemmer/data/sow_test_dir_2/targert_file_1.txt']
        with self.assertRaises(ErrorCode1):
            processed_args = HarvestArgs(suppress = True, silence = False, neglect = True)
            processed_args.getArgsI()
        print('===========================================================')
    def test_getArgsQT(self):
        """getArgsQT: supplying -q makes -t unnecessary; warning code '10' is set."""
        print('\ntest_TestHarvestArgs.getArgsQT:')
        print('    case 1: do not need to set args.t when use args.q')
        sys.argv[1:] = ['-t', '2', '-q']
        processed_args = HarvestArgs(suppress = True, silence = False, neglect = True)
        processed_args.getArgsQT()
        # The redundant -t should only warn, not fail.
        my_result = processed_args.warning_code
        expected_result = '10'
        self.assertEqual(my_result, expected_result)
        print('===========================================================')
def test_getArgsFZ(self):
print('\ntest_TestHarvestArgs.getArgsFZ:')
print(' case 1: test error handling')
print(' 1.1: unexpected agrs.z number setting when using HardFilter')
sys.argv[1:] = ['-f', 'HardFilter', '-z', '2']
with self.assertRaises(ErrorCode2):
processed_args = HarvestArgs(suppress = True, silence = False, neglect = True)
processed_args.getArgsFZ()
print(' ---------------------------------------------------')
print(' 1.2: missing agrs.z when using HardFilter')
sys.argv[1:] = ['-f', 'HardFilter']
with self.assertRaises(ErrorCode3):
processed_args = HarvestArgs(suppress = True, silence = False, neglect = True)
processed_args.getArgsFZ()
print(' ---------------------------------------------------')
print(' 1.3: set agrs.z when using None filter')
sys.argv[1:] = ['-f', 'None', '-z', '0.5']
processed_args = HarvestArgs(suppress = True, silence = False, neglect = True)
processed_args.getArgsFZ()
my_result = processed_args.warning_code
expected_result = '2'
self.assertEqual(my_result, expected_result)
print('===========================================================')
def test_getArgsUL(self):
print('\ntest_TestHarvestArgs.getArgsUL:')
print(' case 1: missing both args.u and args.l settings')
sys.argv[1:] = ['-f', 'None', '-z', '0.5']
with self.assertRaises(ErrorCode5):
processed_args = HarvestArgs(suppress = True, silence = False, neglect = True)
processed_args.getArgsUL()
print('===========================================================')
def test_getArgsPS(self):
print('\ntest_TestHarvestArgs.getArgsPS:')
print(' case 1: args.s warning handling')
print(' 1.1: current version of emmer can not generate args.s plots when working on specific csv (args.i)')
print(' Develop Note: this version of piemmer does not have this limitation. Comment out for now. Will remove in the future')
#sys.argv[1:] = ['-i', 'piemmer/data/data_dir_3/group_A.csv', '-s']
#with self.assertRaises(WarningCode3):
#processed_args = HarvestArgs(suppress = True, silence = False, neglect = True)
#processed_args.getArgsI()
#processed_args.getArgsPS()
#my_result = processed_args.warning_code
#expected_result = '3'
#self.assertEqual(my_result, expected_result)
print(' ---------------------------------------------------')
print(' 1.2: current version of emmer can not generate args.s plots when input directory only contains one csv file')
sys.argv[1:] = ['-i', 'piemmer/data/data_dir_1/', '-s']
#with self.assertRaises(WarningCode3):
processed_args = HarvestArgs(suppress = True, silence = False, neglect = True)
processed_args.getArgsI()
processed_args.getArgsPS()
my_result = processed_args.warning_code
expected_result = '3'
self.assertEqual(my_result, expected_result)
print(' ---------------------------------------------------')
print(' case 2: args.p warning handling')
print(' 2.1: current version of emmer can not generate args.p plots when working on specific csv (args.i)')
sys.argv[1:] = ['-i', 'piemmer/data/data_dir_3/group_A.csv', '-p']
#with self.assertRaises(WarningCode5):
processed_args = HarvestArgs(suppress = True, silence = False, neglect = True)
processed_args.getArgsI()
processed_args.getArgsPS()
my_result = processed_args.warning_code
expected_result = '5'
self.assertEqual(my_result, expected_result)
print(' ---------------------------------------------------')
print(' 2.2: current version of emmer can not generate args.p plots when input directory only contains one csv file')
sys.argv[1:] = ['-i', 'piemmer/data/data_dir_1', '-p']
#with self.assertRaises(WarningCode6):
processed_args = HarvestArgs(suppress = True, silence = False, neglect = True)
processed_args.getArgsI()
processed_args.getArgsPS()
my_result = processed_args.warning_code
expected_result = '5'
self.assertEqual(my_result, expected_result)
print('===========================================================')
def test_getArgsC(self):
print('\ntest_TestHarvestArgs.getArgsC:')
print(' case 1: args.c warning handling')
print(' 1.1: user set args.c at 0')
sys.argv[1:] = ['-c', '0']
with self.assertRaises(ErrorCode47):
processed_args = HarvestArgs(suppress = True, silence = False, neglect = True)
processed_args.getArgsC()
print(' ---------------------------------------------------')
print(' 1.2: args.c > CPU in the computer')
sys.argv[1:] = ['-c', '10000000000']
with self.assertRaises(ErrorCode47):
processed_args = HarvestArgs(suppress = True, silence = False, neglect = True)
processed_args.getArgsC()
print(' ---------------------------------------------------')
print(' case 2: default setting')
sys.argv[1:] = []
processed_args = HarvestArgs(suppress = True, silence = False, neglect = True)
processed_args.getArgsC()
my_result = processed_args.num_cpu
expected_result = 1
self.assertEqual(my_result, expected_result)
print('===========================================================')
class TestEMMER(unittest.TestCase):
    """End-to-end tests for the EMMER pipeline entry class.

    Each test announces its parameter settings on stdout, constructs an
    EMMER object from those settings, runs either ``singleFile`` or
    ``multipleFiles``, and checks the resulting data shapes / column sets.

    Bug fixes relative to the original: three announcement prints disagreed
    with the values actually used (``specific_csv`` in ``test_singleFile``
    case 1.1, and ``output_file_tag`` in both ``test_multipleFiles`` cases);
    the printed text now matches the real settings.
    """

    def test_EMMER(self):
        """Constructing EMMER on a one-csv directory discovers that file."""
        print('\ntest_EMMER:')
        print('    input_dir: "piemmer/data/data_dir_1"')
        input_dir = 'piemmer/data/data_dir_1'
        print('    output_file_tag: "test1"')
        output_file_tag = 'test1'
        print('    detection_limit: 0')
        detection_limit = 0
        print('    tolerance: 1')
        tolerance = 1
        print('    filter: "None"')
        filter = 'None'
        print('    upper_threshold_factor: 1')
        upper_threshold_factor = 1
        print('    lower_threshold_factor: 1')
        lower_threshold_factor = 1
        print('    specific_csv: False')
        specific_csv = False
        print('    information-rich threshold: 1')
        infoRich_threshold = 1
        print('    quick_look: True')
        quick_look = True
        print('    use_fractional_abundance: True')
        use_fractional_abundance = True
        print('    normalize: False')
        normalize = False
        print('    num_cpu: 1')
        num_cpu = 1
        one_file = EMMER(input_dir = input_dir, output_file_tag = output_file_tag,
                         detection_limit = detection_limit, tolerance = tolerance,
                         filter = filter, upper_threshold_factor = upper_threshold_factor,
                         lower_threshold_factor = lower_threshold_factor,
                         specific_csv = specific_csv, infoRich_threshold = infoRich_threshold,
                         notebook_name = '', neglect = '', normalize = normalize,
                         num_cpu = num_cpu, quick_look = quick_look,
                         use_fractional_abundance = use_fractional_abundance)
        my_result = one_file.input_file_names
        expected_result = ['piemmer/data/data_dir_1/test_case_1.csv']
        self.assertListEqual(my_result, expected_result)
        # EMMER writes into ./output; remove it so tests stay idempotent.
        shutil.rmtree('output')
        print('===========================================================')

    def test_singleFile(self):
        """singleFile() with HardFilter and with no filter."""
        print('    ---------------------------------------------------')
        print('    case 1: HardFilter')
        print('        1.1: hypothetical data')
        print('    input_dir: "piemmer/data/data_dir_1"')
        input_dir = 'piemmer/data/data_dir_1'
        print('    output_file_tag: "test1"')
        output_file_tag = 'test1'
        print('    detection_limit: 0')
        detection_limit = 0
        print('    tolerance: 0.6')
        tolerance = 0.6
        print('    filter: "HardFilter"')
        filter = 'HardFilter'
        print('    upper_threshold_factor: 1')
        upper_threshold_factor = 1
        print('    lower_threshold_factor: 1')
        lower_threshold_factor = 1
        # BUGFIX: the original announcement claimed True while the test
        # actually runs with specific_csv = False.
        print('    specific_csv: False')
        specific_csv = False
        print('    information-rich threshold: 1')
        infoRich_threshold = 1
        print('    quick_look: True')
        quick_look = True
        print('    use_fractional_abundance: True')
        use_fractional_abundance = True
        print('    normalize: False')
        normalize = False
        print('    num_cpu: 1')
        num_cpu = 1
        one_file = EMMER(input_dir = input_dir, output_file_tag = output_file_tag,
                         detection_limit = detection_limit, tolerance = tolerance,
                         filter = filter, upper_threshold_factor = upper_threshold_factor,
                         lower_threshold_factor = lower_threshold_factor,
                         specific_csv = specific_csv, infoRich_threshold = infoRich_threshold,
                         notebook_name = '', neglect = '', normalize = normalize,
                         num_cpu = num_cpu, quick_look = quick_look,
                         use_fractional_abundance = use_fractional_abundance)
        one_file.singleFile()
        # HardFilter is expected to drop col4 from the hypothetical data.
        my_result = list(one_file.data.filtered_data.data.columns.values)
        expected_result = ['col1', 'col2', 'col3', 'col5', 'col6']
        self.assertListEqual(my_result, expected_result)
        ## filter: HardFilter; real data
        print('    ---------------------------------------------------')
        print('        1.2: read data (check each filtering steps)')
        print('            1.2.1: raw data')
        print('    input_dir: "piemmer/data/data_dir_3/group_A.csv"')
        input_dir = 'piemmer/data/data_dir_3/group_A.csv'
        print('    output_file_tag: "test1"')
        output_file_tag = 'test1'
        print('    detection_limit: 0.001')
        detection_limit = 0.001
        print('    tolerance: 0.33')
        tolerance = 0.33
        print('    filter: "HardFilter"')
        filter = 'HardFilter'
        print('    upper_threshold_factor: 1')
        upper_threshold_factor = 1
        print('    lower_threshold_factor: 1')
        lower_threshold_factor = 1
        print('    specific_csv: True')
        specific_csv = True
        print('    information-rich threshold: 1')
        infoRich_threshold = 1
        print('    quick_look: True')
        quick_look = True
        print('    use_fractional_abundance: True')
        use_fractional_abundance = True
        print('    normalize: False')
        normalize = False
        print('    num_cpu: 1')
        num_cpu = 1
        one_file = EMMER(input_dir = input_dir, output_file_tag = output_file_tag,
                         detection_limit = detection_limit, tolerance = tolerance,
                         filter = filter, upper_threshold_factor = upper_threshold_factor,
                         lower_threshold_factor = lower_threshold_factor,
                         specific_csv = specific_csv, infoRich_threshold = infoRich_threshold,
                         notebook_name = '', neglect = '', normalize = normalize,
                         num_cpu = num_cpu, quick_look = quick_look,
                         use_fractional_abundance = use_fractional_abundance)
        one_file.singleFile()
        # Raw data; after removing empty rows and columns
        my_result = list(one_file.data.input_matrix.raw_data.shape)
        expected_result = [13, 4809]
        self.assertListEqual(my_result, expected_result)
        # After removing empty rows and columns
        print('    ---------------------------------------------------')
        print('            1.2.2: after removing empty rows and columns')
        my_result = list(one_file.data.input_matrix.raw_data_before_filter.shape)
        expected_result = [13, 1077]
        self.assertListEqual(my_result, expected_result)
        # After data filtering
        print('    ---------------------------------------------------')
        print('            1.2.3: after filtering')
        my_result = list(one_file.data.input_matrix.data.shape)
        expected_result = [13, 126]
        self.assertListEqual(my_result, expected_result)
        ## filter: None
        print('    ---------------------------------------------------')
        print('    case 2: No filter')
        print('    input_dir: "piemmer/data/data_dir_1"')
        input_dir = 'piemmer/data/data_dir_1'
        print('    output_file_tag: "test1"')
        output_file_tag = 'test1'
        print('    detection_limit: 0')
        detection_limit = 0
        print('    tolerance: 1')
        tolerance = 1
        print('    filter: "None"')
        filter = 'None'
        print('    upper_threshold_factor: 1')
        upper_threshold_factor = 1
        print('    lower_threshold_factor: 1')
        lower_threshold_factor = 1
        print('    specific_csv: False')
        specific_csv = False
        print('    information-rich threshold: 1')
        infoRich_threshold = 1
        print('    quick_look: True')
        quick_look = True
        print('    use_fractional_abundance: True')
        use_fractional_abundance = True
        print('    normalize: False')
        normalize = False
        print('    num_cpu: 1')
        num_cpu = 1
        one_file = EMMER(input_dir = input_dir, output_file_tag = output_file_tag,
                         detection_limit = detection_limit, tolerance = tolerance,
                         filter = filter, upper_threshold_factor = upper_threshold_factor,
                         lower_threshold_factor = lower_threshold_factor,
                         specific_csv = specific_csv, infoRich_threshold = infoRich_threshold,
                         notebook_name = '', neglect = '', normalize = normalize,
                         num_cpu = num_cpu, quick_look = quick_look,
                         use_fractional_abundance = use_fractional_abundance)
        one_file.singleFile()
        # With no filter every column survives.
        my_result = list(one_file.data.filtered_data.data.columns.values)
        expected_result = ['col1', 'col2', 'col3', 'col4', 'col5', 'col6']
        self.assertListEqual(my_result, expected_result)
        shutil.rmtree('output')
        print('===========================================================')

    def test_multipleFiles(self):
        """multipleFiles() reproducibility summary with and without quick_look."""
        print('    ---------------------------------------------------')
        print('    case 1: set quick_look at False')
        print('    input_dir: "piemmer/data/data_dir_2"')
        input_dir = 'piemmer/data/data_dir_2'
        # BUGFIX: the original announcement printed "multipleFiles_test1"
        # while the tag actually used was 'multipleFiles_test2'.
        print('    output_file_tag: "multipleFiles_test2"')
        output_file_tag = 'multipleFiles_test2'
        print('    detection_limit: 0')
        detection_limit = 0
        print('    tolerance: 1')
        tolerance = 1
        print('    filter: "None"')
        filter = 'None'
        print('    upper_threshold_factor: 1')
        upper_threshold_factor = 1
        print('    lower_threshold_factor: 1')
        lower_threshold_factor = 1
        print('    specific_csv: False')
        specific_csv = False
        print('    information-rich threshold: 2')
        infoRich_threshold = 2
        print('    quick_look: False')
        quick_look = False
        print('    use_fractional_abundance: True')
        use_fractional_abundance = True
        print('    normalize: False')
        normalize = False
        print('    num_cpu: 1')
        num_cpu = 1
        one_file = EMMER(input_dir = input_dir, output_file_tag = output_file_tag,
                         detection_limit = detection_limit, tolerance = tolerance,
                         filter = filter, upper_threshold_factor = upper_threshold_factor,
                         lower_threshold_factor = lower_threshold_factor,
                         specific_csv = specific_csv, infoRich_threshold = infoRich_threshold,
                         notebook_name = '', neglect = '', normalize = normalize,
                         num_cpu = num_cpu, quick_look = quick_look,
                         use_fractional_abundance = use_fractional_abundance)
        one_file.multipleFiles()
        output_df = one_file.summary_df
        # Sort columns so the comparison is independent of discovery order.
        my_result = output_df.reindex(sorted(output_df.columns), axis = 1)
        data = [[0.00, 28.57],
                [50.00, 0.00],
                [66.67, 0.00],
                [0.00, 100.00],
                [33.33, 0.00],
                [66.67, 0.00]]
        expected_result = pandas.DataFrame(data, columns = ['test_case_1.csv', 'test_case_2.csv'],
                                           index = ['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])
        assert_frame_equal(my_result, expected_result)
        print('    ---------------------------------------------------')
        print('    case 2: set quick_look at True')
        print('    input_dir: "piemmer/data/data_dir_2"')
        input_dir = 'piemmer/data/data_dir_2'
        # BUGFIX: same announcement/value mismatch as case 1 (see above).
        print('    output_file_tag: "multipleFiles_test2"')
        output_file_tag = 'multipleFiles_test2'
        print('    detection_limit: 0')
        detection_limit = 0
        print('    tolerance: 1')
        tolerance = 1
        print('    filter: "None"')
        filter = 'None'
        print('    upper_threshold_factor: 1')
        upper_threshold_factor = 1
        print('    lower_threshold_factor: 1')
        lower_threshold_factor = 1
        print('    specific_csv: False')
        specific_csv = False
        print('    information-rich threshold: 1')
        infoRich_threshold = 1
        print('    quick_look: True')
        quick_look = True
        print('    use_fractional_abundance: True')
        use_fractional_abundance = True
        print('    normalize: False')
        normalize = False
        print('    num_cpu: 1')
        num_cpu = 1
        one_file = EMMER(input_dir = input_dir, output_file_tag = output_file_tag,
                         detection_limit = detection_limit, tolerance = tolerance,
                         filter = filter, upper_threshold_factor = upper_threshold_factor,
                         lower_threshold_factor = lower_threshold_factor,
                         specific_csv = specific_csv, infoRich_threshold = infoRich_threshold,
                         notebook_name = '', neglect = '', normalize = normalize,
                         num_cpu = num_cpu, quick_look = quick_look,
                         use_fractional_abundance = use_fractional_abundance)
        one_file.multipleFiles()
        output_df = one_file.summary_df
        my_result = output_df.reindex(sorted(output_df.columns), axis = 1)
        data = [[1.0, 0.0],
                [1.0, 0.0],
                [0.0, 1.0],
                [1.0, 0.0]]
        expected_result = pandas.DataFrame(data, columns = ['test_case_1.csv', 'test_case_2.csv'],
                                           index = ['col2', 'col3', 'col4', 'col6'])
        assert_frame_equal(my_result, expected_result)
        shutil.rmtree('output')
        print('===========================================================')
class TestMergeDataFrame(unittest.TestCase):
    """Check that mergeDataFrame joins per-file info-rich features.

    Runs the full multipleFiles pipeline on the two-csv test directory and
    verifies the merged dataframe has the expected (rows, columns) shape.
    """

    def test_mergeDataFrame(self):
        print('\ntest_mergeDataFrame:')
        # Announce the settings first (EMMER construction prints nothing,
        # so emitting all announcement lines up front is output-identical).
        for line in ('    input_dir: "piemmer/data/data_dir_2"',
                     '    output_file_tag: "test2"',
                     '    detection_limit: 0',
                     '    tolerance: 1',
                     '    filter: "None"',
                     '    upper_threshold_factor: 1',
                     '    lower_threshold_factor: 1',
                     '    specific_csv: False',
                     '    information-rich threshold: 1',
                     '    quick_look: True',
                     '    use_fractional_abundance: True',
                     '    normalize: False',
                     '    num_cpu: 1'):
            print(line)
        emmer_settings = dict(input_dir = 'piemmer/data/data_dir_2',
                              output_file_tag = 'test2',
                              detection_limit = 0,
                              tolerance = 1,
                              filter = 'None',
                              upper_threshold_factor = 1,
                              lower_threshold_factor = 1,
                              specific_csv = False,
                              infoRich_threshold = 1,
                              notebook_name = '',
                              neglect = '',
                              normalize = False,
                              num_cpu = 1,
                              quick_look = True,
                              use_fractional_abundance = True)
        pipeline = EMMER(**emmer_settings)
        pipeline.multipleFiles()
        transform_info = mergeDataFrame(EMMER_class = pipeline, select = 'filtered_infoRich',
                                        file_name_list = pipeline.clean_df_file_names,
                                        info_rich_list = pipeline.collections_of_info_rich_features,
                                        notebook_name = '', normalize = False, neglect = True)
        self.assertEqual(pipeline.merged_dataframe.shape, (13, 4))
        # Remove the ./output directory the pipeline created.
        shutil.rmtree('output')
        print('===========================================================')
# Allow running this module directly, e.g.: python3 -m emmer.test.test_harvest
if __name__ == '__main__':
    unittest.main()
| 44.44879
| 145
| 0.556077
| 2,420
| 23,869
| 5.21281
| 0.102893
| 0.066587
| 0.028855
| 0.027111
| 0.80872
| 0.795957
| 0.772969
| 0.756243
| 0.71415
| 0.705509
| 0
| 0.019958
| 0.296787
| 23,869
| 536
| 146
| 44.531716
| 0.731606
| 0.03427
| 0
| 0.711712
| 0
| 0.011261
| 0.271551
| 0.11239
| 0
| 0
| 0
| 0
| 0.04955
| 1
| 0.022523
| false
| 0
| 0.031532
| 0
| 0.060811
| 0.337838
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4efc19e4799e623de3a3a6622dbfea5acbe46bb6
| 178
|
py
|
Python
|
src/models/adjudicator/__init__.py
|
TDSVirtru/parkinglot
|
3895b4019ad70a1613e30483e98ac823e5cc8d64
|
[
"MIT"
] | null | null | null |
src/models/adjudicator/__init__.py
|
TDSVirtru/parkinglot
|
3895b4019ad70a1613e30483e98ac823e5cc8d64
|
[
"MIT"
] | null | null | null |
src/models/adjudicator/__init__.py
|
TDSVirtru/parkinglot
|
3895b4019ad70a1613e30483e98ac823e5cc8d64
|
[
"MIT"
] | null | null | null |
"""The adjudicator model."""
from .adjudicator import Adjudicator # noqa: F401
from .adjudicator import PREFERRED # noqa: F401
from .adjudicator import ALLOWED # noqa: F401
| 25.428571
| 50
| 0.747191
| 21
| 178
| 6.333333
| 0.428571
| 0.338346
| 0.473684
| 0.345865
| 0.43609
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060403
| 0.162921
| 178
| 6
| 51
| 29.666667
| 0.832215
| 0.314607
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
f63d8eac0015fe3c80a39888cd020006f4a2e226
| 88
|
py
|
Python
|
dotdotdot/__init__.py
|
mark-loeser/dotdotdot
|
c29adbe1a81ca54899f7fd54eca86d60fb6be90f
|
[
"BSD-2-Clause"
] | null | null | null |
dotdotdot/__init__.py
|
mark-loeser/dotdotdot
|
c29adbe1a81ca54899f7fd54eca86d60fb6be90f
|
[
"BSD-2-Clause"
] | 4
|
2019-02-14T18:30:03.000Z
|
2019-02-22T16:48:59.000Z
|
dotdotdot/__init__.py
|
mark-loeser/dotdotdot
|
c29adbe1a81ca54899f7fd54eca86d60fb6be90f
|
[
"BSD-2-Clause"
] | 2
|
2019-09-03T15:52:54.000Z
|
2019-09-10T17:42:10.000Z
|
from .config import load
from .config import ConfigException
from .config import Config
| 22
| 35
| 0.829545
| 12
| 88
| 6.083333
| 0.416667
| 0.410959
| 0.657534
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 88
| 3
| 36
| 29.333333
| 0.960526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
9cbc2a864c6368091ad6b8872ee200c4ed82cacd
| 1,802
|
py
|
Python
|
refreshbooks/test/test_transports.py
|
fgregg/refreshbooks
|
cfd65ecd38cb6be3b61dbf6a01f93800603f34b1
|
[
"MIT"
] | null | null | null |
refreshbooks/test/test_transports.py
|
fgregg/refreshbooks
|
cfd65ecd38cb6be3b61dbf6a01f93800603f34b1
|
[
"MIT"
] | null | null | null |
refreshbooks/test/test_transports.py
|
fgregg/refreshbooks
|
cfd65ecd38cb6be3b61dbf6a01f93800603f34b1
|
[
"MIT"
] | null | null | null |
from mock import patch, Mock, sentinel
from nose.tools import raises
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
from refreshbooks.exceptions import TransportException
@attr('integration')
@raises(TransportException)
def test_urllib2_transport_exception():
    """A 4xx response through the urllib2 transport raises TransportException."""
    from refreshbooks.transports.use_urllib2 import Transport
    transport = Transport('http://httpstat.us/400', dict)
    transport("foo")
@attr('integration')
def test_urllib2():
    """A 200 response through the urllib2 transport yields a non-empty body."""
    from refreshbooks.transports.use_urllib2 import Transport
    response = Transport('http://httpstat.us/200', dict)("foo")
    assert len(response) > 0
@attr('integration')
@raises(TransportException)
def test_httplib2_transport_exception():
    """A 4xx response through the httplib2 transport raises TransportException."""
    # Import only to detect whether the optional dependency is installed.
    try:
        import httplib2
    except ImportError:
        raise SkipTest("module 'httplib2' not installed")
    from refreshbooks.transports.use_httplib2 import Transport
    transport = Transport('http://httpstat.us/400', dict)
    transport("foo")
@attr('integration')
def test_httplib2():
    """A 200 response through the httplib2 transport yields a non-empty body."""
    # Import only to detect whether the optional dependency is installed.
    try:
        import httplib2
    except ImportError:
        raise SkipTest("module 'httplib2' not installed")
    from refreshbooks.transports.use_httplib2 import Transport
    response = Transport('http://httpstat.us/200', dict)("foo")
    assert len(response) > 0
@attr('integration')
@raises(TransportException)
def test_requests_transport_exception():
    """A 4xx response through the requests transport raises TransportException."""
    # Import only to detect whether the optional dependency is installed.
    try:
        import requests
    except ImportError:
        raise SkipTest("module 'requests' not installed")
    from refreshbooks.transports.use_requests import Transport
    transport = Transport('http://httpstat.us/400', dict)
    transport("foo")
@attr('integration')
def test_requests():
    """A 200 response through the requests transport yields a non-empty body."""
    # Import only to detect whether the optional dependency is installed.
    try:
        import requests
    except ImportError:
        raise SkipTest("module 'requests' not installed")
    from refreshbooks.transports.use_requests import Transport
    response = Transport('http://httpstat.us/200', dict)("foo")
    assert len(response) > 0
| 32.763636
| 68
| 0.733629
| 207
| 1,802
| 6.299517
| 0.207729
| 0.08589
| 0.119632
| 0.133436
| 0.792945
| 0.792945
| 0.757669
| 0.702454
| 0.702454
| 0.702454
| 0
| 0.021654
| 0.154273
| 1,802
| 54
| 69
| 33.37037
| 0.83399
| 0
| 0
| 0.770833
| 0
| 0
| 0.188679
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 1
| 0.125
| true
| 0
| 0.395833
| 0
| 0.520833
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
9cc19395ff4f26044fee32e30b4daf881cd453a3
| 2,581
|
py
|
Python
|
tests/test_transformer.py
|
metataro/sc2_imitation_learning
|
8dca03e9be92e2d8297a4bc34248939af5c7ec3b
|
[
"MIT"
] | 15
|
2021-06-04T09:38:36.000Z
|
2021-12-02T14:01:14.000Z
|
tests/test_transformer.py
|
metataro/sc2_imitation_learning
|
8dca03e9be92e2d8297a4bc34248939af5c7ec3b
|
[
"MIT"
] | 3
|
2021-08-20T13:39:13.000Z
|
2022-03-26T03:25:35.000Z
|
tests/test_transformer.py
|
metataro/sc2_imitation_learning
|
8dca03e9be92e2d8297a4bc34248939af5c7ec3b
|
[
"MIT"
] | 2
|
2021-06-16T08:50:30.000Z
|
2021-07-24T16:38:16.000Z
|
import numpy as np
import tensorflow as tf
from sc2_imitation_learning.common.transformer import SC2EntityTransformerEncoder
class TestSC2EntityTransformerEncoder(tf.test.TestCase):
    """Forward-pass and mask-handling tests for SC2EntityTransformerEncoder.

    Improvement over the original: the identical dtype/shape/inf/nan
    assertion block was copy-pasted four times; it now lives in one private
    helper. NOTE(review): inputs use unseeded np.random.randn — presumably
    fine because only finiteness (not values) is asserted, but consider
    seeding for reproducibility.
    """

    def _assert_valid_embedding(self, embedded):
        """Shared checks: float32 dtype, (2, 3, 2) shape, no inf/nan values."""
        self.assertEqual(embedded.dtype, tf.float32)
        self.assertEqual(embedded.shape.as_list(), [2, 3, 2])
        self.assertFalse(tf.reduce_any(tf.math.is_inf(embedded)))
        self.assertFalse(tf.reduce_any(tf.math.is_nan(embedded)))

    def test_forward(self):
        """A plain forward pass produces a valid, non-trivial embedding."""
        transformer = SC2EntityTransformerEncoder(num_layers=2, model_dim=2, num_heads=2, dff=4)
        entities = tf.constant(np.random.randn(2, 3, 4), dtype=tf.float32)
        embedded_entities = transformer(entities)
        self._assert_valid_embedding(embedded_entities)
        # The encoder must not collapse to an all-zero output.
        self.assertNotAllClose(embedded_entities, tf.zeros_like(embedded_entities))

    def test_mask(self):
        """Entries equal to mask_value are zeroed out; others are not."""
        transformer = SC2EntityTransformerEncoder(num_layers=2, model_dim=2, num_heads=2, dff=4, mask_value=0)
        # no entity masked
        entities = tf.constant(np.random.randn(2, 3, 4), dtype=tf.float32)
        embedded_entities = transformer(entities)
        self._assert_valid_embedding(embedded_entities)
        self.assertFalse(tf.reduce_any(embedded_entities == 0.))
        # some entities masked: second half of the feature axis is all zeros
        entities = tf.constant(np.concatenate([np.random.randn(2, 3, 2), np.zeros((2, 3, 2))], axis=-1), dtype=tf.float32)
        embedded_entities = transformer(entities)
        self._assert_valid_embedding(embedded_entities)
        self.assertFalse(tf.reduce_any(embedded_entities[:, :, :2] == 0.))
        self.assertTrue(tf.reduce_all(embedded_entities[:, :, 2:] == 0.))
        # all entities masked: output must be entirely zero
        entities = tf.constant(np.zeros((2, 3, 4)), dtype=tf.float32)
        embedded_entities = transformer(entities)
        self._assert_valid_embedding(embedded_entities)
        self.assertTrue(tf.reduce_all(embedded_entities == 0.))
| 47.796296
| 122
| 0.707865
| 338
| 2,581
| 5.221893
| 0.180473
| 0.235694
| 0.096317
| 0.130312
| 0.826062
| 0.814164
| 0.775637
| 0.731445
| 0.731445
| 0.731445
| 0
| 0.029698
| 0.165052
| 2,581
| 53
| 123
| 48.698113
| 0.789327
| 0.022084
| 0
| 0.594595
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.567568
| 1
| 0.054054
| false
| 0
| 0.081081
| 0
| 0.162162
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
9cc5e2fad22672cf3f29ce685470867d484c1ff6
| 5,801
|
py
|
Python
|
new/railbase.py
|
Deepakbaskar94/Autonomous_car_base_program
|
9ab79aacccabb4720f9eb419838497c565d01665
|
[
"Apache-2.0"
] | null | null | null |
new/railbase.py
|
Deepakbaskar94/Autonomous_car_base_program
|
9ab79aacccabb4720f9eb419838497c565d01665
|
[
"Apache-2.0"
] | null | null | null |
new/railbase.py
|
Deepakbaskar94/Autonomous_car_base_program
|
9ab79aacccabb4720f9eb419838497c565d01665
|
[
"Apache-2.0"
] | null | null | null |
import RPi.GPIO as GPIO
import time
# Pin assignments (BOARD numbering) for the four stepper-motor coils.
out1 = 13
out2 = 11
out3 = 15
out4 = 12
OUT_PINS = (out1, out2, out3, out4)

# Half-step excitation sequence for the coils, index 0..7. This is the
# exact pattern the original elif ladder wrote, expressed as a table so it
# is defined once instead of twice (forward and reverse branches).
HALF_STEP_SEQ = (
    (GPIO.HIGH, GPIO.LOW,  GPIO.LOW,  GPIO.LOW),
    (GPIO.HIGH, GPIO.HIGH, GPIO.LOW,  GPIO.LOW),
    (GPIO.LOW,  GPIO.HIGH, GPIO.LOW,  GPIO.LOW),
    (GPIO.LOW,  GPIO.HIGH, GPIO.HIGH, GPIO.LOW),
    (GPIO.LOW,  GPIO.LOW,  GPIO.HIGH, GPIO.LOW),
    (GPIO.LOW,  GPIO.LOW,  GPIO.HIGH, GPIO.HIGH),
    (GPIO.LOW,  GPIO.LOW,  GPIO.LOW,  GPIO.HIGH),
    (GPIO.HIGH, GPIO.LOW,  GPIO.LOW,  GPIO.HIGH),
)

STEP_DELAY = 0.03  # seconds between half-steps (same as the original sleeps)
MAX_STEPS = 4000   # commands outside +/-MAX_STEPS are silently ignored

i = 0          # current index into HALF_STEP_SEQ
positive = 0   # 1 when the previous move was in the + direction
negative = 0   # 1 when the previous move was in the - direction

GPIO.setmode(GPIO.BOARD)
for _pin in OUT_PINS:
    GPIO.setup(_pin, GPIO.OUT)


def _write_coils(pattern):
    """Drive all four coil pins to the given (HIGH/LOW) pattern."""
    for pin, level in zip(OUT_PINS, pattern):
        GPIO.output(pin, level)


print ("First calibrate by giving some +ve and -ve values.....")
try:
    while True:
        # De-energise all coils while waiting for the next command.
        _write_coils((GPIO.LOW, GPIO.LOW, GPIO.LOW, GPIO.LOW))
        x = int(input())
        if 0 < x <= MAX_STEPS:
            for _ in range(x):
                if negative == 1:
                    # Direction changed from - to +: advance the phase once
                    # extra so the motor does not lose a step.
                    i = (i + 1) % 8
                    negative = 0
                positive = 1
                _write_coils(HALF_STEP_SEQ[i])
                time.sleep(STEP_DELAY)
                i = (i + 1) % 8
                # NOTE(review): the original also rebound the loop variable
                # (y = y + 2) on a direction change; rebinding a for-loop
                # variable has no effect in Python, so that no-op is dropped.
        elif -MAX_STEPS <= x < 0:
            for _ in range(-x):
                if positive == 1:
                    # Direction changed from + to -: back the phase up once.
                    i = (i - 1) % 8
                    positive = 0
                negative = 1
                _write_coils(HALF_STEP_SEQ[i])
                time.sleep(STEP_DELAY)
                i = (i - 1) % 8
except KeyboardInterrupt:
    # Leave the GPIO pins in a safe state on Ctrl-C.
    GPIO.cleanup()
| 31.873626
| 64
| 0.407861
| 662
| 5,801
| 3.574018
| 0.090634
| 0.287405
| 0.153424
| 0.237109
| 0.849958
| 0.841082
| 0.825021
| 0.825021
| 0.811496
| 0.811496
| 0
| 0.068257
| 0.479745
| 5,801
| 181
| 65
| 32.049724
| 0.715706
| 0.040855
| 0
| 0.81457
| 0
| 0
| 0.009737
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.013245
| 0
| 0.013245
| 0.006623
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
9ce7fa1fbf189fb53ed71ef820066b66d00ef80c
| 2,643
|
py
|
Python
|
tests/unit/math/distance/test_torch.py
|
startakovsky/docarray
|
78dd3199d25b3e533cd09643b97359783c193397
|
[
"Apache-2.0"
] | 591
|
2022-01-09T14:39:59.000Z
|
2022-03-31T13:19:39.000Z
|
tests/unit/math/distance/test_torch.py
|
startakovsky/docarray
|
78dd3199d25b3e533cd09643b97359783c193397
|
[
"Apache-2.0"
] | 210
|
2022-01-10T07:59:29.000Z
|
2022-03-31T14:49:18.000Z
|
tests/unit/math/distance/test_torch.py
|
startakovsky/docarray
|
78dd3199d25b3e533cd09643b97359783c193397
|
[
"Apache-2.0"
] | 40
|
2022-01-09T14:52:20.000Z
|
2022-03-31T07:59:45.000Z
|
import numpy as np
import pytest
import torch
from docarray.math.distance.torch import cosine, euclidean, sqeuclidean
# (x_mat, y_mat, expected distance matrix) cases for the cosine metric.
_COSINE_CASES = (
    (
        torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]),
        torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]),
        np.array([[1.192093e-07, 2.53681537e-02], [2.53681537e-02, 0.000000e00]]),
    ),
    (
        torch.tensor([[1.0, 2.0, 3.0]]),
        torch.tensor([[1.0, 2.0, 3.0]]),
        np.array([[1.192093e-07]], dtype=np.float32),
    ),
    (
        torch.tensor([[0.0, 0.0, 0.0]]),
        torch.tensor([[0.0, 0.0, 0.0]]),
        np.array([[1]]),
    ),
    (
        torch.tensor([[1.0, 2.0, 3.0]]),
        torch.tensor([[19.0, 53.0, 201.0]]),
        np.array([[0.06788693]]),
    ),
)


@pytest.mark.parametrize('x_mat, y_mat, result', _COSINE_CASES)
def test_cosine(x_mat, y_mat, result):
    """Torch cosine distance matches the precomputed reference matrices."""
    distances = cosine(x_mat, y_mat)
    np.testing.assert_almost_equal(distances, result, decimal=3)
# (x_mat, y_mat, expected distance matrix) cases for squared euclidean.
_SQEUCLIDEAN_CASES = (
    (
        torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]),
        torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]),
        np.array([[0, 27], [27, 0]]),
    ),
    (
        torch.tensor([[1.0, 2.0, 3.0]]),
        torch.tensor([[1.0, 2.0, 3.0]]),
        np.array([[0]]),
    ),
    (
        torch.tensor([[0.0, 0.0, 0.0]]),
        torch.tensor([[0.0, 0.0, 0.0]]),
        np.array([[0]]),
    ),
    (
        torch.tensor([[1.0, 2.0, 3.0]]),
        torch.tensor([[19.0, 53.0, 201.0]]),
        np.array([[42128.996]]),
    ),
)


@pytest.mark.parametrize('x_mat, y_mat, result', _SQEUCLIDEAN_CASES)
def test_sqeuclidean(x_mat, y_mat, result):
    """Torch squared-euclidean distance matches the reference matrices."""
    distances = sqeuclidean(x_mat, y_mat)
    np.testing.assert_almost_equal(distances, result, decimal=3)
# Each case: (x_mat, y_mat, expected pairwise euclidean-distance matrix).
_EUCLIDEAN_CASES = [
    (
        torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]),
        torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]),
        np.array([[0, 5.19615242], [5.19615242, 0]]),
    ),
    (
        torch.tensor([[1.0, 2.0, 3.0]]),
        torch.tensor([[1.0, 2.0, 3.0]]),
        np.array([[0]]),
    ),
    (
        torch.tensor([[0.0, 0.0, 0.0]]),
        torch.tensor([[0.0, 0.0, 0.0]]),
        np.array([[0]]),
    ),
    (
        torch.tensor([[1.0, 2.0, 3.0]]),
        torch.tensor([[19.0, 53.0, 201.0]]),
        np.array([[205.2535018]]),
    ),
]


@pytest.mark.parametrize('x_mat, y_mat, result', _EUCLIDEAN_CASES)
def test_euclidean(x_mat, y_mat, result):
    """Euclidean distance of torch row batches matches the expected matrix."""
    actual = euclidean(x_mat, y_mat)
    np.testing.assert_almost_equal(actual, result, decimal=3)
| 28.419355
| 86
| 0.437382
| 394
| 2,643
| 2.865482
| 0.13198
| 0.053144
| 0.063773
| 0.063773
| 0.806909
| 0.806909
| 0.743136
| 0.723649
| 0.723649
| 0.723649
| 0
| 0.171849
| 0.330685
| 2,643
| 92
| 87
| 28.728261
| 0.466365
| 0
| 0
| 0.576471
| 0
| 0
| 0.022701
| 0
| 0
| 0
| 0
| 0
| 0.035294
| 1
| 0.035294
| false
| 0
| 0.047059
| 0
| 0.082353
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
142607bc7a113a738201baa5f02d268045831e79
| 76
|
py
|
Python
|
src/regiosqm_api/__init__.py
|
charnley/RegioSQM
|
4565a666526619d9a1eebb535e11a851ac6f1079
|
[
"MIT"
] | null | null | null |
src/regiosqm_api/__init__.py
|
charnley/RegioSQM
|
4565a666526619d9a1eebb535e11a851ac6f1079
|
[
"MIT"
] | null | null | null |
src/regiosqm_api/__init__.py
|
charnley/RegioSQM
|
4565a666526619d9a1eebb535e11a851ac6f1079
|
[
"MIT"
] | null | null | null |
from regiosqm_api import database, models
from regiosqm_api.app import main
| 25.333333
| 41
| 0.855263
| 12
| 76
| 5.25
| 0.666667
| 0.380952
| 0.47619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118421
| 76
| 2
| 42
| 38
| 0.940299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
147077065fe35fb9cd778465071c8477db522c9f
| 15
|
py
|
Python
|
gdxcompare/profiles/vars.py
|
jackjackk/gdxcompare
|
53ac2b5e0f20e9b466384b16253ef93f1669f65a
|
[
"MIT"
] | 2
|
2017-04-27T08:42:49.000Z
|
2021-05-27T19:58:11.000Z
|
gdxcompare/profiles/vars.py
|
jackjackk/gdxcompare
|
53ac2b5e0f20e9b466384b16253ef93f1669f65a
|
[
"MIT"
] | 4
|
2016-12-14T08:58:08.000Z
|
2017-07-07T15:26:27.000Z
|
gdxcompare/profiles/vars.py
|
jackjackk/gdxcompare
|
53ac2b5e0f20e9b466384b16253ef93f1669f65a
|
[
"MIT"
] | 1
|
2017-07-07T12:37:28.000Z
|
2017-07-07T12:37:28.000Z
|
# Bare regex profile expression evaluated by the gdxcompare CLI.
# Matches names of uppercase letters/underscores, deliberately skipping 'F'
# (range is A-E then G-Z) -- presumably to exclude some 'F*' symbols; TODO confirm.
'^[A-EG-Z_]+$'
| 7.5
| 14
| 0.333333
| 3
| 15
| 1.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 15
| 1
| 15
| 15
| 0.285714
| 0.8
| 0
| 0
| 0
| 0
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
14740d314cbb799ac341bb78b1c4277b3f875ad2
| 3,153
|
py
|
Python
|
allure-pytest/test/status/base_teardown_status_test.py
|
vdsbenoit/allure-python
|
7b56b031c42369dd73844105382e9ceb9a88d6cd
|
[
"Apache-2.0"
] | 1
|
2021-02-19T21:00:11.000Z
|
2021-02-19T21:00:11.000Z
|
allure-pytest/test/status/base_teardown_status_test.py
|
vdsbenoit/allure-python
|
7b56b031c42369dd73844105382e9ceb9a88d6cd
|
[
"Apache-2.0"
] | null | null | null |
allure-pytest/test/status/base_teardown_status_test.py
|
vdsbenoit/allure-python
|
7b56b031c42369dd73844105382e9ceb9a88d6cd
|
[
"Apache-2.0"
] | 1
|
2020-08-05T05:40:44.000Z
|
2020-08-05T05:40:44.000Z
|
import pytest
@pytest.fixture
def failed_finalizer_fixture(request):
    # Registers a finalizer whose bare `assert False` makes fixture
    # TEARDOWN fail after the test body itself has passed.
    def fixture_finalizer():
        assert False
    request.addfinalizer(fixture_finalizer)
# The docstring below is an executable doctest consumed by allure-python's
# own harness: it asserts on the allure report generated for this run,
# so its text must not be edited casually.
def test_failed_finalizer_fixture(failed_finalizer_fixture):
    """
    >>> allure_report = getfixture('allure_report')
    >>> assert_that(allure_report,
    ...             has_test_case('test_failed_finalizer_fixture',
    ...                           with_status('failed'),
    ...                           has_status_details(with_message_contains("AssertionError"),
    ...                                              with_trace_contains("def fixture_finalizer():")
    ...                                              ),
    ...                           has_container(allure_report,
    ...                                         has_after('{fixture}::{finalizer}'.format(
    ...                                             fixture='failed_finalizer_fixture',
    ...                                             finalizer='fixture_finalizer'),
    ...                                                   with_status('failed'),
    ...                                                   has_status_details(with_message_contains("AssertionError"),
    ...                                                                      with_trace_contains("fixture_finalizer")
    ...                                                                      ),
    ...                                         ),
    ...                           )
    ...             )
    ... )
    """
    # Intentionally empty: the interesting behavior is the fixture teardown.
    pass
@pytest.fixture
def pytest_failed_finalizer_fixture(request):
    # Same idea as failed_finalizer_fixture, but the teardown fails via
    # pytest.fail() instead of a bare assert.
    def fixture_finalizer():
        pytest.fail()
    request.addfinalizer(fixture_finalizer)
# The docstring below is an executable doctest consumed by allure-python's
# own harness: it asserts on the allure report generated for this run,
# so its text must not be edited casually.
def test_pytest_failed_finalizer_fixture(pytest_failed_finalizer_fixture):
    """
    >>> allure_report = getfixture('allure_report')
    >>> assert_that(allure_report,
    ...             has_test_case('test_pytest_failed_finalizer_fixture',
    ...                           with_status('failed'),
    ...                           has_status_details(with_message_contains("Failed: <Failed instance>"),
    ...                                              with_trace_contains("def fixture_finalizer():")
    ...                                              ),
    ...                           has_container(allure_report,
    ...                                         has_after('{fixture}::{finalizer}'.format(
    ...                                             fixture='pytest_failed_finalizer_fixture',
    ...                                             finalizer='fixture_finalizer'),
    ...                                                   with_status('failed'),
    ...                                                   has_status_details(with_message_contains("Failed: <Failed instance>"),
    ...                                                                      with_trace_contains("fixture_finalizer")
    ...                                                                      ),
    ...                                         ),
    ...                           )
    ...             )
    ... )
    """
    # Intentionally empty: the interesting behavior is the fixture teardown.
    pass
| 48.507692
| 128
| 0.402157
| 187
| 3,153
| 6.326203
| 0.160428
| 0.189349
| 0.185968
| 0.118343
| 0.921386
| 0.882502
| 0.777684
| 0.696534
| 0.696534
| 0.696534
| 0
| 0
| 0.498573
| 3,153
| 65
| 129
| 48.507692
| 0.748261
| 0.788773
| 0
| 0.533333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 1
| 0.4
| false
| 0.133333
| 0.066667
| 0
| 0.466667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
1491c215b55da8db71809af19ac82b90e4e4e389
| 1,968
|
py
|
Python
|
ignition/script-python/v1/tests/auth-alwaysfail/__logic__/code.py
|
pwalker91/IgnitionSwagger
|
7bbc0a1a692a57a483c82d94570ad10f365d6f4e
|
[
"MIT"
] | null | null | null |
ignition/script-python/v1/tests/auth-alwaysfail/__logic__/code.py
|
pwalker91/IgnitionSwagger
|
7bbc0a1a692a57a483c82d94570ad10f365d6f4e
|
[
"MIT"
] | null | null | null |
ignition/script-python/v1/tests/auth-alwaysfail/__logic__/code.py
|
pwalker91/IgnitionSwagger
|
7bbc0a1a692a57a483c82d94570ad10f365d6f4e
|
[
"MIT"
] | null | null | null |
import apiAuth
from __swagger2__ import requests as swagRq
from __swagger2__ import responses as swagRsp
from v1 import statics as swagStc
PREFIX = swagStc.IGNITION_SWAGGER_CUSTOM_PREFIX
class GET(swagRq.HttpMethod):
    """GET handler for the auth-alwaysfail test endpoint.

    SWAGGER is read by the surrounding Ignition/Swagger framework; the
    PREFIX-ed keys are framework-private configuration, the rest is the
    Swagger operation definition. Because the endpoint is designed to fail
    authentication, ``__do__`` should never actually run.
    """
    SWAGGER = {
        # CUSTOM KEYS FOR IA PURPOSES
        # NOTE(review): apiAuth.simple.allowNone appears to reject every
        # caller ("allow no one") given the endpoint name -- confirm in apiAuth.
        PREFIX+'auth': [
            {'method': apiAuth.simple.allowNone,},
        ],
        PREFIX+'hide': False,
        PREFIX+'validateRequest': False,
        PREFIX+'validateResponse': False,
        PREFIX+'tagGroup': 'Tests',
        # ACTUAL SWAGGER DEFINITION
        'operationId': 'tests_validation_auth-alwaysfail_get',
        'summary': 'GET Test Always Fail',
        'description': '''This endpoint will always fail the incoming request''',
        'tags': [
            'Testing'
        ],
        'consumes': [
            'application/x-www-form-urlencoded',
        ],
        'produces': [
            'application/json',
        ],
        'parameters': [],
        'responses': {
            '200': swagStc.GENERIC_FAILURE_RESPONSE,
            'default': swagStc.GENERIC_FAILURE_RESPONSE,
        }
    }
    @staticmethod
    def __do__(wdr, LOGGER):
        # Unreachable in practice: auth is expected to fail before dispatch.
        return swagRsp.json(success=True, status='SUCCESS', message="I should never get here!")
    #END DEF
#END CLASS
class POST(swagRq.HttpMethod):
    """POST handler for the auth-alwaysfail test endpoint.

    Mirrors GET above: identical framework configuration except for the
    operationId/summary and the JSON request body ('consumes'). Because the
    endpoint is designed to fail authentication, ``__do__`` should never run.
    """
    SWAGGER = {
        # CUSTOM KEYS FOR IA PURPOSES
        # NOTE(review): apiAuth.simple.allowNone appears to reject every
        # caller ("allow no one") given the endpoint name -- confirm in apiAuth.
        PREFIX+'auth': [
            {'method': apiAuth.simple.allowNone,},
        ],
        PREFIX+'hide': False,
        PREFIX+'validateRequest': False,
        PREFIX+'validateResponse': False,
        PREFIX+'tagGroup': 'Tests',
        # ACTUAL SWAGGER DEFINITION
        'operationId': 'tests_validation_auth-alwaysfail_post',
        'summary': 'POST Test Always Fail',
        'description': '''This endpoint will always fail the incoming request''',
        'tags': [
            'Testing'
        ],
        'consumes': [
            'application/json',
        ],
        'produces': [
            'application/json',
        ],
        'parameters': [],
        'responses': {
            '200': swagStc.GENERIC_FAILURE_RESPONSE,
            'default': swagStc.GENERIC_FAILURE_RESPONSE,
        }
    }
    @staticmethod
    def __do__(wdr, LOGGER):
        # Unreachable in practice: auth is expected to fail before dispatch.
        return swagRsp.json(success=True, status='SUCCESS', message="I should never get here!")
    #END DEF
#END CLASS
| 24.296296
| 89
| 0.686484
| 215
| 1,968
| 6.130233
| 0.376744
| 0.050076
| 0.063733
| 0.088012
| 0.834598
| 0.834598
| 0.834598
| 0.834598
| 0.834598
| 0.834598
| 0
| 0.005508
| 0.169715
| 1,968
| 81
| 90
| 24.296296
| 0.801102
| 0.07063
| 0
| 0.753846
| 0
| 0
| 0.354201
| 0.05821
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030769
| false
| 0
| 0.061538
| 0.030769
| 0.184615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
149bc238ac3857e4b510bfe1421d72c72cbcfc89
| 11,493
|
py
|
Python
|
victor_hardware_interface/scripts/arm_control_modes_test.py
|
MMintLab/kuka_iiwa_interface
|
0dd258641377263e7275bc63f37cf32eb12f3e56
|
[
"BSD-2-Clause"
] | 5
|
2021-01-11T09:00:26.000Z
|
2021-12-13T15:59:01.000Z
|
victor_hardware_interface/scripts/arm_control_modes_test.py
|
MMintLab/kuka_iiwa_interface
|
0dd258641377263e7275bc63f37cf32eb12f3e56
|
[
"BSD-2-Clause"
] | 35
|
2020-07-01T14:48:40.000Z
|
2021-07-13T18:38:53.000Z
|
victor_hardware_interface/scripts/arm_control_modes_test.py
|
MMintLab/kuka_iiwa_interface
|
0dd258641377263e7275bc63f37cf32eb12f3e56
|
[
"BSD-2-Clause"
] | 1
|
2021-01-08T23:39:17.000Z
|
2021-01-08T23:39:17.000Z
|
#!/usr/bin/env python
#####################################################
# #
# Copyright (c) 2017, UM-ARM-LAB #
# #
# Regression test for arm control modes #
# #
#####################################################
import rospy
import victor_hardware_interface_msgs.msg
import victor_hardware_interface_msgs.srv
class ControlModeTester(object):
    """Interactive regression tester for the right arm's control modes.

    Switches the arm through JOINT_POSITION, JOINT_IMPEDANCE,
    CARTESIAN_POSE and CARTESIAN_IMPEDANCE via the set_control_mode
    service and asserts each switch reports success.

    NOTE(review): uses ``raw_input``, so this script targets Python 2
    (typical for ROS 1 nodes); kept as-is to preserve behavior.
    """

    def __init__(self):
        print("Setting up control mode & motion command...")
        rospy.init_node("control_mode_tester")
        # BUG FIX: the modules are imported as `victor_hardware_interface_msgs`
        # at the top of this file; the original body referenced the
        # non-existent name `victor_hardware_interface`, which raised
        # NameError on first use.
        self.set_control_mode_server = rospy.ServiceProxy(
            "right_arm/set_control_mode_service",
            victor_hardware_interface_msgs.srv.SetControlMode)
        self.set_control_mode_server.wait_for_service()
        self.get_control_mode_server = rospy.ServiceProxy(
            "right_arm/get_control_mode_service",
            victor_hardware_interface_msgs.srv.GetControlMode)
        self.get_control_mode_server.wait_for_service()
        self.motion_command_pub = rospy.Publisher(
            "right_arm/motion_command",
            victor_hardware_interface_msgs.msg.MotionCommand,
            queue_size=1)
        print("...Finished setting up services & publishers")

    def _apply_control_mode(self, control_mode_command):
        # Wrap the parameters in a service request, send it, and assert success.
        request = victor_hardware_interface_msgs.srv.SetControlModeRequest()
        request.new_control_mode = control_mode_command
        response = self.set_control_mode_server.call(request)
        print("SetControlMode response: " + str(response))
        assert response.success is True

    @staticmethod
    def _set_axes(params_msg, value):
        # Assign `value` to each of the six cartesian axes (x y z a b c).
        for axis in ('x', 'y', 'z', 'a', 'b', 'c'):
            setattr(params_msg, axis, value)

    def joint_position_test(self):
        """Switch to JOINT_POSITION mode with moderate joint limits."""
        raw_input("Run joint position test - Press ENTER to continue...")
        command = victor_hardware_interface_msgs.msg.ControlModeParameters()
        command.control_mode.mode = victor_hardware_interface_msgs.msg.ControlMode.JOINT_POSITION
        command.joint_path_execution_params.joint_relative_velocity = 0.5
        command.joint_path_execution_params.joint_relative_acceleration = 0.5
        command.joint_path_execution_params.override_joint_acceleration = 0.0
        self._apply_control_mode(command)
        print("...Joint position test complete")

    def joint_impedance_test(self):
        """Switch to JOINT_IMPEDANCE mode with uniform damping/stiffness."""
        raw_input("Run joint impedance test - Press ENTER to continue...")
        command = victor_hardware_interface_msgs.msg.ControlModeParameters()
        command.control_mode.mode = victor_hardware_interface_msgs.msg.ControlMode.JOINT_IMPEDANCE
        command.joint_path_execution_params.joint_relative_velocity = 0.5
        command.joint_path_execution_params.joint_relative_acceleration = 0.5
        command.joint_path_execution_params.override_joint_acceleration = 0.0
        # Same damping/stiffness on all seven joints.
        for joint in range(1, 8):
            setattr(command.joint_impedance_params.joint_damping, 'joint_%d' % joint, 0.7)
            setattr(command.joint_impedance_params.joint_stiffness, 'joint_%d' % joint, 1.0)
        self._apply_control_mode(command)
        print("...Joint impedance test complete")

    def cartesian_pose_test(self):
        """Switch to CARTESIAN_POSE mode with uniform velocity/acceleration limits."""
        raw_input("Run cartesian pose test - Press ENTER to continue...")
        command = victor_hardware_interface_msgs.msg.ControlModeParameters()
        command.control_mode.mode = victor_hardware_interface_msgs.msg.ControlMode.CARTESIAN_POSE
        path_params = command.cartesian_path_execution_params
        self._set_axes(path_params.max_velocity, 0.5)
        path_params.max_nullspace_velocity = 0.5
        self._set_axes(path_params.max_acceleration, 0.5)
        path_params.max_nullspace_acceleration = 0.5
        self._apply_control_mode(command)
        print("...Cartesian pose test complete")

    def cartesian_impedance_test(self):
        """Switch to CARTESIAN_IMPEDANCE mode with generous limits."""
        raw_input("Run cartesian impedance test - Press ENTER to continue...")
        command = victor_hardware_interface_msgs.msg.ControlModeParameters()
        command.control_mode.mode = victor_hardware_interface_msgs.msg.ControlMode.CARTESIAN_IMPEDANCE
        path_params = command.cartesian_path_execution_params
        self._set_axes(path_params.max_velocity, 10.0)
        path_params.max_nullspace_velocity = 25.0
        self._set_axes(path_params.max_acceleration, 10.0)
        path_params.max_nullspace_acceleration = 10.0
        impedance = command.cartesian_impedance_params
        self._set_axes(impedance.cartesian_damping, 0.7)
        impedance.nullspace_damping = 0.7
        self._set_axes(impedance.cartesian_stiffness, 10.0)
        impedance.nullspace_stiffness = 10.0
        limits = command.cartesian_control_mode_limits
        # Effectively-unbounded path deviation so the mode switch never trips it.
        self._set_axes(limits.max_path_deviation, 10000000.0)
        self._set_axes(limits.max_cartesian_velocity, 1000.0)
        self._set_axes(limits.max_control_force, 10.0)
        limits.stop_on_max_control_force = False
        self._apply_control_mode(command)
        print("...Cartesian impedance test complete")

    def run_all_tests(self):
        """Run all four mode-switch tests in sequence, with a safety prompt."""
        print("Starting arm control regression tests...")
        raw_input("Is the arm safely away from obstacles? Press ENTER to continue...")
        self.joint_position_test()
        self.joint_impedance_test()
        self.cartesian_pose_test()
        self.cartesian_impedance_test()
        print("...Finished arm control regression tests")
# Script entry point: build the tester (blocks until the ROS services are
# available) and run the full control-mode regression sequence.
if __name__ == '__main__':
    tester = ControlModeTester()
    tester.run_all_tests()
| 67.605882
| 110
| 0.753067
| 1,443
| 11,493
| 5.540541
| 0.085932
| 0.181614
| 0.209381
| 0.206004
| 0.890181
| 0.874797
| 0.859537
| 0.825516
| 0.812383
| 0.778862
| 0
| 0.028
| 0.17341
| 11,493
| 169
| 111
| 68.005917
| 0.813579
| 0.018533
| 0
| 0.201342
| 0
| 0
| 0.071615
| 0.008288
| 0
| 0
| 0
| 0
| 0.026846
| 1
| 0.040268
| false
| 0
| 0.020134
| 0
| 0.067114
| 0.080537
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
14b04bb9e5a378171d04e511eca16c2490c3ea0d
| 16,145
|
py
|
Python
|
idomoo/api/library_api.py
|
Idomoo-RnD/idomoo-python-sdk
|
d5b7c6a55f75196145a7e6d8f53772a92e4ee2ac
|
[
"MIT"
] | 1
|
2018-05-01T10:47:47.000Z
|
2018-05-01T10:47:47.000Z
|
idomoo/api/library_api.py
|
Idomoo-RnD/idomoo-python-sdk
|
d5b7c6a55f75196145a7e6d8f53772a92e4ee2ac
|
[
"MIT"
] | 3
|
2018-06-06T08:14:43.000Z
|
2021-03-15T18:35:52.000Z
|
idomoo/api/library_api.py
|
Idomoo-RnD/idomoo-python-sdk
|
d5b7c6a55f75196145a7e6d8f53772a92e4ee2ac
|
[
"MIT"
] | 2
|
2018-06-26T09:34:20.000Z
|
2019-11-14T10:23:44.000Z
|
# coding: utf-8
"""
Idomoo API
OpenAPI spec version: 2.0
Contact: dev.support@idomoo.com
"""
from __future__ import absolute_import
# python 2 and python 3 compatibility library
import six
from idomoo.api_client import ApiClient
class LibraryApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_scene_library(self, body, **kwargs):
"""Create Scene Library
Use this function to create a new Scene Library.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = client.create_scene_library(body, async=True)
>>> result = thread.get()
:param async bool
:param Body body: (required)
:return: LibraryMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.create_scene_library_with_http_info(body, **kwargs)
else:
(data) = self.create_scene_library_with_http_info(body, **kwargs)
return data
def create_scene_library_with_http_info(self, body, **kwargs):
"""Create Scene Library
Use this function to create a new Scene Library.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = client.create_scene_library_with_http_info(body, async=True)
>>> result = thread.get()
:param async bool
:param Body body: (required)
:return: LibraryMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'async', '_return_http_data_only', '_preload_content', '_request_timeout']
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_scene_library" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_scene_library`")
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# Authentication setting
auth_settings = ['Basic authentication']
return self.api_client.call_api(
'/libraries/', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LibraryMetadata',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_scene_libraries(self, **kwargs):
"""List Scene Libraries
This function lists all scene libraries available to the authenticated user.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = client.get_scene_libraries(async=True)
>>> result = thread.get()
:param async bool
:param str fields: Choose which fields should return. `GET /libraries?fields=id,name,description`
:param bool desc: Allow ascending and descending sorting. `GET /libraries?desc=true`
:param int limit: Set limit of results `GET /libraries?limit=5`
:param int offset: To get a different set of items, you can use the offset and limit parameters in the GET request’s query string `GET /libraries?offset=5&limit=5` Returns scenes 6…10.
:return: LibrariesList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_scene_libraries_with_http_info(**kwargs)
else:
(data) = self.get_scene_libraries_with_http_info(**kwargs)
return data
def get_scene_libraries_with_http_info(self, **kwargs):
"""List Scene Libraries
This function lists all scene libraries available to the authenticated user.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = client.get_scene_libraries_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param str fields: Choose which fields should return. `GET /libraries?fields=id,name,description`
:param bool desc: Allow ascending and descending sorting. `GET /libraries?desc=true`
:param int limit: Set limit of results `GET /libraries?limit=5`
:param int offset: To get a different set of items, you can use the offset and limit parameters in the GET request’s query string `GET /libraries?offset=5&limit=5` Returns scenes 6…10.
:return: LibrariesList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['fields', 'desc', 'limit', 'offset', 'async', '_return_http_data_only', '_preload_content',
'_request_timeout']
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_scene_libraries" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields']))
if 'desc' in params:
query_params.append(('desc', params['desc']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'offset' in params:
query_params.append(('offset', params['offset']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['Basic authentication']
return self.api_client.call_api(
'/libraries/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LibrariesList',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_scene_library(self, lib_id, **kwargs):
"""Return Specific Scene Library
Return Specific Scene Library by specifying its library ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = client.get_scene_library(lib_id, async=True)
>>> result = thread.get()
:param async bool
:param str lib_id: (required)
:param str fields: Choose which fields should return. `GET /libraries/?fields=fps,scene_id,scene_width,scene_height`
:return: LibraryMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_scene_library_with_http_info(lib_id, **kwargs)
else:
(data) = self.get_scene_library_with_http_info(lib_id, **kwargs)
return data
def get_scene_library_with_http_info(self, lib_id, **kwargs):
"""Return Specific Scene Library
Return Specific Scene Library by specifying its library ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = client.get_scene_library_with_http_info(lib_id, async=True)
>>> result = thread.get()
:param async bool
:param str lib_id: (required)
:param str fields: Choose which fields should return. `GET /libraries/?fields=fps,scene_id,scene_width,scene_height`
:return: LibraryMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['lib_id', 'fields', 'async', '_return_http_data_only', '_preload_content', '_request_timeout']
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_scene_library" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'lib_id' is set
if ('lib_id' not in params or
params['lib_id'] is None):
raise ValueError("Missing the required parameter `lib_id` when calling `get_scene_library`")
collection_formats = {}
path_params = {}
if 'lib_id' in params:
path_params['libId'] = params['lib_id']
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['Basic authentication']
return self.api_client.call_api(
'/libraries/{libId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LibraryMetadata',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_scenes_from_library(self, lib_id, **kwargs):
"""Return Scenes from Library
Return an array of all the Scenes and their metadata held in a specific Scene Library.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = client.get_scenes_from_library(lib_id, async=True)
>>> result = thread.get()
:param async bool
:param str lib_id: (required)
:param str fields: Choose which fields should return. `GET /libraries/{libId}/scenes/?fields=id,name,description`
:return: ScenesList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_scenes_from_library_with_http_info(lib_id, **kwargs)
else:
(data) = self.get_scenes_from_library_with_http_info(lib_id, **kwargs)
return data
def get_scenes_from_library_with_http_info(self, lib_id, **kwargs):
    """Return Scenes from Library
    Return an array of all the Scenes and their metadata held in a specific Scene Library.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = client.get_scenes_from_library_with_http_info(lib_id, async=True)
    >>> result = thread.get()
    :param async bool
    :param str lib_id: (required)
    :param str fields: Choose which fields should return. `GET /libraries/{libId}/scenes/?fields=id,name,description`
    :return: ScenesList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments this endpoint accepts; anything else is rejected below.
    all_params = ['lib_id', 'fields', 'async', '_return_http_data_only', '_preload_content', '_request_timeout']
    # locals() here captures self, lib_id and kwargs; kwargs is then folded
    # into params key-by-key so the rest of the method can treat positional
    # and keyword arguments uniformly.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_scenes_from_library" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'lib_id' is set
    if ('lib_id' not in params or
            params['lib_id'] is None):
        raise ValueError("Missing the required parameter `lib_id` when calling `get_scenes_from_library`")
    collection_formats = {}
    # `libId` is substituted into the URL template below.
    path_params = {}
    if 'lib_id' in params:
        path_params['libId'] = params['lib_id']
    query_params = []
    if 'fields' in params:
        query_params.append(('fields', params['fields']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None  # GET request: no body is sent
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json'])
    # Authentication setting
    auth_settings = ['Basic authentication']
    # NOTE(review): `async` is a reserved word from Python 3.7 onward, so this
    # generated keyword argument only parses on Python 2 / <=3.6 — confirm the
    # supported interpreter range before touching it.
    return self.api_client.call_api(
        '/libraries/{libId}/scenes/', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ScenesList',
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
| 37.899061
| 194
| 0.614122
| 1,863
| 16,145
| 5.097155
| 0.101986
| 0.015796
| 0.023589
| 0.030329
| 0.927338
| 0.916386
| 0.896062
| 0.890586
| 0.87858
| 0.865733
| 0
| 0.001495
| 0.295881
| 16,145
| 425
| 195
| 37.988235
| 0.833304
| 0.027872
| 0
| 0.747664
| 0
| 0
| 0.180589
| 0.037449
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.014019
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1ae718831870c45ca0b1c7f9636c1b9bd226aece
| 180
|
py
|
Python
|
ppsystem/anmelden/forms.py
|
holytortoise/ppsystem
|
639efbc25f6d0c0c03be9c8688c551ba71cad560
|
[
"MIT"
] | null | null | null |
ppsystem/anmelden/forms.py
|
holytortoise/ppsystem
|
639efbc25f6d0c0c03be9c8688c551ba71cad560
|
[
"MIT"
] | null | null | null |
ppsystem/anmelden/forms.py
|
holytortoise/ppsystem
|
639efbc25f6d0c0c03be9c8688c551ba71cad560
|
[
"MIT"
] | null | null | null |
from django import forms
from . models import Schüler,Rfid
class SuchForm(forms.Form):
    """Search form taking a first and last name (vorname/nachname)."""

    # Field order defines the rendered order; both are required CharFields.
    vorname = forms.CharField(max_length=50)
    nachname = forms.CharField(max_length=50)
| 22.5
| 45
| 0.761111
| 25
| 180
| 5.4
| 0.64
| 0.207407
| 0.251852
| 0.340741
| 0.37037
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026144
| 0.15
| 180
| 7
| 46
| 25.714286
| 0.856209
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
1afdbe79bf7a97c64f82a0b7fe8b3f959b178837
| 82
|
py
|
Python
|
conftest.py
|
yougov/vr.common
|
8b0312481185d0c195e33665df1165a8de7ee3e8
|
[
"MIT"
] | null | null | null |
conftest.py
|
yougov/vr.common
|
8b0312481185d0c195e33665df1165a8de7ee3e8
|
[
"MIT"
] | 4
|
2017-04-02T13:28:36.000Z
|
2019-03-01T14:32:54.000Z
|
conftest.py
|
yougov/vr.common
|
8b0312481185d0c195e33665df1165a8de7ee3e8
|
[
"MIT"
] | 2
|
2018-05-08T16:14:21.000Z
|
2022-03-26T07:43:50.000Z
|
import django.conf
def pytest_configure():
    """Pytest hook: configure Django with empty default settings before tests run."""
    settings = django.conf.settings
    settings.configure()
| 13.666667
| 36
| 0.756098
| 10
| 82
| 6.1
| 0.7
| 0.327869
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134146
| 82
| 5
| 37
| 16.4
| 0.859155
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
2132d99e1176aa17164e4d5682b998f77d0729fa
| 8,562
|
py
|
Python
|
python/tests/instruction_test.py
|
Jokeren/hpctoolkit-cuda-memory-patch
|
ba78a7f4cfcf80d74447d5c82bad9fdcbfcde218
|
[
"BSD-3-Clause"
] | 17
|
2021-04-09T05:13:53.000Z
|
2022-03-26T02:58:47.000Z
|
python/tests/instruction_test.py
|
Jokeren/hpctoolkit-cuda-memory-patch
|
ba78a7f4cfcf80d74447d5c82bad9fdcbfcde218
|
[
"BSD-3-Clause"
] | 17
|
2020-08-30T19:10:57.000Z
|
2021-03-26T03:26:42.000Z
|
python/tests/instruction_test.py
|
Jokeren/hpctoolkit-cuda-memory-patch
|
ba78a7f4cfcf80d74447d5c82bad9fdcbfcde218
|
[
"BSD-3-Clause"
] | 4
|
2021-06-29T02:21:24.000Z
|
2021-12-19T19:55:57.000Z
|
from collections import namedtuple
import os
import sys
from test_cases import Test
from utils import pipe_read
class InstructionTest(Test):
    """Checks gvprof/redshow instruction parsing against per-architecture
    expected outputs.

    For each configured test case, runs gvprof on the case's binary, parses
    the produced ``.inst`` files with ``redshow_parser``, and compares every
    output line against a hard-coded expectation list for the current GPU
    architecture (sm_70 / sm_75 / sm_80).
    """

    # Per-case configuration: `insts` maps architecture name -> ordered list
    # of expected parser output lines.
    Config = namedtuple('Config', ['insts'])

    def __init__(self, arch):
        # `arch` selects which expectation list is compared in _run_impl.
        super().__init__('InstructionTest', arch)

    def setup(self, choices):
        """Register the expectation tables for each requested case name.

        Only 'op_pattern_simple' and 'bfs' are known; other choices are
        silently ignored (no config entry is created for them).
        """
        for choice in choices:
            if choice == 'op_pattern_simple':
                self._configs[choice] = InstructionTest.Config(insts={
                    'sm_70':
                    ['FUNC: 18, PC: 0xd0, ACCESS_KIND: INTEGER,v:32,u:32',
                     'FUNC: 19, PC: 0xc0, ACCESS_KIND: INTEGER,v:32,u:32',
                     'FUNC: 20, PC: 0xf0, ACCESS_KIND: UNKNOWN,v:32,u:32',
                     'FUNC: 21, PC: 0x250, ACCESS_KIND: FLOAT,v:64,u:64',
                     'FUNC: 22, PC: 0xe0, ACCESS_KIND: UNKNOWN,v:64,u:64',
                     'FUNC: 23, PC: 0xe0, ACCESS_KIND: FLOAT,v:64,u:64',
                     'FUNC: 23, PC: 0x100, ACCESS_KIND: FLOAT,v:64,u:64'],
                    'sm_75':
                    ['FUNC: 17, PC: 0xb0, ACCESS_KIND: INTEGER,v:32,u:32',
                     'FUNC: 18, PC: 0x20, ACCESS_KIND: INTEGER,v:64,u:32',
                     'FUNC: 18, PC: 0xe0, ACCESS_KIND: UNKNOWN,v:32,u:32',
                     'FUNC: 19, PC: 0x20, ACCESS_KIND: INTEGER,v:64,u:32',
                     'FUNC: 19, PC: 0x240, ACCESS_KIND: FLOAT,v:64,u:64',
                     'FUNC: 20, PC: 0x20, ACCESS_KIND: INTEGER,v:64,u:32',
                     'FUNC: 20, PC: 0xd0, ACCESS_KIND: UNKNOWN,v:64,u:64',
                     'FUNC: 21, PC: 0x20, ACCESS_KIND: INTEGER,v:64,u:32',
                     'FUNC: 21, PC: 0xd0, ACCESS_KIND: FLOAT,v:64,u:64',
                     'FUNC: 21, PC: 0xf0, ACCESS_KIND: FLOAT,v:64,u:64'],
                    'sm_80':
                    ['FUNC: 17, PC: 0xa0, ACCESS_KIND: INTEGER,v:64,u:32',
                     'FUNC: 17, PC: 0xc0, ACCESS_KIND: INTEGER,v:32,u:32',
                     'FUNC: 18, PC: 0x20, ACCESS_KIND: INTEGER,v:64,u:32',
                     'FUNC: 18, PC: 0xc0, ACCESS_KIND: INTEGER,v:64,u:32',
                     'FUNC: 18, PC: 0xe0, ACCESS_KIND: UNKNOWN,v:32,u:32',
                     'FUNC: 19, PC: 0x20, ACCESS_KIND: INTEGER,v:64,u:32',
                     'FUNC: 19, PC: 0xe0, ACCESS_KIND: INTEGER,v:64,u:32',
                     'FUNC: 19, PC: 0x230, ACCESS_KIND: FLOAT,v:64,u:64',
                     'FUNC: 20, PC: 0x20, ACCESS_KIND: INTEGER,v:64,u:32',
                     'FUNC: 20, PC: 0xb0, ACCESS_KIND: INTEGER,v:64,u:32',
                     'FUNC: 20, PC: 0xd0, ACCESS_KIND: UNKNOWN,v:64,u:32',
                     'FUNC: 21, PC: 0x20, ACCESS_KIND: INTEGER,v:64,u:32',
                     'FUNC: 21, PC: 0xb0, ACCESS_KIND: INTEGER,v:64,u:32',
                     'FUNC: 21, PC: 0xd0, ACCESS_KIND: FLOAT,v:64,u:64',
                     'FUNC: 21, PC: 0xf0, ACCESS_KIND: FLOAT,v:64,u:64']
                })
            elif choice == 'bfs':
                self._configs[choice] = InstructionTest.Config(insts={
                    'sm_70':
                    ['FUNC: 10, PC: 0xa0, ACCESS_KIND: INTEGER,v:8,u:8',
                     'FUNC: 10, PC: 0x170, ACCESS_KIND: INTEGER,v:8,u:8',
                     'FUNC: 10, PC: 0x180, ACCESS_KIND: INTEGER,v:8,u:8',
                     'FUNC: 10, PC: 0x190, ACCESS_KIND: INTEGER,v:8,u:8',
                     'FUNC: 10, PC: 0x1a0, ACCESS_KIND: UNKNOWN,v:8,u:8',
                     'FUNC: 11, PC: 0x90, ACCESS_KIND: INTEGER,v:8,u:8',
                     'FUNC: 11, PC: 0xd0, ACCESS_KIND: UNKNOWN,v:8,u:8',
                     'FUNC: 11, PC: 0xf0, ACCESS_KIND: INTEGER,v:32,u:32',
                     'FUNC: 11, PC: 0x120, ACCESS_KIND: INTEGER,v:32,u:32',
                     'FUNC: 11, PC: 0x1b0, ACCESS_KIND: INTEGER,v:32,u:32',
                     'FUNC: 11, PC: 0x1f0, ACCESS_KIND: INTEGER,v:8,u:8',
                     'FUNC: 11, PC: 0x210, ACCESS_KIND: INTEGER,v:32,u:32',
                     'FUNC: 11, PC: 0x290, ACCESS_KIND: INTEGER,v:32,u:32',
                     'FUNC: 11, PC: 0x2a0, ACCESS_KIND: INTEGER,v:8,u:8',
                     'FUNC: 11, PC: 0x2b0, ACCESS_KIND: INTEGER,v:32,u:32',
                     'FUNC: 11, PC: 0x2c0, ACCESS_KIND: INTEGER,v:32,u:32'],
                    'sm_75':
                    ['FUNC: 10, PC: 0x70, ACCESS_KIND: INTEGER,v:64,u:64',
                     'FUNC: 10, PC: 0x80, ACCESS_KIND: INTEGER,v:8,u:8',
                     'FUNC: 10, PC: 0xc0, ACCESS_KIND: INTEGER,v:64,u:64',
                     'FUNC: 10, PC: 0xd0, ACCESS_KIND: INTEGER,v:64,u:64',
                     'FUNC: 10, PC: 0x100, ACCESS_KIND: UNKNOWN,v:8,u:8',
                     'FUNC: 10, PC: 0x110, ACCESS_KIND: INTEGER,v:64,u:64',
                     'FUNC: 10, PC: 0x120, ACCESS_KIND: UNKNOWN,v:8,u:8',
                     'FUNC: 10, PC: 0x130, ACCESS_KIND: INTEGER,v:64,u:64',
                     'FUNC: 10, PC: 0x140, ACCESS_KIND: UNKNOWN,v:8,u:8',
                     'FUNC: 10, PC: 0x150, ACCESS_KIND: UNKNOWN,v:8,u:8',
                     'FUNC: 11, PC: 0x70, ACCESS_KIND: INTEGER,v:64,u:64',
                     'FUNC: 11, PC: 0x80, ACCESS_KIND: INTEGER,v:8,u:8',
                     'FUNC: 11, PC: 0xd0, ACCESS_KIND: INTEGER,v:64,u:64',
                     'FUNC: 11, PC: 0x100, ACCESS_KIND: UNKNOWN,v:8,u:8',
                     'FUNC: 11, PC: 0x110, ACCESS_KIND: INTEGER,v:32,u:32',
                     'FUNC: 11, PC: 0x140, ACCESS_KIND: INTEGER,v:32,u:32',
                     'FUNC: 11, PC: 0x1c0, ACCESS_KIND: INTEGER,v:32,u:32',
                     'FUNC: 11, PC: 0x1d0, ACCESS_KIND: INTEGER,v:64,u:64',
                     'FUNC: 11, PC: 0x1f0, ACCESS_KIND: INTEGER,v:8,u:8',
                     'FUNC: 11, PC: 0x210, ACCESS_KIND: INTEGER,v:32,u:32',
                     'FUNC: 11, PC: 0x240, ACCESS_KIND: INTEGER,v:64,u:64',
                     'FUNC: 11, PC: 0x280, ACCESS_KIND: INTEGER,v:32,u:32',
                     'FUNC: 11, PC: 0x290, ACCESS_KIND: UNKNOWN,v:8,u:8',
                     'FUNC: 11, PC: 0x2a0, ACCESS_KIND: INTEGER,v:32,u:32',
                     'FUNC: 11, PC: 0x2b0, ACCESS_KIND: INTEGER,v:32,u:32'],
                    'sm_80':
                    ['FUNC: 10, PC: 0x70, ACCESS_KIND: INTEGER,v:64,u:32',
                     'FUNC: 10, PC: 0xa0, ACCESS_KIND: INTEGER,v:8,u:8',
                     'FUNC: 10, PC: 0x150, ACCESS_KIND: INTEGER,v:8,u:8',
                     'FUNC: 10, PC: 0x180, ACCESS_KIND: INTEGER,v:8,u:8',
                     'FUNC: 10, PC: 0x190, ACCESS_KIND: INTEGER,v:8,u:8',
                     'FUNC: 10, PC: 0x1a0, ACCESS_KIND: UNKNOWN,v:8,u:8',
                     'FUNC: 11, PC: 0x70, ACCESS_KIND: INTEGER,v:64,u:32',
                     'FUNC: 11, PC: 0x90, ACCESS_KIND: INTEGER,v:8,u:8',
                     'FUNC: 11, PC: 0xd0, ACCESS_KIND: UNKNOWN,v:8,u:8',
                     'FUNC: 11, PC: 0xf0, ACCESS_KIND: INTEGER,v:32,u:32',
                     'FUNC: 11, PC: 0x120, ACCESS_KIND: INTEGER,v:32,u:32',
                     'FUNC: 11, PC: 0x1a0, ACCESS_KIND: INTEGER,v:32,u:32',
                     'FUNC: 11, PC: 0x1e0, ACCESS_KIND: INTEGER,v:8,u:8',
                     'FUNC: 11, PC: 0x200, ACCESS_KIND: INTEGER,v:32,u:32',
                     'FUNC: 11, PC: 0x280, ACCESS_KIND: INTEGER,v:32,u:32',
                     'FUNC: 11, PC: 0x290, ACCESS_KIND: INTEGER,v:8,u:8',
                     'FUNC: 11, PC: 0x2a0, ACCESS_KIND: INTEGER,v:32,u:32',
                     'FUNC: 11, PC: 0x2b0, ACCESS_KIND: INTEGER,v:32,u:32']
                })

    def _run_impl(self, case_name, version):
        """Run one case and diff the parsed instructions against expectations.

        Note: `version` is accepted but unused here — presumably required by
        the Test base-class interface (TODO confirm).
        """
        command = Test.cases[case_name].command
        options = Test.cases[case_name].options
        path = Test.cases[case_name].path
        # Profile the case; results land under ./gvprof-measurements/.
        pipe_read(['gvprof', '-cfg', '-e', 'data_flow', command] + options)
        files = os.listdir('./gvprof-measurements/structs/nvidia/')
        insts = self._configs[case_name].insts
        for f in files:
            if f.find('.inst') != -1:
                # Parse the .inst file and compare line-by-line, in order,
                # against the expectation list for the current architecture.
                bufs = pipe_read(
                    ['redshow_parser', './gvprof-measurements/structs/nvidia/' + f]).decode('utf-8').splitlines()
                correct = True
                for n, buf in enumerate(bufs):
                    if buf != insts[self._arch][n]:
                        print('Error {} line {} (true: {} vs test: {})'.format(
                            path, n, insts[self._arch][n], buf))
                        correct = False
                if correct is True:
                    print('Pass ' + path + ' ' + f)
| 59.048276
| 113
| 0.482364
| 1,182
| 8,562
| 3.388325
| 0.112521
| 0.227216
| 0.28015
| 0.296629
| 0.785518
| 0.783521
| 0.775031
| 0.772035
| 0.715855
| 0.691386
| 0
| 0.143014
| 0.362999
| 8,562
| 144
| 114
| 59.458333
| 0.591309
| 0
| 0
| 0.330827
| 0
| 0
| 0.556412
| 0.008643
| 0
| 0
| 0.047886
| 0
| 0
| 1
| 0.022556
| false
| 0.007519
| 0.037594
| 0
| 0.075188
| 0.015038
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
213c5691c47ff403ab45504b072bf661e4396ccb
| 98
|
py
|
Python
|
cryptomon/main.py
|
S0L1DUS/cryptocoinmon
|
37b210ca2f93f0b70f160ad903782408dee0f9e9
|
[
"MIT"
] | null | null | null |
cryptomon/main.py
|
S0L1DUS/cryptocoinmon
|
37b210ca2f93f0b70f160ad903782408dee0f9e9
|
[
"MIT"
] | null | null | null |
cryptomon/main.py
|
S0L1DUS/cryptocoinmon
|
37b210ca2f93f0b70f160ad903782408dee0f9e9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import cryptomon.core
def main():
    """Entry point: construct the cryptomon monitor and start it."""
    monitor = cryptomon.core.cryptomon()
    monitor.run()
| 12.25
| 36
| 0.622449
| 12
| 98
| 5.083333
| 0.75
| 0.42623
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012346
| 0.173469
| 98
| 7
| 37
| 14
| 0.740741
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
213fb1888eb54b7c89543e34d845a93fd4676f0c
| 15,520
|
py
|
Python
|
unitTests/testScripts/TestPolynomial.py
|
liute62/NumCpp
|
d6922b2b5e1f575021b0577aea1445e041ec7180
|
[
"MIT"
] | null | null | null |
unitTests/testScripts/TestPolynomial.py
|
liute62/NumCpp
|
d6922b2b5e1f575021b0577aea1445e041ec7180
|
[
"MIT"
] | null | null | null |
unitTests/testScripts/TestPolynomial.py
|
liute62/NumCpp
|
d6922b2b5e1f575021b0577aea1445e041ec7180
|
[
"MIT"
] | null | null | null |
import numpy as np
import scipy.special as sp
from termcolor import colored
import sys
# Make the compiled NumCpp bindings importable before `import NumCpp` below.
# On Linux the library is under ../lib; on other platforms the path points at
# a ../build/x64/Release directory (presumably an MSVC output dir — confirm).
if sys.platform == 'linux':
    sys.path.append(r'../lib')
else:
    sys.path.append(r'../build/x64/Release')
import NumCpp
####################################################################################
def doTest():
    """Run every Polynomial-module test suite in order."""
    for suite in (testPoly1D, testFunctions):
        suite()
####################################################################################
def testPoly1D():
    """Exercise the NumCpp.Poly1d class against numpy.poly1d.

    Each check builds matching NumCpp / numpy polynomials from random
    coefficients or roots and prints a colored PASS/FAIL verdict.
    """
    def _verdict(passed):
        # Shared PASS/FAIL reporter used by every check below.
        if passed:
            print(colored('\tPASS', 'green'))
        else:
            print(colored('\tFAIL', 'red'))

    print(colored('Testing Polynomial Module', 'magenta'))
    print(colored('Testing Poly1d class', 'magenta'))

    print(colored('Testing Constructor', 'cyan'))
    numCoefficients = np.random.randint(3, 10, [1, ]).item()
    coefficients = np.random.randint(-20, 20, [numCoefficients, ])
    coefficientsC = NumCpp.NdArray(1, numCoefficients)
    coefficientsC.setArray(coefficients)
    polyC = NumCpp.Poly1d(coefficientsC, False)
    _verdict(np.array_equal(polyC.coefficients().getNumpyArray().flatten(),
                            coefficients))

    print(colored('Testing Constructor Roots', 'cyan'))
    numRoots = np.random.randint(3, 10, [1, ]).item()
    roots = np.random.randint(-20, 20, [numRoots, ])
    rootsC = NumCpp.NdArray(1, numRoots)
    rootsC.setArray(roots)
    poly = np.poly1d(roots, True)
    polyC = NumCpp.Poly1d(rootsC, True)
    # astype(int) instead of astype(np.int): np.int was deprecated in
    # NumPy 1.20 and removed in 1.24; the builtin is equivalent here.
    _verdict(np.array_equal(
        np.fliplr(polyC.coefficients().getNumpyArray()).flatten().astype(int),
        poly.coefficients))

    print(colored('Testing area', 'cyan'))
    bounds = np.sort(np.random.rand(2) * 100 - 50)
    polyIntegral = poly.integ()
    areaC = np.round(polyC.area(*bounds), 3)
    areaPy = np.round(polyIntegral(bounds[1]) - polyIntegral(bounds[0]), 3)
    if areaC == areaPy:
        _verdict(True)
    else:
        # Show both values to make the mismatch diagnosable.
        print(areaC)
        print(areaPy)
        _verdict(False)

    print(colored('Testing deriv', 'cyan'))
    _verdict(np.array_equal(polyC.deriv().coefficients().getNumpyArray().flatten(),
                            np.flipud(poly.deriv().coefficients)))

    print(colored('Testing integ', 'cyan'))
    _verdict(np.array_equal(polyC.integ().coefficients().getNumpyArray().flatten(),
                            np.flipud(poly.integ().coefficients)))

    print(colored('Testing order', 'cyan'))
    _verdict(polyC.order() == roots.size)

    print(colored('Testing operator()', 'cyan'))
    value = np.random.randint(-20, 20, [1, ]).item()
    _verdict(polyC[value] == poly(value))

    # Build a second polynomial for the arithmetic-operator checks.
    print(colored('Testing addition', 'cyan'))
    numCoefficients = np.random.randint(3, 10, [1, ]).item()
    coefficients = np.random.randint(-20, 20, [numCoefficients, ])
    coefficientsC = NumCpp.NdArray(1, numCoefficients)
    coefficientsC.setArray(coefficients)
    polyC2 = NumCpp.Poly1d(coefficientsC, False)
    poly2 = np.poly1d(np.flip(coefficients))
    _verdict(np.array_equal(
        np.fliplr((polyC + polyC2).coefficients().getNumpyArray()).flatten(),
        (poly + poly2).coefficients))

    print(colored('Testing subtraction', 'cyan'))
    _verdict(np.array_equal(
        np.fliplr((polyC - polyC2).coefficients().getNumpyArray()).flatten(),
        (poly - poly2).coefficients))

    print(colored('Testing multiplication', 'cyan'))
    _verdict(np.array_equal(
        np.fliplr((polyC * polyC2).coefficients().getNumpyArray()).flatten(),
        (poly * poly2).coefficients))

    print(colored('Testing power', 'cyan'))
    exponent = np.random.randint(0, 5, [1, ]).item()
    _verdict(np.array_equal(
        np.fliplr((polyC2 ** exponent).coefficients().getNumpyArray()).flatten(),
        (poly2 ** exponent).coefficients))

    print(colored('Testing print', 'cyan'))
    polyC.print()
####################################################################################
def testFunctions():
    """Exercise NumCpp's special-polynomial functions against scipy.special.

    The regular cases (scalar/array evaluation with signature f(order, x))
    go through the two shared suite helpers below; the irregular cases
    (extra degree argument, lpmn/lqn table lookups, spherical harmonics)
    are checked inline.
    """
    print(colored('Testing Polynomial functions', 'magenta'))
    ORDER_MAX = 5
    DECIMALS_ROUND = 7

    def _report(passed, detail=None):
        # Shared PASS/FAIL reporter; `detail` (if any) is printed on failure
        # to show the last compared values.
        if passed:
            print(colored('\tPASS', 'green'))
        else:
            print(colored('\tFAIL', 'red'))
            if detail is not None:
                print(detail)

    def _scaler_suite(name, pyFunc, cppFunc):
        # Compare scalar evaluations pyFunc(order, x) vs cppFunc(order, x)
        # for orders 0..ORDER_MAX-1 at a random x in [0, 1).
        print(colored(f'Testing {name}', 'cyan'))
        allTrue = True
        valuePy = valueCpp = None
        for order in range(ORDER_MAX):
            x = np.random.rand(1).item()
            valuePy = pyFunc(order, x)
            valueCpp = cppFunc(order, x)
            if np.round(valuePy, DECIMALS_ROUND) != np.round(valueCpp, DECIMALS_ROUND):
                allTrue = False
        _report(allTrue, f'valuePy = {valuePy}, valueCpp = {valueCpp}')

    def _array_suite(name, pyFunc, cppFunc):
        # Compare elementwise evaluations over a random 2-D array.
        print(colored(f'Testing {name}', 'cyan'))
        allTrue = True
        for order in range(ORDER_MAX):
            shapeInput = np.random.randint(10, 100, [2, ], dtype=np.uint32)
            cArray = NumCpp.NdArray(NumCpp.Shape(*shapeInput))
            x = np.random.rand(*shapeInput)
            cArray.setArray(x)
            valuePy = pyFunc(order, x)
            valueCpp = cppFunc(order, cArray)
            if not np.array_equal(np.round(valuePy, DECIMALS_ROUND),
                                  np.round(valueCpp, DECIMALS_ROUND)):
                allTrue = False
        _report(allTrue)

    _scaler_suite('chebyshev_t_Scaler', sp.eval_chebyt, NumCpp.chebyshev_t_Scaler)
    _array_suite('chebyshev_t_Array', sp.eval_chebyt, NumCpp.chebyshev_t_Array)
    _scaler_suite('chebyshev_u_Scaler', sp.eval_chebyu, NumCpp.chebyshev_u_Scaler)
    _array_suite('chebyshev_u_Array', sp.eval_chebyu, NumCpp.chebyshev_u_Array)
    _scaler_suite('hermite_Scaler', sp.eval_hermite, NumCpp.hermite_Scaler)
    _array_suite('hermite_Array', sp.eval_hermite, NumCpp.hermite_Array)
    _scaler_suite('laguerre_Scaler1', sp.eval_laguerre, NumCpp.laguerre_Scaler1)
    _array_suite('laguerre_Array1', sp.eval_laguerre, NumCpp.laguerre_Array1)

    # Associated Laguerre: extra random degree, and the argument order is
    # swapped between scipy (degree, order, x) and NumCpp (order, degree, x).
    print(colored('Testing laguerre_Scaler2', 'cyan'))
    allTrue = True
    for order in range(ORDER_MAX):
        degree = np.random.randint(0, 10, [1, ]).item()
        x = np.random.rand(1).item()
        valuePy = sp.eval_genlaguerre(degree, order, x)
        valueCpp = NumCpp.laguerre_Scaler2(order, degree, x)
        if np.round(valuePy, DECIMALS_ROUND) != np.round(valueCpp, DECIMALS_ROUND):
            allTrue = False
    _report(allTrue, f'valuePy = {valuePy}, valueCpp = {valueCpp}')

    print(colored('Testing laguerre_Array2', 'cyan'))
    allTrue = True
    for order in range(ORDER_MAX):
        degree = np.random.randint(0, 10, [1, ]).item()
        shapeInput = np.random.randint(10, 100, [2, ], dtype=np.uint32)
        cArray = NumCpp.NdArray(NumCpp.Shape(*shapeInput))
        x = np.random.rand(*shapeInput)
        cArray.setArray(x)
        valuePy = sp.eval_genlaguerre(degree, order, x)
        valueCpp = NumCpp.laguerre_Array2(order, degree, cArray)
        if not np.array_equal(np.round(valuePy, DECIMALS_ROUND),
                              np.round(valueCpp, DECIMALS_ROUND)):
            allTrue = False
    _report(allTrue)

    _scaler_suite('legendre_p_Scaler1', sp.eval_legendre, NumCpp.legendre_p_Scaler1)
    _array_suite('legendre_p_Array1', sp.eval_legendre, NumCpp.legendre_p_Array1)

    # Associated Legendre P: scipy's lpmn returns a table; pick the
    # (order, degree) entry for comparison.
    print(colored('Testing legendre_p_Scaler2', 'cyan'))
    allTrue = True
    for order in range(ORDER_MAX):
        x = np.random.rand(1).item()
        degree = np.random.randint(order, ORDER_MAX)
        valuePy = sp.lpmn(order, degree, x)[0][order, degree]
        valueCpp = NumCpp.legendre_p_Scaler2(order, degree, x)
        if np.round(valuePy, DECIMALS_ROUND) != np.round(valueCpp, DECIMALS_ROUND):
            allTrue = False
    _report(allTrue, f'valuePy = {valuePy}, valueCpp = {valueCpp}')

    # Legendre Q: scipy's lqn also returns a table indexed by order.
    print(colored('Testing legendre_q_Scaler', 'cyan'))
    allTrue = True
    for order in range(ORDER_MAX):
        x = np.random.rand(1).item()
        valuePy = sp.lqn(order, x)[0][order]
        valueCpp = NumCpp.legendre_q_Scaler(order, x)
        if np.round(valuePy, DECIMALS_ROUND) != np.round(valueCpp, DECIMALS_ROUND):
            allTrue = False
    _report(allTrue, f'valuePy = {valuePy}, valueCpp = {valueCpp}')

    # Spherical harmonics: NumCpp returns (real, imag) as a pair; scipy
    # returns a complex value.
    print(colored('Testing spherical_harmonic', 'cyan'))
    allTrue = True
    for order in range(ORDER_MAX):
        degree = np.random.randint(order, ORDER_MAX)
        theta = np.random.rand(1).item() * np.pi * 2
        phi = np.random.rand(1).item() * np.pi
        valuePy = sp.sph_harm(order, degree, theta, phi)
        valueCpp = NumCpp.spherical_harmonic(order, degree, theta, phi)
        if (np.round(valuePy.real, DECIMALS_ROUND) != np.round(valueCpp[0], DECIMALS_ROUND) or
                np.round(valuePy.imag, DECIMALS_ROUND) != np.round(valueCpp[1], DECIMALS_ROUND)):
            allTrue = False
    _report(allTrue, f'valuePy = {valuePy}, valueCpp = {valueCpp}')

    print(colored('Testing spherical_harmonic_r', 'cyan'))
    allTrue = True
    for order in range(ORDER_MAX):
        degree = np.random.randint(order, ORDER_MAX)
        theta = np.random.rand(1).item() * np.pi * 2
        phi = np.random.rand(1).item() * np.pi
        valuePy = sp.sph_harm(order, degree, theta, phi)
        valueCpp = NumCpp.spherical_harmonic_r(order, degree, theta, phi)
        if np.round(valuePy.real, DECIMALS_ROUND) != np.round(valueCpp, DECIMALS_ROUND):
            allTrue = False
    _report(allTrue, f'valuePy = {valuePy}, valueCpp = {valueCpp}')

    print(colored('Testing spherical_harmonic_i', 'cyan'))
    allTrue = True
    for order in range(ORDER_MAX):
        degree = np.random.randint(order, ORDER_MAX)
        theta = np.random.rand(1).item() * np.pi * 2
        phi = np.random.rand(1).item() * np.pi
        valuePy = sp.sph_harm(order, degree, theta, phi)
        valueCpp = NumCpp.spherical_harmonic_i(order, degree, theta, phi)
        if np.round(valuePy.imag, DECIMALS_ROUND) != np.round(valueCpp, DECIMALS_ROUND):
            allTrue = False
    _report(allTrue, f'valuePy = {valuePy}, valueCpp = {valueCpp}')
####################################################################################
# Allow the test script to be run directly from the command line.
if __name__ == '__main__':
    doTest()
| 37.39759
| 116
| 0.601997
| 1,805
| 15,520
| 5.093075
| 0.079224
| 0.11487
| 0.066137
| 0.067008
| 0.868269
| 0.8575
| 0.83346
| 0.821386
| 0.819101
| 0.794953
| 0
| 0.013475
| 0.225387
| 15,520
| 414
| 117
| 37.487923
| 0.751206
| 0
| 0
| 0.707042
| 0
| 0
| 0.124473
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008451
| false
| 0.078873
| 0.014085
| 0
| 0.022535
| 0.287324
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
b4a253086549db5e0f9dc6cacd7bc3bc957d6819
| 67
|
py
|
Python
|
rabbicon/__init__.py
|
whistyun/Rabbicon
|
8b972c38e54bddd9eba56f15989567b4f386dc0e
|
[
"Unlicense"
] | null | null | null |
rabbicon/__init__.py
|
whistyun/Rabbicon
|
8b972c38e54bddd9eba56f15989567b4f386dc0e
|
[
"Unlicense"
] | null | null | null |
rabbicon/__init__.py
|
whistyun/Rabbicon
|
8b972c38e54bddd9eba56f15989567b4f386dc0e
|
[
"Unlicense"
] | null | null | null |
def init():
    """Load the application's view modules.

    The imports are presumably done for their registration side effects —
    nothing from them is used here directly.
    """
    import rabbicon.login
    import rabbicon.index
| 16.75
| 26
| 0.671642
| 8
| 67
| 5.625
| 0.75
| 0.622222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.253731
| 67
| 4
| 27
| 16.75
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.666667
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
2ecf4a9b11a1fefbef672d7bb9d2cdded8ec1041
| 150
|
py
|
Python
|
Chapter__5/modules_packages/modules/module.py
|
nil1729/python__noob
|
d82d951dc511eafa9f4315e1fdfdc749f484abf1
|
[
"MIT"
] | null | null | null |
Chapter__5/modules_packages/modules/module.py
|
nil1729/python__noob
|
d82d951dc511eafa9f4315e1fdfdc749f484abf1
|
[
"MIT"
] | null | null | null |
Chapter__5/modules_packages/modules/module.py
|
nil1729/python__noob
|
d82d951dc511eafa9f4315e1fdfdc749f484abf1
|
[
"MIT"
] | null | null | null |
def my_method_one():
    """Demo function: announce itself on stdout."""
    message = "I am From module.py script, Method One"
    print(message)
def my_method_two():
    """Demo function: announce itself on stdout."""
    message = "I am From module.py script, Method Two"
    print(message)
| 30
| 52
| 0.68
| 26
| 150
| 3.769231
| 0.461538
| 0.102041
| 0.22449
| 0.244898
| 0.653061
| 0.653061
| 0.653061
| 0.653061
| 0
| 0
| 0
| 0
| 0.2
| 150
| 5
| 53
| 30
| 0.816667
| 0
| 0
| 0
| 0
| 0
| 0.517007
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
259d60c7ee6f3ffb9b4d9d775d5bfbdc984b98a8
| 168
|
py
|
Python
|
tests/test_common/test_vision/test_datasets/__init__.py
|
VishalBh4r4mbe/code-soup
|
8499a86df0da6e046bfdb98070e3416bbd0c6af2
|
[
"MIT"
] | null | null | null |
tests/test_common/test_vision/test_datasets/__init__.py
|
VishalBh4r4mbe/code-soup
|
8499a86df0da6e046bfdb98070e3416bbd0c6af2
|
[
"MIT"
] | null | null | null |
tests/test_common/test_vision/test_datasets/__init__.py
|
VishalBh4r4mbe/code-soup
|
8499a86df0da6e046bfdb98070e3416bbd0c6af2
|
[
"MIT"
] | null | null | null |
from tests.test_common.test_vision.test_datasets.cifar_test import TestCIFARDataset
from tests.test_common.test_vision.test_datasets.mnist_test import TestMNISTDataset
| 56
| 83
| 0.904762
| 24
| 168
| 6
| 0.458333
| 0.125
| 0.180556
| 0.263889
| 0.569444
| 0.569444
| 0.569444
| 0.569444
| 0
| 0
| 0
| 0
| 0.047619
| 168
| 2
| 84
| 84
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d327a49e2f66ed3bcd5c6b11ebffcb05594acf6a
| 347
|
py
|
Python
|
Curso Udemy 2022/Nova_organizacao/Aula_88_criar_pacotes_python/vendas/calc_preco.py
|
Matheusfarmaceutico/Exercicios-Python
|
d1821bd9d11ea0707074c5fe11dead2e85476ebd
|
[
"MIT"
] | null | null | null |
Curso Udemy 2022/Nova_organizacao/Aula_88_criar_pacotes_python/vendas/calc_preco.py
|
Matheusfarmaceutico/Exercicios-Python
|
d1821bd9d11ea0707074c5fe11dead2e85476ebd
|
[
"MIT"
] | null | null | null |
Curso Udemy 2022/Nova_organizacao/Aula_88_criar_pacotes_python/vendas/calc_preco.py
|
Matheusfarmaceutico/Exercicios-Python
|
d1821bd9d11ea0707074c5fe11dead2e85476ebd
|
[
"MIT"
] | null | null | null |
from formata.preco import real
def aumento(valor, porcentagem, formata=False):
    """Return *valor* increased by *porcentagem* percent.

    When *formata* is truthy the result is passed through real() for
    currency formatting; otherwise the raw number is returned.
    """
    resultado = valor + valor * porcentagem / 100
    return real(resultado) if formata else resultado
def reducao(valor, porcentagem, formata=False):
    """Return *valor* decreased by *porcentagem* percent.

    When *formata* is truthy the result is passed through real() for
    currency formatting; otherwise the raw number is returned.
    """
    resultado = valor - valor * porcentagem / 100
    return real(resultado) if formata else resultado
| 21.6875
| 46
| 0.616715
| 43
| 347
| 4.976744
| 0.372093
| 0.299065
| 0.214953
| 0.261682
| 0.785047
| 0.785047
| 0.785047
| 0.785047
| 0.785047
| 0.785047
| 0
| 0.024292
| 0.288184
| 347
| 16
| 47
| 21.6875
| 0.842105
| 0
| 0
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.076923
| 0
| 0.538462
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d3831489140f64f52791cedce326d3f85d88ea28
| 127
|
py
|
Python
|
allotools/__init__.py
|
mullenkamp/EcanAlloUsageTools
|
c02e8f8edbae7a5ee150880ed327c1d42a422b12
|
[
"Apache-2.0"
] | null | null | null |
allotools/__init__.py
|
mullenkamp/EcanAlloUsageTools
|
c02e8f8edbae7a5ee150880ed327c1d42a422b12
|
[
"Apache-2.0"
] | null | null | null |
allotools/__init__.py
|
mullenkamp/EcanAlloUsageTools
|
c02e8f8edbae7a5ee150880ed327c1d42a422b12
|
[
"Apache-2.0"
] | 1
|
2020-11-01T23:06:13.000Z
|
2020-11-01T23:06:13.000Z
|
from allotools.core import AlloUsage
from allotools import util
from allotools import filters
from allotools import parameters
| 25.4
| 36
| 0.866142
| 17
| 127
| 6.470588
| 0.470588
| 0.472727
| 0.518182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125984
| 127
| 4
| 37
| 31.75
| 0.990991
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
9f0931eebf0f5487dd8514d5b164c35201630c20
| 7,709
|
py
|
Python
|
bnn/src/training/Theano/vae.py
|
Siraj-Qazi/BNN-PYNQ
|
b942fe92b3c62b0b877b0a9d5c13e7eb3a234685
|
[
"BSD-3-Clause"
] | null | null | null |
bnn/src/training/Theano/vae.py
|
Siraj-Qazi/BNN-PYNQ
|
b942fe92b3c62b0b877b0a9d5c13e7eb3a234685
|
[
"BSD-3-Clause"
] | null | null | null |
bnn/src/training/Theano/vae.py
|
Siraj-Qazi/BNN-PYNQ
|
b942fe92b3c62b0b877b0a9d5c13e7eb3a234685
|
[
"BSD-3-Clause"
] | null | null | null |
import lasagne
import binary_net
def genCnv(input, num_outputs, learning_parameters):
    # A function to generate the cnv network topology which matches the overlay for the Pynq board.
    # WARNING: If you change this file, it's likely the resultant weights will not fit on the Pynq overlay.
    #
    # Builds a binarized convolutional autoencoder: a 3-conv encoder, a
    # fully-connected bottleneck, and a 3-deconv decoder followed by a dense
    # output layer of `num_outputs` units.  `input` is the symbolic input
    # variable fed to the InputLayer; `learning_parameters` supplies the
    # W_LR_scale / epsilon / alpha values used by the binary layers and
    # batch normalisation.  Returns the final lasagne layer of the stack.
    # NOTE(review): the bare `print` statements make this Python-2-only code.
    stochastic = False  # deterministic binarization (no stochastic rounding)
    binary = True  # binarize weights in the binary_net layers
    H = 1  # binary weight scale parameter used by binary_net
    activation = binary_net.binary_tanh_unit  # binary tanh nonlinearity
    W_LR_scale = learning_parameters.W_LR_scale  # per-layer LR scaling
    epsilon = learning_parameters.epsilon  # batch-norm epsilon
    alpha = learning_parameters.alpha  # batch-norm moving-average coefficient
    # Encoder: single-channel 28x28 input (MNIST-sized images)
    cnn = lasagne.layers.InputLayer(shape=(None, 1, 28, 28), input_var=input)
    # 1st Layer: 64 binary 4x4 convs, stride 2 (downsamples)
    cnn = binary_net.Conv2DLayer(
        cnn,
        binary=binary,
        stochastic=stochastic,
        H=H,
        W_LR_scale=W_LR_scale,
        num_filters=64,
        filter_size=(4, 4),
        pad='valid',
        stride=(2,2),
        flip_filters=False,
        nonlinearity=lasagne.nonlinearities.identity)
    # Batch-norm then binary activation are applied *after* the identity
    # conv, as required by the binary_net training scheme.
    cnn = lasagne.layers.BatchNormLayer(cnn, epsilon=epsilon, alpha=alpha)
    cnn = lasagne.layers.NonlinearityLayer(cnn, nonlinearity=activation)
    cnn = lasagne.layers.DropoutLayer(cnn, p = 0.2)
    print cnn.output_shape
    # 2nd Layer: 64 binary 4x4 convs, stride 2 (downsamples again)
    cnn = binary_net.Conv2DLayer(
        cnn,
        binary=binary,
        stochastic=stochastic,
        H=H,
        W_LR_scale=W_LR_scale,
        num_filters=64,
        filter_size=(4, 4),
        pad='valid',
        stride=(2,2),
        flip_filters=False,
        nonlinearity=lasagne.nonlinearities.identity)
    cnn = lasagne.layers.BatchNormLayer(cnn, epsilon=epsilon, alpha=alpha)
    cnn = lasagne.layers.NonlinearityLayer(cnn, nonlinearity=activation)
    cnn = lasagne.layers.DropoutLayer(cnn, p = 0.2)
    print cnn.output_shape
    # 3rd Layer: 64 binary 4x4 convs, stride 1
    cnn = binary_net.Conv2DLayer(
        cnn,
        binary=binary,
        stochastic=stochastic,
        H=H,
        W_LR_scale=W_LR_scale,
        num_filters=64,
        filter_size=(4, 4),
        pad='valid',
        stride=(1,1),
        flip_filters=False,
        nonlinearity=lasagne.nonlinearities.identity)
    cnn = lasagne.layers.BatchNormLayer(cnn, epsilon=epsilon, alpha=alpha)
    cnn = lasagne.layers.NonlinearityLayer(cnn, nonlinearity=activation)
    cnn = lasagne.layers.DropoutLayer(cnn, p = 0.2)
    print cnn.output_shape
    cnn = lasagne.layers.flatten(cnn)
    print cnn.output_shape
    # FC Layer: 256-unit binary dense bottleneck
    cnn = binary_net.DenseLayer(
        cnn,
        binary=binary,
        stochastic=stochastic,
        H=H,
        W_LR_scale=W_LR_scale,
        nonlinearity=lasagne.nonlinearities.identity,
        num_units=256)
    cnn = lasagne.layers.BatchNormLayer(cnn, epsilon=epsilon, alpha=alpha)
    cnn = lasagne.layers.NonlinearityLayer(cnn, nonlinearity=activation)
    print cnn.output_shape
    # Decoder: reshape the 256-unit bottleneck back to 64 channels of 2x2
    cnn = lasagne.layers.ReshapeLayer(cnn, shape = (-1, 64, 2, 2))
    print cnn.output_shape
    # 1st Deconv Layer: 64 binary 4x4 transposed convs, stride 2 (upsamples)
    cnn = binary_net.Deconv2DLayer(
        cnn,
        binary=binary,
        stochastic=stochastic,
        H=H,
        W_LR_scale=W_LR_scale,
        num_filters=64,
        filter_size=(4, 4),
        crop='valid',
        stride=(2,2),
        flip_filters=False,
        nonlinearity=lasagne.nonlinearities.identity)
    cnn = lasagne.layers.BatchNormLayer(cnn, epsilon=epsilon, alpha=alpha)
    cnn = lasagne.layers.NonlinearityLayer(cnn, nonlinearity=activation)
    print cnn.output_shape
    # 2nd Deconv Layer: 64 binary 4x4 transposed convs, stride 2
    cnn = binary_net.Deconv2DLayer(
        cnn,
        binary=binary,
        stochastic=stochastic,
        H=H,
        W_LR_scale=W_LR_scale,
        num_filters=64,
        filter_size=(4, 4),
        crop='valid',
        stride=(2,2),
        flip_filters=False,
        nonlinearity=lasagne.nonlinearities.identity)
    cnn = lasagne.layers.BatchNormLayer(cnn, epsilon=epsilon, alpha=alpha)
    cnn = lasagne.layers.NonlinearityLayer(cnn, nonlinearity=activation)
    print cnn.output_shape
    # 3rd Deconv Layer: collapse back to a single output channel
    cnn = binary_net.Deconv2DLayer(
        cnn,
        binary=binary,
        stochastic=stochastic,
        H=H,
        W_LR_scale=W_LR_scale,
        num_filters=1,
        filter_size=(4, 4),
        crop='valid',
        stride=(2,2),
        flip_filters=False,
        nonlinearity=lasagne.nonlinearities.identity)
    cnn = lasagne.layers.BatchNormLayer(cnn, epsilon=epsilon, alpha=alpha)
    cnn = lasagne.layers.NonlinearityLayer(cnn, nonlinearity=activation)
    print cnn.output_shape
    cnn = lasagne.layers.flatten(cnn)
    print cnn.output_shape
    # Last FC layer: dense projection to the requested number of outputs.
    # Note there is no NonlinearityLayer after this final batch norm.
    cnn = binary_net.DenseLayer(
        cnn,
        binary=binary,
        stochastic=stochastic,
        H=H,
        W_LR_scale=W_LR_scale,
        nonlinearity=lasagne.nonlinearities.identity,
        num_units=num_outputs)
    cnn = lasagne.layers.BatchNormLayer(cnn, epsilon=epsilon, alpha=alpha)
    print cnn.output_shape
    return cnn
def genCnvInf(input, num_classes, learning_parameters):
    # Full-precision (non-binarized) counterpart of genCnv: same
    # encoder/decoder topology built from plain lasagne layers with ReLU
    # activations and a sigmoid output of `num_classes` units.
    # `learning_parameters` is accepted for signature parity with genCnv but
    # is not used here.  Returns the final lasagne layer.
    # NOTE(review): the bare `print` statements make this Python-2-only code.
    # ENCODER
    cnn = lasagne.layers.InputLayer(shape=(None, 1, 28, 28), input_var=input)
    # Three 64-filter 4x4 conv layers (stride 2, 2, then 1), each followed
    # by dropout, mirroring the binary encoder in genCnv.
    cnn = lasagne.layers.Conv2DLayer(cnn, num_filters=64, filter_size=(4, 4), pad='valid', stride=(2, 2), flip_filters=False, nonlinearity=lasagne.nonlinearities.rectify)
    print cnn.output_shape
    cnn = lasagne.layers.DropoutLayer(cnn, p = 0.2)
    cnn = lasagne.layers.Conv2DLayer(cnn, num_filters=64, filter_size=(4, 4), pad='valid', stride=(2, 2), flip_filters=False, nonlinearity=lasagne.nonlinearities.rectify)
    cnn = lasagne.layers.DropoutLayer(cnn, p = 0.2)
    print cnn.output_shape
    cnn = lasagne.layers.Conv2DLayer(cnn, num_filters=64, filter_size=(4, 4), pad='valid', stride=(1, 1), flip_filters=False, nonlinearity=lasagne.nonlinearities.rectify)
    cnn = lasagne.layers.DropoutLayer(cnn, p = 0.2)
    print cnn.output_shape
    cnn = lasagne.layers.flatten(cnn)
    print cnn.output_shape
    # 8-unit latent bottleneck
    cnn = lasagne.layers.DenseLayer(cnn, nonlinearity=lasagne.nonlinearities.rectify, num_units=8)
    print cnn.output_shape
    # DECODER
    print "Decoder"
    # Expand back to 256 units and reshape to 64 channels of 2x2 before the
    # transposed convolutions.
    cnn = lasagne.layers.DenseLayer(cnn, nonlinearity=lasagne.nonlinearities.rectify, num_units=256)
    print cnn.output_shape
    cnn = lasagne.layers.ReshapeLayer(cnn, shape = (-1, 64, 2, 2))
    print cnn.output_shape
    cnn = lasagne.layers.Deconv2DLayer(cnn, num_filters=64, filter_size=(4,4), crop='valid', stride=(2,2), flip_filters=False, nonlinearity=lasagne.nonlinearities.rectify)
    print cnn.output_shape
    cnn = lasagne.layers.Deconv2DLayer(cnn, num_filters=64, filter_size=(4,4), crop='valid', stride=(2,2), flip_filters=False, nonlinearity=lasagne.nonlinearities.rectify)
    print cnn.output_shape
    cnn = lasagne.layers.Deconv2DLayer(cnn, num_filters=64, filter_size=(4,4), crop='valid', stride=(2,2), flip_filters=False, nonlinearity=lasagne.nonlinearities.rectify)
    print cnn.output_shape
    cnn = lasagne.layers.flatten(cnn)
    # Sigmoid output so each of the `num_classes` values lies in (0, 1).
    cnn = lasagne.layers.DenseLayer(cnn, nonlinearity=lasagne.nonlinearities.sigmoid, num_units=num_classes)
    print cnn.output_shape
    return cnn
| 33.663755
| 171
| 0.620573
| 879
| 7,709
| 5.303754
| 0.118316
| 0.08151
| 0.130416
| 0.089661
| 0.887387
| 0.884813
| 0.872801
| 0.868726
| 0.851566
| 0.837194
| 0
| 0.023353
| 0.283435
| 7,709
| 228
| 172
| 33.811404
| 0.820601
| 0.044623
| 0
| 0.876543
| 1
| 0
| 0.009116
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.012346
| null | null | 0.141975
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
9f1b7f9e721722aa11ac08772c1d0793cb742024
| 8,037
|
py
|
Python
|
src/ionotomo/tests/test_frames.py
|
Joshuaalbert/IonoTomo
|
9f50fbac698d43a824dd098d76dce93504c7b879
|
[
"Apache-2.0"
] | 7
|
2017-06-22T08:47:07.000Z
|
2021-07-01T12:33:02.000Z
|
src/ionotomo/tests/test_frames.py
|
Joshuaalbert/IonoTomo
|
9f50fbac698d43a824dd098d76dce93504c7b879
|
[
"Apache-2.0"
] | 1
|
2019-04-03T15:21:19.000Z
|
2019-04-03T15:48:31.000Z
|
src/ionotomo/tests/test_frames.py
|
Joshuaalbert/IonoTomo
|
9f50fbac698d43a824dd098d76dce93504c7b879
|
[
"Apache-2.0"
] | 2
|
2020-03-01T16:20:00.000Z
|
2020-07-07T15:09:02.000Z
|
from ionotomo.astro.frames.uvw_frame import UVW
from ionotomo.astro.frames.pointing_frame import Pointing
from ionotomo.astro.frames.enu_frame import ENU
import astropy.coordinates as ac
import astropy.time as at
import astropy.units as u
import numpy as np
def test_enu():
    """Round-trip ENU -> ITRS -> ENU must reproduce the coordinates,
    both with explicit metre units and with unit-less components."""
    obstime = at.Time("2017-01-26T17:07:00.000",format='isot',scale='utc')
    site = ac.EarthLocation(lon=10*u.deg,lat=0*u.deg,height=0*u.km)
    frame = ENU(location=site,obstime=obstime)

    # Case 1: components carry explicit length units.
    coords = ac.SkyCoord(east = np.array([0,1])*u.m,
                         north=np.array([0,1])*u.m,
                         up=np.array([0,1])*u.m,frame=frame)
    round_trip = coords.transform_to('itrs').transform_to(frame)
    assert np.all(np.isclose(coords.cartesian.xyz.value, round_trip.cartesian.xyz.value))

    # Case 2: dimensionless components.
    coords = ac.SkyCoord(east = np.array([0,1]),
                         north=np.array([0,1]),
                         up=np.array([0,1]),frame=frame)
    round_trip = coords.transform_to('itrs').transform_to(frame)
    assert np.all(np.isclose(coords.cartesian.xyz.value, round_trip.cartesian.xyz.value))
def compVectors(a,b):
    """Return True when the cartesian xyz components of *a* and *b* are
    element-wise close (numpy's default tolerances)."""
    lhs = a.cartesian.xyz.value
    rhs = b.cartesian.xyz.value
    return np.all(np.isclose(lhs, rhs))
def test_uvw():
    """Check UVW frame conventions against the local ENU frame.

    With x = East, z = projection toward the NCP, y = "down" completing the
    triad, this verifies:
      a) at ha=0, dec=90 the (u,v,w) axes coincide with local (x,y,z);
      b) v, w and z lie on a common great circle ((v x w) . z == 0);
      c) at ha=0 the u axis points east;
      d) at dec=0, ha=-6h the w axis points east.
    """
    # with X - East, Z - NCP and Y - Down
    time = at.Time("2017-01-26T17:07:00.000",format='isot',scale='utc')
    loc = ac.EarthLocation(lon=10*u.deg,lat=10*u.deg,height=0*u.km)
    enu = ENU(location=loc,obstime=time)
    # Reference basis vectors expressed in the ENU frame; loc.geodetic[1]
    # is the site latitude.
    x = ac.SkyCoord(1,0,0,frame=enu)
    z = ac.SkyCoord(0,np.cos(loc.geodetic[1].rad),np.sin(loc.geodetic[1].rad),frame=enu)
    #ncp = ac.SkyCoord(0*u.one,0*u.one,1*u.one,frame='itrs').transform_to(enu)
    y = ac.SkyCoord(0,np.sin(loc.geodetic[1].rad),-np.cos(loc.geodetic[1].rad),frame=enu)
    # Local mean sidereal time at the site longitude; hour angle ha = lst - ra.
    lst = time.sidereal_time('mean',10*u.deg)# ac.AltAz(alt=90*u.deg,az=0*u.deg,location=loc,obstime=time).transform_to(ac.ICRS).ra
    #ha = lst - ra
    print("a) when ha=0,dec=90 uvw aligns with xyz")
    ha = 0*u.deg
    ra = lst - ha
    dec = 90*u.deg
    phaseTrack = ac.SkyCoord(ra,dec,frame=ac.ICRS)
    uvw = UVW(obstime=time,location=loc,phase=phaseTrack)
    # Unit vectors of the UVW frame, re-expressed in ENU for comparison.
    U = ac.SkyCoord(1,0,0,frame=uvw).transform_to(enu)
    V = ac.SkyCoord(0,1,0,frame=uvw).transform_to(enu)
    W = ac.SkyCoord(0,0,1,frame=uvw).transform_to(enu)
    assert compVectors(U,x),"fail test a, u != x"
    assert compVectors(V,y),"fail test a, v != y"
    assert compVectors(W,z),"fail test a, w != z"
    # b) v, w, z are always on a great circle: (V x W) . z must vanish
    assert np.cross(V.cartesian.xyz.value,W.cartesian.xyz.value).dot(z.cartesian.xyz.value) < 1e-10, "Not on the great circle"
    # c) when ha = 0, U points east
    ha = 0*u.deg
    ra = lst - ha
    dec = 35*u.deg
    phaseTrack = ac.SkyCoord(ra,dec,frame=ac.ICRS)
    uvw = UVW(obstime=time,location=loc,phase=phaseTrack)
    U = ac.SkyCoord(1*u.m,0*u.m,0*u.m,frame=uvw).transform_to(enu)
    V = ac.SkyCoord(0*u.m,1*u.m,0*u.m,frame=uvw).transform_to(enu)
    W = ac.SkyCoord(0*u.m,0*u.m,1*u.m,frame=uvw).transform_to(enu)
    assert np.cross(V.cartesian.xyz.value,W.cartesian.xyz.value).dot(z.cartesian.xyz.value) < 1e-10, "Not on the great circle"
    east = ac.SkyCoord(1,0,0,frame=enu)
    assert compVectors(U,east),"fail test c, u != east"
    # d) when dec=0 and ha = -6h, w points east
    ha = -6*u.hourangle
    ra = lst - ha
    dec = 0*u.deg
    phaseTrack = ac.SkyCoord(ra,dec,frame=ac.ICRS)
    uvw = UVW(obstime=time,location=loc,phase=phaseTrack)
    U = ac.SkyCoord(1*u.m,0*u.m,0*u.m,frame=uvw).transform_to(enu)
    V = ac.SkyCoord(0*u.m,1*u.m,0*u.m,frame=uvw).transform_to(enu)
    W = ac.SkyCoord(0*u.m,0*u.m,1*u.m,frame=uvw).transform_to(enu)
    assert np.cross(V.cartesian.xyz.value,W.cartesian.xyz.value).dot(z.cartesian.xyz.value) < 1e-10, "Not on the great circle"
    assert compVectors(W,east),"fail test d, w != east"
def test_pointing():
    """Check that the Pointing frame matches the UVW conventions when the
    fix time equals the observation time, then exercise it with a fix time
    that differs from the observation time.

    Same checks as test_uvw (a-d), but against Pointing instead of UVW.
    The trailing section only prints transformed coordinates for manual
    inspection — it asserts nothing.
    """
    #print("Test uv conventions when fix and obs time are equal")
    # with X - East, Z - NCP and Y - Down
    time = at.Time("2017-01-26T17:07:00.000",format='isot',scale='utc')
    loc = ac.EarthLocation(lon=10*u.deg,lat=10*u.deg,height=0*u.km)
    enu = ENU(location=loc,obstime=time)
    # Reference basis vectors in the ENU frame (loc.geodetic[1] = latitude).
    x = ac.SkyCoord(1,0,0,frame=enu)
    z = ac.SkyCoord(0,np.cos(loc.geodetic[1].rad),np.sin(loc.geodetic[1].rad),frame=enu)
    #ncp = ac.SkyCoord(0*u.one,0*u.one,1*u.one,frame='itrs').transform_to(enu)
    y = ac.SkyCoord(0,np.sin(loc.geodetic[1].rad),-np.cos(loc.geodetic[1].rad),frame=enu)
    # Local mean sidereal time; hour angle ha = lst - ra.
    lst = time.sidereal_time('mean',10*u.deg)#ac.AltAz(alt=90*u.deg,az=0*u.deg,location=loc,obstime=time).transform_to(ac.ICRS).ra
    #ha = lst - ra
    # a) when ha=0, dec=90 the pointing axes align with local xyz
    ha = 0*u.deg
    ra = lst - ha
    dec = 90*u.deg
    phaseTrack = ac.SkyCoord(ra,dec,frame=ac.ICRS)
    pointing = Pointing(obstime=time,location=loc,phase=phaseTrack,fixtime=time)
    U = ac.SkyCoord(1,0,0,frame=pointing).transform_to(enu)
    V = ac.SkyCoord(0,1,0,frame=pointing).transform_to(enu)
    W = ac.SkyCoord(0,0,1,frame=pointing).transform_to(enu)
    assert compVectors(U,x),"fail test a, u != x"
    assert compVectors(V,y),"fail test a, v != y"
    assert compVectors(W,z),"fail test a, w != z"
    # b) v, w, z are always on a great circle: (V x W) . z must vanish
    assert np.cross(V.cartesian.xyz.value,W.cartesian.xyz.value).dot(z.cartesian.xyz.value) < 1e-10, "Not on the great circle"
    # c) when ha = 0, U points east
    ha = 0*u.deg
    ra = lst - ha
    dec = 35*u.deg
    phaseTrack = ac.SkyCoord(ra,dec,frame=ac.ICRS)
    pointing = Pointing(obstime=time,location=loc,phase=phaseTrack,fixtime=time)
    U = ac.SkyCoord(1*u.m,0*u.m,0*u.m,frame=pointing).transform_to(enu)
    V = ac.SkyCoord(0*u.m,1*u.m,0*u.m,frame=pointing).transform_to(enu)
    W = ac.SkyCoord(0*u.m,0*u.m,1*u.m,frame=pointing).transform_to(enu)
    assert np.cross(V.cartesian.xyz.value,W.cartesian.xyz.value).dot(z.cartesian.xyz.value) < 1e-10, "Not on the great circle"
    east = ac.SkyCoord(1,0,0,frame=enu)
    assert compVectors(U,east),"fail test c, u != east"
    # d) when dec=0 and ha = -6h, w points east
    ha = -6*u.hourangle
    ra = lst - ha
    dec = 0*u.deg
    phaseTrack = ac.SkyCoord(ra,dec,frame=ac.ICRS)
    pointing = Pointing(obstime=time,location=loc,phase=phaseTrack,fixtime=time)
    U = ac.SkyCoord(1*u.m,0*u.m,0*u.m,frame=pointing).transform_to(enu)
    V = ac.SkyCoord(0*u.m,1*u.m,0*u.m,frame=pointing).transform_to(enu)
    W = ac.SkyCoord(0*u.m,0*u.m,1*u.m,frame=pointing).transform_to(enu)
    assert np.cross(V.cartesian.xyz.value,W.cartesian.xyz.value).dot(z.cartesian.xyz.value) < 1e-10, "Not on the great circle"
    assert compVectors(W,east),"fail test d, w != east"
    # Extra: a pointing fixed six hours before the (new) observation time.
    # Diagnostic prints only — nothing is asserted below this point.
    fixtime = at.Time("2005-01-26T07:00:00.000",format='isot',scale='tai')
    time = at.Time("2005-01-26T13:00:00.000",format='isot',scale='tai')
    loc = ac.EarthLocation(lon=0*u.deg,lat=0*u.deg,height=0*u.km)
    lst = time.sidereal_time('mean',0*u.deg)#ac.AltAz(alt=90*u.deg,az=0*u.deg,location=loc,obstime=fixtime).transform_to(ac.ICRS).ra
    #ha = lst - ra
    ha = 0*u.deg
    ra = lst - ha
    dec = 0*u.deg
    phaseTrack = ac.SkyCoord(ra,dec,frame=ac.ICRS)
    #print(phaseTrack)
    pointing = Pointing(obstime=fixtime,location=loc,phase=phaseTrack,fixtime=fixtime)
    #print(phaseTrack.transform_to(pointing))
    #from ENUFrame import ENU
    enu = ENU(location=loc,obstime=time)
    east = ac.SkyCoord(1,0,0,frame=enu)
    print(east.cartesian.xyz)
    print(east.transform_to(pointing).cartesian.xyz)
    print(phaseTrack.transform_to(pointing).cartesian.xyz)
| 48.709091
| 132
| 0.651736
| 1,443
| 8,037
| 3.601525
| 0.09009
| 0.018857
| 0.078507
| 0.012315
| 0.873581
| 0.829902
| 0.817779
| 0.807389
| 0.796229
| 0.790071
| 0
| 0.040143
| 0.166231
| 8,037
| 164
| 133
| 49.006098
| 0.735413
| 0.167724
| 0
| 0.704918
| 0
| 0
| 0.082757
| 0.017304
| 0
| 0
| 0
| 0
| 0.147541
| 1
| 0.032787
| false
| 0
| 0.057377
| 0
| 0.098361
| 0.032787
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9f5091a489176c32452dcc634044063420f064cb
| 31,798
|
py
|
Python
|
tests/test_dedup.py
|
fferrin/pytopojson
|
5128136c9502f4e29330b6cc7e524641bff5f95e
|
[
"0BSD"
] | 11
|
2019-11-15T23:22:52.000Z
|
2022-01-22T20:46:30.000Z
|
tests/test_dedup.py
|
fferrin/topojson
|
7f90e497d2b54798f51480181c81c330770cb401
|
[
"0BSD"
] | 8
|
2019-11-08T03:03:29.000Z
|
2022-02-28T09:52:09.000Z
|
tests/test_dedup.py
|
fferrin/topojson
|
7f90e497d2b54798f51480181c81c330770cb401
|
[
"0BSD"
] | 2
|
2020-07-09T06:45:31.000Z
|
2021-03-22T13:38:35.000Z
|
import unittest
from pytopojson import (
cut,
extract,
dedup,
)
class CutTestCase(unittest.TestCase):
def setUp(self):
self.cut = cut.Cut()
self.dedup = dedup.Dedup()
self.extract = extract.Extract()
def test_dedup_exact_duplicate_lines_abc_and_abc_share_an_arc(self):
topology = self.dedup(
self.cut(
self.extract(
{
"abc": {"type": "LineString", "arcs": [[0, 0], [1, 0], [2, 0]]},
"abc2": {
"type": "LineString",
"arcs": [[0, 0], [1, 0], [2, 0]],
},
}
)
)
)
self.assertDictEqual(
{
"abc": {"type": "LineString", "arcs": {0: 0, 1: 2}},
"abc2": {"type": "LineString", "arcs": {0: 0, 1: 2}},
},
topology["objects"],
)
def test_dedup_reversed_duplicate_lines_abc_and_cba_share_an_arc(self):
topology = self.dedup(
self.cut(
self.extract(
{
"abc": {"type": "LineString", "arcs": [[0, 0], [1, 0], [2, 0]]},
"cba": {"type": "LineString", "arcs": [[2, 0], [1, 0], [0, 0]]},
}
)
)
)
self.assertDictEqual(
{
"abc": {"type": "LineString", "arcs": {0: 0, 1: 2}},
"cba": {"type": "LineString", "arcs": {0: 2, 1: 0}},
},
topology["objects"],
)
def test_dedup_exact_duplicate_rings_abca_and_abca_share_an_arc(self):
topology = self.dedup(
self.cut(
self.extract(
{
"abca": {
"type": "Polygon",
"arcs": [[[0, 0], [1, 0], [2, 0], [0, 0]]],
},
"abca2": {
"type": "Polygon",
"arcs": [[[0, 0], [1, 0], [2, 0], [0, 0]]],
},
}
)
)
)
self.assertDictEqual(
{
"abca": {"type": "Polygon", "arcs": [{0: 0, 1: 3}]},
"abca2": {"type": "Polygon", "arcs": [{0: 0, 1: 3}]},
},
topology["objects"],
)
def test_dedup_reversed_duplicate_rings_acba_and_abca_share_an_arc(self):
topology = self.dedup(
self.cut(
self.extract(
{
"abca": {
"type": "Polygon",
"arcs": [[[0, 0], [1, 0], [2, 0], [0, 0]]],
},
"acba": {
"type": "Polygon",
"arcs": [[[0, 0], [2, 0], [1, 0], [0, 0]]],
},
}
)
)
)
self.assertDictEqual(
{
"abca": {"type": "Polygon", "arcs": [{0: 0, 1: 3}]},
"acba": {"type": "Polygon", "arcs": [{0: 3, 1: 0}]},
},
topology["objects"],
)
def test_dedup_rotated_duplicate_rings_bcab_and_abca_share_an_arc(self):
topology = self.dedup(
self.cut(
self.extract(
{
"abca": {
"type": "Polygon",
"arcs": [[[0, 0], [1, 0], [2, 0], [0, 0]]],
},
"bcab": {
"type": "Polygon",
"arcs": [[[1, 0], [2, 0], [0, 0], [1, 0]]],
},
}
)
)
)
self.assertDictEqual(
{
"abca": {"type": "Polygon", "arcs": [{0: 0, 1: 3}]},
"bcab": {"type": "Polygon", "arcs": [{0: 0, 1: 3}]},
},
topology["objects"],
)
def test_dedup_ring_abca_and_line_abca_have_no_cuts(self):
topology = self.dedup(
self.cut(
self.extract(
{
"abcaLine": {
"type": "LineString",
"arcs": [[0, 0], [1, 0], [2, 0], [0, 0]],
},
"abcaPolygon": {
"type": "Polygon",
"arcs": [[[0, 0], [1, 0], [2, 0], [0, 0]]],
},
}
)
)
)
self.assertDictEqual(
{
"abcaLine": {"type": "LineString", "arcs": {0: 0, 1: 3}},
"abcaPolygon": {"type": "Polygon", "arcs": [{0: 0, 1: 3}]},
},
topology["objects"],
)
def test_dedup_ring_bcab_and_line_abca_have_no_cuts(self):
topology = self.dedup(
self.cut(
self.extract(
{
"abcaLine": {
"type": "LineString",
"arcs": [[0, 0], [1, 0], [2, 0], [0, 0]],
},
"bcabPolygon": {
"type": "Polygon",
"arcs": [[[1, 0], [2, 0], [0, 0], [1, 0]]],
},
}
)
)
)
self.assertDictEqual(
{
"abcaLine": {"type": "LineString", "arcs": {0: 0, 1: 3}},
"bcabPolygon": {"type": "Polygon", "arcs": [{0: 0, 1: 3}]},
},
topology["objects"],
)
def test_dedup_ring_abca_and_line_bcab_have_no_cuts(self):
topology = self.dedup(
self.cut(
self.extract(
{
"bcabLine": {
"type": "LineString",
"arcs": [[1, 0], [2, 0], [0, 0], [1, 0]],
},
"abcaPolygon": {
"type": "Polygon",
"arcs": [
[[0, 0], [1, 0], [2, 0], [0, 0]]
], # rotated to BCAB
},
}
)
)
)
self.assertDictEqual(
{
"bcabLine": {"type": "LineString", "arcs": {0: 0, 1: 3}},
"abcaPolygon": {"type": "Polygon", "arcs": [{0: 0, 1: 3}]},
},
topology["objects"],
)
def test_dedup_when_an_old_arc_abc_extends_a_new_arc_ab_abc_is_cut_into_ab_bc(self):
topology = self.dedup(
self.cut(
self.extract(
{
"abc": {"type": "LineString", "arcs": [[0, 0], [1, 0], [2, 0]]},
"ab": {"type": "LineString", "arcs": [[0, 0], [1, 0]]},
}
)
)
)
self.assertDictEqual(
{
"abc": {
"type": "LineString",
"arcs": {0: 0, 1: 1, "next": {0: 1, 1: 2}},
},
"ab": {"type": "LineString", "arcs": {0: 0, 1: 1}},
},
topology["objects"],
)
def test_dedup_when_a_reversed_old_arc_cba_extends_a_new_arc_ab_cba_is_cut_into_cb_ba(
self,
):
topology = self.dedup(
self.cut(
self.extract(
{
"cba": {"type": "LineString", "arcs": [[2, 0], [1, 0], [0, 0]]},
"ab": {"type": "LineString", "arcs": [[0, 0], [1, 0]]},
}
)
)
)
self.assertDictEqual(
{
"cba": {
"type": "LineString",
"arcs": {0: 0, 1: 1, "next": {0: 1, 1: 2}},
},
"ab": {"type": "LineString", "arcs": {0: 2, 1: 1}},
},
topology["objects"],
)
def test_dedup_when_a_new_arc_ade_shares_its_start_with_an_old_arc_abc_there_are_no_cuts(
self,
):
topology = self.dedup(
self.cut(
self.extract(
{
"ade": {"type": "LineString", "arcs": [[0, 0], [1, 0], [2, 0]]},
"abc": {"type": "LineString", "arcs": [[0, 0], [1, 1], [2, 1]]},
}
)
)
)
self.assertDictEqual(
{
"ade": {"type": "LineString", "arcs": {0: 0, 1: 2}},
"abc": {"type": "LineString", "arcs": {0: 3, 1: 5}},
},
topology["objects"],
)
def test_dedup_ring_aba_has_no_cuts(self):
topology = self.dedup(
self.cut(
self.extract(
{"aba": {"type": "Polygon", "arcs": [[[0, 0], [1, 0], [0, 0]]]}}
)
)
)
self.assertDictEqual(
{"aba": {"type": "Polygon", "arcs": [{0: 0, 1: 2}]}}, topology["objects"]
)
def test_dedup_ring_aa_has_no_cuts(self):
topology = self.dedup(
self.cut(
self.extract({"aa": {"type": "Polygon", "arcs": [[[0, 0], [0, 0]]]}})
)
)
self.assertDictEqual(
{"aa": {"type": "Polygon", "arcs": [{0: 0, 1: 1}]}}, topology["objects"]
)
def test_dedup_degenerate_ring_a_has_no_cuts(self):
topology = self.dedup(
self.cut(self.extract({"a": {"type": "Polygon", "arcs": [[[0, 0]]]}}))
)
self.assertDictEqual(
{"a": {"type": "Polygon", "arcs": [{0: 0, 1: 0}]}}, topology["objects"]
)
def test_dedup_when_a_new_line_dec_shares_its_end_with_an_old_line_abc_there_are_no_cuts(
self,
):
topology = self.dedup(
self.cut(
self.extract(
{
"abc": {"type": "LineString", "arcs": [[0, 0], [1, 0], [2, 0]]},
"dec": {"type": "LineString", "arcs": [[0, 1], [1, 1], [2, 0]]},
}
)
)
)
self.assertDictEqual(
{
"abc": {"type": "LineString", "arcs": {0: 0, 1: 2}},
"dec": {"type": "LineString", "arcs": {0: 3, 1: 5}},
},
topology["objects"],
)
def test_dedup_when_a_new_line_abc_extends_an_old_line_ab_abc_is_cut_into_ab_bc(
self,
):
topology = self.dedup(
self.cut(
self.extract(
{
"ab": {"type": "LineString", "arcs": [[0, 0], [1, 0]]},
"abc": {"type": "LineString", "arcs": [[0, 0], [1, 0], [2, 0]]},
}
)
)
)
self.assertDictEqual(
{
"ab": {"type": "LineString", "arcs": {0: 0, 1: 1}},
"abc": {
"type": "LineString",
"arcs": {0: 0, 1: 1, "next": {0: 3, 1: 4}},
},
},
topology["objects"],
)
def test_dedup_when_a_new_line_abc_extends_a_reversed_old_line_ba_abc_is_cut_into_ab_bc(
self,
):
topology = self.dedup(
self.cut(
self.extract(
{
"ba": {"type": "LineString", "arcs": [[1, 0], [0, 0]]},
"abc": {"type": "LineString", "arcs": [[0, 0], [1, 0], [2, 0]]},
}
)
)
)
self.assertDictEqual(
{
"ba": {"type": "LineString", "arcs": {0: 0, 1: 1}},
"abc": {
"type": "LineString",
"arcs": {0: 1, 1: 0, "next": {0: 3, 1: 4}},
},
},
topology["objects"],
)
def test_dedup_when_a_new_line_starts_bc_in_the_middle_of_an_old_line_abc_abc_is_cut_into_ab_bc(
self,
):
topology = self.dedup(
self.cut(
self.extract(
{
"abc": {"type": "LineString", "arcs": [[0, 0], [1, 0], [2, 0]]},
"bc": {"type": "LineString", "arcs": [[1, 0], [2, 0]]},
}
)
)
)
self.assertDictEqual(
{
"abc": {
"type": "LineString",
"arcs": {0: 0, 1: 1, "next": {0: 1, 1: 2}},
},
"bc": {"type": "LineString", "arcs": {0: 1, 1: 2}},
},
topology["objects"],
)
def test_dedup_when_a_new_line_bc_starts_in_the_middle_of_a_reversed_old_line_cba_cba_is_cut_into_cb_ba(
self,
):
topology = self.dedup(
self.cut(
self.extract(
{
"cba": {"type": "LineString", "arcs": [[2, 0], [1, 0], [0, 0]]},
"bc": {"type": "LineString", "arcs": [[1, 0], [2, 0]]},
}
)
)
)
self.assertDictEqual(
{
"cba": {
"type": "LineString",
"arcs": {0: 0, 1: 1, "next": {0: 1, 1: 2}},
},
"bc": {"type": "LineString", "arcs": {0: 1, 1: 0}},
},
topology["objects"],
)
def test_dedup_when_a_new_line_abd_deviates_from_an_old_line_abc_abd_is_cut_into_ab_bd_and_abc_is_cut_into_ab_bc(
self,
):
topology = self.dedup(
self.cut(
self.extract(
{
"abc": {"type": "LineString", "arcs": [[0, 0], [1, 0], [2, 0]]},
"abd": {"type": "LineString", "arcs": [[0, 0], [1, 0], [3, 0]]},
}
)
)
)
self.assertDictEqual(
{
"abc": {
"type": "LineString",
"arcs": {0: 0, 1: 1, "next": {0: 1, 1: 2}},
},
"abd": {
"type": "LineString",
"arcs": {0: 0, 1: 1, "next": {0: 4, 1: 5}},
},
},
topology["objects"],
)
def test_dedup_when_a_new_line_abd_deviates_from_a_reversed_old_line_cba_cba_is_cut_into_cb_ba_and_abd_is_cut_into_ab_bd(
self,
):
topology = self.dedup(
self.cut(
self.extract(
{
"cba": {"type": "LineString", "arcs": [[2, 0], [1, 0], [0, 0]]},
"abd": {"type": "LineString", "arcs": [[0, 0], [1, 0], [3, 0]]},
}
)
)
)
self.assertDictEqual(
{
"cba": {
"type": "LineString",
"arcs": {0: 0, 1: 1, "next": {0: 1, 1: 2}},
},
"abd": {
"type": "LineString",
"arcs": {0: 2, 1: 1, "next": {0: 4, 1: 5}},
},
},
topology["objects"],
)
def test_dedup_when_a_new_line_dbc_merges_into_an_old_line_abc_dbc_is_cut_into_db_bc_and_abc_is_cut_into_ab_bc(
self,
):
topology = self.dedup(
self.cut(
self.extract(
{
"abc": {"type": "LineString", "arcs": [[0, 0], [1, 0], [2, 0]]},
"dbc": {"type": "LineString", "arcs": [[3, 0], [1, 0], [2, 0]]},
}
)
)
)
self.assertDictEqual(
{
"abc": {
"type": "LineString",
"arcs": {0: 0, 1: 1, "next": {0: 1, 1: 2}},
},
"dbc": {
"type": "LineString",
"arcs": {0: 3, 1: 4, "next": {0: 1, 1: 2}},
},
},
topology["objects"],
)
def test_dedup_when_a_new_line_dbc_merges_into_a_reversed_old_line_cba_dbc_is_cut_into_db_bc_and_cba_is_cut_into_cb_ba(
self,
):
topology = self.dedup(
self.cut(
self.extract(
{
"cba": {"type": "LineString", "arcs": [[2, 0], [1, 0], [0, 0]]},
"dbc": {"type": "LineString", "arcs": [[3, 0], [1, 0], [2, 0]]},
}
)
)
)
self.assertDictEqual(
{
"cba": {
"type": "LineString",
"arcs": {0: 0, 1: 1, "next": {0: 1, 1: 2}},
},
"dbc": {
"type": "LineString",
"arcs": {0: 3, 1: 4, "next": {0: 1, 1: 0}},
},
},
topology["objects"],
)
def test_dedup_when_a_new_line_dbe_shares_a_single_midpoint_with_an_old_line_abc_dbe_is_cut_into_db_be_and_abc_is_cut_into_ab_bc(
self,
):
topology = self.dedup(
self.cut(
self.extract(
{
"abc": {"type": "LineString", "arcs": [[0, 0], [1, 0], [2, 0]]},
"dbc": {"type": "LineString", "arcs": [[0, 1], [1, 0], [2, 1]]},
}
)
)
)
self.assertDictEqual(
{
"abc": {
"type": "LineString",
"arcs": {0: 0, 1: 1, "next": {0: 1, 1: 2}},
},
"dbc": {
"type": "LineString",
"arcs": {0: 3, 1: 4, "next": {0: 4, 1: 5}},
},
},
topology["objects"],
)
def test_dedup_when_a_new_line_abde_skips_a_point_with_an_old_line_abcde_abde_is_cut_into_ab_bd_de_and_abcde_is_cut_into_ab_bcd_de(
self,
):
topology = self.dedup(
self.cut(
self.extract(
{
"abcde": {
"type": "LineString",
"arcs": [[0, 0], [1, 0], [2, 0], [3, 0], [4, 0]],
},
"abde": {
"type": "LineString",
"arcs": [[0, 0], [1, 0], [3, 0], [4, 0]],
},
}
)
)
)
self.assertDictEqual(
{
"abcde": {
"type": "LineString",
"arcs": {0: 0, 1: 1, "next": {0: 1, 1: 3, "next": {0: 3, 1: 4}}},
},
"abde": {
"type": "LineString",
"arcs": {0: 0, 1: 1, "next": {0: 6, 1: 7, "next": {0: 3, 1: 4}}},
},
},
topology["objects"],
)
def test_dedup_when_a_new_line_abde_skips_a_point_with_a_reversed_old_line_edcba_abde_is_cut_into_ab_bd_de_and_edcba_is_cut_into_ed_dcb_ba(
self,
):
topology = self.dedup(
self.cut(
self.extract(
{
"edcba": {
"type": "LineString",
"arcs": [[4, 0], [3, 0], [2, 0], [1, 0], [0, 0]],
},
"abde": {
"type": "LineString",
"arcs": [[0, 0], [1, 0], [3, 0], [4, 0]],
},
}
)
)
)
self.assertDictEqual(
{
"edcba": {
"type": "LineString",
"arcs": {0: 0, 1: 1, "next": {0: 1, 1: 3, "next": {0: 3, 1: 4}}},
},
"abde": {
"type": "LineString",
"arcs": {0: 4, 1: 3, "next": {0: 6, 1: 7, "next": {0: 1, 1: 0}}},
},
},
topology["objects"],
)
def test_dedup_when_a_line_abcdbe_self_intersects_with_its_middle_it_is_not_cut(
self,
):
topology = self.dedup(
self.cut(
self.extract(
{
"abcdbe": {
"type": "LineString",
"arcs": [[0, 0], [1, 0], [2, 0], [3, 0], [1, 0], [4, 0]],
}
}
)
)
)
self.assertDictEqual(
{"abcdbe": {"type": "LineString", "arcs": {0: 0, 1: 5}}},
topology["objects"],
)
def test_dedup_when_a_line_abacd_self_intersects_with_its_start_it_is_cut_into_aba_acd(
self,
):
topology = self.dedup(
self.cut(
self.extract(
{
"abacd": {
"type": "LineString",
"arcs": [[0, 0], [1, 0], [0, 0], [3, 0], [4, 0]],
}
}
)
)
)
self.assertDictEqual(
{
"abacd": {
"type": "LineString",
"arcs": {0: 0, 1: 2, "next": {0: 2, 1: 4}},
}
},
topology["objects"],
)
def test_dedup_when_a_line_abdcd_self_intersects_with_its_end_it_is_cut_into_abd_dcd(
self,
):
topology = self.dedup(
self.cut(
self.extract(
{
"abdcd": {
"type": "LineString",
"arcs": [[0, 0], [1, 0], [4, 0], [3, 0], [4, 0]],
}
}
)
)
)
self.assertDictEqual(
{
"abdcd": {
"type": "LineString",
"arcs": {0: 0, 1: 2, "next": {0: 2, 1: 4}},
}
},
topology["objects"],
)
def test_dedup_when_an_old_line_abcdbe_self_intersects_and_shares_a_point_b_abcdbe_is_cut_into_ab_bcdb_be_and_fbg_is_cut_into_fb_bg(
self,
):
topology = self.dedup(
self.cut(
self.extract(
{
"abcdbe": {
"type": "LineString",
"arcs": [[0, 0], [1, 0], [2, 0], [3, 0], [1, 0], [4, 0]],
},
"fbg": {"type": "LineString", "arcs": [[0, 1], [1, 0], [2, 1]]},
}
)
)
)
self.assertDictEqual(
{
"abcdbe": {
"type": "LineString",
"arcs": {0: 0, 1: 1, "next": {0: 1, 1: 4, "next": {0: 4, 1: 5}}},
},
"fbg": {
"type": "LineString",
"arcs": {0: 6, 1: 7, "next": {0: 7, 1: 8}},
},
},
topology["objects"],
)
def test_dedup_when_a_line_abca_is_closed_there_are_no_cuts(self):
topology = self.dedup(
self.cut(
self.extract(
{
"abca": {
"type": "LineString",
"arcs": [[0, 0], [1, 0], [0, 1], [0, 0]],
}
}
)
)
)
self.assertDictEqual(
{"abca": {"type": "LineString", "arcs": {0: 0, 1: 3}}}, topology["objects"]
)
def test_dedup_when_a_ring_abca_is_closed_there_are_no_cuts(self):
topology = self.dedup(
self.cut(
self.extract(
{
"abca": {
"type": "Polygon",
"arcs": [[[0, 0], [1, 0], [0, 1], [0, 0]]],
}
}
)
)
)
self.assertDictEqual(
{"abca": {"type": "Polygon", "arcs": [{0: 0, 1: 3}]}}, topology["objects"]
)
def test_dedup_exact_duplicate_rings_abca_and_abca_have_no_cuts(self):
topology = self.dedup(
self.cut(
self.extract(
{
"abca": {
"type": "Polygon",
"arcs": [[[0, 0], [1, 0], [0, 1], [0, 0]]],
},
"abca2": {
"type": "Polygon",
"arcs": [[[0, 0], [1, 0], [0, 1], [0, 0]]],
},
}
)
)
)
self.assertDictEqual(
{
"abca": {"type": "Polygon", "arcs": [{0: 0, 1: 3}]},
"abca2": {"type": "Polygon", "arcs": [{0: 0, 1: 3}]},
},
topology["objects"],
)
def test_dedup_reversed_duplicate_rings_abca_and_acba_have_no_cuts(self):
topology = self.dedup(
self.cut(
self.extract(
{
"abca": {
"type": "Polygon",
"arcs": [[[0, 0], [1, 0], [0, 1], [0, 0]]],
},
"acba": {
"type": "Polygon",
"arcs": [[[0, 0], [0, 1], [1, 0], [0, 0]]],
},
}
)
)
)
self.assertDictEqual(
{
"abca": {"type": "Polygon", "arcs": [{0: 0, 1: 3}]},
"acba": {"type": "Polygon", "arcs": [{0: 3, 1: 0}]},
},
topology["objects"],
)
def test_dedup_coincident_rings_abca_and_bcab_have_no_cuts(self):
topology = self.dedup(
self.cut(
self.extract(
{
"abca": {
"type": "Polygon",
"arcs": [[[0, 0], [1, 0], [0, 1], [0, 0]]],
},
"bcab": {
"type": "Polygon",
"arcs": [[[1, 0], [0, 1], [0, 0], [1, 0]]],
},
}
)
)
)
self.assertDictEqual(
{
"abca": {"type": "Polygon", "arcs": [{0: 0, 1: 3}]},
"bcab": {"type": "Polygon", "arcs": [{0: 0, 1: 3}]},
},
topology["objects"],
)
def test_dedup_coincident_reversed_rings_abca_and_bacb_have_no_cuts(self):
    """A rotated AND reversed copy (BACB) shares the arc, traversed backwards."""
    topology = self.dedup(
        self.cut(
            self.extract(
                {
                    "abca": {
                        "type": "Polygon",
                        "arcs": [[[0, 0], [1, 0], [0, 1], [0, 0]]],
                    },
                    "bacb": {
                        "type": "Polygon",
                        "arcs": [[[1, 0], [0, 0], [0, 1], [1, 0]]],
                    },
                }
            )
        )
    )
    self.assertDictEqual(
        {
            "abca": {"type": "Polygon", "arcs": [{0: 0, 1: 3}]},
            # reversed traversal of the shared arc
            "bacb": {"type": "Polygon", "arcs": [{0: 3, 1: 0}]},
        },
        topology["objects"],
    )
def test_dedup_coincident_rings_abcda_efae_and_ghcg_are_cut_into_abc_cda_efae_and_ghcg(
    self,
):
    """Rings EFAE and GHCG touch ABCDA at single points (A and C), so ABCDA is
    cut into two arcs (ABC + CDA) while the touching rings stay whole."""
    topology = self.dedup(
        self.cut(
            self.extract(
                {
                    "abcda": {
                        "type": "Polygon",
                        "arcs": [[[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]]],
                    },
                    "efae": {
                        "type": "Polygon",
                        "arcs": [[[0, -1], [1, -1], [0, 0], [0, -1]]],
                    },
                    "ghcg": {
                        "type": "Polygon",
                        "arcs": [[[0, 2], [1, 2], [1, 1], [0, 2]]],
                    },
                }
            )
        )
    )
    self.assertDictEqual(
        {
            "abcda": {
                "type": "Polygon",
                "arcs": [{0: 0, 1: 2, "next": {0: 2, 1: 4}}],
            },
            "efae": {"type": "Polygon", "arcs": [{0: 5, 1: 8}]},
            "ghcg": {"type": "Polygon", "arcs": [{0: 9, 1: 12}]},
        },
        topology["objects"],
    )
def test_dedup_coincident_rings_abca_and_dbed_have_no_cuts_but_are_rotated_to_share_b(
    self,
):
    """Rings that share only point B are each rotated to start at B; the
    coordinate assertions verify both arcs now begin at [1, 0] (point B)."""
    topology = self.dedup(
        self.cut(
            self.extract(
                {
                    "abca": {
                        "type": "Polygon",
                        "arcs": [[[0, 0], [1, 0], [0, 1], [0, 0]]],
                    },
                    "dbed": {
                        "type": "Polygon",
                        "arcs": [[[2, 1], [1, 0], [2, 2], [2, 1]]],
                    },
                }
            )
        )
    )
    self.assertDictEqual(
        {
            "abca": {"type": "Polygon", "arcs": [{0: 0, 1: 3}]},
            "dbed": {"type": "Polygon", "arcs": [{0: 4, 1: 7}]},
        },
        topology["objects"],
    )
    self.assertListEqual(
        topology["coordinates"][:4], [[1, 0], [0, 1], [0, 0], [1, 0]]
    )
    self.assertListEqual(
        topology["coordinates"][4:], [[1, 0], [2, 2], [2, 1], [1, 0]]
    )
def test_dedup_overlapping_rings_abcda_and_befcb_are_cut_into_bc_cdab_and_befc_cb(
    self,
):
    """Rings sharing edge BC are each cut; BEFCB reuses ABCDA's BC arc in
    reverse ({0: 1, 1: 0}) instead of storing a duplicate arc."""
    topology = self.dedup(
        self.cut(
            self.extract(
                {
                    "abcda": {
                        "type": "Polygon",
                        "arcs": [
                            [[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]]
                        ],  # rotated to BCDAB, cut BC-CDAB
                    },
                    "befcb": {
                        "type": "Polygon",
                        "arcs": [[[1, 0], [2, 0], [2, 1], [1, 1], [1, 0]]],
                    },
                }
            )
        )
    )
    self.assertDictEqual(
        {
            "abcda": {
                "type": "Polygon",
                "arcs": [{0: 0, 1: 1, "next": {0: 1, 1: 4}}],
            },
            "befcb": {
                "type": "Polygon",
                "arcs": [{0: 5, 1: 8, "next": {0: 1, 1: 0}}],
            },
        },
        topology["objects"],
    )
| 31.82983
| 143
| 0.322159
| 2,736
| 31,798
| 3.520468
| 0.050073
| 0.035922
| 0.035195
| 0.069041
| 0.876765
| 0.849356
| 0.831292
| 0.781458
| 0.74076
| 0.704215
| 0
| 0.061195
| 0.516416
| 31,798
| 998
| 144
| 31.861723
| 0.565195
| 0.001415
| 0
| 0.476554
| 0
| 0
| 0.106677
| 0
| 0
| 0
| 0
| 0
| 0.044711
| 1
| 0.043621
| false
| 0
| 0.002181
| 0
| 0.046892
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9f55fe6616245c3076c041f7935a7cc358002aa4
| 4,469
|
py
|
Python
|
sensepy/tests/test_capture.py
|
Mojusko/sensepy
|
550a15859791799db5aba93580913317c1905b2e
|
[
"MIT"
] | 1
|
2022-02-01T21:58:06.000Z
|
2022-02-01T21:58:06.000Z
|
sensepy/tests/test_capture.py
|
Mojusko/sensepy
|
550a15859791799db5aba93580913317c1905b2e
|
[
"MIT"
] | null | null | null |
sensepy/tests/test_capture.py
|
Mojusko/sensepy
|
550a15859791799db5aba93580913317c1905b2e
|
[
"MIT"
] | null | null | null |
# This source code is part of the sensepy package and is distributed
# under the License. Please see 'LICENSE.rst' for further
# information.
__author__ = "Mojmir Mutny"
__copyright__ = "Copyright (c) 2022 Mojmir Mutny, ETH Zurich"
__credits__ = ["Mojmir Mutny"]
__license__ = "MIT Licence"
__version__ = "0.1"
__email__ = "mojmir.mutny@inf.ethz.ch"
__status__ = "DEV"
from sensepy import PoissonRateEstimator
from sensepy import PoissonPointProcess
from sensepy import CaptureUCB, CaptureThompson, CaptureIDS
from stpy import HierarchicalBorelSets, BorelSet
from stpy import KernelFunction
import torch
import pytest
@pytest.fixture
def example_setup_count_record():
    """Build a 1-D Poisson sensing problem with 'count-record' feedback.

    Returns ``[process, estimator, w, actions, data, dt]``: the ground-truth
    point process, a rate estimator fitted on empty data, a volume-based
    action weight, the candidate sensing sets (level 5 of the hierarchy),
    the empty initial dataset, and a sensing duration dt scaled to the
    smallest basic set's volume.
    """
    d = 1
    gamma = 0.1
    B = 4.
    b = 1.
    m = 64
    process = PoissonPointProcess(d=1, B=B, b=b)
    levels = 6
    action_level = 5
    hierarchical_structure = HierarchicalBorelSets(d=1, interval=(-1, 1), levels=levels)
    basic_sets = hierarchical_structure.get_sets_level(hierarchical_structure.levels)
    actions = hierarchical_structure.get_sets_level(action_level)
    k = KernelFunction(gamma=gamma, kappa=B, d = d)
    estimator = PoissonRateEstimator(process, hierarchical_structure, kernel_object=k, B=B + b, b=b, m=m, jitter=10e-3,
                                     estimator='likelihood', uncertainty='laplace', approx='ellipsoid',
                                     feedback='count-record')
    # dt chosen so that vol * b * dt == 1 for the finest-level sets
    vol = basic_sets[0].volume()
    dt = 1. / (vol * b)
    data = []
    estimator.load_data(data)
    estimator.fit_gp()
    w = lambda s: s.volume()
    return [process, estimator, w, actions, data, dt]
@pytest.fixture
def example_setup_histogram():
    """Build a 1-D Poisson sensing problem with 'histogram' feedback.

    Identical to ``example_setup_count_record`` except for the estimator's
    ``feedback`` mode; returns ``[process, estimator, w, actions, data, dt]``.
    """
    d = 1
    gamma = 0.1
    B = 4.
    b = 1.
    m = 64
    process = PoissonPointProcess(d=1, B=B, b=b)
    levels = 6
    action_level = 5
    hierarchical_structure = HierarchicalBorelSets(d=1, interval=(-1, 1), levels=levels)
    basic_sets = hierarchical_structure.get_sets_level(hierarchical_structure.levels)
    actions = hierarchical_structure.get_sets_level(action_level)
    k = KernelFunction(gamma=gamma, kappa=B, d = d)
    estimator = PoissonRateEstimator(process, hierarchical_structure, kernel_object=k, B=B + b, b=b, m=m, jitter=10e-3,
                                     estimator='likelihood', uncertainty='laplace', approx='ellipsoid',
                                     feedback='histogram')
    # dt chosen so that vol * b * dt == 1 for the finest-level sets
    vol = basic_sets[0].volume()
    dt = 1. / (vol * b)
    data = []
    estimator.load_data(data)
    estimator.fit_gp()
    w = lambda s: s.volume()
    return [process, estimator, w, actions, data, dt]
def test_capture_ucb_count_record(example_setup_count_record):
    """Smoke-test CaptureUCB on count-record feedback: ten fit/step rounds."""
    process, estimator, w, actions, data, dt = example_setup_count_record
    bandit = CaptureUCB(process, estimator, w, initial_data=data, dt=dt, topk=1)
    total_rounds = 10
    completed = 0
    for _ in range(total_rounds):
        bandit.fit_estimator()
        cost, events, _, _ = bandit.step(actions, verbose=False)
        completed += 1
    # reached only if every round ran without raising
    assert completed == total_rounds
def test_capture_ids_count_record(example_setup_count_record):
    """Smoke-test CaptureIDS on count-record feedback: ten fit/step rounds."""
    process, estimator, w, actions, data, dt = example_setup_count_record
    bandit = CaptureIDS(process, estimator, w, initial_data=data, dt=dt, topk=1)
    total_rounds = 10
    completed = 0
    for _ in range(total_rounds):
        bandit.fit_estimator()
        cost, events, _, _ = bandit.step(actions, verbose=False)
        completed += 1
    # reached only if every round ran without raising
    assert completed == total_rounds
def test_capture_thompson_count_record(example_setup_count_record):
    """Smoke-test CaptureThompson on count-record feedback: ten fit/step rounds."""
    process, estimator, w, actions, data, dt = example_setup_count_record
    estimator.steps = 5  # keep the sampler's step count low for test speed
    bandit = CaptureThompson(process, estimator, w, initial_data=data, dt=dt, topk=1)
    total_rounds = 10
    completed = 0
    for _ in range(total_rounds):
        bandit.fit_estimator()
        cost, events, _, _ = bandit.step(actions, verbose=False)
        completed += 1
    # reached only if every round ran without raising
    assert completed == total_rounds
def test_capture_ucb_histogram(example_setup_histogram):
    """Smoke-test CaptureUCB on histogram feedback: ten fit/step rounds."""
    process, estimator, w, actions, data, dt = example_setup_histogram
    bandit = CaptureUCB(process, estimator, w, initial_data=data, dt=dt, topk=1)
    total_rounds = 10
    completed = 0
    for _ in range(total_rounds):
        bandit.fit_estimator()
        cost, events, _, _ = bandit.step(actions, verbose=False)
        completed += 1
    # reached only if every round ran without raising
    assert completed == total_rounds
def test_capture_ids_histogram(example_setup_histogram):
    """Smoke-test CaptureIDS on histogram feedback: ten fit/step rounds."""
    process, estimator, w, actions, data, dt = example_setup_histogram
    bandit = CaptureIDS(process, estimator, w, initial_data=data, dt=dt, topk=1)
    total_rounds = 10
    completed = 0
    for _ in range(total_rounds):
        bandit.fit_estimator()
        cost, events, _, _ = bandit.step(actions, verbose=False)
        completed += 1
    # reached only if every round ran without raising
    assert completed == total_rounds
def test_capture_ucb_histogram_batch(example_setup_histogram):
    """Smoke-test CaptureUCB in batch mode (topk=2) on histogram feedback."""
    process, estimator, w, actions, data, dt = example_setup_histogram
    bandit = CaptureUCB(process, estimator, w, initial_data=data, dt=dt, topk=2)
    total_rounds = 10
    completed = 0
    for _ in range(total_rounds):
        bandit.fit_estimator()
        cost, events, _, _ = bandit.step(actions, verbose=False)
        completed += 1
    # reached only if every round ran without raising
    assert completed == total_rounds
| 31.695035
| 116
| 0.735064
| 639
| 4,469
| 4.920188
| 0.200313
| 0.053435
| 0.0757
| 0.061069
| 0.811387
| 0.793575
| 0.793575
| 0.793575
| 0.793575
| 0.793575
| 0
| 0.017521
| 0.144328
| 4,469
| 141
| 117
| 31.695035
| 0.804655
| 0.035802
| 0
| 0.756757
| 0
| 0
| 0.042044
| 0.005575
| 0
| 0
| 0
| 0
| 0.054054
| 1
| 0.072072
| false
| 0
| 0.063063
| 0
| 0.153153
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9fcb01af2806084c1922705bc17678b59da93f49
| 332
|
py
|
Python
|
machin/parallel/distributed/__init__.py
|
ikamensh/machin
|
af7b423c47bc1412530cf6c96c11bd3af9b3e239
|
[
"MIT"
] | 1
|
2021-04-01T21:21:23.000Z
|
2021-04-01T21:21:23.000Z
|
machin/parallel/distributed/__init__.py
|
ikamensh/machin
|
af7b423c47bc1412530cf6c96c11bd3af9b3e239
|
[
"MIT"
] | null | null | null |
machin/parallel/distributed/__init__.py
|
ikamensh/machin
|
af7b423c47bc1412530cf6c96c11bd3af9b3e239
|
[
"MIT"
] | null | null | null |
from .world import (
    World,
    CollectiveGroup,
    RpcGroup,
    get_world,
    get_cur_rank,
    get_cur_name,
    is_world_initialized,
)
from . import world

# Public API of the distributed sub-package: the world/group wrappers and
# accessors re-exported from .world, plus the world module itself.
__all__ = [
    "World",
    "CollectiveGroup",
    "RpcGroup",
    "get_world",
    "get_cur_rank",
    "get_cur_name",
    "is_world_initialized",
    "world",
]
| 14.434783
| 27
| 0.614458
| 36
| 332
| 5.166667
| 0.333333
| 0.129032
| 0.301075
| 0.333333
| 0.795699
| 0.795699
| 0.795699
| 0.795699
| 0.795699
| 0.795699
| 0
| 0
| 0.271084
| 332
| 22
| 28
| 15.090909
| 0.768595
| 0
| 0
| 0
| 0
| 0
| 0.259036
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4ca38773cae1298b08ad0ad357382e07f7fb96be
| 1,528
|
py
|
Python
|
test/check_data.py
|
amiralansary/BrainSurfaceTK
|
17e3ef5e1c5d6e1a75293fbe031977ec3fbe0fef
|
[
"MIT"
] | null | null | null |
test/check_data.py
|
amiralansary/BrainSurfaceTK
|
17e3ef5e1c5d6e1a75293fbe031977ec3fbe0fef
|
[
"MIT"
] | null | null | null |
test/check_data.py
|
amiralansary/BrainSurfaceTK
|
17e3ef5e1c5d6e1a75293fbe031977ec3fbe0fef
|
[
"MIT"
] | null | null | null |
from nilearn import surface
import pyvista as pv
import torch  # BUGFIX: torch was used at the bottom but never imported

# Exploratory sanity-check script: load surface geometry and parcellation
# label files for a few dHCP subjects. Each load raises if a file is
# missing or malformed, which is the "check" this script performs.

# ico6 white-matter surface geometry (left hemisphere).
surf_data = surface.load_surf_data('../data/parcellation/ico6_white_surfaces/sub-CC00060XX03_ses-12501_left_white.40k_fs_LR.surf.gii')

# Original corrected label set...
label_data_1 = surface.load_surf_data('../data/parcellation/labels_corrected/sub-CC00060XX03_ses-12501_R.label.gii')
label_data_2 = surface.load_surf_data('../data/parcellation/labels_corrected/sub-CC00062XX05_ses-13801_R.label.gii')
label_data_3 = surface.load_surf_data('../data/parcellation/labels_corrected/sub-CC00065XX08_ses-18600_R.label.gii')

# ...immediately overwritten by the newer label set (both must load cleanly).
label_data_1 = surface.load_surf_data('../data/parcellation/new_labels_for_amir/labels/sub-CC00060XX03_ses-12501_R.label.gii')
label_data_2 = surface.load_surf_data('../data/parcellation/new_labels_for_amir/labels/sub-CC00062XX05_ses-13801_R.label.gii')
label_data_3 = surface.load_surf_data('../data/parcellation/new_labels_for_amir/labels/sub-CC00065XX08_ses-18600_R.label.gii')

# NOTE(review): loading a .vtp file through nilearn looks like a leftover —
# the .vtp meshes are read with pyvista just below; confirm intent.
label_data_3 = surface.load_surf_data('../data/parcellation/surf_feat_label_vtp_new/sub-CC00060XX03_ses-12501_left_white.40k_fs_LR.surf.shape.label.vtp')

# Combined surface + features + labels meshes (right hemisphere).
mesh_1 = pv.read('../data/parcellation/surf_feat_label_vtp_new/sub-CC00060XX03_ses-12501_right_white.40k_fs_LR.surf.shape.label.vtp')
mesh_2 = pv.read('../data/parcellation/surf_feat_label_vtp_new/sub-CC00062XX05_ses-13801_right_white.40k_fs_LR.surf.shape.label.vtp')
mesh_3 = pv.read('../data/parcellation/surf_feat_label_vtp_new/sub-CC00065XX08_ses-18600_right_white.40k_fs_LR.surf.shape.label.vtp')
# Get points
# BUGFIX: `mesh` was undefined (only mesh_1..mesh_3 exist) — use mesh_1.
points = torch.tensor(mesh_1.points)
| 63.666667
| 153
| 0.834424
| 253
| 1,528
| 4.640316
| 0.185771
| 0.149915
| 0.102215
| 0.129472
| 0.878194
| 0.878194
| 0.848382
| 0.848382
| 0.848382
| 0.726576
| 0
| 0.10387
| 0.035995
| 1,528
| 23
| 154
| 66.434783
| 0.693143
| 0.006545
| 0
| 0
| 0
| 0.285714
| 0.677441
| 0.677441
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
4cc386b20a74ce65593243982d9fe8da6c2e2efe
| 144
|
py
|
Python
|
event_chain/app/views/__init__.py
|
ArcBlock/event-chain
|
50a37c76ab094386fc66c985f4174f8dabc98ad5
|
[
"MIT"
] | null | null | null |
event_chain/app/views/__init__.py
|
ArcBlock/event-chain
|
50a37c76ab094386fc66c985f4174f8dabc98ad5
|
[
"MIT"
] | null | null | null |
event_chain/app/views/__init__.py
|
ArcBlock/event-chain
|
50a37c76ab094386fc66c985f4174f8dabc98ad5
|
[
"MIT"
] | null | null | null |
# Aggregate the view modules' route objects so callers can import them
# directly from event_chain.app.views.
from event_chain.app.views.admin import admin
from event_chain.app.views.events import events
from event_chain.app.views.tickets import tickets
| 36
| 49
| 0.854167
| 24
| 144
| 5
| 0.375
| 0.225
| 0.35
| 0.425
| 0.55
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 144
| 3
| 50
| 48
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
980e7c237fd92e4bf5deb97fa4d57c19f72691b4
| 207
|
py
|
Python
|
lib/session/__init__.py
|
kuro2a/kiku
|
d4e6500970a20d1955f1773e0e2cfb8e2db819ba
|
[
"MIT"
] | 2
|
2019-08-14T14:32:36.000Z
|
2019-08-15T08:28:15.000Z
|
lib/session/__init__.py
|
kuro2a/kiku
|
d4e6500970a20d1955f1773e0e2cfb8e2db819ba
|
[
"MIT"
] | 1
|
2019-10-02T16:35:05.000Z
|
2019-10-02T16:35:05.000Z
|
lib/session/__init__.py
|
kuro2a/kiku
|
d4e6500970a20d1955f1773e0e2cfb8e2db819ba
|
[
"MIT"
] | 1
|
2019-08-14T14:33:01.000Z
|
2019-08-14T14:33:01.000Z
|
#!/usr/bin/python3
# Aggregate every session-service implementation into one namespace.
# NOTE(review): star imports re-export whatever each module happens to
# define; consider explicit imports (or per-module __all__) to avoid
# accidental name collisions between backends.
from lib.session.BaseSessionService import *
from lib.session.LocalSessionService import *
from lib.session.RedisSessionService import *
from lib.session.MemcachedSessionService import *
| 29.571429
| 49
| 0.830918
| 23
| 207
| 7.478261
| 0.478261
| 0.162791
| 0.325581
| 0.348837
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005291
| 0.086957
| 207
| 6
| 50
| 34.5
| 0.904762
| 0.082126
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e23faa5ad9531e7486aec863156fb89bd0b77e29
| 8,634
|
py
|
Python
|
tests/tom/socket/plain/test_epoll.py
|
SEIAROTg/Mail.im
|
1ad31c5f82dd440100a16e5704a4e22fc8105ead
|
[
"MIT"
] | null | null | null |
tests/tom/socket/plain/test_epoll.py
|
SEIAROTg/Mail.im
|
1ad31c5f82dd440100a16e5704a4e22fc8105ead
|
[
"MIT"
] | null | null | null |
tests/tom/socket/plain/test_epoll.py
|
SEIAROTg/Mail.im
|
1ad31c5f82dd440100a16e5704a4e22fc8105ead
|
[
"MIT"
] | null | null | null |
import time
import pytest
from faker import Faker
from ...socket_test_helper import SocketTestHelper
from src.tom._mailbox.packet import PlainPacket as Packet
def test_empty(helper: SocketTestHelper):
    """With no traffic at all, a zero-timeout wait reports nothing ready or errored."""
    socket0 = helper.create_connected_socket()
    socket1 = helper.create_connected_socket()
    socket2 = helper.create_listening_socket()
    sockets = {socket0, socket1, socket2}
    epoll = helper.create_epoll()
    epoll.add(sockets, sockets)
    rrset, rxset = epoll.wait(timeout=0)
    assert not rrset
    assert not rxset
@pytest.mark.timeout(5)
def test_read_recv(faker: Faker, helper: SocketTestHelper):
    """A data packet fed 0.5s after wait() begins marks the socket readable."""
    payload = faker.binary(111)
    endpoints = helper.fake_endpoints()
    socket = helper.create_connected_socket(*endpoints)
    epoll = helper.create_epoll()
    epoll.add({socket}, {socket})
    # deliver the packet asynchronously while wait() is blocking
    helper.defer(lambda: helper.feed_messages({
        faker.pyint(): Packet(*reversed(endpoints), 0, 0, set(), payload),
    }), 0.5)
    rrset, rxset = epoll.wait()
    assert rrset == {socket}
    assert not rxset
@pytest.mark.timeout(5)
def test_read_recv_reset(faker: Faker, helper: SocketTestHelper):
    """recv() of the full 111-byte payload clears the readable state."""
    payload = faker.binary(111)
    endpoints = helper.fake_endpoints()
    socket = helper.create_connected_socket(*endpoints)
    epoll = helper.create_epoll()
    epoll.add({socket}, {socket})
    helper.defer(lambda: helper.feed_messages({
        faker.pyint(): Packet(*reversed(endpoints), 0, 0, set(), payload),
    }), 0.5)
    # drain everything before polling again
    socket.recv(111)
    rrset, rxset = epoll.wait(timeout=0)
    assert not rrset
    assert not rxset
@pytest.mark.timeout(5)
def test_read_recv_not_reset(faker: Faker, helper: SocketTestHelper):
    """A partial recv() (1 of 111 bytes) leaves the socket readable."""
    payload = faker.binary(111)
    endpoints = helper.fake_endpoints()
    socket = helper.create_connected_socket(*endpoints)
    epoll = helper.create_epoll()
    epoll.add({socket}, {socket})
    helper.defer(lambda: helper.feed_messages({
        faker.pyint(): Packet(*reversed(endpoints), 0, 0, set(), payload),
    }), 0.5)
    # consume only one byte; 110 remain buffered
    socket.recv(1)
    rrset, rxset = epoll.wait(timeout=0)
    assert rrset == {socket}
    assert not rxset
@pytest.mark.timeout(5)
def test_read_recv_order(faker: Faker, helper: SocketTestHelper):
    """An out-of-order packet yields no readable data.

    The Packet is built with 1 where the in-order tests pass 0 — presumably
    the sequence number, so no in-order byte is available yet (TODO confirm
    against Packet's signature).
    """
    payload = faker.binary(111)
    endpoints = helper.fake_endpoints()
    socket = helper.create_connected_socket(*endpoints)
    epoll = helper.create_epoll()
    epoll.add({socket}, {socket})
    helper.feed_messages({
        faker.pyint(): Packet(*reversed(endpoints), 1, 0, set(), payload),
    })
    rrset, rxset = epoll.wait(timeout=0.5)
    assert not rrset
    assert not rxset
@pytest.mark.timeout(5)
def test_read_recv_empty_packet(faker: Faker, helper: SocketTestHelper):
    """A packet with an empty payload does not make the socket readable."""
    endpoints = helper.fake_endpoints()
    socket = helper.create_connected_socket(*endpoints)
    epoll = helper.create_epoll()
    epoll.add({socket}, {socket})
    helper.feed_messages({
        faker.pyint(): Packet(*reversed(endpoints), 0, 0, set(), b''),
    })
    rrset, rxset = epoll.wait(timeout=0.5)
    assert not rrset
    assert not rxset
@pytest.mark.timeout(5)
def test_read_recv_empty_packet_followed_by_non_empty_packets(faker: Faker, helper: SocketTestHelper):
    """An empty packet (at 0.5s) followed by a non-empty one (at 1s) makes the socket readable."""
    payload = faker.binary(111)
    endpoints = helper.fake_endpoints()
    socket = helper.create_connected_socket(*endpoints)
    epoll = helper.create_epoll()
    epoll.add({socket}, {socket})
    helper.defer(lambda: helper.feed_messages({
        faker.pyint(): Packet(*reversed(endpoints), 0, 0, set(), b''),
    }), 0.5)
    helper.defer(lambda: helper.feed_messages({
        faker.pyint(): Packet(*reversed(endpoints), 1, 0, set(), payload),
    }), 1)
    rrset, rxset = epoll.wait()
    assert rrset == {socket}
    assert not rxset
@pytest.mark.timeout(5)
def test_read_recv_empty_packet_followed_by_non_empty_packets_reversed(faker: Faker, helper: SocketTestHelper):
    """Delivery order swapped (non-empty at 0.5s, empty at 1s): still readable."""
    payload = faker.binary(111)
    endpoints = helper.fake_endpoints()
    socket = helper.create_connected_socket(*endpoints)
    epoll = helper.create_epoll()
    epoll.add({socket}, {socket})
    helper.defer(lambda: helper.feed_messages({
        faker.pyint(): Packet(*reversed(endpoints), 0, 0, set(), b''),
    }), 1)
    helper.defer(lambda: helper.feed_messages({
        faker.pyint(): Packet(*reversed(endpoints), 1, 0, set(), payload),
    }), 0.5)
    rrset, rxset = epoll.wait()
    assert rrset == {socket}
    assert not rxset
@pytest.mark.timeout(5)
def test_read_accept(faker: Faker, helper: SocketTestHelper):
    """An incoming SYN packet marks a listening socket readable (pending accept)."""
    payload = faker.binary(111)
    endpoints = helper.fake_endpoints()
    socket = helper.create_listening_socket(endpoints[0])
    epoll = helper.create_epoll()
    epoll.add({socket}, {socket})
    helper.defer(lambda: helper.feed_messages({
        faker.pyint(): Packet(*reversed(endpoints), 0, 0, set(), payload, is_syn=True),
    }), 0.5)
    rrset, rxset = epoll.wait()
    assert rrset == {socket}
    assert not rxset
@pytest.mark.timeout(5)
def test_read_accept_reset(faker: Faker, helper: SocketTestHelper):
    """accept() consumes the only pending connection, clearing readability."""
    payload = faker.binary(111)
    endpoints = helper.fake_endpoints()
    socket = helper.create_listening_socket(endpoints[0])
    epoll = helper.create_epoll()
    epoll.add({socket}, {socket})
    helper.defer(lambda: helper.feed_messages({
        faker.pyint(): Packet(*reversed(endpoints), 0, 0, set(), payload, is_syn=True),
    }), 0.5)
    socket.accept()
    rrset, rxset = epoll.wait(timeout=0)
    assert not rrset
    assert not rxset
@pytest.mark.timeout(5)
def test_read_accept_not_reset(faker: Faker, helper: SocketTestHelper):
    """With two pending SYNs from different peers, one accept() leaves the socket readable."""
    payload = faker.binary(111)
    local_endpoint = helper.fake_endpoint()
    endpoints = [helper.fake_endpoint() for i in range(2)]
    socket = helper.create_listening_socket(local_endpoint)
    epoll = helper.create_epoll()
    epoll.add({socket}, {socket})
    # two simultaneous connection attempts from distinct remote endpoints
    helper.defer(lambda: helper.feed_messages({
        faker.pyint(): Packet(endpoints[i], local_endpoint, 0, 0, set(), payload, is_syn=True) for i in range(2)
    }), 0.5)
    socket.accept()
    rrset, rxset = epoll.wait()
    assert rrset == {socket}
    assert not rxset
@pytest.mark.timeout(5)
def test_error_recv(faker: Faker, helper: SocketTestHelper):
    """Closing a connected socket reports it in the error set, not the ready set."""
    socket = helper.create_connected_socket()
    epoll = helper.create_epoll()
    epoll.add({socket}, {socket})
    helper.defer(socket.close, 0.5)
    rrset, rxset = epoll.wait()
    assert not rrset
    assert rxset == {socket}
@pytest.mark.timeout(5)
def test_error_accept(faker: Faker, helper: SocketTestHelper):
    """Closing a listening socket reports it in the error set, not the ready set."""
    socket = helper.create_listening_socket()
    epoll = helper.create_epoll()
    epoll.add({socket}, {socket})
    helper.defer(socket.close, 0.5)
    rrset, rxset = epoll.wait()
    assert not rrset
    assert rxset == {socket}
@pytest.mark.timeout(5)
def test_error_max_attempts(faker: Faker, helper: SocketTestHelper):
    """A send whose delivery ultimately fails puts the socket in the error set.

    NOTE(review): no peer ever acknowledges here, so — per the test name —
    the transport presumably exhausts its retransmission attempts; confirm
    against the socket implementation.
    """
    payload = faker.binary(111)
    socket = helper.create_connected_socket()
    epoll = helper.create_epoll()
    epoll.add({socket}, {socket})
    socket.send(payload)
    rrset, rxset = epoll.wait()
    assert not rrset
    assert rxset == {socket}
@pytest.mark.timeout(5)
def test_remove(faker: Faker, helper: SocketTestHelper):
    """After remove() from the read set, a readable socket is no longer reported."""
    payload = faker.binary(111)
    endpoints = helper.fake_endpoints()
    socket = helper.create_connected_socket(*endpoints)
    epoll = helper.create_epoll()
    epoll.add({socket}, {socket})
    helper.feed_messages({faker.pyint(): Packet(*reversed(endpoints), 0, 0, set(), payload)})
    epoll.remove({socket}, set())
    rrset, rxset = epoll.wait(timeout=0.5)
    assert not rrset
    assert not rxset
@pytest.mark.timeout(5)
def test_multiple(faker: Faker, helper: SocketTestHelper):
    """Per-socket readable/error state is tracked independently across three sockets
    as payloads are partially drained and sockets are closed."""
    payload = faker.binary(111)
    local_endpoint = helper.fake_endpoint()
    endpoints = [helper.fake_endpoint() for i in range(3)]
    sockets = [helper.create_connected_socket(local_endpoint, endpoints[i]) for i in range(3)]
    epoll = helper.create_epoll()
    epoll.add(set(sockets), set(sockets))
    helper.feed_messages({
        faker.pyint(): Packet(endpoints[i], local_endpoint, 0, 0, set(), payload) for i in range(3)
    })
    # give delivery time to settle before the first poll
    time.sleep(0.5)
    rrset, rxset = epoll.wait()
    assert rrset == set(sockets)
    assert not rxset
    # partial drain (1 of 111 bytes): socket 0 stays readable
    sockets[0].recv(1)
    rrset, rxset = epoll.wait()
    assert rrset == set(sockets)
    assert not rxset
    # full drain: socket 0 drops out of the ready set
    sockets[0].recv(110)
    rrset, rxset = epoll.wait()
    assert rrset == {sockets[1], sockets[2]}
    assert not rxset
    # closed sockets move into the error set; socket 1 keeps its unread data
    sockets[0].close()
    sockets[1].close()
    rrset, rxset = epoll.wait()
    assert rrset == {sockets[1], sockets[2]}
    assert rxset == {sockets[0], sockets[1]}
| 29.772414
| 112
| 0.682187
| 1,098
| 8,634
| 5.23133
| 0.071038
| 0.071031
| 0.049617
| 0.062848
| 0.903029
| 0.883357
| 0.872737
| 0.850975
| 0.836873
| 0.836873
| 0
| 0.020394
| 0.182187
| 8,634
| 289
| 113
| 29.875433
| 0.793089
| 0
| 0
| 0.798206
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170404
| 1
| 0.071749
| false
| 0
| 0.022422
| 0
| 0.09417
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e2589b1c7783915705a99d953824d0f7bab2e9e5
| 2,609
|
py
|
Python
|
train/train_unet/see_net.py
|
Tiexin-RS/segment-with-nn
|
f008f436e2fb3dc7a32d58dcf8bd45b5c5d8aed9
|
[
"MIT"
] | 1
|
2021-03-18T11:12:05.000Z
|
2021-03-18T11:12:05.000Z
|
train/train_unet/see_net.py
|
Tiexin-RS/segment-with-nn
|
f008f436e2fb3dc7a32d58dcf8bd45b5c5d8aed9
|
[
"MIT"
] | 9
|
2021-02-28T14:01:46.000Z
|
2021-05-12T05:14:38.000Z
|
train/train_unet/see_net.py
|
Tiexin-RS/segment-with-nn
|
f008f436e2fb3dc7a32d58dcf8bd45b5c5d8aed9
|
[
"MIT"
] | 2
|
2021-03-18T11:12:12.000Z
|
2021-03-25T01:45:11.000Z
|
# NOTE(review): several imports below (convert, Precision, FocalLoss,
# DiceLoss, BoundaryLoss, tfmot) are referenced only by the disabled
# experiments in the string blocks, not by the live code path.
import tensorflow as tf
from tensorflow.lite.python import convert
from tensorflow.python.keras.metrics import Precision
from segelectri.loss_metrics.loss import FocalLoss, LovaszLoss, DiceLoss, BoundaryLoss
import tensorflow_model_optimization as tfmot
from tensorflow.python.compiler.tensorrt import trt_convert as trt
import pathlib
if __name__ == "__main__":
    # Load a trained U-Net; the custom LovaszLoss is needed to deserialize it.
    model = tf.keras.models.load_model("../train_unet/exp/37/saved_model",custom_objects={'LovaszLoss': LovaszLoss})
    model.summary()
    # The triple-quoted blocks below are disabled conversion experiments
    # (TensorRT FP32/INT8 and TFLite quantization) with recorded
    # before/after model sizes; kept verbatim for reference.
    """
    params = trt.DEFAULT_TRT_CONVERSION_PARAMS
    params._replace(precision_mode = trt.TrtPrecisionMode.FP32)
    converter = trt.TrtGraphConverterV2(input_saved_model_dir='../train_unet/exp/36/saved_model',conversion_params=params)
    converter.convert()
    converter.save('trt_savedmodel')
    before:2922881
    after:127084028
    """
    """
    converter = tf.lite.TFLiteConverter.from_saved_model('../train_unet/exp/36/saved_model')
    tflite_model = converter.convert()
    tflite_models_dir = pathlib.Path("/tmp/unet_tflite_models/")
    tflite_models_dir.mkdir(exist_ok=True, parents=True)
    tflite_model_file = tflite_models_dir/"mnist_model.tflite"
    origin_byte = tflite_model_file.write_bytes(tflite_model)
    print(origin_byte)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    tflite_quant_model = converter.convert()
    tflite_model_quant_file = tflite_models_dir/"unet_model_quant.tflite"
    after_byte = tflite_model_file.write_bytes(tflite_quant_model)
    print(after_byte)
    before:2922881
    after:127084028
    """
    """
    params = trt.DEFAULT_TRT_CONVERSION_PARAMS
    params._replace(precision_mode = trt.TrtPrecisionMode.INT8)
    converter = trt.TrtGraphConverterV2(input_saved_model_dir='../train_unet/exp/37/saved_model',conversion_params=params)
    converter.convert()
    converter.save('trt_savedmodel')
    before:2809876
    after:127003352
    """
    """converter = tf.lite.TFLiteConverter.from_saved_model('../train_unet/exp/37/saved_model')
    tflite_model = converter.convert()
    tflite_models_dir = pathlib.Path("./tmp/unet_tflite_models/")
    tflite_models_dir.mkdir(exist_ok=True, parents=True)
    tflite_model_file = tflite_models_dir/"unet_model.tflite"
    origin_byte = tflite_model_file.write_bytes(tflite_model)
    print(origin_byte)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    tflite_quant_model = converter.convert()
    tflite_model_quant_file = tflite_models_dir/"unet_model_quant.tflite"
    after_byte = tflite_model_file.write_bytes(tflite_quant_model)
    print(after_byte)"""
| 41.412698
| 122
| 0.766577
| 330
| 2,609
| 5.70303
| 0.233333
| 0.070138
| 0.063762
| 0.057386
| 0.762487
| 0.762487
| 0.757173
| 0.740701
| 0.740701
| 0.740701
| 0
| 0.02779
| 0.131085
| 2,609
| 63
| 123
| 41.412698
| 0.802382
| 0
| 0
| 0
| 0
| 0
| 0.093458
| 0.059813
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.7
| 0
| 0.7
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e29de2d0aa2961091ce3211e61976670163e77b0
| 24,440
|
py
|
Python
|
x86/bitwise.py
|
c01db33f/reil
|
3deec3a3bb69aae51cc0d728d5f83156cfba2ab6
|
[
"Apache-2.0"
] | 27
|
2015-03-16T13:28:00.000Z
|
2021-08-02T02:58:23.000Z
|
x86/bitwise.py
|
c01db33f/pyreil
|
3deec3a3bb69aae51cc0d728d5f83156cfba2ab6
|
[
"Apache-2.0"
] | 2
|
2015-02-23T12:18:53.000Z
|
2015-03-15T20:31:16.000Z
|
x86/bitwise.py
|
c01db33f/reil
|
3deec3a3bb69aae51cc0d728d5f83156cfba2ab6
|
[
"Apache-2.0"
] | 9
|
2016-03-22T18:59:12.000Z
|
2022-02-05T08:18:28.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2014 Mark Brand - c01db33f (at) gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""reil.x86.bitwise - x86 and x86_64 translators
This module generates REIL (reverse engineering intermediate language)
IL from x86 and x86_64 machine code.
This file is responsible for translation of basic instructions that are
all about twiddling bits and bytes
"""
import capstone
import capstone.x86
import reil
import reil.error
from reil.shorthand import *
from reil.utilities import *
import reil.x86.conditional as conditional
import reil.x86.operand as operand
from reil.x86.utilities import *
def _shift_set_flags(ctx, result):
    """Emit REIL updating sf/zf/pf from the result of a shift operation.

    cf/of are the caller's responsibility; af is left untouched (see TODO).
    """
    size = result.size
    sign_result = ctx.tmp(size)
    ctx.emit( and_ (result, imm(sign_bit(size), size), sign_result))
    # compute sign flag (easy...)
    ctx.emit( bisnz_(sign_result, r('sf', 8)))
    # compute zero flag (easy...)
    ctx.emit( bisz_ (result, r('zf', 8)))
    # TODO: compute adjust flag? expensive...
    set_pf(ctx, result)
def _read_bit(ctx, i, base_index, offset_index):
    """Emit REIL reading one bit of operand *base_index* (selected by operand
    *offset_index*) into an 8-bit temporary and return that temporary.

    The memory form supports offsets beyond the word size; the offset is
    treated sign-and-magnitude (top bit = direction — NOTE(review): not
    two's-complement, confirm callers encode it that way), and the target
    byte is base +/- |offset|//8 with bit |offset|%8 inside it.
    """
    bit = ctx.tmp(8)
    if operand.is_memory(ctx, i, base_index):
        # nasty case, indexing into in-memory bitstring; offset can be
        # > word_size
        base = operand.get_address(ctx, i, base_index)
        offset = operand.get(ctx, i, offset_index)
        offset_sign = ctx.tmp(8)
        byte_offset = ctx.tmp(base.size)
        tmp0 = ctx.tmp(offset.size)
        tmp1 = ctx.tmp(offset.size)
        tmp2 = ctx.tmp(offset.size)
        byte = ctx.tmp(8)
        bitmask = ctx.tmp(8)
        # split offset into sign (tmp0/offset_sign), byte offset
        # (|offset| // 8) and bit-within-byte (|offset| % 8, in tmp2)
        ctx.emit( and_ (offset, imm(sign_bit(offset.size), offset.size), tmp0))
        ctx.emit( bisnz_(tmp0, offset_sign))
        ctx.emit( and_ (offset, imm(~sign_bit(offset.size), offset.size), tmp1))
        ctx.emit( div_ (tmp1, imm(8, offset.size), byte_offset))
        ctx.emit( mod_ (tmp1, imm(8, offset.size), tmp2))
        # walk forwards or backwards from base depending on the sign
        ctx.emit( jcc_ (offset_sign, 'negative_offset'))
        ctx.emit( add_ (base, byte_offset, base))
        ctx.emit( jcc_ (imm(1, 8), 'base_calculated'))
        ctx.emit('negative_offset')
        ctx.emit( sub_ (base, byte_offset, base))
        ctx.emit('base_calculated')
        # load the byte and isolate the selected bit
        ctx.emit( ldm_ (base, byte))
        ctx.emit( lshl_ (imm(1, 8), tmp2, bitmask))
        ctx.emit( and_ (byte, bitmask, byte))
        ctx.emit( bisnz_(byte, bit))
    else:
        # simple case, it's a register
        a = operand.get(ctx, i, base_index)
        offset = operand.get(ctx, i, offset_index)
        bitmask = ctx.tmp(a.size)
        tmp0 = ctx.tmp(a.size)
        ctx.emit( lshl_ (imm(1, a.size), offset, bitmask))
        ctx.emit( and_ (a, bitmask, tmp0))
        ctx.emit( bisnz_(tmp0, bit))
    return bit
def _write_bit(ctx, i, base_index, offset_index, bit):
    """Emit REIL writing *bit* (an 8-bit 0/1 operand) into the bit of
    operand *base_index* selected by operand *offset_index*.

    Mirrors _read_bit: the memory form resolves the target byte address
    (offset may exceed the word size and is treated sign-and-magnitude),
    clears the target bit, then ORs in the shifted new value.
    """
    if operand.is_memory(ctx, i, base_index):
        # nasty case, indexing into in-memory bitstring; offset can be
        # > word_size
        base = operand.get_address(ctx, i, base_index)
        offset = operand.get(ctx, i, offset_index)
        offset_sign = ctx.tmp(8)
        byte_offset = ctx.tmp(base.size)
        tmp0 = ctx.tmp(offset.size)
        byte = ctx.tmp(8)
        bitmask = ctx.tmp(8)
        # split offset into sign, byte offset (|offset| // 8) and
        # bit-within-byte (|offset| % 8, left in `offset`)
        ctx.emit( and_ (offset, imm(sign_bit(offset.size), offset.size), tmp0))
        ctx.emit( bisnz_(tmp0, offset_sign))
        ctx.emit( and_ (offset, imm(~sign_bit(offset.size), offset.size), offset))
        ctx.emit( div_ (offset, imm(8, offset.size), byte_offset))
        ctx.emit( mod_ (offset, imm(8, offset.size), offset))
        # walk forwards or backwards from base depending on the sign
        ctx.emit( jcc_ (offset_sign, 'negative_offset'))
        ctx.emit( add_ (base, byte_offset, base))
        ctx.emit( jcc_ (imm(1, 8), 'base_calculated'))
        ctx.emit('negative_offset')
        ctx.emit( sub_ (base, byte_offset, base))
        ctx.emit('base_calculated')
        ctx.emit( ldm_ (base, byte))
        # clear the target bit: byte &= ~(1 << offset)
        ctx.emit( lshl_ (imm(1, 8), offset, bitmask))
        ctx.emit( xor_ (bitmask, imm(mask(8), 8), bitmask))
        ctx.emit( and_ (byte, bitmask, byte))
        # set the new value: byte |= bit << offset
        ctx.emit( lshl_ (bit, offset, bitmask))
        # BUGFIX: previously ORed the unshifted `bit`, which always wrote
        # bit 0 of the byte and discarded the shifted mask computed just
        # above; use the shifted value (matches the register branch below).
        ctx.emit( or_ (byte, bitmask, byte))
        ctx.emit( stm_ (byte, base))
    else:
        # simple case, it's a register
        a = operand.get(ctx, i, base_index)
        offset = operand.get(ctx, i, offset_index)
        bitmask = ctx.tmp(a.size)
        tmp0 = ctx.tmp(a.size)
        tmp1 = ctx.tmp(a.size)
        # clear the target bit, then OR in the shifted new bit
        ctx.emit( lshl_ (imm(1, a.size), offset, bitmask))
        ctx.emit( xor_ (bitmask, imm(mask(a.size), a.size), bitmask))
        ctx.emit( and_ (a, bitmask, tmp0))
        ctx.emit( str_ (bit, tmp1))
        ctx.emit( lshl_ (tmp1, offset, tmp1))
        ctx.emit( or_ (tmp0, tmp1, tmp1))
        operand.set(ctx, i, base_index, tmp1)
# Instruction Translators
def x86_bextr(ctx, i):
    """Translate bextr: extract a bit field from the first source operand.

    start = low byte of the control operand, length = its second byte;
    result = (src >> start) masked to `length` bits. Sets zf from the
    result, clears cf, and leaves af/sf/pf undefined.
    """
    a = operand.get(ctx, i, 1)
    b = operand.get(ctx, i, 2)
    start = ctx.tmp(8)
    length = ctx.tmp(8)
    # BUGFIX: this temporary was previously named `mask`, shadowing the
    # mask() helper from reil.x86.utilities that is *called* below
    # (`imm(mask(a.size), ...)`), which would fail at translation time.
    field_mask = ctx.tmp(a.size)
    tmp0 = ctx.tmp(8)
    result = ctx.tmp(a.size)
    ctx.emit( str_ (b, start))
    ctx.emit( lshr_ (b, imm(8, 8), length))
    # we are masking off [11111[start + length , start]111111]
    ctx.emit( sub_ (imm(a.size, a.size), length, tmp0))
    ctx.emit( lshr_ (imm(mask(a.size), a.size), tmp0, field_mask))
    # [[start + length, start]111111]
    ctx.emit( add_ (tmp0, start, tmp0))
    ctx.emit( lshl_ (field_mask, tmp0, field_mask))
    # [000000000000[start + length, start]]
    ctx.emit( lshr_ (field_mask, start, field_mask))
    # we have our mask [00000[start + length , start]000000]
    ctx.emit( and_ (a, field_mask, result))
    ctx.emit( lshr_ (result, start, result))
    set_zf(ctx, result)
    ctx.emit( str_ (imm(0, 8), r('cf', 8)))
    ctx.emit( undef_(r('af', 8)))
    ctx.emit( undef_(r('sf', 8)))
    ctx.emit( undef_(r('pf', 8)))
    operand.set(ctx, i, 0, result)
def x86_blsi(ctx, i):
    """BLSI: isolate the lowest set bit of operand 1 into operand 0.

    Emits a REIL loop that scans from bit 0 upward until a set bit is
    found.  If the source is zero: ZF=1, CF=0, result unwritten.
    Otherwise ZF=0, CF=1.  OF is cleared; PF/AF are undefined.
    """
    a = operand.get(ctx, i, 1)
    # (two dead `bit = imm(...)` / `index = imm(...)` assignments removed;
    # they were immediately overwritten by the ctx.tmp allocations below)
    bit = ctx.tmp(a.size)
    index = ctx.tmp(a.size)
    result = ctx.tmp(a.size)
    tmp0 = ctx.tmp(a.size)
    ctx.emit(  jcc_  (a, 'non-zero'))
    # if a is zero
    ctx.emit(  str_  (imm(1, 8), r('zf', 8)))
    ctx.emit(  str_  (imm(0, 8), r('cf', 8)))
    ctx.emit(  jcc_  (imm(1, 8), 'done'))
    # set up loop variables and clear zf
    ctx.emit('non-zero')
    ctx.emit(  str_  (imm(0, 8), r('zf', 8)))
    ctx.emit(  str_  (imm(0, a.size), index))
    ctx.emit(  str_  (imm(1, a.size), bit))
    # LOOP: test each bit from LSB upward
    ctx.emit('loop')
    ctx.emit(  and_  (a, bit, tmp0))
    ctx.emit(  jcc_  (tmp0, 'found'))
    # update these for the next one
    ctx.emit(  add_  (index, imm(1, a.size), index))
    ctx.emit(  lshl_ (bit, imm(1, a.size), bit))
    ctx.emit(  jcc_  (imm(1, 8), 'loop'))
    # non-zero case epilogue: result = 1 << index
    ctx.emit('found')
    ctx.emit(  str_  (imm(1, a.size), result))
    ctx.emit(  lshl_ (result, index, result))
    operand.set(ctx, i, 0, result, clear=True)
    set_sf(ctx, result)
    ctx.emit(  str_  (imm(0, 8), r('zf', 8)))
    ctx.emit(  str_  (imm(1, 8), r('cf', 8)))
    ctx.emit('done')
    ctx.emit(  str_  (imm(0, 8), r('of', 8)))
    ctx.emit(  undef_(r('pf', 8)))
    ctx.emit(  undef_(r('af', 8)))
def x86_blsmsk(ctx, i):
    """BLSMSK: set operand 0 to a mask of all bits up to and including the
    lowest set bit of operand 1.

    Emits a REIL loop scanning from bit 0 upward; the mask is built as
    ~(all_ones << (index+1) >> (index+1))-style shift/xor sequence.
    CF=0 when the source is zero, CF=1 otherwise; ZF cleared, OF cleared,
    PF/AF undefined.
    """
    a = operand.get(ctx, i, 1)
    # (two dead `bit = imm(...)` / `index = imm(...)` assignments removed;
    # they were immediately overwritten by the ctx.tmp allocations below)
    bit = ctx.tmp(a.size)
    index = ctx.tmp(a.size)
    result = ctx.tmp(a.size)
    tmp0 = ctx.tmp(a.size)
    ctx.emit(  jcc_  (a, 'non-zero'))
    # if a is zero
    ctx.emit(  str_  (imm(0, 8), r('cf', 8)))
    ctx.emit(  jcc_  (imm(1, 8), 'done'))
    # set up loop variables and clear zf
    ctx.emit('non-zero')
    ctx.emit(  str_  (imm(0, 8), r('zf', 8)))
    ctx.emit(  str_  (imm(0, a.size), index))
    ctx.emit(  str_  (imm(1, a.size), bit))
    # LOOP: test each bit from LSB upward
    ctx.emit('loop')
    ctx.emit(  and_  (a, bit, tmp0))
    ctx.emit(  jcc_  (tmp0, 'found'))
    # update these for the next one
    ctx.emit(  add_  (index, imm(1, a.size), index))
    ctx.emit(  lshl_ (bit, imm(1, a.size), bit))
    ctx.emit(  jcc_  (imm(1, 8), 'loop'))
    # non-zero case epilogue: build the low-bit mask
    ctx.emit('found')
    ctx.emit(  str_  (imm(mask(a.size), a.size), result))
    ctx.emit(  lshl_ (result, index, result))
    ctx.emit(  lshr_ (result, index, result))
    ctx.emit(  xor_  (imm(mask(a.size), a.size), result, result))
    operand.set(ctx, i, 0, result, clear=True)
    set_sf(ctx, result)
    ctx.emit(  str_  (imm(1, 8), r('cf', 8)))
    ctx.emit('done')
    ctx.emit(  str_  (imm(0, 8), r('zf', 8)))
    ctx.emit(  str_  (imm(0, 8), r('of', 8)))
    ctx.emit(  undef_(r('pf', 8)))
    ctx.emit(  undef_(r('af', 8)))
def x86_blsr(ctx, i):
    """BLSR: copy operand 1 to operand 0 with its lowest set bit cleared.

    Emits a REIL loop that finds the lowest set bit, then xors it out of
    the source.  CF=1 when the source is zero, CF=0 otherwise; ZF/SF set
    from the result, OF cleared, PF/AF undefined.
    """
    a = operand.get(ctx, i, 1)
    # (two dead `bit = imm(...)` / `index = imm(...)` assignments removed;
    # they were immediately overwritten by the ctx.tmp allocations below)
    bit = ctx.tmp(a.size)
    index = ctx.tmp(a.size)
    result = ctx.tmp(a.size)
    tmp0 = ctx.tmp(a.size)
    ctx.emit(  jcc_  (a, 'non-zero'))
    # if a is zero
    ctx.emit(  str_  (imm(1, 8), r('cf', 8)))
    ctx.emit(  jcc_  (imm(1, 8), 'done'))
    # set up loop variables and clear zf
    ctx.emit('non-zero')
    ctx.emit(  str_  (imm(0, 8), r('zf', 8)))
    ctx.emit(  str_  (imm(0, a.size), index))
    ctx.emit(  str_  (imm(1, a.size), bit))
    # LOOP: test each bit from LSB upward
    ctx.emit('loop')
    ctx.emit(  and_  (a, bit, tmp0))
    ctx.emit(  jcc_  (tmp0, 'found'))
    # update these for the next one
    ctx.emit(  add_  (index, imm(1, a.size), index))
    ctx.emit(  lshl_ (bit, imm(1, a.size), bit))
    ctx.emit(  jcc_  (imm(1, 8), 'loop'))
    # non-zero case epilogue: clear the found bit
    ctx.emit('found')
    ctx.emit(  str_  (imm(1, a.size), result))
    ctx.emit(  lshl_ (result, index, result))
    ctx.emit(  xor_  (a, result, result))
    operand.set(ctx, i, 0, result, clear=True)
    ctx.emit(  str_  (imm(0, 8), r('cf', 8)))
    ctx.emit('done')
    set_zf(ctx, result)
    set_sf(ctx, result)
    ctx.emit(  str_  (imm(0, 8), r('of', 8)))
    ctx.emit(  undef_(r('pf', 8)))
    ctx.emit(  undef_(r('af', 8)))
def x86_bsf(ctx, i):
    """BSF: bit-scan-forward — write the index of the lowest set bit of
    operand 1 into operand 0.

    If the source is zero, ZF=1 and the destination is undefined;
    otherwise ZF=0.  CF/OF/SF/PF/AF are undefined either way.
    """
    a = operand.get(ctx, i, 1)
    # (two dead `bit = imm(...)` / `index = imm(...)` assignments removed;
    # they were immediately overwritten by the ctx.tmp allocations below)
    bit = ctx.tmp(a.size)
    index = ctx.tmp(a.size)
    tmp0 = ctx.tmp(a.size)
    ctx.emit(  jcc_  (a, 'non-zero'))
    # if a is zero
    ctx.emit(  str_  (imm(1, 8), r('zf', 8)))
    operand.undefine(ctx, i, 0)
    ctx.emit(  jcc_  (imm(1, 8), 'done'))
    # set up loop variables and clear zf
    ctx.emit('non-zero')
    ctx.emit(  str_  (imm(0, 8), r('zf', 8)))
    ctx.emit(  str_  (imm(0, a.size), index))
    ctx.emit(  str_  (imm(1, a.size), bit))
    # LOOP: scan upward from bit 0
    ctx.emit('loop')
    ctx.emit(  and_  (a, bit, tmp0))
    ctx.emit(  jcc_  (tmp0, 'found'))
    # update these for the next one
    ctx.emit(  add_  (index, imm(1, a.size), index))
    ctx.emit(  lshl_ (bit, imm(1, a.size), bit))
    ctx.emit(  jcc_  (imm(1, 8), 'loop'))
    # non-zero case epilogue
    ctx.emit('found')
    operand.set(ctx, i, 0, index, clear=True)
    ctx.emit('done')
    ctx.emit(  undef_(r('cf', 8)))
    ctx.emit(  undef_(r('of', 8)))
    ctx.emit(  undef_(r('sf', 8)))
    ctx.emit(  undef_(r('pf', 8)))
    ctx.emit(  undef_(r('af', 8)))
def x86_bsr(ctx, i):
    """BSR: bit-scan-reverse — write the index of the highest set bit of
    operand 1 into operand 0.

    If the source is zero, ZF=1 and the destination is undefined;
    otherwise ZF=0.  CF/OF/SF/PF/AF are undefined either way.
    """
    a = operand.get(ctx, i, 1)
    # (two dead `bit = imm(...)` / `index = imm(...)` assignments removed;
    # they were immediately overwritten by the ctx.tmp allocations below)
    bit = ctx.tmp(a.size)
    index = ctx.tmp(a.size)
    tmp0 = ctx.tmp(a.size)
    ctx.emit(  jcc_  (a, 'non-zero'))
    # if a is zero
    ctx.emit(  str_  (imm(1, 8), r('zf', 8)))
    operand.undefine(ctx, i, 0)
    ctx.emit(  jcc_  (imm(1, 8), 'done'))
    # set up loop variables and clear zf
    ctx.emit('non-zero')
    ctx.emit(  str_  (imm(0, 8), r('zf', 8)))
    ctx.emit(  str_  (imm(a.size - 1, a.size), index))
    ctx.emit(  str_  (imm(sign_bit(a.size), a.size), bit))
    # LOOP: scan downward from the MSB
    ctx.emit('loop')
    ctx.emit(  and_  (a, bit, tmp0))
    ctx.emit(  jcc_  (tmp0, 'found'))
    # update these for the next one
    ctx.emit(  sub_  (index, imm(1, a.size), index))
    ctx.emit(  lshr_ (bit, imm(1, a.size), bit))
    ctx.emit(  jcc_  (imm(1, 8), 'loop'))
    # non-zero case epilogue
    ctx.emit('found')
    operand.set(ctx, i, 0, index, clear=True)
    ctx.emit('done')
    ctx.emit(  undef_(r('cf', 8)))
    ctx.emit(  undef_(r('of', 8)))
    ctx.emit(  undef_(r('sf', 8)))
    ctx.emit(  undef_(r('pf', 8)))
    ctx.emit(  undef_(r('af', 8)))
def x86_bt(ctx, i):
    """BT: copy the bit of operand 0 selected by operand 1 into CF."""
    bit = _read_bit(ctx, i, 0, 1)
    ctx.emit( str_ (bit, r('cf', 8)))
def x86_btc(ctx, i):
    """BTC: copy the selected bit of operand 0 into CF, then write its
    complement back to operand 0."""
    bit = _read_bit(ctx, i, 0, 1)
    ctx.emit( str_ (bit, r('cf', 8)))
    # bisz_ inverts the 0/1 bit value (is-zero test)
    ctx.emit( bisz_ (bit, bit))
    _write_bit(ctx, i, 0, 1, bit)
def x86_btr(ctx, i):
    """BTR: copy the selected bit of operand 0 into CF, then clear it."""
    bit = _read_bit(ctx, i, 0, 1)
    ctx.emit( str_ (bit, r('cf', 8)))
    _write_bit(ctx, i, 0, 1, imm(0, 8))
def x86_bts(ctx, i):
    """BTS: copy the selected bit of operand 0 into CF, then set it."""
    bit = _read_bit(ctx, i, 0, 1)
    ctx.emit( str_ (bit, r('cf', 8)))
    _write_bit(ctx, i, 0, 1, imm(1, 8))
def x86_bzhi(ctx, i):
    """BZHI: zero the high bits of operand 1 starting at the bit index
    given by operand 2, writing the result to operand 0.

    CF is set when the requested index exceeds the operand width;
    ZF/SF reflect the result, OF is cleared, AF/PF are undefined.

    NOTE(review): the lshl/lshr pair below shifts by `index` itself, which
    clears the top `index` bits rather than keeping the low `index` bits —
    confirm against the Intel SDM definition of BZHI before relying on it.
    """
    a = operand.get(ctx, i, 1)
    b = operand.get(ctx, i, 2)
    result = ctx.tmp(a.size)
    index = ctx.tmp(a.size)
    tmp0 = ctx.tmp(a.size * 2)
    ctx.emit(  mod_  (b, imm(a.size - 1, a.size), index))
    ctx.emit(  lshl_ (a, index, result))
    ctx.emit(  lshr_ (result, index, result))
    # CF = 1 iff b > operand size (the subtraction goes negative)
    ctx.emit(  sub_  (b, imm(a.size - 1, a.size), tmp0))
    ctx.emit(  and_  (tmp0, imm(sign_bit(a.size * 2), a.size * 2), tmp0))
    ctx.emit(  bisnz_(tmp0, r('cf', 8)))
    set_zf(ctx, result)
    # bug fix: a dead set_pf(ctx, result) call was removed here — PF is
    # undefined after BZHI and was immediately undef_'d below anyway.
    ctx.emit(  str_  (imm(0, 8), r('of', 8)))
    ctx.emit(  undef_(r('pf', 8)))
    ctx.emit(  undef_(r('af', 8)))
def x86_lzcnt(ctx, i):
    """LZCNT: count the leading zero bits of operand 1 into operand 0.

    If the source is zero the result is the operand width.  The emitted
    loop scans from the MSB downward, incrementing `index` for each clear
    bit until a set bit is found.
    """
    a = operand.get(ctx, i, 1)
    # (two dead `bit = imm(...)` / `index = imm(...)` assignments removed;
    # they were immediately overwritten by the ctx.tmp allocations below)
    bit = ctx.tmp(a.size)
    index = ctx.tmp(a.size)
    tmp0 = ctx.tmp(a.size)
    ctx.emit(  jcc_  (a, 'non-zero'))
    # if a is zero: the count is the full operand width
    ctx.emit(  str_  (imm(1, 8), r('zf', 8)))
    # bug fix: the ctx argument was missing from this operand.set call
    operand.set(ctx, i, 0, imm(a.size, a.size))
    ctx.emit(  jcc_  (imm(1, 8), 'done'))
    # set up loop variables and clear zf
    ctx.emit('non-zero')
    ctx.emit(  str_  (imm(0, 8), r('zf', 8)))
    ctx.emit(  str_  (imm(0, a.size), index))
    # bug fix: scan must start at the MSB; the previous code initialised
    # `bit` to 1 and shifted right, which becomes 0 after one iteration
    # and never terminates unless bit 0 of the source is set.
    ctx.emit(  str_  (imm(sign_bit(a.size), a.size), bit))
    # LOOP: scan downward from the MSB counting clear bits
    ctx.emit('loop')
    ctx.emit(  and_  (a, bit, tmp0))
    ctx.emit(  jcc_  (tmp0, 'found'))
    # update these for the next one
    ctx.emit(  add_  (index, imm(1, a.size), index))
    ctx.emit(  lshr_ (bit, imm(1, a.size), bit))
    ctx.emit(  jcc_  (imm(1, 8), 'loop'))
    # non-zero case epilogue
    ctx.emit('found')
    operand.set(ctx, i, 0, index, clear=True)
    ctx.emit('done')
    ctx.emit(  undef_(r('cf', 8)))
    ctx.emit(  undef_(r('of', 8)))
    ctx.emit(  undef_(r('sf', 8)))
    ctx.emit(  undef_(r('pf', 8)))
    ctx.emit(  undef_(r('af', 8)))
def x86_rol(ctx, i):
    """ROL: rotate operand 0 left by operand 1 (amount masked to size-1).

    Implemented by widening to a double-size temporary, shifting left,
    and OR-ing the two halves back together.  CF receives the bit rotated
    into the LSB.  OF is defined only for a rotate-by-one and undefined
    otherwise.  A zero rotate skips straight to the end and leaves all
    flags untouched.
    """
    a = operand.get(ctx, i, 0)
    b = operand.get(ctx, i, 1)
    max_shift = a.size-1
    size = a.size
    tmp0 = ctx.tmp(size)
    tmp1 = ctx.tmp(8)
    tmp2 = ctx.tmp(size * 2)
    tmp3 = ctx.tmp(size * 2)
    tmp4 = ctx.tmp(size)
    tmp5 = ctx.tmp(size * 2)
    tmp6 = ctx.tmp(size * 2)
    tmp7 = ctx.tmp(size)
    tmp8 = ctx.tmp(size)
    result = ctx.tmp(size)
    # the rotate amount is truncated at word_size - 1
    ctx.emit(  and_  (b, imm(max_shift, size), tmp0))
    # zero rotate doesn't affect flags
    ctx.emit(  bisz_ (tmp0, tmp1))
    ctx.emit(  jcc_  (tmp1, 'zero_rotate'))
    # zero extend
    ctx.emit(  str_  (a, tmp2))
    # left shift by the correct amount
    ctx.emit(  lshl_ (tmp2, tmp0, tmp3))
    # truncate to get first half of result
    ctx.emit(  str_  (tmp3, tmp4))
    # shift out then truncate to get second half of result
    ctx.emit(  lshr_ (tmp3, imm(max_shift+1, size * 2), tmp5))
    ctx.emit(  str_  (tmp5, tmp6))
    # or both halves of the result
    ctx.emit(  or_   (tmp4, tmp6, result))
    # compute carry flag (last bit that was shifted across)
    ctx.emit(  and_  (result, imm(1, size), tmp7))
    ctx.emit(  bisnz_(tmp7, r('cf', 8)))
    if isinstance(b, reil.ImmediateOperand) and b.value == 1:
        # overflow flag is msb of input ^ msb output
        # (dead `tmp9 = ctx.tmp(size)` removed: the code below uses tmp8)
        # NOTE(review): this uses the MSB of the *input* `a`; the Intel SDM
        # defines OF for ROL-by-1 as msb(result) XOR CF — confirm.
        ctx.emit(  and_  (a, imm(sign_bit(size), size), tmp8))
        ctx.emit(  xor_  (tmp8, tmp7, tmp8))
        ctx.emit(  bisnz_(tmp8, r('of', 8)))
    else:
        ctx.emit(  undef_(r('of', 8)))
    operand.set(ctx, i, 0, result)
    ctx.emit( 'zero_rotate')
    ctx.emit(  nop_())
def x86_ror(ctx, i):
    """ROR: rotate operand 0 right by operand 1 (amount masked to size-1).

    Implemented by widening to a double-size temporary, left-shifting the
    value into the upper half, right-shifting by the rotate amount, and
    OR-ing the two halves back together.  CF receives the bit rotated into
    the MSB.  OF is defined only for a rotate-by-one (msb(input) XOR
    msb-derived CF) and undefined otherwise.  A zero rotate jumps to the
    end and leaves all flags untouched.
    """
    a = operand.get(ctx, i, 0)
    b = operand.get(ctx, i, 1)
    max_shift = a.size-1
    size = a.size
    tmp0 = ctx.tmp(size)
    tmp1 = ctx.tmp(8)
    tmp2 = ctx.tmp(size * 2)
    tmp3 = ctx.tmp(size * 2)
    tmp4 = ctx.tmp(size * 2)
    tmp5 = ctx.tmp(size)
    tmp6 = ctx.tmp(size * 2)
    tmp7 = ctx.tmp(size)
    tmp8 = ctx.tmp(size)
    result = ctx.tmp(size)
    # the rotate amount is truncated at word_size - 1
    ctx.emit( and_  (b, imm(max_shift, size), tmp0))
    # zero rotate doesn't affect flags
    ctx.emit( bisz_ (tmp0, tmp1))
    ctx.emit( jcc_  (tmp1, 'zero_rotate'))
    # zero extend
    ctx.emit( str_  (a, tmp2))
    # left shift all the way (value now occupies the upper half)
    ctx.emit( lshl_ (tmp2, imm(max_shift+1, size * 2), tmp3))
    # right shift by the correct amount
    ctx.emit( lshr_ (tmp3, tmp0, tmp4))
    # truncate to get first half of result
    ctx.emit( str_  (tmp4, tmp5))
    # shift out then truncate to get second half of result
    ctx.emit( lshr_ (tmp4, imm(max_shift+1, size * 2), tmp6))
    ctx.emit( str_  (tmp6, tmp7))
    # or both halves of the result
    ctx.emit( or_   (tmp5, tmp7, result))
    # compute carry flag (last bit that was shifted across, now the MSB)
    ctx.emit( and_  (result, imm(sign_bit(size), size), tmp8))
    ctx.emit( bisnz_(tmp8, r('cf', 8)))
    if isinstance(b, reil.ImmediateOperand) and b.value == 1:
        # overflow flag is msb of input ^ msb output
        tmp9 = ctx.tmp(size)
        ctx.emit( and_  (a, imm(sign_bit(size), size), tmp9))
        ctx.emit( xor_  (tmp9, tmp8, tmp9))
        ctx.emit( bisnz_(tmp9, r('of', 8)))
    else:
        ctx.emit( undef_(r('of', 8)))
    operand.set(ctx, i, 0, result)
    ctx.emit( 'zero_rotate')
    ctx.emit( nop_())
def x86_sar(ctx, i):
    """SAR: arithmetic (sign-preserving) right shift of operand 0.

    The shift amount comes from operand 1, from an implicit 1 (mnemonics
    ending in '1'), or from the count register (ctx.counter).  The value
    is widened to double size, placed in the upper half, and arithmetic
    right-shifted so the last bit shifted out can be captured for CF.
    OF is always cleared; the remaining flags are set by
    _shift_set_flags.
    """
    a = operand.get(ctx, i, 0)
    if len(i.operands) == 1:
        if i.mnemonic.endswith('1'):
            # one-operand form with implicit shift count of 1
            b = imm(1, a.size)
        else:
            # shift count comes from the count register (cl)
            b = ctx.counter
    else:
        b = operand.get(ctx, i, 1)
    max_shift = a.size-1
    size = a.size
    tmp0 = ctx.tmp(size)
    tmp1 = ctx.tmp(size * 2)
    tmp2 = ctx.tmp(size * 2)
    tmp3 = ctx.tmp(size * 2)
    tmp4 = ctx.tmp(size)
    tmp5 = ctx.tmp(size * 2)
    result = ctx.tmp(a.size)
    # the shift amount is truncated at word_size - 1
    ctx.emit( and_  (b, imm(max_shift, size), tmp0))
    # zero extend
    ctx.emit( str_  (a, tmp1))
    # left shift all the way (value now occupies the upper half)
    ctx.emit( lshl_ (tmp1, imm(max_shift+1, size * 2), tmp2))
    # right shift by the correct amount
    ctx.emit( ashr_ (tmp2, tmp0, tmp3))
    # save off the first bit that is going to be lost
    ctx.emit( and_  (tmp3, imm(sign_bit(size), size * 2), tmp4))
    # shift out then truncate to get second half of result
    ctx.emit( ashr_ (tmp3, imm(max_shift+1, size * 2), tmp5))
    ctx.emit( str_  (tmp5, result))
    # carry flag = last bit shifted out (the comment previously said
    # "sign flag", but this writes cf)
    ctx.emit( bisnz_(tmp4, r('cf', 8)))
    # overflow flag is always 0
    ctx.emit( str_  (imm(0, 8), r('of', 8)))
    _shift_set_flags(ctx, result)
    operand.set(ctx, i, 0, result)
def x86_shl(ctx, i):
    """SHL: logical left shift of operand 0.

    The shift amount comes from operand 1, from an implicit 1 (mnemonics
    ending in '1'), or from the count register (ctx.counter).  The value
    is widened to double size so the carried-out bit survives the shift.
    OF is defined only for a shift-by-one (msb(result) XOR CF) and
    undefined otherwise.  A zero shift jumps to the end and leaves the
    flags untouched.
    """
    a = operand.get(ctx, i, 0)
    if len(i.operands) == 1:
        if i.mnemonic.endswith('1'):
            # one-operand form with implicit shift count of 1
            b = imm(1, a.size)
        else:
            # shift count comes from the count register (cl)
            b = ctx.counter
    else:
        b = operand.get(ctx, i, 1)
    max_shift = a.size-1
    size = a.size
    tmp0 = ctx.tmp(size)
    tmp1 = ctx.tmp(8)
    tmp2 = ctx.tmp(size * 2)
    tmp3 = ctx.tmp(size * 2)
    tmp4 = ctx.tmp(size * 2)
    tmp5 = ctx.tmp(8)
    tmp6 = ctx.tmp(size)
    tmp7 = ctx.tmp(8)
    result = ctx.tmp(size)
    # the shift amount is truncated at word_size - 1
    ctx.emit( and_  (b, imm(max_shift, size), tmp0))
    # zero shift doesn't affect flags
    ctx.emit( bisz_ (tmp0, tmp1))
    ctx.emit( jcc_  (tmp1, 'zero_shift'))
    # zero extend
    ctx.emit( str_  (a, tmp2))
    # left shift by the correct amount
    ctx.emit( lshl_ (tmp2, tmp0, tmp3))
    # truncate to get result
    ctx.emit( str_  (tmp3, result))
    # compute carry flag (the bit shifted past the top of the word)
    ctx.emit( and_  (tmp3, imm(carry_bit(size), size * 2), tmp4))
    ctx.emit( bisnz_(tmp4, r('cf', 8)))
    # OF is only defined when the shift amount is exactly 1
    ctx.emit( equ_  (tmp0, imm(1, size), tmp5))
    ctx.emit( bisz_ (tmp5, tmp5))
    ctx.emit( jcc_  (tmp5, 'no_overflow_flag'))
    # compute overflow flag: msb(result) XOR cf
    ctx.emit( and_  (result, imm(sign_bit(size), size), tmp6))
    ctx.emit( bisnz_(tmp6, tmp7))
    ctx.emit( xor_  (r('cf', 8), tmp7, r('of', 8)))
    ctx.emit( jcc_  (imm(1, 8), 'overflow_flag_done'))
    ctx.emit('no_overflow_flag')
    ctx.emit( undef_(r('of', 8)))
    ctx.emit('overflow_flag_done')
    _shift_set_flags(ctx, result)
    operand.set(ctx, i, 0, result)
    ctx.emit( 'zero_shift')
    ctx.emit( nop_())
def x86_shr(ctx, i):
    """SHR: logical right shift of operand 0.

    The shift amount comes from operand 1, from an implicit 1 (mnemonics
    ending in '1'), or from the count register (ctx.counter).  The value
    is widened to double size and placed in the upper half so the last
    bit shifted out remains visible for CF.  OF is defined only for a
    shift-by-one (msb of the original value) and undefined otherwise.
    A zero shift jumps to the end and leaves the flags untouched.
    """
    a = operand.get(ctx, i, 0)
    if len(i.operands) == 1:
        if i.mnemonic.endswith('1'):
            # one-operand form with implicit shift count of 1
            b = imm(1, a.size)
        else:
            # shift count comes from the count register (cl)
            b = ctx.counter
    else:
        b = operand.get(ctx, i, 1)
    max_shift = a.size-1
    size = a.size
    tmp0 = ctx.tmp(size)
    tmp1 = ctx.tmp(8)
    tmp2 = ctx.tmp(size * 2)
    tmp3 = ctx.tmp(size * 2)
    tmp4 = ctx.tmp(size * 2)
    tmp5 = ctx.tmp(size * 2)
    tmp6 = ctx.tmp(8)
    tmp7 = ctx.tmp(size)
    tmp8 = ctx.tmp(size)
    result = ctx.tmp(size)
    # the shift amount is truncated at word_size - 1
    ctx.emit( and_  (b, imm(max_shift, size), tmp0))
    # zero shift doesn't affect flags
    ctx.emit( bisz_ (tmp0, tmp1))
    ctx.emit( jcc_  (tmp1, 'zero_shift'))
    # zero extend
    ctx.emit( str_  (a, tmp2))
    # left shift all the way (value now occupies the upper half)
    ctx.emit( lshl_ (tmp2, imm(max_shift+1, size * 2), tmp3))
    # right shift by the correct amount
    ctx.emit( lshr_ (tmp3, tmp0, tmp4))
    # shift out then truncate to get second half of result
    ctx.emit( lshr_ (tmp4, imm(max_shift+1, size * 2), tmp5))
    ctx.emit( str_  (tmp5, result))
    # OF is only defined when the shift amount is exactly 1
    ctx.emit( equ_  (tmp0, imm(1, size), tmp6))
    ctx.emit( bisz_ (tmp6, tmp6))
    ctx.emit( jcc_  (tmp6, 'no_overflow_flag'))
    # compute overflow flag: msb of the original value
    ctx.emit( and_  (a, imm(sign_bit(size), size), tmp7))
    ctx.emit( bisnz_(tmp7, r('of', 8)))
    ctx.emit( jcc_  (imm(1, 8), 'overflow_flag_done'))
    ctx.emit('no_overflow_flag')
    ctx.emit( undef_(r('of', 8)))
    ctx.emit('overflow_flag_done')
    # compute carry flag (last bit to be shifted out)
    ctx.emit( and_  (tmp4, imm(sign_bit(size), size), tmp8))
    ctx.emit( bisnz_(tmp8, r('cf', 8)))
    _shift_set_flags(ctx, result)
    operand.set(ctx, i, 0, result)
    ctx.emit( 'zero_shift')
    ctx.emit( nop_())
def x86_shrd(ctx, i):
    """SHRD: double-precision right shift.

    Shifts operand 0 right by the count (operand 2, or the count register
    for the two-operand form), filling the vacated high bits from
    operand 1.  Implemented by concatenating b:a in a double-width
    temporary and right-shifting.  Flag handling is still approximate
    (see TODO).
    """
    a = operand.get(ctx, i, 0)
    b = operand.get(ctx, i, 1)
    if len(i.operands) == 2:
        # two-operand form: shift count comes from the count register
        c = ctx.counter
    else:
        c = operand.get(ctx, i, 2)
    size = a.size
    max_shift = size - 1
    tmp0 = ctx.tmp(size)
    tmp1 = ctx.tmp(size * 2)
    result = ctx.tmp(size)
    # the shift amount is truncated at word_size - 1
    ctx.emit(  and_  (c, imm(max_shift, size), tmp0))
    # make a register double the size of the operands containing b a
    ctx.emit(  str_  (b, tmp1))
    # bug fix: the shift amount was `size // 8` (the operand size in
    # BYTES), which left b in the low half; b must be moved up by a full
    # `size` BITS so that tmp1 == (b << size) | a.
    ctx.emit(  lshl_ (tmp1, imm(size, 8), tmp1))
    ctx.emit(  or_   (tmp1, a, tmp1))
    # now shift right by the desired amount
    ctx.emit(  lshr_ (tmp1, tmp0, tmp1))
    # and truncate into result
    ctx.emit(  str_  (tmp1, result))
    # TODO: flags properly
    _shift_set_flags(ctx, result)
    operand.set(ctx, i, 0, result)
| 27.429854
| 84
| 0.567512
| 3,906
| 24,440
| 3.440604
| 0.071173
| 0.152095
| 0.043902
| 0.037726
| 0.823722
| 0.792246
| 0.777513
| 0.757794
| 0.740457
| 0.700201
| 0
| 0.036891
| 0.265753
| 24,440
| 890
| 85
| 27.460674
| 0.712009
| 0.148527
| 0
| 0.736648
| 0
| 0
| 0.035604
| 0
| 0
| 0
| 0
| 0.001124
| 0
| 1
| 0.038674
| false
| 0
| 0.016575
| 0
| 0.05709
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2c64dfbba098fe3c4fefeeab24d3c501676f1ad9
| 52,387
|
py
|
Python
|
work/code/paddle_nets.py
|
genetic-medicine/-RNA-
|
956110a29c9c53719e34e121cf481448beba89e0
|
[
"Apache-2.0"
] | null | null | null |
work/code/paddle_nets.py
|
genetic-medicine/-RNA-
|
956110a29c9c53719e34e121cf481448beba89e0
|
[
"Apache-2.0"
] | null | null | null |
work/code/paddle_nets.py
|
genetic-medicine/-RNA-
|
956110a29c9c53719e34e121cf481448beba89e0
|
[
"Apache-2.0"
] | 1
|
2021-08-15T10:40:00.000Z
|
2021-08-15T10:40:00.000Z
|
#%%
import paddle as mi
import paddle.nn as nn
import paddle.nn.functional as F
import numpy as np
import logging
# homebrew
import misc
logger = logging.getLogger(__name__)
def calc_padding(kernel_size, stride=1, dilation=1):
    """Compute 'same'-style padding for a convolution.

    Returns ((dilation * (kernel_size - 1) + 1) - stride) / 2, converted
    to int when it is a whole number; otherwise the fractional value is
    returned unchanged and a critical message is logged.
    """
    effective_kernel = dilation * (kernel_size - 1) + 1
    padding = (effective_kernel - stride) / 2
    if padding == int(padding):
        padding = int(padding)
    else:
        logger.critical('Padding is NOT an integer!')
    return padding
def position_encoding_trig(instance_size, base=10000, curve='trig'):
    """ x dim: [i=batch_size, j=seq_len, k=feature_dim]
    As the longest wavelength (for the last two dimensions) is 2pi*base,
    it appears reasonable for the final dimensions to stay within quarter
    wavelength, in this case, 2pi*base > 4 * max_seqlen, base >~ 0.64*max_seqlen

    Returns a (seq_len, feature_dim) float32 tensor of sinusoidal position
    encodings: even feature columns hold sin(j * omega_k), odd columns
    cos(j * omega_k).  Only the last two entries of instance_size are used.
    NOTE(review): the `curve` parameter is accepted but never read here.
    """
    # x = mi.empty((args.batch_size, 50, 128), # args.feature_dim), dtype='float32')
    assert len(instance_size) >= 2, "input dim must be at least 2"
    assert instance_size[-1] % 2 == 0, "feature dim must be even"
    jlen = instance_size[-2]
    klen = instance_size[-1]
    base = mi.to_tensor(base, dtype='float32')
    # position index column vector (jlen, 1)
    j = mi.arange(0, jlen, 1, dtype='float32').reshape((jlen, 1))
    # frequency index row vector (1, klen // 2)
    k = mi.arange(0, klen // 2, 1, dtype='float32').reshape((1, klen // 2))
    # omega_k = 1 / 10000 ** (2 * k / klen), computed in log space
    omega_k = mi.exp(-mi.log(base) * 2.0 / klen * k)
    omega_jk = mi.matmul(j, omega_k)
    pe = mi.zeros((jlen, klen), dtype='float32')
    pe[:, 0::2] = mi.sin(omega_jk)
    pe[:, 1::2] = mi.cos(omega_jk)
    return pe
def get_attn_mask(x, seqs_len):
    """Build an additive attention mask for padded sequences.

    Returns None when no masking is needed (seqs_len is None or every
    sequence already fills src_len).  Otherwise returns a
    (batch, 1, src_len, src_len) tensor that is 0 on valid positions and
    -inf wherever a query or key index lies beyond that sequence's
    length; it is added to the attention logits before softmax.
    """
    if seqs_len is None:
        return None
    batch_size, src_len, d_model = x.shape
    # all sequences are full length: no mask required
    if all(seqs_len.numpy() == src_len):
        return None
    # mask is added to the product before softmax
    attn_mask = mi.full((batch_size, 1, src_len, src_len), 0)  # -np.inf)
    for i in range(batch_size):
        # attn_mask[i, 0, :seqs_len[i], :seqs_len[i]] = 0
        # block attention to and from the padded tail of each sequence
        attn_mask[i, 0, seqs_len[i]:, :seqs_len[i]] = -np.inf
        attn_mask[i, 0, :seqs_len[i], seqs_len[i]:] = -np.inf
    return attn_mask
class PositionEncoder(nn.Layer):
    """Adds a precomputed sinusoidal position encoding to its input.

    The encoding table is registered as a non-persistable buffer so it
    moves with the layer's device but is not saved in checkpoints.
    """
    def __init__(self, input_size, curve='trig'):
        super(PositionEncoder, self).__init__()
        pos_mat = position_encoding_trig(input_size, curve=curve)
        self.register_buffer('pos_mat', pos_mat, persistable=False)

    def forward(self, x, beta=1.0):
        # slice the table to the actual (seq_len, feature_dim) of x;
        # beta scales the contribution of the position encoding
        jlen = x.shape[-2]
        klen = x.shape[-1]
        return x + beta * self.pos_mat[:jlen, :klen]
class AttentionMask(nn.Layer):
    """ not a good idea

    Buffers a (max_len, max_len) matrix filled with +inf.
    NOTE(review): forward() indexes this 2-D buffer with four indices
    (`self.attn_mask[:, :, :seqs_len, :seqs_len]`), which does not match
    the buffer's rank, and the fill value is +inf rather than the -inf
    used by get_attn_mask — this class looks unused/abandoned; confirm
    before relying on it.
    """
    def __init__(self, max_len=1024):
        super(AttentionMask, self).__init__()
        attn_mask = mi.full((max_len, max_len), np.inf)
        self.register_buffer('attn_mask', attn_mask, persistable=False)

    def forward(self, x, seqs_len=1024):
        batch_size, src_len, d_model = x.shape
        return self.attn_mask[:, :, :seqs_len, :seqs_len]
class AxisNorm(nn.Layer):
    """Normalize the input to zero mean and unit variance along one axis.

    Note: forward() modifies x in place (-=, /=) and also returns it.
    """
    def __init__(self, axis=-1, epsilon=1e-6):
        super(AxisNorm, self).__init__()
        self.axis = axis
        # bug fix: was hard-coded to 1e-6, silently ignoring the
        # epsilon argument (default unchanged, so callers are unaffected)
        self.epsilon = epsilon

    def forward(self, x):
        x -= mi.mean(x, axis=self.axis, keepdim=True)
        # epsilon guards against division by zero for constant inputs
        x /= mi.sqrt(mi.var(x, axis=self.axis, keepdim=True) + self.epsilon)
        return x
class MyEmbeddingLayer(nn.Layer):
    """Optional token-embedding front end.

    When args.residue_fmt is 'scalar' or 'quant' and args.embed_num > 0,
    integer inputs are mapped through an nn.Embedding of width
    args.embed_dim; otherwise inputs are passed through unchanged (cast
    to float32).  self.out_features records the resulting feature dim for
    downstream layers.
    """
    def __init__(self, args, in_features=None):
        super(MyEmbeddingLayer, self).__init__()
        nn.initializer.set_global_initializer(nn.initializer.KaimingNormal(), nn.initializer.Constant(0.0))
        self.residue_fmt = args.residue_fmt
        if in_features is None:
            self.in_features = int(args.feature_dim)
        else:
            self.in_features = int(in_features)
        self.embed_dim = int(args.embed_dim)
        self.embed_num = int(args.embed_num)
        in_features = self.in_features  # keep record of current feature dim
        if self.residue_fmt in ['scalar', 'quant'] and self.embed_num > 0:
            # in_features acts as the vocabulary size here
            self.embed = nn.Embedding(
                in_features,
                self.embed_dim,
                padding_idx = 0,
                sparse = False)
            in_features = self.embed_dim
        else:
            pass  # another option is
        self.out_features = in_features

    def forward(self, x, seqs_len=None):
        if hasattr(self, 'embed'):
            # embedding lookup requires int64 indices
            if not isinstance(x, mi.Tensor) or x.dtype.name != 'INT64':
                x = mi.to_tensor(x, dtype='int64')
            x = self.embed(x)
        else:
            # pass-through path: just ensure a float32 tensor
            if not isinstance(x, mi.Tensor) or x.dtype.name != 'FP32':
                x = mi.to_tensor(x, dtype='float32')
        return x

    def summary(self, input_size=None):
        if input_size is None:
            input_size = (4, 512, self.in_features)
        return mi.summary(self, input_size)
def MyLinearBlock(ndims, data_format='NLC', act_fn='ReLU', norm_fn='none', norm_axis=-1, dropout=0,
                  is_return=False):
    """ return a list of linear layers from ndims[0] to ndims[-1]

    Each Linear is optionally followed by activation, normalization, and
    dropout, selected by name.  With is_return=True the final Linear is
    emitted bare (no act/norm/dropout) so it can serve as an output head.
    Unrecognized act_fn/norm_fn names are logged and skipped.
    """
    num_layers = len(ndims)  # including the first and last layers
    data_format = data_format.upper()
    act_fn = act_fn.lower()
    norm_fn = norm_fn.lower()
    block_layers = []
    for idx_out in range(1, num_layers):  # i_in = i_out -1
        block_layers.append(nn.Linear(ndims[idx_out - 1], ndims[idx_out]))
        if is_return and idx_out == num_layers - 1:
            # output head: skip act/norm/dropout after the last layer
            break
        if act_fn == 'none':
            pass
        elif act_fn == 'relu':
            block_layers.append(nn.ReLU())
        elif act_fn == 'relu6':
            block_layers.append(nn.ReLU6())
        else:
            logger.warning(f'cannot recognize act_fn: {act_fn}')
        if norm_fn.startswith('none'):
            pass
        elif norm_fn.startswith('batch'):
            # for each dim along [C], norm_fn the [NL] 2D array
            block_layers.append(nn.BatchNorm1D(ndims[idx_out], data_format=data_format))
        elif norm_fn.startswith('insta'):  # only works for NCL or NC format
            # for each dim along [C], normalize the [L] 1D array (no normalization along N)
            # InstanceNorm2D will normalize for [HW] for each channel
            block_layers.append(nn.InstanceNorm1D(ndims[idx_out], data_format=data_format))
        elif norm_fn.startswith('layer'):
            # normalize the ndarray specified by the passed shape, starting from the last dim
            # an integer will normalize the [:,...:,j] for each j for each data in the batch
            # a shape af a two-element tuple will normalize the last two dims
            # the passed shape must match the shapes of the data, starting from the last dim
            block_layers.append(nn.LayerNorm(ndims[idx_out]))
        elif norm_fn.startswith('axis'):
            block_layers.append(AxisNorm(norm_axis))
        else:
            logger.warning(f'cannot recognize norm_fn: {norm_fn}')
        if dropout > 0:
            block_layers.append(nn.Dropout(dropout, name=f'Dropout{dropout:0.2g}'))
    return block_layers
def MyConv1DBlock(ndims, stride=1, dilation=1, kernel_size=3, padding=1, padding_mode='zeros',
                  max_pool=1, act_fn='relu', norm_fn='none', norm_axis=-1, dropout=0, data_format='NLC',
                  is_return=False):
    """ return a list of conv1d layers from nchannels[0] to nchannels[-1]

    Each Conv1D is optionally followed by max-pooling, activation,
    normalization, and dropout.  With is_return=True the final Conv1D is
    emitted bare so it can serve as an output head.  Unrecognized
    act_fn/norm_fn names are logged and skipped.
    """
    num_layers = len(ndims)  # including the first and last layers
    data_format = data_format.upper()
    act_fn = act_fn.lower()
    norm_fn = norm_fn.lower()
    block_layers = []
    for idx_out in range(1, num_layers):  # i_in = i_out - 1
        block_layers.append(nn.Conv1D(
            in_channels = ndims[idx_out - 1],
            out_channels = ndims[idx_out],
            stride = stride,
            kernel_size = kernel_size,
            dilation = dilation,
            padding = padding,
            padding_mode = padding_mode,
            data_format = data_format,
        ))
        if is_return and idx_out == num_layers - 1:
            # output head: skip pool/act/norm/dropout after the last layer
            break
        if max_pool > 1:
            # bug fix: the pooling kernel size was ndims[idx_out] (the
            # channel count); padding=max_pool // 2 shows the intended
            # kernel is max_pool itself
            block_layers.append(nn.MaxPool1D(max_pool, stride=1, padding=max_pool // 2))
        if act_fn == 'none':
            pass
        elif act_fn == 'relu':
            block_layers.append(nn.ReLU())
        elif act_fn == 'relu6':
            block_layers.append(nn.ReLU6())
        else:
            logger.warning(f'cannot recognize act_fn: {act_fn}')
        if norm_fn.startswith('none'):
            pass
        elif norm_fn.startswith('batch'):
            block_layers.append(nn.BatchNorm1D(ndims[idx_out], data_format=data_format))
        elif norm_fn.startswith('insta'):
            block_layers.append(nn.InstanceNorm1D(ndims[idx_out], data_format=data_format))
        elif norm_fn.startswith('layer'):
            block_layers.append(nn.LayerNorm(ndims[idx_out]))
        elif norm_fn.startswith('axis'):
            block_layers.append(AxisNorm(norm_axis))
        else:
            logger.warning(f'cannot recognize norm_fn: {norm_fn}')
        if dropout > 0:
            block_layers.append(nn.Dropout(dropout))
    return block_layers
def MyConv2DBlock(ndims, stride=1, dilation=1, kernel_size=3, padding=1, padding_mode='zeros',
                  max_pool=1, act_fn='relu', norm_fn='none', norm_axis=-1, dropout=0, data_format='NCHW',
                  is_return=False):
    """ return a list of conv2d layers from nchannels[0] to nchannels[-1]

    Each Conv2D is optionally followed by max-pooling, activation,
    normalization, and dropout.  With is_return=True the final Conv2D is
    emitted bare so it can serve as an output head.  Unrecognized
    act_fn/norm_fn names are logged and skipped.
    """
    num_layers = len(ndims)  # including the first and last layers
    data_format = data_format.upper()
    act_fn = act_fn.lower()
    norm_fn = norm_fn.lower()
    block_layers = []
    for idx_out in range(1, num_layers):
        block_layers.append(nn.Conv2D(
            in_channels = ndims[idx_out - 1],
            out_channels = ndims[idx_out],
            stride = stride,
            kernel_size = kernel_size,
            dilation = dilation,
            padding = padding,
            padding_mode = padding_mode,
            data_format = data_format,
        ))
        if is_return and idx_out == num_layers - 1:
            # output head: skip pool/act/norm/dropout after the last layer
            break
        if max_pool > 1:
            # bug fix: the pooling kernel size was ndims[idx_out] (the
            # channel count); padding=max_pool // 2 shows the intended
            # kernel is max_pool itself
            block_layers.append(nn.MaxPool2D(max_pool,
                                             stride=1, padding=max_pool // 2,
                                             data_format=data_format))
        if act_fn == 'none':
            pass
        elif act_fn == 'relu':
            block_layers.append(nn.ReLU())
        elif act_fn == 'relu6':
            block_layers.append(nn.ReLU6())
        else:
            logger.warning(f'cannot recognize act_fn: {act_fn}')
        if norm_fn.startswith('none'):
            pass
        elif norm_fn.startswith('batch'):
            block_layers.append(nn.BatchNorm2D(ndims[idx_out], data_format=data_format))
        elif norm_fn.startswith('insta'):
            block_layers.append(nn.InstanceNorm2D(ndims[idx_out], data_format=data_format))
        elif norm_fn.startswith('layer'):
            block_layers.append(nn.LayerNorm(ndims[idx_out]))
        elif norm_fn.startswith('axis'):
            block_layers.append(AxisNorm(norm_axis))
        else:
            logger.warning(f'cannot recognize norm_fn: {norm_fn}')
        if dropout > 0:
            block_layers.append(nn.Dropout2D(dropout, data_format=data_format))
    return block_layers
class MyLinearTower(nn.Layer):
    """Stack of args.linear_num MyLinearBlock groups, optionally with
    residual connections (args.linear_resnet)."""
    def __init__(self, args, in_features=None, is_return=False):
        """ is_return:True will turn off Act/Norm/Dropout for the last block """
        super(MyLinearTower, self).__init__()
        nn.initializer.set_global_initializer(nn.initializer.KaimingNormal(), nn.initializer.Constant(0.0))
        self.is_return = is_return
        if in_features is None:
            self.in_features = int(args.feature_dim)
        else:
            self.in_features = int(in_features)
        self.data_format = 'NLC'
        self.act_fn = args.act_fn
        self.norm_fn = args.norm_fn
        self.norm_axis = int(args.norm_axis)
        self.dropout = float(args.dropout)
        # accept either a scalar or a list for linear_dim
        self.linear_dim = [int(_i) for _i in args.linear_dim] \
            if hasattr(args.linear_dim, '__len__') else [int(args.linear_dim)]
        self.linear_num = int(args.linear_num)
        self.linear_resnet = args.linear_resnet
        in_features = self.in_features
        self.linear_layers = []  # additional layers if needed
        for i in range(self.linear_num):
            # only the very last block is a bare output head
            is_return = (i == self.linear_num -1) if self.is_return else False
            self.linear_layers.append(nn.Sequential(*MyLinearBlock(
                [in_features] + self.linear_dim,
                dropout = self.dropout,
                act_fn = self.act_fn,
                norm_fn = self.norm_fn,
                norm_axis = self.norm_axis,
                data_format = self.data_format,
                is_return = is_return,
            )))
            # residual addition requires matching input/output widths
            if self.linear_resnet and in_features != self.linear_dim[-1]:
                logger.critical(f'linear_resnet requires in_features: {in_features} == linear_dim[-1]: {self.linear_dim[-1]}')
            in_features = self.linear_dim[-1]
            self.add_sublayer(f'leg1_linear{i}', self.linear_layers[i])
        self.out_features = in_features

    def forward(self, x, seqs_len=None):
        # if not isinstance(x, mi.Tensor) or x.dtype.name != 'FP32':
        #     x = mi.to_tensor(x, dtype='float32')
        for linear in self.linear_layers:
            if self.linear_resnet:
                x = x + linear(x)
            else:
                x = linear(x)
        return x

    def summary(self, input_size=None):
        if input_size is None:
            input_size = (4, 512, self.in_features)
        return mi.summary(self, input_size)
class MyLSTMTower(nn.Layer):
    """Stack of nn.LSTM layers (one per entry in args.lstm_dim),
    optionally with residual connections (args.lstm_resnet)."""
    def __init__(self, args, in_features=None):
        super(MyLSTMTower, self).__init__()
        self.dropout = float(args.dropout)
        if in_features is None:
            self.in_features = int(args.feature_dim)
        else:
            self.in_features = int(in_features)
        # accept either a scalar or a list for lstm_dim
        self.lstm_dim = [int(_i) for _i in args.lstm_dim] \
            if hasattr(args.lstm_dim, '__len__') else [int(args.lstm_dim)]
        self.lstm_direct = int(args.lstm_direct)
        self.lstm_num = int(args.lstm_num)
        self.lstm_resnet = args.lstm_resnet
        in_features = self.in_features
        self.lstm_layers = []
        for i in range(len(self.lstm_dim)):
            self.lstm_layers.append(nn.LSTM(
                input_size = in_features,
                hidden_size = self.lstm_dim[i],
                num_layers = self.lstm_num,
                direction = 'forward' if self.lstm_direct == 1 else 'bidirectional',
                dropout = self.dropout,
            ))
            # bidirectional doubles the effective output width
            out_features = self.lstm_dim[i] * self.lstm_direct
            # residual addition requires matching input/output widths
            if self.lstm_resnet and in_features != out_features:
                logger.critical(f'lstm_resnet requires in_features: {in_features} == out_features {out_features}')
            in_features = out_features
            self.add_sublayer(f'lstm{i}', self.lstm_layers[i])
        self.out_features = in_features

    def forward(self, x, seqs_len=None):
        for lstm in self.lstm_layers:
            if self.lstm_resnet:
                # discard final (h, c) states; only the sequence output is used
                x_out, (_, _) = lstm(x, initial_states=None, sequence_length=seqs_len)
                x = x + x_out
            else:
                x, (_, _) = lstm(x, initial_states=None, sequence_length=seqs_len)
        return x

    def summary(self, input_size=None):
        if input_size is None:
            input_size = (4, 512, self.in_features)
        return mi.summary(self, input_size)
class MyAttnTower(nn.Layer):
    """Transformer-encoder tower: sinusoidal position encoding followed
    by args.attn_num pre-norm TransformerEncoder layers.  The feature
    width is unchanged (out_features == in_features)."""
    def __init__(self, args, in_features=None):
        super(MyAttnTower, self).__init__()
        self.dropout = float(args.dropout)
        self.act_fn = args.attn_act
        if in_features is None:
            self.in_features = int(args.feature_dim)
        else:
            self.in_features = int(in_features)
        self.attn_num = int(args.attn_num)
        # self.attn_dim = int(args.attn_dim) # which is the same as in_features
        self.attn_ffdim = int(args.attn_ffdim)
        self.attn_nhead = int(args.attn_nhead)
        self.attn_dropout = args.attn_dropout  # can be None
        self.attn_ffdropout = args.attn_ffdropout
        in_features = self.in_features
        # position table sized for sequences up to 2000 tokens
        self.posi_encoder = PositionEncoder((1, 2000, in_features))
        # self.attn_mask = AttentionMask(args.batch_size, self.attn_nhead, args.max_seqlen)
        attn_layer = nn.TransformerEncoderLayer(
            d_model = in_features,
            nhead = self.attn_nhead,
            dim_feedforward = self.attn_ffdim,  # feed_forward dimension
            dropout = self.dropout,  # between layers (default: 0.1)
            activation = self.act_fn,  # (default: relu)
            attn_dropout = self.attn_dropout,  # for self-attention target
            act_dropout = self.attn_ffdropout,  # after activation in feedforward
            normalize_before = True,  # between layers
            weight_attr = None,
            bias_attr = None,
        )
        self.attn = nn.TransformerEncoder(
            attn_layer,
            num_layers= self.attn_num,
            norm = None,
        )
        self.out_features = in_features

    def forward(self, x, seqs_len=None):
        x = self.posi_encoder(x, beta=1.0)
        # mask is None when every sequence is full length
        x = self.attn(x, get_attn_mask(x, seqs_len))
        return x

    def summary(self, input_size=None):
        if input_size is None:
            input_size = (4, 512, self.in_features)
        return mi.summary(self, input_size)
class MyConv1DTower(nn.Layer):
    """Stack of args.conv1d_num MyConv1DBlock groups with fixed
    stride=1, dilation=1, kernel_size=5 and 'same' padding, optionally
    with residual connections (args.conv1d_resnet)."""
    def __init__(self, args, in_features=None):
        super(MyConv1DTower, self).__init__()
        nn.initializer.set_global_initializer(nn.initializer.KaimingNormal(), nn.initializer.Constant(0.0))
        self.data_format = 'NLC'
        self.act_fn = args.act_fn
        self.norm_fn = args.norm_fn
        self.norm_axis = int(args.norm_axis)
        self.dropout = float(args.dropout)
        if in_features is None:
            self.in_features = int(args.feature_dim)
        else:
            self.in_features = int(in_features)
        # accept either a scalar or a list for conv1d_dim
        self.conv1d_dim = [int(_i) for _i in args.conv1d_dim] \
            if hasattr(args.conv1d_dim, '__len__') else [int(args.conv1d_dim)]
        self.conv1d_num = int(args.conv1d_num)
        self.conv1d_resnet = args.conv1d_resnet
        # NOTE(review): the three attributes below are shadowed by the
        # local stride/dilation/kernel_size values used further down
        self.conv1d_stride = 1
        self.conv1d_dilation = 1
        self.kernel_size = 5
        # self.padding =
        in_features = self.in_features
        # 1D convolution layers
        stride, dilation, kernel_size = 1, 1, 5
        # padding is set to return length/stride
        padding = calc_padding(kernel_size, stride=stride, dilation=dilation)
        self.conv1d_layers = []
        for i in range(self.conv1d_num):
            self.conv1d_layers.append(nn.Sequential(*MyConv1DBlock(
                [in_features] + self.conv1d_dim,
                stride = stride,
                kernel_size = kernel_size,
                dilation = dilation,
                padding = padding, padding_mode = 'zeros',
                data_format = self.data_format,
                dropout = self.dropout,
                act_fn = self.act_fn,
                norm_fn = self.norm_fn,
                norm_axis = self.norm_axis,
            )))
            # residual addition requires matching input/output widths
            if self.conv1d_resnet and in_features != self.conv1d_dim[-1]:
                logger.critical(f'conv1d_resnet requires in_features: {in_features} == conv1d_dim[-1]: {self.conv1d_dim[-1]}')
            in_features = self.conv1d_dim[-1]
            self.add_sublayer(f'conv1d{i}', self.conv1d_layers[i])
        self.out_features = in_features

    def forward(self, x, seqs_len=None):
        for conv1d in self.conv1d_layers:
            if self.conv1d_resnet:
                x = x + conv1d(x)
            else:
                x = conv1d(x)
        return x

    def summary(self, input_size=None):
        if input_size is None:
            input_size = (4, 512, self.in_features)
        return mi.summary(self, input_size)
class MyConv2DTower(nn.Layer):
    """Stack of 2D-convolution blocks with optional residual connections.
    NOTE(review): self.data_format is set to 'NLC' but the blocks are built
    with data_format='NCHW' — confirm which one callers rely on."""
    def __init__(self, args, in_features=None):
        """Build conv2d_num blocks from args; in_features overrides args.feature_dim."""
        super(MyConv2DTower, self).__init__()
        nn.initializer.set_global_initializer(nn.initializer.KaimingNormal(), nn.initializer.Constant(0.0))
        self.act_fn = args.act_fn
        self.norm_fn = args.norm_fn
        self.norm_axis = int(args.norm_axis)
        self.dropout = float(args.dropout)
        self.data_format = 'NLC'
        # Input width: explicit in_features wins over args.feature_dim.
        if in_features is None:
            self.in_features = int(args.feature_dim)
        else:
            self.in_features = int(in_features)
        # Normalize conv2d_dim to a list of per-layer channel widths.
        self.conv2d_dim = [int(_i) for _i in args.conv2d_dim] \
                if hasattr(args.conv2d_dim, '__len__') else [int(args.conv2d_dim)]
        self.conv2d_num = int(args.conv2d_num)
        self.conv2d_resnet = args.conv2d_resnet
        in_features = self.in_features
        # 2D convolution layers
        stride, dilation, kernel_size = 1, 1, 3
        # padding is set to return length/stride
        padding = calc_padding(kernel_size, stride=stride, dilation=dilation)
        self.conv2d_layers = []
        for i in range(self.conv2d_num):
            self.conv2d_layers.append(nn.Sequential(*MyConv2DBlock(
                [in_features] + self.conv2d_dim,
                stride = stride,
                kernel_size = kernel_size,
                dilation = dilation,
                padding = padding,
                padding_mode = 'zeros',
                act_fn = self.act_fn,
                norm_fn = self.norm_fn,
                norm_axis = self.norm_axis,
                dropout = self.dropout,
                data_format = 'NCHW'
                )))
            # Residual add needs matching widths; only logs, does not raise.
            if self.conv2d_resnet and in_features != self.conv2d_dim[-1]:
                logger.critical(f'conv2d_resnet requires in_features: {in_features} == conv2d_dim[-1]: {self.conv2d_dim[-1]}')
            in_features = self.conv2d_dim[-1]
            self.add_sublayer(f'conv2d{i}', self.conv2d_layers[i])
        self.out_features = in_features
    def forward(self, x, seqs_len=None):
        """Apply each conv block in order; seqs_len is accepted but unused here."""
        for conv2d in self.conv2d_layers:
            if self.conv2d_resnet:
                x = x + conv2d(x)
            else:
                x = conv2d(x)
        return x
    def summary(self, input_size=None):
        """Print a model summary.
        NOTE(review): default is 3D (4, 512, in_features) although the conv
        blocks are NCHW/4D — confirm expected summary input shape."""
        if input_size is None:
            input_size = (4, 512, self.in_features)
        return mi.summary(self, input_size)
class Seq2MatTransform(nn.Layer):
    """Turn two per-position feature tensors into one pairwise (matrix) tensor
    by broadcasting one along rows and the other along columns, then combining
    them via concatenation, addition, or multiplication."""
    def __init__(self, method='concat', in_fmt='NCL', out_fmt='NCHW'):
        super().__init__()
        # Store everything upper-cased so forward() can compare verbatim.
        self.method = method.upper()
        self.in_fmt = in_fmt.upper()
        self.out_fmt = out_fmt.upper()
    def forward(self, xh, xw):
        """Combine xh (rows) and xw (columns) into a pairwise map in out_fmt."""
        # Bring both inputs to NCL layout first.
        if self.in_fmt == 'NLC':
            xh = xh.transpose([0, 2, 1])
            xw = xw.transpose([0, 2, 1])
        elif self.in_fmt != 'NCL':
            logger.critical(f"Uknown in_fmt: {self.in_fmt}!")
        N, C, H = xh.shape
        N1, C1, W = xw.shape
        assert N == N1, f"Two matrices must have the same N: {N} != {N1}!"
        # Broadcast each 1D track into an HxW grid (rows from xh, cols from xw).
        xh = xh.unsqueeze(3).expand((N, C, H, W))
        xw = xw.unsqueeze(2).expand((N, C1, H, W))
        if self.method.startswith('CONCAT'):
            x = mi.concat([xh, xw], axis=1) # --> [N, C+C1, L, L]
        elif self.method.startswith('ADD'):
            assert C == C1, f"Cannot add two matrices with different C: {C} != {C1}"
            x = xh + xw
        elif self.method.startswith('MUL'):
            assert C == C1, f"Cannot multiply two matrices with different C: {C} != {C1}"
            x = xh * xw
        else:
            logger.critical(f"Unknown method: {self.method}")
        # Emit in the requested channel layout.
        if self.out_fmt == 'NHWC':
            x = x.transpose([0, 2, 3, 1])
        elif self.out_fmt != 'NCHW':
            logger.critical(f"Uknown out_fmt: {self.out_fmt}!")
        return x
class LazyLinearNet(nn.Layer):
    """Purely per-residue network: embedding, linear stem, linear head.
    This ignores all inter-residue interactions."""
    def __init__(self, args):
        super().__init__()
        nn.initializer.set_global_initializer(
            nn.initializer.KaimingNormal(), nn.initializer.Constant(0.0))
        self.data_format = args.input_fmt.upper()
        self.feature_dim = int(args.feature_dim)
        # Chain the towers; each consumes the previous tower's output width.
        self.embed = MyEmbeddingLayer(args, in_features=self.feature_dim)
        self.linear_in = MyLinearTower(args, in_features=self.embed.out_features)
        dims = args.output_dim
        self.output_dim = [int(_i) for _i in dims] if hasattr(dims, '__len__') else [int(dims)]
        self.output_num = int(args.output_num)
        # Output head reuses MyLinearTower with a synthesized config.
        self.out = MyLinearTower(
            misc.Struct(
                data_format = args.input_fmt,
                feature_dim = args.feature_dim, # overwritten by in_features below
                linear_dim = args.output_dim,
                linear_num = args.output_num,
                linear_resnet = False,
                act_fn = args.act_fn,
                norm_fn = args.norm_fn,
                norm_axis = args.norm_axis,
                dropout = args.dropout,
            ),
            in_features = self.linear_in.out_features,
            is_return = True,
        )
    def summary(self, input_size=None):
        """Summarize; token inputs default to (4, 512), dense to (4, 512, feature_dim)."""
        if input_size is None:
            input_size = (4, 512) if hasattr(self.embed, 'embed') else (4, 512, self.feature_dim)
        return mi.summary(self, input_size)
    # @mi.jit.to_static
    def forward(self, x, seqs_len=None):
        """Embed -> per-residue linear stack -> output head."""
        h = self.embed(x)
        h = self.linear_in(h, seqs_len=seqs_len)
        return self.out(h)
class Seq2Seq_LSTMNet(nn.Layer):
    """Sequence-to-sequence model: embedding -> linear stem -> LSTM -> linear head."""
    def __init__(self, args):
        super().__init__()
        nn.initializer.set_global_initializer(
            nn.initializer.KaimingNormal(), nn.initializer.Constant(0.0))
        self.data_format = args.input_fmt.upper()
        self.feature_dim = int(args.feature_dim)
        # Chain the towers; each consumes the previous tower's output width.
        self.embed = MyEmbeddingLayer(args, in_features=self.feature_dim)
        self.linear_in = MyLinearTower(args, in_features=self.embed.out_features)
        self.lstm = MyLSTMTower(args, in_features=self.linear_in.out_features)
        dims = args.output_dim
        self.output_dim = [int(_i) for _i in dims] if hasattr(dims, '__len__') else [int(dims)]
        self.output_num = int(args.output_num)
        # Output head reuses MyLinearTower with a synthesized config.
        self.out = MyLinearTower(
            misc.Struct(
                data_format = args.input_fmt,
                feature_dim = args.feature_dim, # overwritten by in_features below
                linear_dim = args.output_dim,
                linear_num = args.output_num,
                linear_resnet = False,
                act_fn = args.act_fn,
                norm_fn = args.norm_fn,
                norm_axis = args.norm_axis,
                dropout = args.dropout,
            ),
            in_features = self.lstm.out_features,
            is_return = True,
        )
    # @property
    def summary(self, input_size=None):
        """Summarize; token inputs default to (4, 512), dense to (4, 512, feature_dim)."""
        if input_size is None:
            input_size = (4, 512) if hasattr(self.embed, 'embed') else (4, 512, self.feature_dim)
        return mi.summary(self, input_size)
    def forward(self, x, seqs_len=None):
        """Run embed, linear stem, LSTM, and output head in sequence."""
        logger.debug('Applying self.embed()')
        x = self.embed(x)
        logger.debug('Applying self.linear_in()')
        x = self.linear_in(x, seqs_len=seqs_len)
        logger.debug('Applying self.lstm()')
        x = self.lstm(x, seqs_len=seqs_len)
        logger.debug('Applying self.out()')
        return self.out(x)
class Seq2Seq_Conv1DNet(nn.Layer):
    """Sequence-to-sequence model: embedding -> linear stem -> Conv1D tower -> linear head."""
    def __init__(self, args):
        super().__init__()
        nn.initializer.set_global_initializer(
            nn.initializer.KaimingNormal(), nn.initializer.Constant(0.0))
        self.data_format = args.input_fmt.upper()
        self.feature_dim = int(args.feature_dim)
        # Chain the towers; each consumes the previous tower's output width.
        self.embed = MyEmbeddingLayer(args, in_features=self.feature_dim)
        self.linear_in = MyLinearTower(args, in_features=self.embed.out_features)
        self.conv1d = MyConv1DTower(args, in_features=self.linear_in.out_features)
        dims = args.output_dim
        self.output_dim = [int(_i) for _i in dims] if hasattr(dims, '__len__') else [int(dims)]
        self.output_num = int(args.output_num)
        # Output head reuses MyLinearTower with a synthesized config.
        self.out = MyLinearTower(
            misc.Struct(
                data_format = args.input_fmt,
                feature_dim = args.feature_dim, # overwritten by in_features below
                linear_dim = args.output_dim,
                linear_num = args.output_num,
                linear_resnet = False,
                act_fn = args.act_fn,
                norm_fn = args.norm_fn,
                norm_axis = args.norm_axis,
                dropout = args.dropout,
            ),
            in_features = self.conv1d.out_features,
            is_return = True,
        )
    def forward(self, x, seqs_len=None): #, predict=False):
        """Run embed, linear stem, conv1d tower, and output head in sequence."""
        h = self.embed(x)
        h = self.linear_in(h, seqs_len=seqs_len)
        h = self.conv1d(h, seqs_len=seqs_len)
        return self.out(h)
    def summary(self, input_size=None):
        """Summarize; token inputs default to (4, 512), dense to (4, 512, feature_dim)."""
        if input_size is None:
            input_size = (4, 512) if hasattr(self.embed, 'embed') else (4, 512, self.feature_dim)
        return mi.summary(self, input_size)
class Seq2Seq_Conv2DNet(nn.Layer):
    """Pairwise 2D-conv network: per-residue linear stem, outer concatenation
    into an LxL map, 2D conv blocks, symmetrized 2-class head, then a per-row
    max to return one score per residue."""
    def __init__(self, args):
        super(Seq2Seq_Conv2DNet, self).__init__()
        nn.initializer.set_global_initializer(nn.initializer.KaimingNormal(), nn.initializer.Constant(0.0))
        self.act_fn = args.act_fn
        self.norm_fn = args.norm_fn
        self.dropout = float(args.dropout)
        self.norm_axis = int(args.norm_axis)
        self.data_format = 'NLC'
        self.feature_dim = int(args.feature_dim)
        self.linear_dim = [int(_i) for _i in args.linear_dim] \
                if hasattr(args.linear_dim, '__len__') else [int(args.linear_dim)]
        self.linear_num = int(args.linear_num)
        self.linear_resnet = args.linear_resnet
        self.conv2d_dim = [int(_i) for _i in args.conv2d_dim] \
                if hasattr(args.conv2d_dim, '__len__') else [int(args.conv2d_dim)]
        self.conv2d_num = int(args.conv2d_num)
        # BUG FIX: forward() reads self.conv2d_resnet, but this was previously
        # stored only as self.conv2d_resnum (typo) -> AttributeError at runtime.
        self.conv2d_resnet = args.conv2d_resnet
        self.conv2d_resnum = args.conv2d_resnet  # kept for backward compatibility
        in_features = self.feature_dim # keep record of current feature dim
        # Leg 1: per-residue linear blocks (optional residual in forward).
        self.leg1_linear = [] # additional layers if needed
        for i in range(self.linear_num):
            self.leg1_linear.append(nn.Sequential(*MyLinearBlock(
                [in_features] + self.linear_dim,
                dropout = self.dropout,
                norm_fn = self.norm_fn,
                act_fn = self.act_fn,
                data_format = self.data_format
                )))
            in_features = self.linear_dim[-1]
            self.add_sublayer(f'leg1_linear{i}', self.leg1_linear[i])
        stride, dilation, kernel_size = 1, 1, 5
        # padding is calculated so as to return length/stride
        padding = calc_padding(kernel_size, stride=stride, dilation=dilation)
        in_features = in_features * 2 # due to outer concatenation
        # Leg 2: 2D conv blocks over the LxL pairwise map.
        self.leg2_conv2d = []
        for i in range(self.conv2d_num):
            self.leg2_conv2d.append(nn.Sequential(*MyConv2DBlock(
                [in_features] + self.conv2d_dim,
                stride = stride, kernel_size = kernel_size, dilation = dilation,
                padding = padding, padding_mode = 'zeros', norm_fn=self.norm_fn,
                dropout = self.dropout, data_format = 'NCHW'
                )))
            in_features = self.conv2d_dim[-1]
            self.add_sublayer(f'leg2_conv2d{i}', self.leg2_conv2d[i])
        # Leg 3: two width-preserving linear blocks applied per matrix cell.
        self.leg3_linear = []
        for i in range(2):
            self.leg3_linear.append(nn.Sequential(*MyLinearBlock(
                [in_features, in_features], #, feature_dim // 2],
                dropout = self.dropout,
                act_fn = self.act_fn,
                norm_fn = self.norm_fn,
                data_format = self.data_format,
                )))
            self.add_sublayer(f'leg3_linear{i}', self.leg3_linear[i])
        # 2-class softmax head per matrix cell.
        self.out = nn.Sequential(
            nn.Linear(in_features=in_features, out_features=2),
            # nn.ReLU(),
            nn.Softmax(axis=-1),
            )
    def forward(self, x, seqs_len=None):
        """x: [N, L, C] float features -> per-residue score [N, L]."""
        if not isinstance(x, mi.Tensor) or x.dtype.name != 'FP32':
            x = mi.to_tensor(x, dtype='float32')
        # [N, L, C] --> [N, L, self.linear_dim[-1]]
        for linear in self.leg1_linear:
            if self.linear_resnet:
                x = x + linear(x)
            else:
                x = linear(x)
        # Outer concatenation: for each channel pair, build an LxL matrix.
        x = mi.transpose(x, perm=[0, 2, 1]) # [NLC] --> [NCL]
        new_shape = [x.shape[0], x.shape[1], x.shape[2], x.shape[2]]
        x = mi.concat([mi.broadcast_to(mi.unsqueeze(x, axis=3), shape=new_shape),
                       mi.broadcast_to(mi.unsqueeze(x, axis=2), shape=new_shape)],
                      axis=1) # --> [N, 2*C, L, L]
        for conv2d in self.leg2_conv2d:
            if self.conv2d_resnet:
                x = x + conv2d(x)
            else:
                x = conv2d(x)
        x = mi.transpose(x, perm=[0, 3, 2, 1]) # --> [N, L, L, conv2d_dim[-1]]
        for linear in self.leg3_linear:
            x = linear(x)
        # Symmetrize the pairwise map, then take class-0 probability per cell.
        x = (x + mi.transpose(x, perm=[0, 2, 1, 3])) / 2
        x = self.out(x)
        x = mi.squeeze(x[:, :, :, 0], axis=-1) # -> [N, L, L, 1] -> [NLL]
        # Zero the diagonal (self-pairing) before the per-row max.
        x = x * (1.0 - mi.eye(x.shape[1], dtype='float32'))
        x = mi.max(x, axis=-1)
        return x
    def summary(self, input_size=None):
        """Summarize with a default (4, 512, feature_dim) input."""
        if input_size is None:
            input_size = (4, 512, self.feature_dim)
        return mi.summary(self, input_size)
class Seq2Seq_AttnNet(nn.Layer):
    """Sequence-to-sequence model: embedding -> linear stem -> attention tower -> linear head."""
    def __init__(self, args):
        super().__init__()
        nn.initializer.set_global_initializer(
            nn.initializer.KaimingNormal(), nn.initializer.Constant(0.0))
        self.data_format = args.input_fmt.upper()
        self.feature_dim = int(args.feature_dim)
        # Chain the towers; each consumes the previous tower's output width.
        self.embed = MyEmbeddingLayer(args, in_features=self.feature_dim)
        self.linear_in = MyLinearTower(args, in_features=self.embed.out_features)
        self.attn = MyAttnTower(args, in_features=self.linear_in.out_features)
        dims = args.output_dim
        self.output_dim = [int(_i) for _i in dims] if hasattr(dims, '__len__') else [int(dims)]
        self.output_num = int(args.output_num)
        # Output head reuses MyLinearTower with a synthesized config.
        self.out = MyLinearTower(
            misc.Struct(
                data_format = args.input_fmt,
                feature_dim = args.feature_dim, # overwritten by in_features below
                linear_dim = args.output_dim,
                linear_num = args.output_num,
                linear_resnet = False,
                act_fn = args.act_fn,
                norm_fn = args.norm_fn,
                norm_axis = args.norm_axis,
                dropout = args.dropout,
            ),
            in_features = self.attn.out_features,
            is_return = True,
        )
    def forward(self, x, seqs_len=None):
        """Run embed, linear stem, attention tower, and output head in sequence."""
        h = self.embed(x)
        h = self.linear_in(h, seqs_len=seqs_len)
        h = self.attn(h, seqs_len=seqs_len)
        return self.out(h)
    # @property
    def summary(self, input_size=None):
        """Summarize; token inputs default to (2, 512), dense to (2, 512, feature_dim)."""
        if input_size is None:
            input_size = (2, 512) if hasattr(self.embed, 'embed') else (2, 512, self.feature_dim)
        return mi.summary(self, input_size)
class Seq2Seq_EmbedLSTMNet_OLD(nn.Layer):
    """Legacy three-leg model: embedding + linear blocks, LSTM stack, then
    halving linear blocks and a 2-class softmax; returns the class-0 score
    per position. Superseded by the tower-based Seq2Seq_LSTMNet."""
    def __init__(self, args):
        super(Seq2Seq_EmbedLSTMNet_OLD, self).__init__()
        nn.initializer.set_global_initializer(nn.initializer.KaimingNormal(), nn.initializer.Constant(0.0))
        self.act_fn = args.act_fn
        self.norm_fn = args.norm_fn
        self.norm_axis = int(args.norm_axis)
        self.dropout = float(args.dropout)
        self.data_format = 'NLC'
        self.feature_dim = int(args.feature_dim)
        self.embed_dim = int(args.embed_dim)
        # Normalize *_dim args to lists of per-layer widths.
        self.linear_dim = [int(_i) for _i in args.linear_dim] \
                if hasattr(args.linear_dim, '__len__') else [int(args.linear_dim)]
        self.linear_num = int(args.linear_num)
        self.linear_resnet = args.linear_resnet
        self.lstm_dim = [int(_i) for _i in args.lstm_dim] \
                if hasattr(args.lstm_dim, '__len__') else [int(args.lstm_dim)]
        self.lstm_direct = int(args.lstm_direct)
        self.lstm_num = int(args.lstm_num)
        self.lstm_resnet = args.lstm_resnet
        in_features = self.feature_dim # keep record of current feature dim
        # Token embedding; feature_dim acts as the vocabulary size here.
        self.embed = nn.Embedding(
            in_features,
            self.embed_dim,
            padding_idx = 0,
            sparse = True)
        in_features = self.embed_dim
        # Leg 1: per-position linear blocks (optional residual in forward).
        self.leg1_linear = [] # additional layers if needed
        for i in range(self.linear_num):
            self.leg1_linear.append(nn.Sequential(*MyLinearBlock(
                [in_features] + self.linear_dim,
                dropout = self.dropout,
                norm_fn = self.norm_fn,
                norm_axis = self.norm_axis,
                act_fn = self.act_fn,
                data_format = self.data_format
                )))
            in_features = self.linear_dim[-1]
            self.add_sublayer(f'leg1_linear{i}', self.leg1_linear[i])
            # setattr(self, f'blk1layer{i}', self.blk1_linear[i])
        # how to give the initial hidden and cell states of lstm???
        # Maybe it is not important
        # Leg 2: one nn.LSTM per entry in lstm_dim, chained by width.
        self.leg2_lstm = []
        for i in range(len(self.lstm_dim)):
            self.leg2_lstm.append(nn.LSTM(
                input_size = in_features,
                hidden_size = self.lstm_dim[i],
                num_layers = self.lstm_num,
                direction = 'forward' if self.lstm_direct == 1 else 'bidirectional',
                dropout = args.dropout,
                ))
            # Bidirectional output doubles the width (lstm_direct == 2).
            in_features = self.lstm_dim[i] * self.lstm_direct
            self.add_sublayer(f'leg2_lstm{i}', self.leg2_lstm[i])
            # setattr(self, f'blk2layer{i}', self.blk2_lstm[i])
        # Leg 3: two width-halving linear blocks.
        self.leg3_linear = []
        for i in range(2):
            self.leg3_linear.append(nn.Sequential(*MyLinearBlock(
                [in_features, in_features // 2], #, feature_dim // 2],
                dropout = self.dropout,
                act_fn = self.act_fn,
                norm_fn = self.norm_fn,
                norm_axis = self.norm_axis,
                data_format = self.data_format,
                )))
            in_features = in_features // 2
            self.add_sublayer(f'leg3_linear{i}', self.leg3_linear[i])
            # setattr(self, f'blk3layer{i}', self.blk3_linear[i])
        # 2-class softmax head per position.
        self.out = nn.Sequential(
            nn.Linear(in_features, 2),
            # nn.ReLU(),
            nn.Softmax(axis=-1),
            )
    # @property
    def summary(self, input_size=None):
        """Summarize with a default (2, 512) token-id input."""
        if input_size is None:
            input_size = (2, 512)
        return mi.summary(self, input_size)
    def forward(self, x, seqs_len=None):
        """x: integer token ids [N, L] -> class-0 probability per position [N, L]."""
        if not isinstance(x, mi.Tensor) or x.dtype.name != 'INT64':
            x = mi.to_tensor(x, dtype='int64')
        x = self.embed(x)
        for linear in self.leg1_linear:
            if self.linear_resnet:
                x = x + linear(x)
            else:
                x = linear(x)
        # x = mi.concat((x, F.relu(self.conv1(x))), axis=-1)
        for lstm in self.leg2_lstm:
            if self.lstm_resnet:
                # Residual add assumes LSTM output width matches its input width.
                x_out, (_, _) = lstm(x, initial_states=None, sequence_length=seqs_len)
                x = x + x_out
            else:
                x, (_, _) = lstm(x, initial_states=None, sequence_length=seqs_len)
        for linear in self.leg3_linear:
            x = linear(x)
        x = self.out(x)
        # return mi.squeeze(x, axis=-1)
        return mi.squeeze(x[:,:,0], axis=-1)
class Seq2Seq_EmbedAttnNet_OLD(nn.Layer):
    """Legacy three-leg model: embedding + linear blocks, transformer encoder
    with trigonometric positional encoding, then linear blocks and a 2-class
    softmax; returns the class-0 score per position.
    NOTE(review): unlike the sibling classes, this one does not call
    nn.initializer.set_global_initializer — confirm whether intentional."""
    def __init__(self, args):
        super(Seq2Seq_EmbedAttnNet_OLD, self).__init__()
        self.act_fn = args.act_fn
        self.norm_fn = args.norm_fn
        self.norm_axis = int(args.norm_axis)
        self.dropout = float(args.dropout)
        self.data_format = 'NLC'
        self.feature_dim = args.feature_dim
        self.embed_dim = int(args.embed_dim)
        # Normalize linear_dim to a list of per-layer widths.
        self.linear_dim = [int(_i) for _i in args.linear_dim] \
                if hasattr(args.linear_dim, '__len__') else [int(args.linear_dim)]
        self.linear_num = int(args.linear_num)
        self.linear_resnet = args.linear_resnet
        self.attn_num = int(args.attn_num)
        self.attn_nhead = int(args.attn_nhead)
        self.attn_act = args.attn_act
        # self.attn_dim = int(args.attn_dim)
        self.attn_dropout = args.attn_dropout # can be None
        self.attn_ffdim = int(args.attn_ffdim)
        self.attn_ffdropout = args.attn_ffdropout
        in_features = args.feature_dim
        # Token embedding; feature_dim acts as the vocabulary size here.
        self.embed = nn.Embedding(
            in_features,
            self.embed_dim,
            padding_idx = 0,
            sparse = True)
        in_features = self.embed_dim
        # Leg 1: per-position linear blocks (optional residual in forward).
        self.leg1_linear = [] # additional layers if needed
        for i in range(self.linear_num):
            self.leg1_linear.append(nn.Sequential(*MyLinearBlock(
                [in_features] + self.linear_dim,
                dropout = self.dropout,
                norm_fn = self.norm_fn,
                norm_axis = self.norm_axis,
                act_fn = self.act_fn,
                data_format = self.data_format
                )))
            in_features = self.linear_dim[-1]
            self.add_sublayer(f'leg1_linear{i}', self.leg1_linear[i])
            # setattr(self, f'blk1layer{i}', self.blk1_linear[i])
        # Leg 2: transformer encoder sized to the current feature width.
        attn_layer = nn.TransformerEncoderLayer(
            d_model = in_features,
            nhead = self.attn_nhead,
            dim_feedforward = self.attn_ffdim, # feed_forward dimension
            dropout = self.dropout, # between layers (default: 0.1)
            activation = self.attn_act, # (default: relu)
            attn_dropout = self.attn_dropout, # for self-attention target
            act_dropout = self.attn_ffdropout, # after activation in feedforward
            normalize_before = False, # between layers
            weight_attr = None,
            bias_attr = None,
            )
        self.leg2_attn = nn.TransformerEncoder(attn_layer,
                num_layers=args.attn_num) # norm = args.norm_fn,
        # Leg 3: two width-preserving linear blocks.
        self.leg3_linear = []
        for i in range(2):
            self.leg3_linear.append(nn.Sequential(*MyLinearBlock(
                [in_features, in_features], #, feature_dim // 2],
                dropout = self.dropout,
                act_fn = self.act_fn,
                norm_fn = self.norm_fn,
                data_format = self.data_format,
                )))
            in_features = in_features
            self.add_sublayer(f'leg3_linear{i}', self.leg3_linear[i])
        # 2-class softmax head per position.
        self.out = nn.Sequential(
            nn.Linear(in_features=in_features, out_features=2),
            nn.Softmax(axis=-1),
            )
    def forward(self, x, seqs_len=None):
        """x: integer token ids [N, L] -> class-0 probability per position [N, L].
        NOTE(review): seqs_len is accepted but never used (no attention mask)."""
        if not isinstance(x, mi.Tensor) or x.dtype.name != 'INT64':
            x = mi.to_tensor(x, dtype='int64')
        x = self.embed(x)
        for linear in self.leg1_linear:
            if self.linear_resnet:
                x = x + linear(x)
            else:
                x = linear(x)
        # Add sinusoidal positional encoding (in-place add on x).
        x += position_encoding_trig(x.shape)
        x = self.leg2_attn(x)
        # x, (_, _) = self.lstm(x)
        for linear in self.leg3_linear:
            x = linear(x)
        x = self.out(x)
        return mi.squeeze(x[:,:,0], axis=-1)
    # @property
    def summary(self, input_size=None):
        """Summarize with a default (2, 512) token-id input."""
        input_size = (2, 512) if input_size is None else tuple(input_size)
        return mi.summary(self, input_size)
class Seq2Seq_Conv1DLSTMNet(nn.Layer):
    """Sequence-to-sequence model: embedding -> linear stem -> Conv1D tower -> LSTM -> linear head."""
    def __init__(self, args):
        super().__init__()
        nn.initializer.set_global_initializer(
            nn.initializer.KaimingNormal(), nn.initializer.Constant(0.0))
        self.data_format = args.input_fmt.upper()
        self.feature_dim = int(args.feature_dim)
        # Chain the towers; each consumes the previous tower's output width.
        self.embed = MyEmbeddingLayer(args, in_features=self.feature_dim)
        self.linear_in = MyLinearTower(args, in_features=self.embed.out_features)
        self.conv1d = MyConv1DTower(args, in_features=self.linear_in.out_features)
        self.lstm = MyLSTMTower(args, in_features=self.conv1d.out_features)
        dims = args.output_dim
        self.output_dim = [int(_i) for _i in dims] if hasattr(dims, '__len__') else [int(dims)]
        self.output_num = int(args.output_num)
        # Output head reuses MyLinearTower with a synthesized config.
        self.out = MyLinearTower(
            misc.Struct(
                data_format = args.input_fmt,
                feature_dim = args.feature_dim, # overwritten by in_features below
                linear_dim = args.output_dim,
                linear_num = args.output_num,
                linear_resnet = False,
                act_fn = args.act_fn,
                norm_fn = args.norm_fn,
                norm_axis = args.norm_axis,
                dropout = args.dropout,
            ),
            in_features = self.lstm.out_features,
            is_return = True,
        )
    def forward(self, x, seqs_len=None):
        """Run embed, linear stem, conv1d, LSTM, and output head in sequence."""
        h = self.embed(x)
        h = self.linear_in(h, seqs_len=seqs_len)
        h = self.conv1d(h, seqs_len=seqs_len)
        h = self.lstm(h, seqs_len=seqs_len)
        return self.out(h)
    def summary(self, input_size=None):
        """Summarize; token inputs default to (4, 512), dense to (4, 512, feature_dim)."""
        if input_size is None:
            input_size = (4, 512) if hasattr(self.embed, 'embed') else (4, 512, self.feature_dim)
        return mi.summary(self, input_size)
class Seq2Seq_AttnLSTMNet(nn.Layer):
    """Sequence-to-sequence model: embedding -> linear stem -> attention -> LSTM -> linear head."""
    def __init__(self, args):
        super().__init__()
        nn.initializer.set_global_initializer(
            nn.initializer.KaimingNormal(), nn.initializer.Constant(0.0))
        self.data_format = args.input_fmt.upper()
        self.feature_dim = int(args.feature_dim)
        # Chain the towers; each consumes the previous tower's output width.
        self.embed = MyEmbeddingLayer(args, in_features=self.feature_dim)
        self.linear_in = MyLinearTower(args, in_features=self.embed.out_features)
        self.attn = MyAttnTower(args, in_features=self.linear_in.out_features)
        self.lstm = MyLSTMTower(args, in_features=self.attn.out_features)
        dims = args.output_dim
        self.output_dim = [int(_i) for _i in dims] if hasattr(dims, '__len__') else [int(dims)]
        self.output_num = int(args.output_num)
        # Output head reuses MyLinearTower with a synthesized config.
        self.out = MyLinearTower(
            misc.Struct(
                data_format = args.input_fmt,
                feature_dim = args.feature_dim, # overwritten by in_features below
                linear_dim = args.output_dim,
                linear_num = args.output_num,
                linear_resnet = False,
                act_fn = args.act_fn,
                norm_fn = args.norm_fn,
                norm_axis = args.norm_axis,
                dropout = args.dropout,
            ),
            in_features = self.lstm.out_features,
            is_return = True,
        )
    # @property
    def summary(self, input_size=None):
        """Summarize; token inputs default to (2, 512), dense to (2, 512, feature_dim)."""
        if input_size is None:
            input_size = (2, 512) if hasattr(self.embed, 'embed') else (2, 512, self.feature_dim)
        return mi.summary(self, input_size)
    def forward(self, x, seqs_len=None):
        """Run embed, linear stem, attention, LSTM, and output head in sequence."""
        h = self.embed(x)
        h = self.linear_in(h, seqs_len=seqs_len)
        h = self.attn(h, seqs_len=seqs_len)
        h = self.lstm(h, seqs_len=seqs_len)
        return self.out(h)
class Seq2Seq_AttnLSTMConv1DNet(nn.Layer):
    """Sequence-to-sequence model: embedding -> linear stem -> attention -> LSTM -> Conv1D -> linear head."""
    def __init__(self, args):
        super().__init__()
        nn.initializer.set_global_initializer(
            nn.initializer.KaimingNormal(), nn.initializer.Constant(0.0))
        self.data_format = args.input_fmt.upper()
        self.feature_dim = int(args.feature_dim)
        # Chain the towers; each consumes the previous tower's output width.
        self.embed = MyEmbeddingLayer(args, in_features=self.feature_dim)
        self.fc_in = MyLinearTower(args, in_features=self.embed.out_features)
        self.attn = MyAttnTower(args, in_features=self.fc_in.out_features)
        self.lstm = MyLSTMTower(args, in_features=self.attn.out_features)
        self.conv1d = MyConv1DTower(args, in_features=self.lstm.out_features)
        dims = args.output_dim
        self.output_dim = [int(_i) for _i in dims] if hasattr(dims, '__len__') else [int(dims)]
        self.output_num = int(args.output_num)
        # Output head reuses MyLinearTower with a synthesized config.
        self.out = MyLinearTower(
            misc.Struct(
                data_format = args.input_fmt,
                feature_dim = args.feature_dim, # overwritten by in_features below
                linear_dim = args.output_dim,
                linear_num = args.output_num,
                linear_resnet = False,
                act_fn = args.act_fn,
                norm_fn = args.norm_fn,
                norm_axis = args.norm_axis,
                dropout = args.dropout,
            ),
            in_features = self.conv1d.out_features,
            is_return = True,
        )
    # @property
    def summary(self, input_size=None):
        """Summarize; token inputs default to (2, 512), dense to (2, 512, feature_dim)."""
        if input_size is None:
            input_size = (2, 512) if hasattr(self.embed, 'embed') else (2, 512, self.feature_dim)
        return mi.summary(self, input_size)
    def forward(self, x, seqs_len=None):
        """Run embed, linear stem, attention, LSTM, conv1d, and output head."""
        h = self.embed(x)
        h = self.fc_in(h, seqs_len=seqs_len)
        h = self.attn(h, seqs_len=seqs_len)
        h = self.lstm(h, seqs_len=seqs_len)
        h = self.conv1d(h)
        return self.out(h)
| 36.029574
| 126
| 0.588142
| 6,805
| 52,387
| 4.281117
| 0.057899
| 0.071053
| 0.037003
| 0.039817
| 0.817046
| 0.793808
| 0.771084
| 0.751416
| 0.726873
| 0.699585
| 0
| 0.01638
| 0.304293
| 52,387
| 1,453
| 127
| 36.05437
| 0.782967
| 0.081299
| 0
| 0.753259
| 0
| 0
| 0.035009
| 0.001753
| 0
| 0
| 0
| 0
| 0.004655
| 1
| 0.057728
| false
| 0.00838
| 0.005587
| 0
| 0.122905
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2ca893a5d0309583c23ee3eeae8d83fa4c923d49
| 9,101
|
py
|
Python
|
data/typing/numpy.ma.testutils.py
|
vfdev-5/python-record-api
|
006faf0bba9cd4cb55fbacc13d2bbda365f5bf0b
|
[
"MIT"
] | 67
|
2020-08-17T11:53:26.000Z
|
2021-11-08T20:16:06.000Z
|
data/typing/numpy.ma.testutils.py
|
vfdev-5/python-record-api
|
006faf0bba9cd4cb55fbacc13d2bbda365f5bf0b
|
[
"MIT"
] | 36
|
2020-08-17T11:09:51.000Z
|
2021-12-15T18:09:47.000Z
|
data/typing/numpy.ma.testutils.py
|
pydata-apis/python-api-record
|
684cffbbb6dc6e81f9de4e02619c8b0ebc557b2b
|
[
"MIT"
] | 7
|
2020-08-19T05:06:47.000Z
|
2020-11-04T05:10:38.000Z
|
from typing import *
@overload
def assert_almost_equal(actual: numpy.ma.core.MaskedArray, desired: List[List[float]]):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: List[Union[int, float]]):
"""
usage.scipy: 4
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: List[List[Union[int, float]]]):
"""
usage.scipy: 2
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: List[List[int]]):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(actual: numpy.float64, desired: float):
"""
usage.scipy: 17
"""
...
@overload
def assert_almost_equal(actual: numpy.float64, desired: numpy.float64):
"""
usage.scipy: 4
"""
...
@overload
def assert_almost_equal(actual: numpy.ma.core.MaskedArray, desired: float):
"""
usage.scipy: 3
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: List[float]):
"""
usage.scipy: 11
"""
...
@overload
def assert_almost_equal(actual: numpy.float64, desired: float, decimal: int):
"""
usage.scipy: 29
"""
...
@overload
def assert_almost_equal(actual: numpy.ma.core.MaskedArray, desired: List[float]):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(actual: numpy.float64, desired: int, decimal: int):
"""
usage.scipy: 3
"""
...
@overload
def assert_almost_equal(
actual: numpy.ma.core.MaskedArray, desired: float, decimal: int
):
"""
usage.scipy: 5
"""
...
@overload
def assert_almost_equal(
actual: numpy.ma.core.MaskedConstant, desired: numpy.ma.core.MaskedConstant
):
"""
usage.scipy: 2
"""
...
@overload
def assert_almost_equal(
actual: numpy.ma.core.MaskedArray, desired: numpy.ma.core.MaskedArray, decimal: int
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(
actual: numpy.ma.core.MaskedArray, desired: List[List[float]], decimal: int
):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: Tuple[float, float]):
"""
usage.scipy: 3
"""
...
@overload
def assert_almost_equal(actual: numpy.float64, desired: int):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(actual: numpy.float64, desired: numpy.float64, decimal: int):
"""
usage.scipy: 27
"""
...
@overload
def assert_almost_equal(
actual: numpy.float64, desired: numpy.ma.core.MaskedArray, decimal: int
):
"""
usage.scipy: 7
"""
...
@overload
def assert_almost_equal(
actual: float, desired: numpy.ma.core.MaskedArray, decimal: int
):
"""
usage.scipy: 2
"""
...
@overload
def assert_almost_equal(actual: float, desired: numpy.float64, decimal: int):
"""
usage.scipy: 2
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: numpy.ndarray, decimal: int):
"""
usage.scipy: 2
"""
...
@overload
def assert_almost_equal(actual: float, desired: float, decimal: int):
"""
usage.scipy: 1
"""
...
@overload
def assert_almost_equal(actual: numpy.ndarray, desired: numpy.ma.core.MaskedArray):
"""
usage.scipy: 1
"""
...
# Aggregate (non-@overload) signature covering all recorded call shapes of
# numpy.ma.testutils.assert_almost_equal; the docstring is a usage counter
# emitted by the API-recording tool, not documentation.
def assert_almost_equal(
    actual: Union[
        numpy.ndarray,
        numpy.float64,
        numpy.ma.core.MaskedArray,
        numpy.ma.core.MaskedConstant,
        float,
    ],
    desired: object,
    decimal: int = ...,
):
    """
    usage.scipy: 131
    """
    ...
@overload
def assert_array_almost_equal(
x: numpy.ma.core.MaskedArray, y: numpy.ma.core.MaskedArray
):
"""
usage.matplotlib: 2
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(x: List[float], y: numpy.ndarray, decimal: int):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(x: numpy.ndarray, y: numpy.ndarray):
"""
usage.scipy: 5
"""
...
@overload
def assert_array_almost_equal(
x: scipy.stats.mstats_basic.NormaltestResult, y: scipy.stats.stats.NormaltestResult
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: scipy.stats.mstats_basic.SkewtestResult, y: scipy.stats.stats.SkewtestResult
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: scipy.stats.mstats_basic.KurtosistestResult,
y: scipy.stats.stats.KurtosistestResult,
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_almost_equal(
x: numpy.ma.core.MaskedArray, y: numpy.ndarray, decimal: int
):
"""
usage.scipy: 2
"""
...
@overload
def assert_array_almost_equal(x: numpy.ma.core.MaskedArray, y: numpy.ndarray):
"""
usage.matplotlib: 1
"""
...
# Aggregate (non-@overload) signature covering all recorded call shapes of
# numpy.ma.testutils.assert_array_almost_equal; the docstring is a usage
# counter emitted by the API-recording tool, not documentation.
def assert_array_almost_equal(
    x: object,
    y: Union[
        numpy.ndarray,
        numpy.ma.core.MaskedArray,
        scipy.stats.stats.KurtosistestResult,
        scipy.stats.stats.NormaltestResult,
        scipy.stats.stats.SkewtestResult,
    ],
    decimal: int = ...,
):
    """
    usage.matplotlib: 3
    usage.scipy: 13
    """
    ...
@overload
def assert_array_equal(x: numpy.ma.core.MaskedConstant, y: Tuple[float, float]):
"""
usage.scipy: 5
"""
...
@overload
def assert_array_equal(x: numpy.ma.core.MaskedArray, y: Tuple[float, float]):
"""
usage.scipy: 3
"""
...
@overload
def assert_array_equal(x: numpy.float64, y: numpy.ndarray):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.ma.core.MaskedArray, y: numpy.ndarray):
"""
usage.scipy: 3
"""
...
@overload
def assert_array_equal(x: numpy.float64, y: Tuple[float, float]):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(
x: scipy.stats.mstats_basic.Ttest_indResult, y: Tuple[float, float]
):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.float64, y: float):
"""
usage.scipy: 1
"""
...
@overload
def assert_array_equal(x: numpy.ma.core.MaskedArray, y: list):
"""
usage.scipy: 1
"""
...
# Aggregate (non-@overload) signature covering all recorded call shapes of
# numpy.ma.testutils.assert_array_equal; the docstring is a usage counter
# emitted by the API-recording tool, not documentation.
def assert_array_equal(
    x: Union[
        numpy.ma.core.MaskedArray,
        scipy.stats.mstats_basic.Ttest_indResult,
        numpy.ma.core.MaskedConstant,
        numpy.float64,
    ],
    y: Union[list, Tuple[float, float], numpy.ndarray, float],
):
    """
    usage.scipy: 16
    """
    ...
@overload
def assert_equal(actual: numpy.dtype, desired: None):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(actual: numpy.float64, desired: numpy.float64):
"""
usage.scipy: 3
"""
...
@overload
def assert_equal(actual: numpy.ma.core.MaskedArray, desired: numpy.ma.core.MaskedArray):
"""
usage.scipy: 7
"""
...
@overload
def assert_equal(actual: numpy.ma.core.MaskedArray, desired: List[int]):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(actual: numpy.ma.core.MaskedArray, desired: List[Union[None, int]]):
"""
usage.scipy: 4
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: List[int]):
"""
usage.scipy: 7
"""
...
@overload
def assert_equal(actual: numpy.int64, desired: int):
"""
usage.scipy: 5
"""
...
@overload
def assert_equal(actual: numpy.ndarray, desired: numpy.ndarray):
"""
usage.scipy: 15
"""
...
@overload
def assert_equal(actual: scipy.stats.mstats_basic.ModeResult, desired: Tuple[int, int]):
"""
usage.scipy: 7
"""
...
@overload
def assert_equal(
actual: scipy.stats.mstats_basic.ModeResult,
desired: Tuple[List[List[int]], List[List[int]]],
):
"""
usage.scipy: 6
"""
...
@overload
def assert_equal(actual: numpy.ma.core.MaskedArray, desired: float):
"""
usage.scipy: 3
"""
...
@overload
def assert_equal(
actual: Tuple[numpy.float64, numpy.float64], desired: Tuple[float, float]
):
"""
usage.scipy: 5
"""
...
@overload
def assert_equal(
actual: Tuple[numpy.float64, numpy.float64], desired: Tuple[float, int]
):
"""
usage.scipy: 4
"""
...
@overload
def assert_equal(
actual: Tuple[numpy.ma.core.MaskedArray, numpy.ma.core.MaskedArray],
desired: Tuple[numpy.ma.core.MaskedArray, numpy.ma.core.MaskedArray],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(actual: numpy.float64, desired: numpy.ma.core.MaskedArray):
"""
usage.scipy: 2
"""
...
@overload
def assert_equal(
actual: scipy.stats.stats.RepeatedResults,
desired: Tuple[numpy.ndarray, numpy.ndarray],
):
"""
usage.scipy: 1
"""
...
@overload
def assert_equal(
actual: scipy.stats.stats.KstestResult, desired: scipy.stats.stats.KstestResult
):
"""
usage.scipy: 1
"""
...
def assert_equal(actual: object, desired: object):
"""
usage.scipy: 75
"""
...
| 16.368705
| 88
| 0.604439
| 1,040
| 9,101
| 5.172115
| 0.057692
| 0.102064
| 0.180145
| 0.122699
| 0.850158
| 0.821342
| 0.776538
| 0.749582
| 0.706637
| 0.579662
| 0
| 0.017093
| 0.235029
| 9,101
| 555
| 89
| 16.398198
| 0.75553
| 0.106472
| 0
| 0.701613
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.245968
| 1
| 0.245968
| false
| 0
| 0.004032
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
2caaf7ed9a8846be97ec85e33289df28f62af08d
| 290
|
py
|
Python
|
jsonrpcclient/utils.py
|
explodinglabs/jsonrpcclient
|
2b54a5327a0ed0b423d96ddfb50d92fb6af52f0c
|
[
"MIT"
] | 13
|
2021-08-13T20:31:53.000Z
|
2022-03-03T17:59:11.000Z
|
jsonrpcclient/utils.py
|
explodinglabs/jsonrpcclient
|
2b54a5327a0ed0b423d96ddfb50d92fb6af52f0c
|
[
"MIT"
] | 22
|
2021-08-19T11:33:01.000Z
|
2021-09-30T11:35:51.000Z
|
jsonrpcclient/utils.py
|
explodinglabs/jsonrpcclient
|
2b54a5327a0ed0b423d96ddfb50d92fb6af52f0c
|
[
"MIT"
] | 3
|
2021-09-01T02:52:34.000Z
|
2022-02-22T06:11:06.000Z
|
from functools import reduce
from typing import Any, Callable
def compose(*fs: Callable[..., Any]) -> Callable[..., Any]:
def compose2(f: Callable[..., Any], g: Callable[..., Any]) -> Callable[..., Any]:
return lambda *a, **kw: f(g(*a, **kw))
return reduce(compose2, fs)
| 29
| 85
| 0.6
| 38
| 290
| 4.578947
| 0.447368
| 0.316092
| 0.218391
| 0.252874
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008475
| 0.186207
| 290
| 9
| 86
| 32.222222
| 0.728814
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.166667
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
393efcfe27a551db3143b0debc6fd6ae9faea39e
| 1,170
|
py
|
Python
|
test/test_utils.py
|
tor4z/AD_OO
|
d6ea5009803d2b9b5353762f4580be911aa04bb5
|
[
"MIT"
] | null | null | null |
test/test_utils.py
|
tor4z/AD_OO
|
d6ea5009803d2b9b5353762f4580be911aa04bb5
|
[
"MIT"
] | null | null | null |
test/test_utils.py
|
tor4z/AD_OO
|
d6ea5009803d2b9b5353762f4580be911aa04bb5
|
[
"MIT"
] | null | null | null |
import ad
def test_flatten_iterable():
iterable = [1, 2, 3]
flatten_iterable = ad.utils.flatten_iterable(iterable)
assert len(flatten_iterable) == 3
iterable = 1
flatten_iterable = ad.utils.flatten_iterable(iterable)
assert len(flatten_iterable) == 1
iterable = ([1, 2, 3])
flatten_iterable = ad.utils.flatten_iterable(iterable)
assert len(flatten_iterable) == 3
iterable = ([1, 2, 3], [5, 6, 7])
flatten_iterable = ad.utils.flatten_iterable(iterable)
assert len(flatten_iterable) == 6
iterable = (1)
flatten_iterable = ad.utils.flatten_iterable(iterable)
assert len(flatten_iterable) == 1
iterable = (1,)
flatten_iterable = ad.utils.flatten_iterable(iterable)
assert len(flatten_iterable) == 1
iterable = (([1, 2, 3], ([5, 6, 7])))
flatten_iterable = ad.utils.flatten_iterable(iterable)
assert len(flatten_iterable) == 6
iterable = (([1, 2, 3], ([5, 6, 7])))
flatten_iterable = ad.utils.flatten_iterable(iterable)
for item in [1, 2, 3, 5, 6, 7]:
assert item in flatten_iterable
for item in flatten_iterable:
assert item in [1, 2, 3, 5, 6, 7]
| 28.536585
| 58
| 0.655556
| 160
| 1,170
| 4.625
| 0.1125
| 0.527027
| 0.27973
| 0.237838
| 0.878378
| 0.878378
| 0.878378
| 0.878378
| 0.845946
| 0.845946
| 0
| 0.050109
| 0.215385
| 1,170
| 40
| 59
| 29.25
| 0.755991
| 0
| 0
| 0.586207
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.310345
| 1
| 0.034483
| false
| 0
| 0.034483
| 0
| 0.068966
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
1a47738a33ede570a7842015276b1d1db4558c02
| 13,137
|
py
|
Python
|
mkdocs/tests/babel_cmd_tests.py
|
szj2ys/mkdocs
|
4b5f5d38d05a318ba77b078645b407395e396852
|
[
"BSD-2-Clause"
] | 13,746
|
2015-03-27T15:39:07.000Z
|
2022-03-31T14:01:53.000Z
|
mkdocs/tests/babel_cmd_tests.py
|
szj2ys/mkdocs
|
4b5f5d38d05a318ba77b078645b407395e396852
|
[
"BSD-2-Clause"
] | 2,012
|
2015-03-27T21:11:30.000Z
|
2022-03-31T19:45:12.000Z
|
mkdocs/tests/babel_cmd_tests.py
|
szj2ys/mkdocs
|
4b5f5d38d05a318ba77b078645b407395e396852
|
[
"BSD-2-Clause"
] | 2,556
|
2015-03-28T19:58:11.000Z
|
2022-03-30T14:23:36.000Z
|
import unittest
from distutils.dist import Distribution
from distutils.errors import DistutilsOptionError
from os import path
from mkdocs.commands import babel
BASE_DIR = path.normpath(path.join(path.abspath(path.dirname(__file__)), '../../'))
class ThemeMixinTests(unittest.TestCase):
def test_dict_entry_point(self):
inst = babel.ThemeMixin()
inst.distribution = Distribution()
inst.distribution.entry_points = {
'mkdocs.themes': [
'mkdocs = mkdocs.themes.mkdocs'
]
}
inst.theme = 'mkdocs'
self.assertEqual(inst.get_theme_dir(), path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs'))
def test_ini_entry_point(self):
inst = babel.ThemeMixin()
inst.distribution = Distribution()
inst.distribution.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
inst.theme = 'mkdocs'
self.assertEqual(inst.get_theme_dir(), path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs'))
def test_one_entry_point_as_default(self):
inst = babel.ThemeMixin()
inst.distribution = Distribution()
inst.distribution.entry_points = {
'mkdocs.themes': [
'mkdocs = mkdocs.themes.mkdocs'
]
}
inst.theme = None
self.assertEqual(inst.get_theme_dir(), path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs'))
def test_multiple_entry_points(self):
inst = babel.ThemeMixin()
inst.distribution = Distribution()
inst.distribution.entry_points = {
'mkdocs.themes': [
'mkdocs = mkdocs.themes.mkdocs',
'readthedocs = mkdocs.themes.readthedocs',
]
}
inst.theme = 'readthedocs'
self.assertEqual(inst.get_theme_dir(), path.join(BASE_DIR, 'mkdocs', 'themes', 'readthedocs'))
def test_multiple_entry_points_no_default(self):
inst = babel.ThemeMixin()
inst.distribution = Distribution()
inst.distribution.entry_points = {
'mkdocs.themes': [
'mkdocs = mkdocs.themes.mkdocs',
'readthedocs = mkdocs.themes.readthedocs',
]
}
inst.theme = None
self.assertRaises(DistutilsOptionError, inst.get_theme_dir)
def test_no_entry_points(self):
inst = babel.ThemeMixin()
inst.distribution = Distribution()
inst.distribution.entry_points = {}
inst.theme = 'mkdocs'
self.assertRaises(DistutilsOptionError, inst.get_theme_dir)
def test_undefined_entry_point(self):
inst = babel.ThemeMixin()
inst.distribution = Distribution()
inst.distribution.entry_points = {
'mkdocs.themes': [
'mkdocs = mkdocs.themes.mkdocs'
]
}
inst.theme = 'undefined'
self.assertRaises(DistutilsOptionError, inst.get_theme_dir)
class CommandTests(unittest.TestCase):
def test_compile_catalog(self):
dist = Distribution()
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.compile_catalog(dist)
cmd.initialize_options()
cmd.theme = 'mkdocs'
cmd.finalize_options()
self.assertEqual(cmd.directory, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/locales'))
def test_compile_catalog_default_theme(self):
dist = Distribution()
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.compile_catalog(dist)
cmd.initialize_options()
self.assertIsNone(cmd.theme)
cmd.finalize_options()
self.assertEqual(cmd.theme, 'mkdocs')
self.assertEqual(cmd.directory, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/locales'))
def test_compile_catalog_ignore_theme(self):
dist = Distribution()
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.compile_catalog(dist)
cmd.initialize_options()
cmd.theme = 'mkdocs'
cmd.directory = 'foo/bar'
cmd.finalize_options()
self.assertEqual(cmd.directory, 'foo/bar')
def test_extract_messages(self):
dist = Distribution(dict(name='foo', version='1.2'))
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.extract_messages(dist)
cmd.initialize_options()
cmd.theme = 'mkdocs'
cmd.finalize_options()
self.assertEqual(cmd.input_paths, [path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs')])
self.assertEqual(cmd.output_file, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/messages.pot'))
self.assertEqual(cmd.mapping_file, babel.DEFAULT_MAPPING_FILE)
self.assertEqual(cmd.project, 'foo')
self.assertEqual(cmd.version, '1.2')
def test_extract_messages_default_theme(self):
dist = Distribution(dict(name='foo', version='1.2'))
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.extract_messages(dist)
cmd.initialize_options()
self.assertIsNone(cmd.theme)
cmd.finalize_options()
self.assertEqual(cmd.theme, 'mkdocs')
self.assertEqual(cmd.input_paths, [path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs')])
self.assertEqual(cmd.output_file, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/messages.pot'))
def test_extract_messages_ingore_theme(self):
dist = Distribution(dict(name='foo', version='1.2'))
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.extract_messages(dist)
cmd.initialize_options()
cmd.theme = 'mkdocs'
cmd.input_paths = 'mkdocs/tests'
cmd.output_file = 'foo/bar/messages.pot'
cmd.finalize_options()
self.assertEqual(cmd.input_paths, ['mkdocs/tests'])
self.assertEqual(cmd.output_file, 'foo/bar/messages.pot')
def test_extract_messages_ingore_theme_for_input(self):
dist = Distribution(dict(name='foo', version='1.2'))
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.extract_messages(dist)
cmd.initialize_options()
cmd.theme = 'mkdocs'
cmd.input_paths = 'mkdocs/tests'
cmd.finalize_options()
self.assertEqual(cmd.input_paths, ['mkdocs/tests'])
self.assertEqual(cmd.output_file, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/messages.pot'))
def test_extract_messages_ingore_theme_for_output(self):
dist = Distribution(dict(name='foo', version='1.2'))
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.extract_messages(dist)
cmd.initialize_options()
cmd.theme = 'mkdocs'
cmd.output_file = 'foo/bar/messages.pot'
cmd.finalize_options()
self.assertEqual(cmd.input_paths, [path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs')])
self.assertEqual(cmd.output_file, 'foo/bar/messages.pot')
def test_init_catalog(self):
dist = Distribution()
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.init_catalog(dist)
cmd.initialize_options()
cmd.theme = 'mkdocs'
cmd.locale = 'en'
cmd.finalize_options()
self.assertEqual(cmd.input_file, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/messages.pot'))
self.assertEqual(cmd.output_dir, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/locales'))
def test_init_catalog_default_theme(self):
dist = Distribution()
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.init_catalog(dist)
cmd.initialize_options()
cmd.locale = 'en'
self.assertIsNone(cmd.theme)
cmd.finalize_options()
self.assertEqual(cmd.theme, 'mkdocs')
self.assertEqual(cmd.input_file, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/messages.pot'))
self.assertEqual(cmd.output_dir, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/locales'))
def test_init_catalog_ignore_theme(self):
dist = Distribution()
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.init_catalog(dist)
cmd.initialize_options()
cmd.theme = 'mkdocs'
cmd.locale = 'en'
cmd.input_file = 'mkdocs/themes/mkdocs/messages.pot'
cmd.output_dir = 'foo/bar'
cmd.finalize_options()
self.assertEqual(cmd.input_file, 'mkdocs/themes/mkdocs/messages.pot')
self.assertEqual(cmd.output_dir, 'foo/bar')
def test_init_catalog_ignore_theme_for_input(self):
dist = Distribution()
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.init_catalog(dist)
cmd.initialize_options()
cmd.theme = 'mkdocs'
cmd.locale = 'en'
cmd.input_file = 'mkdocs/themes/mkdocs/messages.pot'
cmd.finalize_options()
self.assertEqual(cmd.input_file, 'mkdocs/themes/mkdocs/messages.pot')
self.assertEqual(cmd.output_dir, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/locales'))
def test_init_catalog_ignore_theme_for_output(self):
dist = Distribution()
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.init_catalog(dist)
cmd.initialize_options()
cmd.theme = 'mkdocs'
cmd.locale = 'en'
cmd.output_dir = 'foo/bar'
cmd.finalize_options()
self.assertEqual(cmd.input_file, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/messages.pot'))
self.assertEqual(cmd.output_dir, 'foo/bar')
def test_update_catalog(self):
dist = Distribution()
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.update_catalog(dist)
cmd.initialize_options()
cmd.theme = 'mkdocs'
cmd.finalize_options()
self.assertEqual(cmd.input_file, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/messages.pot'))
self.assertEqual(cmd.output_dir, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/locales'))
def test_update_catalog_default_theme(self):
dist = Distribution()
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.update_catalog(dist)
cmd.initialize_options()
cmd.locale = 'en'
self.assertIsNone(cmd.theme)
cmd.finalize_options()
self.assertEqual(cmd.theme, 'mkdocs')
self.assertEqual(cmd.input_file, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/messages.pot'))
self.assertEqual(cmd.output_dir, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/locales'))
def test_update_catalog_ignore_theme(self):
dist = Distribution()
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.update_catalog(dist)
cmd.initialize_options()
cmd.theme = 'mkdocs'
cmd.locale = 'en'
cmd.input_file = 'mkdocs/themes/readthedocs/messages.pot'
cmd.output_dir = 'foo/bar'
cmd.finalize_options()
self.assertEqual(cmd.input_file, 'mkdocs/themes/readthedocs/messages.pot')
self.assertEqual(cmd.output_dir, 'foo/bar')
def test_update_catalog_ignore_theme_for_input(self):
dist = Distribution()
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.update_catalog(dist)
cmd.initialize_options()
cmd.theme = 'mkdocs'
cmd.locale = 'en'
cmd.input_file = 'mkdocs/themes/mkdocs/messages.pot'
cmd.finalize_options()
self.assertEqual(cmd.input_file, 'mkdocs/themes/mkdocs/messages.pot')
self.assertEqual(cmd.output_dir, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/locales'))
def test_update_catalog_ignore_theme_for_output(self):
dist = Distribution()
dist.entry_points = '''
[mkdocs.themes]
mkdocs = mkdocs.themes.mkdocs
'''
cmd = babel.update_catalog(dist)
cmd.initialize_options()
cmd.theme = 'mkdocs'
cmd.locale = 'en'
cmd.output_dir = 'foo/bar'
cmd.finalize_options()
self.assertEqual(cmd.input_file, path.join(BASE_DIR, 'mkdocs', 'themes', 'mkdocs/messages.pot'))
self.assertEqual(cmd.output_dir, 'foo/bar')
| 37.641834
| 105
| 0.612469
| 1,425
| 13,137
| 5.458246
| 0.054737
| 0.126511
| 0.178195
| 0.070969
| 0.932759
| 0.927488
| 0.926074
| 0.917845
| 0.909617
| 0.890589
| 0
| 0.001236
| 0.260942
| 13,137
| 348
| 106
| 37.75
| 0.799876
| 0
| 0
| 0.826498
| 0
| 0
| 0.234833
| 0.024663
| 0
| 0
| 0
| 0
| 0.160883
| 1
| 0.078864
| false
| 0
| 0.015773
| 0
| 0.100946
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1a90b84c8cea56699a6b7ed92304c8ddf5d32adb
| 218
|
py
|
Python
|
pavo_cristatus/repositories/sqlite_repository/__init__.py
|
MATTHEWFRAZER/pavo_cristatus
|
a4b96c0eb6c454fbe38d2092e29f63457a4ee955
|
[
"MIT"
] | null | null | null |
pavo_cristatus/repositories/sqlite_repository/__init__.py
|
MATTHEWFRAZER/pavo_cristatus
|
a4b96c0eb6c454fbe38d2092e29f63457a4ee955
|
[
"MIT"
] | null | null | null |
pavo_cristatus/repositories/sqlite_repository/__init__.py
|
MATTHEWFRAZER/pavo_cristatus
|
a4b96c0eb6c454fbe38d2092e29f63457a4ee955
|
[
"MIT"
] | null | null | null |
from pavo_cristatus.repositories.sqlite_repository.sqlite_repository import SQLiteRepository
from pavo_cristatus.repositories.sqlite_repository.sqlite_repository import SQLiteRepository
__all__ = ["SQLiteRepository"]
| 43.6
| 92
| 0.894495
| 22
| 218
| 8.409091
| 0.409091
| 0.345946
| 0.183784
| 0.313514
| 0.897297
| 0.897297
| 0.897297
| 0.897297
| 0.897297
| 0.897297
| 0
| 0
| 0.055046
| 218
| 4
| 93
| 54.5
| 0.898058
| 0
| 0
| 0.666667
| 0
| 0
| 0.073395
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 13
|
46e4995b6c3fc2d97a3657592c42acdc10aa3455
| 157
|
py
|
Python
|
pkgs/bottleneck-1.0.0-np110py27_0/lib/python2.7/site-packages/bottleneck/slow/__init__.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
pkgs/bottleneck-1.0.0-np110py27_0/lib/python2.7/site-packages/bottleneck/slow/__init__.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
pkgs/bottleneck-1.0.0-np110py27_0/lib/python2.7/site-packages/bottleneck/slow/__init__.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
from bottleneck.slow.reduce import *
from bottleneck.slow.nonreduce import *
from bottleneck.slow.nonreduce_axis import *
from bottleneck.slow.move import *
| 31.4
| 44
| 0.821656
| 21
| 157
| 6.095238
| 0.380952
| 0.4375
| 0.5625
| 0.5625
| 0.515625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101911
| 157
| 4
| 45
| 39.25
| 0.907801
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
46e88f070af4f5d55c3c5c1bbba1bf7c9413b3f8
| 76
|
py
|
Python
|
pyspawn/qm_hamiltonian/__init__.py
|
blevine37/pySpawn17
|
4fa65cfc3b4d399bcb586506782d00f86b453139
|
[
"MIT"
] | 18
|
2018-03-30T16:11:13.000Z
|
2021-08-22T18:57:12.000Z
|
pyspawn/qm_hamiltonian/__init__.py
|
Quantum-Dynamics-Hub/pySpawn17
|
0b28d968c703266e7af3c8461b494fca0a2da3f8
|
[
"MIT"
] | 3
|
2018-03-30T17:26:51.000Z
|
2021-08-17T08:49:24.000Z
|
pyspawn/qm_hamiltonian/__init__.py
|
Quantum-Dynamics-Hub/pySpawn17
|
0b28d968c703266e7af3c8461b494fca0a2da3f8
|
[
"MIT"
] | 6
|
2018-11-21T15:30:38.000Z
|
2021-07-05T05:37:15.000Z
|
import pyspawn.qm_hamiltonian.adiabatic
import pyspawn.qm_hamiltonian.dgas
| 19
| 39
| 0.881579
| 10
| 76
| 6.5
| 0.6
| 0.4
| 0.461538
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065789
| 76
| 3
| 40
| 25.333333
| 0.915493
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
2003d37b28ea7c61fdd7dcf6ce9a37442bb72d73
| 1,039
|
py
|
Python
|
pylib/pylib/libscriptdefs.py
|
mendozagabe1618/named-entities-count-hadoop-mr
|
a340cece1c6631eb9dc8c5ec82924fbcf02c0d6e
|
[
"Apache-2.0"
] | null | null | null |
pylib/pylib/libscriptdefs.py
|
mendozagabe1618/named-entities-count-hadoop-mr
|
a340cece1c6631eb9dc8c5ec82924fbcf02c0d6e
|
[
"Apache-2.0"
] | null | null | null |
pylib/pylib/libscriptdefs.py
|
mendozagabe1618/named-entities-count-hadoop-mr
|
a340cece1c6631eb9dc8c5ec82924fbcf02c0d6e
|
[
"Apache-2.0"
] | null | null | null |
import subprocess
from subprocess import Popen, PIPE, STDOUT
# from shutil import copyfile
# Executes a command cmd, displays stdout output in realtime
# from http://blog.kagesenshi.org/2008/02/teeing-python-subprocesspopen-output.html
def execute_with_args(cmd, args):
p = subprocess.Popen([cmd, args], shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout = []
while True:
line = p.stdout.readline()
stdout.append(line)
print line,
if line == '' and p.poll() != None:
break
return ''.join(stdout)
# Executes a command cmd, displays stdout output in realtime
# from http://blog.kagesenshi.org/2008/02/teeing-python-subprocesspopen-output.html
def execute(cmd):
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout = []
while True:
line = p.stdout.readline()
stdout.append(line)
print line,
if line == '' and p.poll() != None:
break
return ''.join(stdout)
| 30.558824
| 99
| 0.663138
| 132
| 1,039
| 5.204545
| 0.348485
| 0.026201
| 0.046579
| 0.055313
| 0.803493
| 0.803493
| 0.803493
| 0.803493
| 0.803493
| 0.803493
| 0
| 0.014778
| 0.218479
| 1,039
| 33
| 100
| 31.484848
| 0.831281
| 0.297401
| 0
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.090909
| null | null | 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
200e14a7f88c9a036c45bbb73a74511145ae676b
| 114
|
py
|
Python
|
e2e_tests/sanity.py
|
Gei0r/cquery
|
6ff8273e8b8624016f9363f444acfee30c4bbf64
|
[
"MIT"
] | 1,652
|
2018-01-24T03:19:58.000Z
|
2020-07-28T19:04:00.000Z
|
e2e_tests/sanity.py
|
Gei0r/cquery
|
6ff8273e8b8624016f9363f444acfee30c4bbf64
|
[
"MIT"
] | 490
|
2018-01-24T00:55:38.000Z
|
2020-07-03T19:44:16.000Z
|
e2e_tests/sanity.py
|
Gei0r/cquery
|
6ff8273e8b8624016f9363f444acfee30c4bbf64
|
[
"MIT"
] | 154
|
2018-01-31T05:57:33.000Z
|
2020-07-05T00:02:46.000Z
|
import e2e_test_runner
def Test_Sanity():
return (e2e_test_runner.TestBuilder()
.SetupCommonInit())
| 16.285714
| 39
| 0.719298
| 13
| 114
| 5.923077
| 0.692308
| 0.181818
| 0.337662
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021505
| 0.184211
| 114
| 6
| 40
| 19
| 0.806452
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
201458bdf2cbbbceed79d6c31875c02085709275
| 168
|
py
|
Python
|
dataloader/__init__.py
|
lbaitemple/suiron_raspberrypi
|
6bbc4633ac19e8d1221ac23aeb9892b3dbf44f0f
|
[
"MIT"
] | null | null | null |
dataloader/__init__.py
|
lbaitemple/suiron_raspberrypi
|
6bbc4633ac19e8d1221ac23aeb9892b3dbf44f0f
|
[
"MIT"
] | null | null | null |
dataloader/__init__.py
|
lbaitemple/suiron_raspberrypi
|
6bbc4633ac19e8d1221ac23aeb9892b3dbf44f0f
|
[
"MIT"
] | 2
|
2020-02-25T00:43:00.000Z
|
2020-08-19T15:05:34.000Z
|
from suiron.SuironIO import SuironIO
from suiron.SuironVZ import *
from suiron.img_serializer import *
from suiron.file_finder import *
from suiron.datasets import *
| 28
| 36
| 0.815476
| 23
| 168
| 5.869565
| 0.434783
| 0.37037
| 0.355556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130952
| 168
| 5
| 37
| 33.6
| 0.924658
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
6458224dc6449d1b02a6e257f9bb2d654f388629
| 38,936
|
py
|
Python
|
exarl/network/data_structures.py
|
schr476/EXARL
|
7f4596bd8b3d7960aaf52bc677ceac4f37029834
|
[
"BSD-3-Clause"
] | 2
|
2022-02-03T20:33:17.000Z
|
2022-02-10T22:43:32.000Z
|
exarl/network/data_structures.py
|
schr476/EXARL
|
7f4596bd8b3d7960aaf52bc677ceac4f37029834
|
[
"BSD-3-Clause"
] | 40
|
2022-01-25T18:03:12.000Z
|
2022-03-31T21:43:32.000Z
|
exarl/network/data_structures.py
|
schr476/EXARL
|
7f4596bd8b3d7960aaf52bc677ceac4f37029834
|
[
"BSD-3-Clause"
] | 1
|
2022-02-10T14:33:30.000Z
|
2022-02-10T14:33:30.000Z
|
# © (or copyright) 2020. Triad National Security, LLC. All rights reserved.
#
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and
# to permit others to do so.
import sys
import os
import numpy as np
from exarl.base import ExaData
from exarl.base.comm_base import ExaComm
from exarl.network.simple_comm import ExaSimple
from exarl.network.typing import TypeUtils
from exarl.utils.introspect import introspectTrace
MPI = ExaSimple.MPI
class ExaMPIConstant:
"""
This class is built to maintain a single value using mpi rdma.
Each rank will have a window the size of the type.
Attributes
----------
comm : mpi4py.MPI.Comm
raw MPI communicator
npType : type
numpy type of constant
mpiType : type
mpi type of the constant
rank : int
rank that hosts the data
win : MPI.win
MPI window for constant
sum : int
internal constant numpy 1 for incrementing
buff : numpy array
internal numpy buffer used for RMA ops
name : string
name of the constant for debugging
"""
def __init__(self, comm, rank_mask, the_type, name=None):
"""
Parameters
----------
comm : mpi4py.MPI.Comm
Communicator for all ranks involved
rank_mask : int, optional
host of the window
the_type : int, optional
python type (int, float)
name : string, optional
name of constant for debbuging
"""
self.comm = comm.raw()
self.npType = TypeUtils.np_type_converter(the_type, promote=True)
self.mpiType = TypeUtils.mpi_type_converter(the_type, promote=True)
self.size = self.mpiType.Get_size()
data = None
if rank_mask:
self.rank = self.comm.rank
data = np.zeros(1, dtype=self.npType)
self.win = MPI.Win.Create(data, self.size, comm=self.comm)
self.sum = np.ones(1, dtype=self.npType)
self.buff = np.zeros(1, dtype=self.npType)
self.name = name
@introspectTrace(name=True)
def put(self, value, rank):
"""
Places a constant on a given rank
Parameters
----------
value: int
Number to send to all ranks
rank: integer
Host rank of the actual number
"""
data = np.array(value, dtype=self.npType)
self.win.Lock(rank)
self.win.Accumulate(data, target_rank=rank, op=MPI.REPLACE)
self.win.Unlock(rank)
@introspectTrace(name=True)
def get(self, rank):
"""
Gets a constant from a given rank
Parameters
----------
rank : integer
Host rank of the actual number
Returns
-------
int
Constant from host rank
"""
self.win.Lock(rank)
self.win.Get_accumulate(self.sum, self.buff, target_rank=rank, op=MPI.NO_OP)
self.win.Unlock(rank)
return self.buff[0]
@introspectTrace(name=True)
def inc(self, rank):
"""
Increments a constant on host rank
Parameters
----------
rank : integer
Host rank of the actual number
Returns
-------
int
Constant from host rank before the increment
"""
self.win.Lock(rank)
self.win.Get_accumulate(self.sum, self.buff, target_rank=rank, op=MPI.SUM)
self.win.Unlock(rank)
return self.buff[0]
@introspectTrace(name=True)
def min(self, value, rank):
"""
Takes the min of new value and constant on host rank
Parameters
----------
value : integer
To value to compare constant with
rank : integer
Host rank of the actual number
Returns
-------
int
Minimum of the new value and constant
"""
data = np.array(value, dtype=self.npType)
self.win.Lock(rank)
self.win.Get_accumulate(data, self.buff, target_rank=rank, op=MPI.MIN)
self.win.Unlock(rank)
return min(self.buff[0], value)
class ExaMPIBuffUnchecked(ExaData):
"""
This class is creates an RMA buffer of a fixed size on each rank.
The buffer is used to send and recieve data across all participating ranks.
This buffer does not check to see if it is overwriting data or if there is
valid data from a get. This class always succeds a pop.
Attributes
----------
comm : mpi4py.MPI.Comm
raw MPI communicator
win : MPI.win
MPI window for buffer
buff : bytearray
internal buffer used for RMA ops
**Intializer**
Parameters
----------
comm : MPI Comm
Communicator for all ranks involved
data : list
Example data used to create buffer
rank_mask : int, optional
host of the window
length : int, optional
Not used
max_model_lag : int, optional
Not used
failPush : bool, optional
Not used
name : string, optional
name of constant for debbuging
"""
def __init__(self, comm, data, rank_mask=None, length=1, max_model_lag=None, failPush=False, name=None):
"""
Parameters
----------
comm : MPI Comm
Communicator for all ranks involved
data : list
Example data used to create buffer
rank_mask : int, optional
host of the window
length : int, optional
Not used
max_model_lag : int, optional
Not used
failPush : bool, optional
Not used
name : string, optional
name of constant for debbuging
"""
self.comm = comm
dataBytes = MPI.pickle.dumps(data)
size = len(dataBytes)
super().__init__(bytes, size, comm_size=comm.size, max_model_lag=None, name=name)
totalSize = 0
if rank_mask:
totalSize = size
self.win = MPI.Win.Allocate(totalSize, disp_unit=1, comm=self.comm.raw())
self.buff = bytearray(self.dataSize)
# If we are given data to start lets put it in our buffer
# Since everyone should call this everyone should get a start value!
if rank_mask:
self.push(data)
def __del__(self):
self.win.Free()
@introspectTrace(name=True)
def pop(self, rank, count=1):
"""
Returns value of buffer at given rank. There is no check
done to see if the data is valid.
Parameters
----------
rank : integer
Host rank where to take data from
count : integer
How many pops to perform
Returns
-------
list
Buffer at given rank
"""
self.win.Lock(rank)
self.win.Get_accumulate(
self.buff,
self.buff,
rank,
target=[0, self.dataSize],
op=MPI.NO_OP,
)
self.win.Unlock(rank)
return MPI.pickle.loads(self.buff)
@introspectTrace(name=True)
def push(self, data, rank=None):
"""
Pushes data to a rank's buffer.
Parameters
----------
data : list
Data to be pushed to rank's buffer
rank : integer
Host rank of the actual number
Returns
-------
list
Returns a capacity of 1 and loss of 1
"""
if rank is None:
rank = self.comm.rank
toSend = MPI.pickle.dumps(data)
assert len(toSend) <= self.dataSize
self.win.Lock(rank)
# Accumulate is element-wise atomic vs put which is not
self.win.Accumulate(
toSend, rank, target=[0, len(toSend)], op=MPI.REPLACE
)
self.win.Unlock(rank)
return 1, 1
class ExaMPIBuffChecked(ExaData):
    """
    This class creates an RMA buffer of a fixed size on each rank.
    The buffer is used to send and receive data across all participating ranks.
    On pop, checks to see if the data is first valid.  Validity is tracked by
    pickling a ``(data, flag)`` pair into the window: flag 1 means unread data,
    flag 0 means the slot has already been consumed.
    Attributes
    ----------
    comm : mpi4py.MPI.Comm
        raw MPI communicator
    win : MPI.win
        MPI window for buffer
    buff : bytearray
        internal buffer used for RMA ops
    Methods
    -------
    pop(value, rank, count)
        Returns value stored in buffer at rank
    push(self, data, rank)
        Pushes data to buffer at rank
    """
    def __init__(self, comm, data, rank_mask=None, length=1, max_model_lag=None, failPush=False, name=None):
        """
        Parameters
        ----------
        comm : mpi4py.MPI.Comm
            Communicator for all ranks involved
        data : list
            Example data used to create buffer; sized from the pickled
            (data, flag) pair
        rank_mask : int, optional
            host of the window
        length : int
            Not used
        max_model_lag : int
            Not used
        failPush : bool
            Not used
        name : string, optional
            name of constant for debugging
        """
        self.comm = comm
        # Initial window payload: the example data marked invalid (flag 0).
        # pop() also swaps this back in to mark the slot consumed.
        self.dataBytes = bytearray(MPI.pickle.dumps((data, np.int64(0))))
        size = len(self.dataBytes)
        super().__init__(bytes, size, comm_size=comm.size, max_model_lag=None, name=name)
        # Collective allocation; only the host rank backs real storage.
        totalSize = 0
        if rank_mask:
            totalSize = size
        self.win = MPI.Win.Allocate(totalSize, disp_unit=1, comm=self.comm.raw())
        self.buff = bytearray(self.dataSize)
        if rank_mask:
            # Seed the host window with the invalid marker so the first pop
            # before any push returns None instead of garbage.
            self.win.Lock(self.comm.rank)
            self.win.Accumulate(
                self.dataBytes, self.comm.rank, target=[0, self.dataSize], op=MPI.REPLACE
            )
            self.win.Unlock(self.comm.rank)
    def __del__(self):
        # Collective window teardown.
        self.win.Free()
    @introspectTrace(name=True)
    def pop(self, rank, count=1):
        """
        Returns value of buffer at given rank.
        Checks to see if the data is valid first.
        Parameters
        ----------
        rank : integer
            Host rank where to take data from
        count : integer, optional
            How many pops to perform (currently ignored; one swap is done)
        Returns
        -------
        list
            Buffer at given rank if valid, otherwise None
        """
        self.win.Lock(rank)
        # Atomic swap: fetch the current contents into self.buff while
        # replacing the slot with the invalid marker, so a second pop
        # cannot observe the same payload as valid.
        self.win.Get_accumulate(
            self.dataBytes,
            self.buff,
            rank,
            target=[0, self.dataSize],
            op=MPI.REPLACE
        )
        self.win.Unlock(rank)
        data, valid = MPI.pickle.loads(self.buff)
        if valid:
            return data
        return None
    @introspectTrace(name=True)
    def push(self, data, rank=None):
        """
        Pushes data to a rank's buffer.
        Parameters
        ----------
        data : list
            Data to be pushed to rank's buffer
        rank : integer, optional
            Host rank of the buffer; defaults to this rank
        Returns
        -------
        list
            Returns a capacity of 1 and loss of 1 if unread data was overwritten
        """
        if rank is None:
            rank = self.comm.rank
        # Mark the payload valid (flag 1) before writing.
        toSend = bytearray(MPI.pickle.dumps((data, np.int64(1))))
        assert len(toSend) <= self.dataSize
        self.win.Lock(rank)
        # Atomic swap: write the new payload while fetching the old one so
        # we can report whether still-valid (unread) data was lost.
        self.win.Get_accumulate(
            toSend,
            self.buff,
            rank,
            target=[0, self.dataSize],
            op=MPI.REPLACE
        )
        self.win.Unlock(rank)
        _, valid = MPI.pickle.loads(self.buff)
        return 1, valid == 1
class ExaMPIDistributedQueue(ExaData):
    """
    This class creates a circular buffer in an RMA window across nodes in a communicator.
    Only one RMA window is made of length entries, thus there is only one host.
    Attributes
    ----------
    comm : mpi4py.MPI.Comm
        raw MPI communicator
    length : int
        capacity of the queue
    failPush : bool
        flag setting if push can overwrite data
    buff : bytearray
        internal buffer for queue used for RMA ops
    plus : np.array
        numpy constant for adding
    minus : np.array
        numpy constant for subtracting
    headBuff : np.array
        buffer containing head counter (host rank only, None elsewhere)
    tailBuff : np.array
        buffer containing tail counter (host rank only, None elsewhere)
    head : MPI.win
        RMA window based on headBuff
    tail : MPI.win
        RMA window based on tailBuff
    win : MPI.win
        MPI window based on buffer for queue
    """
    def __init__(self, comm, data=None, rank_mask=None, length=32, max_model_lag=None, failPush=False, name=None):
        """
        Parameters
        ----------
        comm : mpi4py.MPI.Comm
            Communicator for all ranks involved
        data : list, optional
            Example data used to size one queue entry
        rank_mask : int, optional
            host of the window
        length : int, optional
            capacity of queue
        max_model_lag : int, optional
            Will not consider data past given model valid
        failPush : bool, optional
            Fail to overwrite data if queue is full
        name : string, optional
            name of constant for debugging
        """
        self.comm = comm
        self.length = length
        # This lets us fail a push when at full capacity
        # Otherwise will overwrite the oldest data
        self.failPush = failPush
        # One queue entry is as large as the pickled example payload.
        dataBytes = MPI.pickle.dumps(data)
        size = len(dataBytes)
        super().__init__(bytes, size, comm_size=comm.size, max_model_lag=max_model_lag, name=name)
        self.buff = bytearray(self.dataSize)
        # Constants used for atomic counter arithmetic in RMA ops.
        self.plus = np.array([1], dtype=np.int64)
        self.minus = np.array([-1], dtype=np.int64)
        totalSize = 0
        self.headBuff = None
        self.tailBuff = None
        disp = MPI.DOUBLE.Get_size()
        if rank_mask:
            # Only the host rank backs the windows with real memory; the
            # collective window constructors below still involve every rank.
            totalSize = size * self.length
            self.headBuff = np.zeros(1, dtype=np.int64)
            self.tailBuff = np.zeros(1, dtype=np.int64)
        # Setup head window
        self.head = MPI.Win.Create(self.headBuff, disp, comm=self.comm.raw())
        # Setup tail window
        self.tail = MPI.Win.Create(self.tailBuff, disp, comm=self.comm.raw())
        # Setup data window
        self.win = MPI.Win.Allocate(totalSize, disp_unit=size, comm=self.comm.raw())
    def __del__(self):
        # Win.Free is collective.  Free all three windows created in
        # __init__; previously only the data window was freed, leaking the
        # head and tail windows.
        self.win.Free()
        self.head.Free()
        self.tail.Free()
    @introspectTrace(name=True)
    def pop(self, rank, count=1):
        """
        Returns data from head of queue if there is data.
        Parameters
        ----------
        rank : integer
            Host rank where to take data from
        count : integer, optional
            How many pops to perform (currently ignored; one pop is done)
        Returns
        -------
        list
            Data from queue if there is any, otherwise None.
        """
        ret = True
        head = np.zeros(1, dtype=np.int64)
        tail = np.zeros(1, dtype=np.int64)
        rank = int(rank)
        self.head.Lock(rank)
        self.tail.Lock(rank)
        # Read the head pointer; optimistically claim a slot by bumping tail.
        reqHead = self.head.Rget_accumulate(self.minus, head, rank, op=MPI.NO_OP)
        reqTail = self.tail.Rget_accumulate(self.plus, tail, rank, op=MPI.SUM)
        reqHead.wait()
        reqTail.wait()
        # Is there data between tail and head?
        if head[0] > tail[0]:
            index = tail[0] % self.length
            self.win.Lock(rank)
            # Atomic read of the claimed entry (origin ignored under NO_OP).
            self.win.Get_accumulate(
                self.buff,
                self.buff,
                rank,
                target=[index, self.dataSize],
                op=MPI.NO_OP,
            )
            self.win.Unlock(rank)
        else:
            # Queue was empty: roll back the optimistic tail increment.
            self.tail.Accumulate(self.minus, rank, op=MPI.SUM)
            ret = False
        self.tail.Unlock(rank)
        self.head.Unlock(rank)
        if ret:
            return MPI.pickle.loads(self.buff)
        return None
    @introspectTrace(name=True)
    def push(self, data, rank=None):
        """
        Pushes data to a rank's queue.
        Parameters
        ----------
        data : list
            Data to be pushed to rank's queue
        rank : integer, optional
            Rank to push data to; defaults to this rank
        Returns
        -------
        list
            Returns capacity of queue and loss (1 if data was overwritten
            or the push failed, else 0)
        """
        if rank is None:
            rank = self.comm.rank
        toSend = MPI.pickle.dumps(data)
        assert len(toSend) <= self.dataSize
        head = np.zeros(1, dtype=np.int64)
        tail = np.zeros(1, dtype=np.int64)
        self.head.Lock(rank)
        self.tail.Lock(rank)
        # Optimistically claim the next slot by bumping head; read tail.
        reqHead = self.head.Rget_accumulate(self.plus, head, rank, op=MPI.SUM)
        reqTail = self.tail.Rget_accumulate(self.plus, tail, rank, op=MPI.NO_OP)
        reqHead.wait()
        reqTail.wait()
        write = True
        headIndex = head[0] % self.length
        tailIndex = tail[0] % self.length
        # Full when the claimed slot would land on the oldest unread entry.
        if head[0] > tail[0] and headIndex == tailIndex:
            if self.failPush:
                # Refuse the push: roll back the head increment.
                write = False
                self.head.Accumulate(
                    self.minus, rank, op=MPI.SUM
                )
            else:
                # Overwrite the oldest entry: advance tail past it.
                self.tail.Accumulate(
                    self.plus, rank, op=MPI.SUM
                )
            lost = 1
            capacity = self.length
        else:
            lost = 0
            capacity = head[0] - tail[0]
        if write:
            self.win.Lock(rank)
            # Accumulate with REPLACE is element-wise atomic, unlike Put.
            self.win.Accumulate(
                toSend, rank, target=[headIndex, len(toSend)], op=MPI.REPLACE
            )
            self.win.Unlock(rank)
        self.tail.Unlock(rank)
        self.head.Unlock(rank)
        return capacity, lost
class ExaMPIDistributedStack(ExaData):
    """
    This class creates a stack in an RMA window across nodes in a communicator.
    Only one window is made, thus there is only one host.
    Attributes
    ----------
    comm : mpi4py.MPI.Comm
        raw MPI communicator
    length : int
        capacity of the stack
    failPush : bool
        flag setting if push can overwrite data
    buff : bytearray
        internal buffer for stack used for RMA ops
    plus : np.array
        numpy constant for adding
    minus : np.array
        numpy constant for subtracting
    headBuff : np.array
        buffer containing head counter (host rank only, None elsewhere)
    tailBuff : np.array
        buffer containing tail counter (host rank only, None elsewhere)
    head : MPI.win
        window based on headBuff
    tail : MPI.win
        window based on tailBuff
    win : MPI.win
        MPI window based on buffer for stack
    """
    def __init__(self, comm, data, rank_mask=None, length=32, max_model_lag=None, failPush=False, name=None):
        """
        Parameters
        ----------
        comm : mpi4py.MPI.Comm
            Communicator for all ranks involved
        data : list
            Example data used to size one stack entry
        rank_mask : int, optional
            host of the window
        length : int, optional
            capacity of stack
        max_model_lag : int
            Will not consider data past given model valid
        failPush : bool, optional
            Fail to overwrite data if stack is full
        name : string, optional
            name of constant for debugging
        """
        self.comm = comm
        self.length = length
        # This lets us fail a push when at full capacity
        # Otherwise will overwrite the oldest data
        self.failPush = failPush
        dataBytes = MPI.pickle.dumps(data)
        size = len(dataBytes)
        super().__init__(bytes, size, comm_size=comm.size, max_model_lag=max_model_lag, name=name)
        self.buff = bytearray(self.dataSize)
        # Constants used for atomic counter arithmetic in RMA ops.
        self.plus = np.array([1], dtype=np.int64)
        self.minus = np.array([-1], dtype=np.int64)
        totalSize = 0
        self.headBuff = None
        self.tailBuff = None
        disp = MPI.DOUBLE.Get_size()
        if rank_mask:
            # Only the host rank backs the windows with real memory; the
            # collective window constructors below still involve every rank.
            totalSize = size * self.length
            self.headBuff = np.zeros(1, dtype=np.int64)
            self.tailBuff = np.zeros(1, dtype=np.int64)
        # Setup head window
        self.head = MPI.Win.Create(self.headBuff, disp, comm=self.comm.raw())
        # Setup tail window
        self.tail = MPI.Win.Create(self.tailBuff, disp, comm=self.comm.raw())
        # Setup data window
        self.win = MPI.Win.Allocate(totalSize, disp_unit=size, comm=self.comm.raw())
    def __del__(self):
        # Win.Free is collective.  Free all three windows created in
        # __init__; previously only the data window was freed, leaking the
        # head and tail windows.
        self.win.Free()
        self.head.Free()
        self.tail.Free()
    @introspectTrace(name=True)
    def pop(self, rank, count=1):
        """
        Returns data from head of stack if there is data.
        Parameters
        ----------
        rank : integer
            Host rank where to take data from
        count : integer, optional
            How many pops to perform (currently ignored; one pop is done)
        Returns
        -------
        list
            Data from stack if there is any, otherwise None.
        """
        ret = False
        head = np.zeros(1, dtype=np.int64)
        tail = np.zeros(1, dtype=np.int64)
        rank = int(rank)
        self.head.Lock(rank)
        self.tail.Lock(rank)
        # Optimistically claim the top entry by decrementing head; read tail.
        reqHead = self.head.Rget_accumulate(self.minus, head, rank, op=MPI.SUM)
        reqTail = self.tail.Rget_accumulate(self.minus, tail, rank, op=MPI.NO_OP)
        reqHead.wait()
        reqTail.wait()
        if head[0] > tail[0]:
            ret = True
            # head[0] is the pre-decrement value; top entry is head-1.
            index = (head[0] - 1) % self.length
            self.win.Lock(rank)
            # Atomic read of the claimed entry (origin ignored under NO_OP).
            self.win.Get_accumulate(
                self.buff,
                self.buff,
                rank,
                target=[index, self.dataSize],
                op=MPI.NO_OP,
            )
            self.win.Unlock(rank)
        else:
            # Stack was empty: roll back the optimistic head decrement.
            self.head.Accumulate(
                self.plus, rank, op=MPI.SUM
            )
        self.tail.Unlock(rank)
        self.head.Unlock(rank)
        if ret:
            return MPI.pickle.loads(self.buff)
        return None
    @introspectTrace(name=True)
    def push(self, data, rank=None):
        """
        Pushes data to a rank's stack.
        Parameters
        ----------
        data : list
            Data to be pushed to rank's stack
        rank : integer, optional
            Host to push data to; defaults to this rank
        Returns
        -------
        list
            Returns capacity of stack and loss (1 if data was overwritten
            or the push failed, else 0)
        """
        if rank is None:
            rank = self.comm.rank
        toSend = MPI.pickle.dumps(data)
        # Pickle size varies with payload; require only that it fits
        # (consistent with ExaMPIDistributedQueue.push, which used <=
        # where this class previously demanded an exact size match).
        assert len(toSend) <= self.dataSize
        head = np.zeros(1, dtype=np.int64)
        tail = np.zeros(1, dtype=np.int64)
        rank = int(rank)
        self.head.Lock(rank)
        self.tail.Lock(rank)
        # Optimistically claim the next slot by bumping head; read tail.
        reqHead = self.head.Rget_accumulate(self.plus, head, rank, op=MPI.SUM)
        reqTail = self.tail.Rget_accumulate(self.plus, tail, rank, op=MPI.NO_OP)
        reqHead.wait()
        reqTail.wait()
        # This is if we are going to lose data because we exceeded capacity
        write = True
        if tail[0] + self.length == head[0]:
            if self.failPush:
                # Refuse the push: roll back the head increment.
                write = False
                self.head.Accumulate(
                    self.minus, rank, op=MPI.SUM
                )
            else:
                # Overwrite the oldest entry: advance tail past it.
                self.tail.Accumulate(
                    self.plus, rank, op=MPI.SUM
                )
            lost = 1
            capacity = self.length
        else:
            lost = 0
            capacity = head[0] - tail[0] + 1
        if write:
            # Actual write data
            index = head[0] % self.length
            self.win.Lock(rank)
            # Write only the bytes we have (matches the relaxed assert above).
            self.win.Accumulate(
                toSend, rank, target=[index, len(toSend)], op=MPI.REPLACE
            )
            self.win.Unlock(rank)
        self.tail.Unlock(rank)
        self.head.Unlock(rank)
        return capacity, lost
class ExaMPICentralizedStack(ExaData):
    """
    This class creates a stack in RMA windows across nodes in a communicator.
    There is a stack per rank. Each rank acts as a host.
    Attributes
    ----------
    comm : mpi4py.MPI.Comm
        raw MPI communicator
    length : int
        capacity of each stack
    failPush : bool
        flag setting if push can overwrite data
    buff : bytearray
        internal buffer for stack used for RMA ops
    plus : np.array
        numpy constant for adding
    minus : np.array
        numpy constant for subtracting
    head : list of MPI.win
        per-rank windows holding head counters
    tail : list of MPI.win
        per-rank windows holding tail counters
    win : list of MPI.win
        per-rank data windows backing the stacks
    """
    def __init__(self, comm, data, rank_mask=None, length=32, max_model_lag=None, failPush=False, name=None):
        """
        Parameters
        ----------
        comm : mpi4py.MPI.Comm
            Communicator for all ranks involved
        data : list
            Example data used to size one stack entry
        rank_mask : int, optional
            host of the window
        length : int, optional
            capacity of stack
        max_model_lag : int, optional
            Will not consider data past given model valid
        failPush : bool, optional
            Fail to overwrite data if stack is full
        name : string, optional
            name of constant for debugging
        """
        self.comm = comm
        if rank_mask:
            self.rank = self.comm.rank
        self.length = length
        # This lets us fail a push when at full capacity
        # Otherwise will overwrite the oldest data
        self.failPush = failPush
        dataBytes = MPI.pickle.dumps(data)
        size = len(dataBytes)
        super().__init__(bytes, size, comm_size=comm.size, max_model_lag=max_model_lag, name=name)
        self.buff = bytearray(self.dataSize)
        # Constants used for atomic counter arithmetic in RMA ops.
        self.plus = np.array([1], dtype=np.int64)
        self.minus = np.array([-1], dtype=np.int64)
        totalSize = 0
        headSize = 0
        tailSize = 0
        # if comm.rank == rank:
        if rank_mask:
            totalSize = size * self.length
            headSize = MPI.INT64_T.Get_size()
            tailSize = MPI.INT64_T.Get_size()
        # One (head, tail, data) window triple per rank; the constructors
        # are collective, so every rank participates in every triple.
        self.head = []
        self.tail = []
        self.win = []
        for i in range(comm.size):
            # Setup head window
            self.head.append(MPI.Win.Allocate(headSize, comm=self.comm.raw()))
            self.head[i].Lock(self.rank)
            self.head[i].Accumulate(
                np.zeros(1, dtype=np.int64), self.rank, op=MPI.REPLACE
            )
            self.head[i].Unlock(self.rank)
            self.head[i].Fence(self.rank)
            # Setup tail window
            self.tail.append(MPI.Win.Allocate(tailSize, comm=self.comm.raw()))
            self.tail[i].Lock(self.rank)
            self.tail[i].Accumulate(
                np.zeros(1, dtype=np.int64), self.rank, op=MPI.REPLACE
            )
            self.tail[i].Unlock(self.rank)
            self.tail[i].Fence(self.rank)
            # Setup data window
            self.win.append(
                MPI.Win.Allocate(totalSize, disp_unit=size, comm=self.comm.raw())
            )
            self.win[i].Fence(self.rank)
    def __del__(self):
        # Free every per-rank window (collective).  The tail windows were
        # previously leaked: only win[i] and head[i] were freed.
        for i in range(self.comm.size):
            self.win[i].Free()
            self.head[i].Free()
            self.tail[i].Free()
    @introspectTrace(name=True)
    def pop(self, rank, count=1):
        """
        Returns data from head of stack if there is data.
        Parameters
        ----------
        rank : integer
            Host rank where to take data from
        count : integer
            How many pops to perform (currently ignored; one pop is done)
        Returns
        -------
        list
            Data from stack if there is any, otherwise None.
        """
        ret = False
        head = np.zeros(1, dtype=np.int64)
        tail = np.zeros(1, dtype=np.int64)
        rank = int(rank)
        self.head[rank].Lock(self.rank)
        self.tail[rank].Lock(self.rank)
        # Optimistically claim the top entry by decrementing head; read tail.
        reqHead = self.head[rank].Rget_accumulate(self.minus, head, self.rank, op=MPI.SUM)
        reqTail = self.tail[rank].Rget_accumulate(self.minus, tail, self.rank, op=MPI.NO_OP)
        reqHead.wait()
        reqTail.wait()
        if head[0] > tail[0]:
            ret = True
            # head[0] is the pre-decrement value; top entry is head-1.
            index = (head[0] - 1) % self.length
            self.win[rank].Lock(self.rank)
            # Atomic read of the claimed entry (origin ignored under NO_OP).
            self.win[rank].Get_accumulate(
                self.buff,
                self.buff,
                self.rank,
                target=[index, self.dataSize],
                op=MPI.NO_OP,
            )
            self.win[rank].Unlock(self.rank)
        else:
            # Stack was empty: roll back the optimistic head decrement.
            self.head[rank].Accumulate(
                self.plus, self.rank, op=MPI.SUM
            )
        self.tail[rank].Unlock(self.rank)
        self.head[rank].Unlock(self.rank)
        if ret:
            return MPI.pickle.loads(self.buff)
        return None
    @introspectTrace(name=True)
    def push(self, data, rank=None):
        """
        Pushes data to a rank's stack.
        Parameters
        ----------
        data : list
            Data to be pushed to rank's stack
        rank : integer, optional
            Rank to push data to; defaults to this rank
        Returns
        -------
        list
            Returns capacity of stack and loss (1 if data was overwritten
            or the push failed, else 0)
        """
        if rank is None:
            rank = self.comm.rank
        toSend = MPI.pickle.dumps(data)
        # Pickle size varies with payload; require only that it fits
        # (consistent with the queue classes, which use <= where this
        # class previously demanded an exact size match).
        assert len(toSend) <= self.dataSize
        head = np.zeros(1, dtype=np.int64)
        tail = np.zeros(1, dtype=np.int64)
        rank = int(rank)
        self.head[rank].Lock(self.rank)
        self.tail[rank].Lock(self.rank)
        # Optimistically claim the next slot by bumping head; read tail.
        reqHead = self.head[rank].Rget_accumulate(self.plus, head, self.rank, op=MPI.SUM)
        reqTail = self.tail[rank].Rget_accumulate(self.plus, tail, self.rank, op=MPI.NO_OP)
        reqHead.wait()
        reqTail.wait()
        # This is if we are going to lose data because we exceeded capacity
        write = True
        if tail[0] + self.length == head[0]:
            if self.failPush:
                # Refuse the push: roll back the head increment.
                write = False
                self.head[rank].Accumulate(
                    self.minus, self.rank, op=MPI.SUM
                )
            else:
                # Overwrite the oldest entry: advance tail past it.
                self.tail[rank].Accumulate(
                    self.plus, self.rank, op=MPI.SUM
                )
            lost = 1
            capacity = self.length
        else:
            lost = 0
            capacity = head[0] - tail[0] + 1
        if write:
            # Actual write data
            index = head[0] % self.length
            self.win[rank].Lock(self.rank)
            # Write only the bytes we have (matches the relaxed assert above).
            self.win[rank].Accumulate(
                toSend, self.rank, target=[index, len(toSend)], op=MPI.REPLACE
            )
            self.win[rank].Unlock(self.rank)
        self.tail[rank].Unlock(self.rank)
        self.head[rank].Unlock(self.rank)
        return capacity, lost
class ExaMPICentralizedQueue(ExaData):
    """
    This class creates circular buffers in RMA windows across nodes in a communicator.
    There is a queue per rank. Each rank acts as a host.
    Attributes
    ----------
    comm : mpi4py.MPI.Comm
        raw MPI communicator
    length : int
        capacity of each queue
    failPush : bool
        flag setting if push can overwrite data
    buff : bytearray
        internal buffer for queue used for RMA ops
    plus : np.array
        numpy constant for adding
    minus : np.array
        numpy constant for subtracting
    head : list of MPI.win
        per-rank windows holding head counters
    tail : list of MPI.win
        per-rank windows holding tail counters
    win : list of MPI.win
        per-rank data windows backing the queues
    """
    def __init__(self, comm, data, rank_mask=None, length=32, max_model_lag=None, failPush=False, name=None):
        """
        Parameters
        ----------
        comm : mpi4py.MPI.Comm
            Communicator for all ranks involved
        data : list
            Example data used to size one queue entry
        rank_mask : int, optional
            host of the window
        length : int, optional
            capacity of queue
        max_model_lag : int, optional
            Will not consider data past given model valid
        failPush : bool, optional
            Fail to overwrite data if queue is full
        name : string, optional
            name of constant for debugging
        """
        self.comm = comm
        if rank_mask:
            self.rank = self.comm.rank
        self.length = length
        # This lets us fail a push when at full capacity
        # Otherwise will overwrite the oldest data
        self.failPush = failPush
        dataBytes = MPI.pickle.dumps(data)
        size = len(dataBytes)
        super().__init__(bytes, size, comm_size=comm.size, max_model_lag=max_model_lag, name=name)
        self.buff = bytearray(self.dataSize)
        # Constants used for atomic counter arithmetic in RMA ops.
        self.plus = np.array([1], dtype=np.int64)
        self.minus = np.array([-1], dtype=np.int64)
        totalSize = 0
        headSize = 0
        tailSize = 0
        # if comm.rank == rank:
        if rank_mask:
            totalSize = size * self.length
            headSize = MPI.INT64_T.Get_size()
            tailSize = MPI.INT64_T.Get_size()
        # One (head, tail, data) window triple per rank; the constructors
        # are collective, so every rank participates in every triple.
        self.head = []
        self.tail = []
        self.win = []
        for i in range(comm.size):
            # Setup head window
            self.head.append(MPI.Win.Allocate(headSize, comm=self.comm.raw()))
            self.head[i].Lock(self.rank)
            self.head[i].Accumulate(
                np.zeros(1, dtype=np.int64), self.rank, op=MPI.REPLACE
            )
            self.head[i].Unlock(self.rank)
            self.head[i].Fence(self.rank)
            # Setup tail window
            self.tail.append(MPI.Win.Allocate(tailSize, comm=self.comm.raw()))
            self.tail[i].Lock(self.rank)
            self.tail[i].Accumulate(
                np.zeros(1, dtype=np.int64), self.rank, op=MPI.REPLACE
            )
            self.tail[i].Unlock(self.rank)
            self.tail[i].Fence(self.rank)
            # Setup data window
            self.win.append(
                MPI.Win.Allocate(totalSize, disp_unit=size, comm=self.comm.raw())
            )
            self.win[i].Fence(self.rank)
    def __del__(self):
        # Free every per-rank window (collective).  The tail windows were
        # previously leaked: only win[i] and head[i] were freed.
        for i in range(self.comm.size):
            self.win[i].Free()
            self.head[i].Free()
            self.tail[i].Free()
    @introspectTrace(name=True)
    def pop(self, rank, count=1):
        """
        Returns data from head of queue if there is data.
        Parameters
        ----------
        rank : integer
            Host rank where to take data from
        count : integer, optional
            How many pops to perform (currently ignored; one pop is done)
        Returns
        -------
        list
            Data from queue if there is any, otherwise None.
        """
        ret = True
        head = np.zeros(1, dtype=np.int64)
        tail = np.zeros(1, dtype=np.int64)
        rank = int(rank)
        self.head[rank].Lock(self.rank)
        self.tail[rank].Lock(self.rank)
        # Read the head pointer; optimistically claim a slot by bumping tail.
        reqHead = self.head[rank].Rget_accumulate(self.minus, head, self.rank, op=MPI.NO_OP)
        reqTail = self.tail[rank].Rget_accumulate(self.plus, tail, self.rank, op=MPI.SUM)
        reqHead.wait()
        reqTail.wait()
        # Is there data between tail and head?
        if head[0] > tail[0]:
            index = tail[0] % self.length
            self.win[rank].Lock(self.rank)
            # Atomic read of the claimed entry (origin ignored under NO_OP).
            self.win[rank].Get_accumulate(
                self.buff,
                self.buff,
                self.rank,
                target=[index, self.dataSize],
                op=MPI.NO_OP,
            )
            self.win[rank].Unlock(self.rank)
        else:
            # Queue was empty: roll back the optimistic tail increment.
            self.tail[rank].Accumulate(self.minus, self.rank, op=MPI.SUM)
            ret = False
        self.tail[rank].Unlock(self.rank)
        self.head[rank].Unlock(self.rank)
        if ret:
            return MPI.pickle.loads(self.buff)
        return None
    @introspectTrace(name=True)
    def push(self, data, rank=None):
        """
        Pushes data to a rank's queue.
        Parameters
        ----------
        data : list
            Data to be pushed to rank's queue
        rank : integer, optional
            Rank to push data to; defaults to this rank
        Returns
        -------
        list
            Returns capacity of queue and loss (1 if data was overwritten
            or the push failed, else 0)
        """
        if rank is None:
            rank = self.comm.rank
        toSend = MPI.pickle.dumps(data)
        assert len(toSend) <= self.dataSize
        head = np.zeros(1, dtype=np.int64)
        tail = np.zeros(1, dtype=np.int64)
        self.head[rank].Lock(self.rank)
        self.tail[rank].Lock(self.rank)
        # Optimistically claim the next slot by bumping head; read tail.
        reqHead = self.head[rank].Rget_accumulate(self.plus, head, self.rank, op=MPI.SUM)
        reqTail = self.tail[rank].Rget_accumulate(self.plus, tail, self.rank, op=MPI.NO_OP)
        reqHead.wait()
        reqTail.wait()
        write = True
        headIndex = head[0] % self.length
        tailIndex = tail[0] % self.length
        # Full when the claimed slot would land on the oldest unread entry.
        if head[0] > tail[0] and headIndex == tailIndex:
            if self.failPush:
                # Refuse the push: roll back the head increment.
                write = False
                self.head[rank].Accumulate(
                    self.minus, self.rank, op=MPI.SUM
                )
            else:
                # Overwrite the oldest entry: advance tail past it.
                self.tail[rank].Accumulate(
                    self.plus, self.rank, op=MPI.SUM
                )
            lost = 1
            capacity = self.length
        else:
            lost = 0
            capacity = head[0] - tail[0]
        if write:
            self.win[rank].Lock(self.rank)
            # Accumulate with REPLACE is element-wise atomic, unlike Put.
            self.win[rank].Accumulate(
                toSend, self.rank, target=[headIndex, len(toSend)], op=MPI.REPLACE
            )
            self.win[rank].Unlock(self.rank)
        self.tail[rank].Unlock(self.rank)
        self.head[rank].Unlock(self.rank)
        return capacity, lost
| 28.524542
| 114
| 0.553293
| 4,795
| 38,936
| 4.445881
| 0.069447
| 0.026644
| 0.015198
| 0.019514
| 0.887372
| 0.87663
| 0.868984
| 0.847031
| 0.833521
| 0.823107
| 0
| 0.009422
| 0.351243
| 38,936
| 1,364
| 115
| 28.545455
| 0.834481
| 0.339429
| 0
| 0.831579
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010526
| 1
| 0.050877
| false
| 0
| 0.014035
| 0
| 0.112281
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
64980c6a6ab8ace8f03e1f7bffc0083f1ed1dbee
| 218
|
py
|
Python
|
torchkeras/__init__.py
|
lyhue1991/torchkeras
|
9d66849326ef196ebcbce254bd259fa4e34a1114
|
[
"Apache-2.0"
] | 63
|
2020-06-21T13:49:23.000Z
|
2022-03-04T01:18:03.000Z
|
torchkeras/__init__.py
|
laugh12321/torchkeras
|
87fc921c10f0e43a764892d7453ab227abb432f5
|
[
"Apache-2.0"
] | 8
|
2020-10-30T03:33:03.000Z
|
2022-03-30T06:54:09.000Z
|
torchkeras/__init__.py
|
laugh12321/torchkeras
|
87fc921c10f0e43a764892d7453ab227abb432f5
|
[
"Apache-2.0"
] | 15
|
2020-06-22T07:52:59.000Z
|
2022-03-14T02:59:33.000Z
|
from torchkeras.torchkeras import Model
from torchkeras.summary import summary
from torchkeras.torchtools import EarlyStopping
from torchkeras.lightkeras import LightModel
from torchkeras.torchkeras import __version__
| 36.333333
| 47
| 0.885321
| 25
| 218
| 7.56
| 0.4
| 0.37037
| 0.253968
| 0.31746
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091743
| 218
| 5
| 48
| 43.6
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
649afe9e3bb1a79bac92ef44d7639b5c3f4aba54
| 155
|
py
|
Python
|
loldib/getratings/models/NA/na_chogath/__init__.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_chogath/__init__.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_chogath/__init__.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
from .na_chogath_top import *
from .na_chogath_jng import *
from .na_chogath_mid import *
from .na_chogath_bot import *
from .na_chogath_sup import *
| 25.833333
| 30
| 0.774194
| 25
| 155
| 4.4
| 0.36
| 0.272727
| 0.590909
| 0.690909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16129
| 155
| 5
| 31
| 31
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
b3ad3650d5734cdb64142e6fe1b2c514be0e3cc2
| 114
|
py
|
Python
|
src/entity_coreference/__init__.py
|
gbekes/football-data-project
|
0ce1ea9ca421ebbd98e720621c5837ce990ff24e
|
[
"MIT"
] | 1
|
2021-10-09T20:55:53.000Z
|
2021-10-09T20:55:53.000Z
|
src/entity_coreference/__init__.py
|
gbekes/football-data-project
|
0ce1ea9ca421ebbd98e720621c5837ce990ff24e
|
[
"MIT"
] | 3
|
2021-09-17T14:48:35.000Z
|
2021-10-14T20:22:41.000Z
|
src/entity_coreference/__init__.py
|
sscu-budapest/football-data-project
|
70c01e6e0b871af3108af5d8d34848825cad7d8f
|
[
"MIT"
] | null | null | null |
from .evaluate import evaluate_coreference # noqa: F401
from .runner import run_entity_coreference # noqa: F401
| 38
| 56
| 0.807018
| 15
| 114
| 5.933333
| 0.6
| 0.337079
| 0.426966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.061224
| 0.140351
| 114
| 2
| 57
| 57
| 0.846939
| 0.184211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b3be603190315621595aaae76fe8dae582d948be
| 33
|
py
|
Python
|
src/models/__init__.py
|
norikinishida/coreference-resolution
|
daa0f0ddb3caf8fbc364fd5af82f0def80c953a8
|
[
"Apache-2.0"
] | null | null | null |
src/models/__init__.py
|
norikinishida/coreference-resolution
|
daa0f0ddb3caf8fbc364fd5af82f0def80c953a8
|
[
"Apache-2.0"
] | null | null | null |
src/models/__init__.py
|
norikinishida/coreference-resolution
|
daa0f0ddb3caf8fbc364fd5af82f0def80c953a8
|
[
"Apache-2.0"
] | null | null | null |
from .joshi2020 import Joshi2020
| 16.5
| 32
| 0.848485
| 4
| 33
| 7
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.275862
| 0.121212
| 33
| 1
| 33
| 33
| 0.689655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b3fd2bdfd5bbaa89c9b9bbfd7a4788bb4ba623d0
| 211
|
py
|
Python
|
model/loss.py
|
ChunpingQiu/Sen2LCZ_CNN
|
5576567da658f945321280f37ff8d9bf46dd1818
|
[
"MIT"
] | null | null | null |
model/loss.py
|
ChunpingQiu/Sen2LCZ_CNN
|
5576567da658f945321280f37ff8d9bf46dd1818
|
[
"MIT"
] | null | null | null |
model/loss.py
|
ChunpingQiu/Sen2LCZ_CNN
|
5576567da658f945321280f37ff8d9bf46dd1818
|
[
"MIT"
] | 1
|
2021-08-19T03:35:05.000Z
|
2021-08-19T03:35:05.000Z
|
import torch.nn.functional as F
#import torch.nn as nn
import torch
def nll_loss(output, target):
    """Mean negative log-likelihood loss of log-probabilities vs. class targets."""
    loss = F.nll_loss(output, target)
    return loss
def cel_loss(output, target):
    """Mean cross-entropy loss of raw logits vs. class targets."""
    loss = F.cross_entropy(output, target)
    return loss
| 21.1
| 42
| 0.753555
| 35
| 211
| 4.428571
| 0.428571
| 0.309677
| 0.309677
| 0.245161
| 0.296774
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151659
| 211
| 9
| 43
| 23.444444
| 0.865922
| 0.099526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.