| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |

Each record below lists its metadata columns (hexsha through max_forks_repo_forks_event_max_datetime) as one pipe-separated row in schema order, then the content field as a fenced code block, then the remaining columns (avg_line_length through hits) as one pipe-separated row in schema order.
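For a sense of how rows with this schema might be consumed downstream, a minimal loading-and-filtering sketch follows. It assumes the rows have been exported as JSON Lines to a file named `rows.jsonl`; the file name and the threshold values are illustrative assumptions, not part of the dataset.

```python
import pandas as pd

# Assumption: one JSON object per line, keyed by the column names in the
# schema table above (file name is hypothetical).
df = pd.read_json("rows.jsonl", lines=True)

# Keep Python rows with little duplicated 5-gram content and at least some
# function definitions; both thresholds are illustrative only.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.5)
    & (df["qsc_codepython_frac_lines_func_ratio_quality_signal"] > 0)
)
print(df.loc[mask, ["hexsha", "size", "max_stars_repo_name"]])
```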
Columns hexsha through max_forks_repo_forks_event_max_datetime (schema order):

cf015a5812526f66759913e79e36410d68a14dfd | 24 | py | Python | fair_flow/__init__.py | fairanswers/fair_flow | 65e13b10fe140a6ddad30f2168a8836c463de95f | ["MIT"] | null | null | null | fair_flow/__init__.py | fairanswers/fair_flow | 65e13b10fe140a6ddad30f2168a8836c463de95f | ["MIT"] | null | null | null | fair_flow/__init__.py | fairanswers/fair_flow | 65e13b10fe140a6ddad30f2168a8836c463de95f | ["MIT"] | null | null | null

content:

```python
from fair_flow import *
```

Columns avg_line_length through hits (schema order):

12 | 23 | 0.791667 | 4 | 24 | 4.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 24 | 1 | 24 | 24 | 0.9 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6
Columns hexsha through max_forks_repo_forks_event_max_datetime (schema order):

cf01b603d522d102bb9d0b02ec344303697e25a7 | 42 | py | Python | models/ShallowConvNet/__init__.py | High-East/BCI-ToolBox | 57015ae5fd008e8636889b9afba49c64c3a35ff3 | ["MIT"] | 10 | 2022-01-09T02:35:54.000Z | 2022-03-22T06:18:06.000Z | models/ShallowConvNet/__init__.py | High-East/BCI-ToolBox | 57015ae5fd008e8636889b9afba49c64c3a35ff3 | ["MIT"] | null | null | null | models/ShallowConvNet/__init__.py | High-East/BCI-ToolBox | 57015ae5fd008e8636889b9afba49c64c3a35ff3 | ["MIT"] | null | null | null

content:

```python
from .ShallowConvNet import ShallowConvNet
```

Columns avg_line_length through hits (schema order):

42 | 42 | 0.904762 | 4 | 42 | 9.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.071429 | 42 | 1 | 42 | 42 | 0.974359 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6
Columns hexsha through max_forks_repo_forks_event_max_datetime (schema order):

cf89843807d06a4111a9573b6b3bdc7a554edbb0 | 38 | py | Python | hitherecli/hitherecli.py | ao/hitherecli | 4c60db51e67207e4e566c2b4c7eb40ae9a88d85a | ["MIT"] | null | null | null | hitherecli/hitherecli.py | ao/hitherecli | 4c60db51e67207e4e566c2b4c7eb40ae9a88d85a | ["MIT"] | null | null | null | hitherecli/hitherecli.py | ao/hitherecli | 4c60db51e67207e4e566c2b4c7eb40ae9a88d85a | ["MIT"] | null | null | null

content:

```python
def main():
    print("hi there cli")
```

Columns avg_line_length through hits (schema order):

12.666667 | 25 | 0.578947 | 6 | 38 | 3.666667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.236842 | 38 | 2 | 26 | 19 | 0.758621 | 0 | 0 | 0 | 0 | 0 | 0.315789 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | true | 0 | 0 | 0 | 0.5 | 0.5 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 6
Columns hexsha through max_forks_repo_forks_event_max_datetime (schema order):

d849422a179135dcb9a3160dd4ae9914bb8802f5 | 284 | py | Python | sul/remote_integrity/exceptions.py | nashirat/Final-Project-Sistem-Deteksi-Intrusi | 4ceff47c6da9002d7df51926a0dd2935a798f5df | ["MIT"] | null | null | null | sul/remote_integrity/exceptions.py | nashirat/Final-Project-Sistem-Deteksi-Intrusi | 4ceff47c6da9002d7df51926a0dd2935a798f5df | ["MIT"] | null | null | null | sul/remote_integrity/exceptions.py | nashirat/Final-Project-Sistem-Deteksi-Intrusi | 4ceff47c6da9002d7df51926a0dd2935a798f5df | ["MIT"] | 1 | 2021-03-18T00:16:02.000Z | 2021-03-18T00:16:02.000Z

content:

```python
#!/usr/bin/env python


class SulException(Exception):
    """
    """


class ConfigurationException(SulException):
    pass


class ServerException(SulException):
    pass


class DirectoryNotFoundException(SulException):
    pass


class IntegrityException(SulException):
    pass
```

Columns avg_line_length through hits (schema order):

12.347826 | 47 | 0.725352 | 23 | 284 | 8.956522 | 0.521739 | 0.31068 | 0.305825 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.183099 | 284 | 22 | 48 | 12.909091 | 0.887931 | 0.070423 | 0 | 0.444444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.444444 | 0 | 0 | 0.555556 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 6
Columns hexsha through max_forks_repo_forks_event_max_datetime (schema order):

d8664714fb77e9a938d8ee46ba23e8d7cf7e2060 | 122 | py | Python | app/checks.py | agarwali/sugar-busters | ca2958603b14526c1f29514c0e85bd25b5776cde | ["MIT"] | null | null | null | app/checks.py | agarwali/sugar-busters | ca2958603b14526c1f29514c0e85bd25b5776cde | ["MIT"] | null | null | null | app/checks.py | agarwali/sugar-busters | ca2958603b14526c1f29514c0e85bd25b5776cde | ["MIT"] | null | null | null

content:

```python
from everything import *


@app.route("/checks", methods=["GET"])
def checks():
    return render_template("checks.html")
```

Columns avg_line_length through hits (schema order):

20.333333 | 40 | 0.688525 | 15 | 122 | 5.533333 | 0.866667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.131148 | 122 | 6 | 41 | 20.333333 | 0.783019 | 0 | 0 | 0 | 0 | 0 | 0.170732 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | true | 0 | 0.25 | 0.25 | 0.75 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 6
Columns hexsha through max_forks_repo_forks_event_max_datetime (schema order):

2b0b7feffa56842ce75f0d9df58051e4867971e8 | 8,607 | py | Python | cdk/consoleme_ecs_service/nested_stacks/iam_stack.py | avishayil/consoleme-ecs-service | 357f290c23fb74c6752961a4a4582e4cbab54e0a | ["MIT"] | 2 | 2021-06-19T04:28:43.000Z | 2021-06-19T06:12:25.000Z | cdk/consoleme_ecs_service/nested_stacks/iam_stack.py | avishayil/consoleme-ecs-service | 357f290c23fb74c6752961a4a4582e4cbab54e0a | ["MIT"] | 10 | 2021-06-19T08:12:41.000Z | 2021-06-20T22:00:34.000Z | cdk/consoleme_ecs_service/nested_stacks/iam_stack.py | avishayil/consoleme-ecs-service | 357f290c23fb74c6752961a4a4582e4cbab54e0a | ["MIT"] | null | null | null

content:

```python
"""
IAM stack for running ConsoleMe on ECS
"""
from aws_cdk import (
    aws_iam as iam,
    aws_s3 as s3,
    core as cdk
)


class IAMStack(cdk.NestedStack):
    """
    IAM stack for running ConsoleMe on ECS
    """

    def __init__(self, scope: cdk.Construct, id: str,
                 s3_bucket: s3.Bucket, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Define IAM roles and policies
        ecs_task_role = iam.Role(
            self,
            'TaskRole',
            role_name='ConsolemeTaskRole',
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com')
        )
        ecs_task_role.add_to_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    'access-analyzer:*',
                    'cloudtrail:*',
                    'cloudwatch:*',
                    'config:SelectResourceConfig',
                    'config:SelectAggregateResourceConfig',
                    'dynamodb:batchgetitem',
                    'dynamodb:batchwriteitem',
                    'dynamodb:deleteitem',
                    'dynamodb:describe*',
                    'dynamodb:getitem',
                    'dynamodb:getrecords',
                    'dynamodb:getsharditerator',
                    'dynamodb:putitem',
                    'dynamodb:query',
                    'dynamodb:scan',
                    'dynamodb:updateitem',
                    'sns:createplatformapplication',
                    'sns:createplatformendpoint',
                    'sns:deleteendpoint',
                    'sns:deleteplatformapplication',
                    'sns:getendpointattributes',
                    'sns:getplatformapplicationattributes',
                    'sns:listendpointsbyplatformapplication',
                    'sns:publish',
                    'sns:setendpointattributes',
                    'sns:setplatformapplicationattributes',
                    'sts:assumerole'
                ],
                resources=['*']
            )
        )
        ecs_task_role.add_to_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=['ses:sendemail', 'ses:sendrawemail'],
                resources=['*']
            )
        )
        ecs_task_role.add_to_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    'autoscaling:Describe*',
                    'cloudwatch:Get*',
                    'cloudwatch:List*',
                    'config:BatchGet*',
                    'config:List*',
                    'config:Select*',
                    'ec2:DescribeSubnets',
                    'ec2:describevpcendpoints',
                    'ec2:DescribeVpcs',
                    'iam:GetAccountAuthorizationDetails',
                    'iam:ListAccountAliases',
                    'iam:ListAttachedRolePolicies',
                    'ec2:describeregions',
                    's3:GetBucketPolicy',
                    's3:GetBucketTagging',
                    's3:ListAllMyBuckets',
                    's3:ListBucket',
                    's3:PutBucketPolicy',
                    's3:PutBucketTagging',
                    'sns:GetTopicAttributes',
                    'sns:ListTagsForResource',
                    'sns:ListTopics',
                    'sns:SetTopicAttributes',
                    'sns:TagResource',
                    'sns:UnTagResource',
                    'sqs:GetQueueAttributes',
                    'sqs:GetQueueUrl',
                    'sqs:ListQueues',
                    'sqs:ListQueueTags',
                    'sqs:SetQueueAttributes',
                    'sqs:TagQueue',
                    'sqs:UntagQueue'
                ],
                resources=['*']
            )
        )
        ecs_task_role.add_to_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=['s3:GetObject', 's3:ListBucket'],
                resources=[s3_bucket.bucket_arn, s3_bucket.bucket_arn + '/*']
            )
        )

        trust_role = iam.Role(
            self,
            'TrustRole',
            role_name='ConsolemeTrustRole',
            assumed_by=iam.ArnPrincipal(arn=ecs_task_role.role_arn)
        )
        trust_role.add_to_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    'access-analyzer:*',
                    'cloudtrail:*',
                    'cloudwatch:*',
                    'config:SelectResourceConfig',
                    'config:SelectAggregateResourceConfig',
                    'dynamodb:batchgetitem',
                    'dynamodb:batchwriteitem',
                    'dynamodb:deleteitem',
                    'dynamodb:describe*',
                    'dynamodb:getitem',
                    'dynamodb:getrecords',
                    'dynamodb:getsharditerator',
                    'dynamodb:putitem',
                    'dynamodb:query',
                    'dynamodb:scan',
                    'dynamodb:updateitem',
                    'sns:createplatformapplication',
                    'sns:createplatformendpoint',
                    'sns:deleteendpoint',
                    'sns:deleteplatformapplication',
                    'sns:getendpointattributes',
                    'sns:getplatformapplicationattributes',
                    'sns:listendpointsbyplatformapplication',
                    'sns:publish',
                    'sns:setendpointattributes',
                    'sns:setplatformapplicationattributes',
                    'sts:assumerole',
                    'autoscaling:Describe*',
                    'cloudwatch:Get*',
                    'cloudwatch:List*',
                    'config:BatchGet*',
                    'config:List*',
                    'config:Select*',
                    'ec2:DescribeSubnets',
                    'ec2:describevpcendpoints',
                    'ec2:DescribeVpcs',
                    'iam:GetAccountAuthorizationDetails',
                    'iam:ListAccountAliases',
                    'iam:ListAttachedRolePolicies',
                    'ec2:describeregions',
                    's3:GetBucketPolicy',
                    's3:GetBucketTagging',
                    's3:ListAllMyBuckets',
                    's3:ListBucket',
                    's3:PutBucketPolicy',
                    's3:PutBucketTagging',
                    'sns:GetTopicAttributes',
                    'sns:ListTagsForResource',
                    'sns:ListTopics',
                    'sns:SetTopicAttributes',
                    'sns:TagResource',
                    'sns:UnTagResource',
                    'sqs:GetQueueAttributes',
                    'sqs:GetQueueUrl',
                    'sqs:ListQueues',
                    'sqs:ListQueueTags',
                    'sqs:SetQueueAttributes',
                    'sqs:TagQueue',
                    'sqs:UntagQueue'
                ],
                resources=['*']
            )
        )

        ecs_task_execution_role = iam.Role(
            self,
            'TaskExecutionRole',
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com')
        )
        ecs_task_execution_role.add_managed_policy(
            iam.ManagedPolicy.from_managed_policy_arn(
                self,
                'ServiceRole',
                managed_policy_arn='arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy'
            )
        )

        create_configuration_lambda_role = iam.Role(
            self,
            'CreateConfigurationFileLambdaRole',
            assumed_by=iam.ServicePrincipal(service='lambda.amazonaws.com')
        )
        create_configuration_lambda_role.add_managed_policy(
            iam.ManagedPolicy.from_managed_policy_arn(
                self,
                'ConfigurationBasicExecution',
                managed_policy_arn='arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole'
            )
        )
        create_configuration_lambda_role.add_to_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=['s3:PutObject', 's3:DeleteObject'],
                resources=[s3_bucket.bucket_arn + '/*']
            )
        )

        self.ecs_task_role = ecs_task_role
        self.ecs_task_execution_role = ecs_task_execution_role
        self.create_configuration_lambda_role = create_configuration_lambda_role
```

Columns avg_line_length through hits (schema order):

36.316456 | 106 | 0.472987 | 543 | 8,607 | 7.320442 | 0.257827 | 0.021132 | 0.022138 | 0.022642 | 0.790943 | 0.765283 | 0.765283 | 0.749182 | 0.749182 | 0.749182 | 0 | 0.006352 | 0.43302 | 8,607 | 236 | 107 | 36.470339 | 0.808197 | 0.012548 | 0 | 0.720379 | 0 | 0 | 0.330109 | 0.173549 | 0 | 0 | 0 | 0 | 0 | 1 | 0.004739 | false | 0 | 0.004739 | 0 | 0.014218 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6
Columns hexsha through max_forks_repo_forks_event_max_datetime (schema order):

2b1233e68b3d3d71447c701bd66999e9da0e1296 | 23 | py | Python | jupyterbrowser/__init__.py | rupello/jupyterbrowser | 5076d5588f1a3eb1a7868aa59c144fc8bc2849b8 | ["MIT"] | null | null | null | jupyterbrowser/__init__.py | rupello/jupyterbrowser | 5076d5588f1a3eb1a7868aa59c144fc8bc2849b8 | ["MIT"] | null | null | null | jupyterbrowser/__init__.py | rupello/jupyterbrowser | 5076d5588f1a3eb1a7868aa59c144fc8bc2849b8 | ["MIT"] | null | null | null

content:

```python
from .browse import ui
```

Columns avg_line_length through hits (schema order):

11.5 | 22 | 0.782609 | 4 | 23 | 4.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.173913 | 23 | 1 | 23 | 23 | 0.947368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6
Columns hexsha through max_forks_repo_forks_event_max_datetime (schema order):

2b1dc083641d6d9b1bcce64f23bdac5c338b9e30 | 219 | py | Python | Snippets/intro-2/guessing_game.py | ursaMaj0r/python-csc-125 | 1d0968ad144112e24ae331c75aad58b74041593a | ["MIT"] | null | null | null | Snippets/intro-2/guessing_game.py | ursaMaj0r/python-csc-125 | 1d0968ad144112e24ae331c75aad58b74041593a | ["MIT"] | null | null | null | Snippets/intro-2/guessing_game.py | ursaMaj0r/python-csc-125 | 1d0968ad144112e24ae331c75aad58b74041593a | ["MIT"] | null | null | null

content:

```python
# input
print('What is my favourite food?')
input_guess = input("Guess? ")

# response
while input_guess != 'electricity':
    print("Not even close.")
    input_guess = input("Guess? ")

print("You guessed it! Buzzzz")
```

Columns avg_line_length through hits (schema order):

21.9 | 35 | 0.675799 | 29 | 219 | 5 | 0.62069 | 0.344828 | 0.206897 | 0.275862 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.16895 | 219 | 9 | 36 | 24.333333 | 0.796703 | 0.063927 | 0 | 0.333333 | 0 | 0 | 0.435644 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.5 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 6
Columns hexsha through max_forks_repo_forks_event_max_datetime (schema order):

2b1f2f860d2d6ae81f0d2442defb5d03d06ac9a2 | 14,040 | py | Python | xraysyn/networks/unet.py | cpeng93/XraySyn | 7309b2fbc28bceddbc80a03c2279540da391782a | ["MIT"] | 9 | 2021-09-27T14:41:48.000Z | 2022-01-04T13:54:35.000Z | xraysyn/networks/unet.py | cpeng93/XraySyn | 7309b2fbc28bceddbc80a03c2279540da391782a | ["MIT"] | 1 | 2021-12-29T10:50:12.000Z | 2022-01-08T05:58:49.000Z | xraysyn/networks/unet.py | cpeng93/XraySyn | 7309b2fbc28bceddbc80a03c2279540da391782a | ["MIT"] | 1 | 2022-03-18T16:42:22.000Z | 2022-03-18T16:42:22.000Z

content:

```python
import torch
import torch.nn as nn


class UnetGenerator(nn.Module):
    def __init__(
            self, input_nc, output_nc, dimension="2d", mask_nc=0, num_downs=5, ngf=64,
            norm_layer="none", up_layer="upsample2D", partial_conv=False, use_dropout=False,
            use_tanh=True, output_feats=False):
        assert num_downs >= 5
        super(UnetGenerator, self).__init__()
        norm_layer = {
            "batch": {"2d": nn.BatchNorm2d, "3d": nn.BatchNorm3d}[dimension],
            "instance": {"2d": nn.InstanceNorm2d, "3d": nn.InstanceNorm3d}[dimension],
            "none": None}[norm_layer]
        self.down0 = UnetDown(input_nc, ngf, mask_nc, dimension=dimension,
                              norm_layer=None, partial_conv=partial_conv)
        self.down1 = UnetDown(ngf, ngf * 2, mask_nc, dimension=dimension,
                              norm_layer=norm_layer, partial_conv=partial_conv)
        self.down2 = UnetDown(ngf * 2, ngf * 4, mask_nc, dimension=dimension,
                              norm_layer=norm_layer, partial_conv=partial_conv)
        self.down3 = UnetDown(ngf * 4, ngf * 8, mask_nc, dimension=dimension,
                              norm_layer=norm_layer, partial_conv=partial_conv)
        for i in range(4, num_downs):
            setattr(
                self, "down{}".format(i),
                UnetDown(ngf * 8, ngf * 8, mask_nc, dimension=dimension,
                         norm_layer=norm_layer, partial_conv=partial_conv))
        setattr(
            self, "up{}".format(num_downs - 1),
            UnetUp(ngf * 8, ngf * 8, mask_nc, dimension=dimension,
                   norm_layer=norm_layer, up_layer=up_layer, partial_conv=partial_conv))
        for i in range(num_downs - 2, 3, -1):
            setattr(
                self, "up{}".format(i),
                UnetUp(ngf * 16, ngf * 8, mask_nc, dimension=dimension,
                       use_dropout=use_dropout, norm_layer=norm_layer, up_layer=up_layer, partial_conv=partial_conv))
        self.up3 = UnetUp(ngf * 16, ngf * 4, mask_nc, dimension=dimension,
                          norm_layer=norm_layer, up_layer=up_layer, partial_conv=partial_conv)
        self.up2 = UnetUp(ngf * 8, ngf * 2, mask_nc, dimension=dimension,
                          norm_layer=norm_layer, up_layer=up_layer, partial_conv=partial_conv)
        self.up1 = UnetUp(ngf * 4, ngf, mask_nc, dimension=dimension,
                          norm_layer=norm_layer, up_layer=up_layer, partial_conv=partial_conv)
        self.up0 = UnetUp(ngf * 2, output_nc, mask_nc, dimension=dimension,
                          up_layer=up_layer, final=True, partial_conv=partial_conv, use_tanh=use_tanh)
        self.num_downs = num_downs
        self.output_feats = output_feats

    def forward(self, x):
        x0_down, x1_down = [None], [x]
        for i in range(self.num_downs):
            down = getattr(self, "down{}".format(i))
            x0, x1 = down(x1_down[-1])
            x0_down.append(x0)
            x1_down.append(x1)
        y_up = x1_down[-1]
        if self.output_feats:
            feats = [y_up]
            for i in range(self.num_downs):
                up = getattr(self, "up{}".format(self.num_downs - 1 - i))
                y_up = up(y_up, x0_down[-2 - i])
                feats.append(y_up)
            return y_up, feats
        else:
            for i in range(self.num_downs):
                up = getattr(self, "up{}".format(self.num_downs - 1 - i))
                y_up = up(y_up, x0_down[-2 - i])
            return y_up


class UnetEncoder(nn.Module):
    def __init__(
            self, input_nc, dimension="2d", mask_nc=0, num_downs=5, ngf=64,
            norm_layer="none", partial_conv=False):
        assert num_downs >= 5
        super(UnetEncoder, self).__init__()
        norm_layer = {
            "batch": {"2d": nn.BatchNorm2d, "3d": nn.BatchNorm3d}[dimension],
            "instance": {"2d": nn.InstanceNorm2d, "3d": nn.InstanceNorm3d}[dimension],
            "none": None}[norm_layer]
        self.down0 = UnetDown(input_nc, ngf, mask_nc, dimension=dimension,
                              norm_layer=None, partial_conv=partial_conv)
        self.down1 = UnetDown(ngf, ngf * 2, mask_nc, dimension=dimension,
                              norm_layer=norm_layer, partial_conv=partial_conv)
        self.down2 = UnetDown(ngf * 2, ngf * 4, mask_nc, dimension=dimension,
                              norm_layer=norm_layer, partial_conv=partial_conv)
        self.down3 = UnetDown(ngf * 4, ngf * 8, mask_nc, dimension=dimension,
                              norm_layer=norm_layer, partial_conv=partial_conv)
        for i in range(4, num_downs):
            setattr(
                self, "down{}".format(i),
                UnetDown(ngf * 8, ngf * 8, mask_nc, dimension=dimension,
                         norm_layer=norm_layer, partial_conv=partial_conv))
        self.num_downs = num_downs

    def forward(self, x):
        sides, y = [], x
        for i in range(self.num_downs):
            down = getattr(self, "down{}".format(i))
            side, y = down(y)
            sides.append(side)
        return y, sides


class UnetNewEncoder(nn.Module):
    def __init__(
            self, input_nc, dimension="2d", mask_nc=0, num_downs=5, ngf=64,
            norm_layer="none", partial_conv=False):
        assert num_downs >= 5
        super(UnetNewEncoder, self).__init__()
        norm_layer = {
            "batch": {"2d": nn.BatchNorm2d, "3d": nn.BatchNorm3d}[dimension],
            "instance": {"2d": nn.InstanceNorm2d, "3d": nn.InstanceNorm3d}[dimension],
            "none": None}[norm_layer]
        self.down0 = UnetDown(input_nc, ngf, mask_nc, dimension=dimension,
                              norm_layer=None, partial_conv=partial_conv)
        self.down1 = UnetDown(ngf, ngf * 2, mask_nc, dimension=dimension,
                              norm_layer=norm_layer, partial_conv=partial_conv)
        self.down2 = UnetDown(ngf * 2, ngf * 4, mask_nc, dimension=dimension,
                              norm_layer=norm_layer, partial_conv=partial_conv)
        self.down3 = UnetDown(ngf * 4, ngf * 8, mask_nc, dimension=dimension,
                              norm_layer=norm_layer, partial_conv=partial_conv)
        self.down4 = UnetDown(ngf * 8, ngf * 8, mask_nc, dimension=dimension,
                              norm_layer=norm_layer, partial_conv=partial_conv)
        self.down5 = UnetNewDown(ngf * 8, ngf * 16, mask_nc, dimension=dimension,
                                 norm_layer=norm_layer, partial_conv=partial_conv)
        self.down6 = UnetNewDown(ngf * 16, ngf * 16, mask_nc, dimension=dimension,
                                 norm_layer=norm_layer, partial_conv=partial_conv)
        self.down7 = UnetNewDown(ngf * 16, ngf * 32, mask_nc, dimension=dimension,
                                 norm_layer=norm_layer, partial_conv=partial_conv)
        self.down8 = UnetNewDown(ngf * 32, ngf * 16, mask_nc, dimension=dimension,
                                 norm_layer=norm_layer, partial_conv=partial_conv)
        self.down9 = UnetNewDown(ngf * 16, ngf * 8, mask_nc, dimension=dimension,
                                 norm_layer=norm_layer, partial_conv=partial_conv)
        self.down10 = UnetNewDown(ngf * 8, ngf * 4, mask_nc, dimension=dimension,
                                  norm_layer=norm_layer, partial_conv=partial_conv)
        self.down11 = UnetNewDown(ngf * 4, ngf * 2, mask_nc, dimension=dimension,
                                  norm_layer=norm_layer, partial_conv=partial_conv)
        self.down12 = UnetNewDown(ngf * 2, 32, mask_nc, dimension=dimension,
                                  norm_layer=norm_layer, partial_conv=partial_conv)
        self.down13 = nn.Conv3d(32, 32, kernel_size=3, stride=1, padding=1)
        self.num_downs = 13

    def forward(self, x):
        for i in range(self.num_downs):
            down = getattr(self, "down{}".format(i))
            _, x = down(x)
        x = self.down13(x)
        return x


class UnetDecoder(nn.Module):
    def __init__(
            self, output_nc, dimension="2d", mask_nc=0, num_ups=5, ngf=64,
            norm_layer="none", num_inputs=1, up_layer="upsample", partial_conv=False,
            use_dropout=False, use_tanh=True):
        assert num_ups >= 5
        super(UnetDecoder, self).__init__()
        norm_layer = {
            "batch": {"2d": nn.BatchNorm2d, "3d": nn.BatchNorm3d}[dimension],
            "instance": {"2d": nn.InstanceNorm2d, "3d": nn.InstanceNorm3d}[dimension],
            "none": None}[norm_layer]
        setattr(
            self, "up{}".format(num_ups - 1),
            UnetUp(ngf * num_inputs * 8, ngf * 8, mask_nc, dimension=dimension,
                   norm_layer=norm_layer, up_layer=up_layer, partial_conv=partial_conv))
        for i in range(num_ups - 2, 3, -1):
            setattr(
                self, "up{}".format(i),
                UnetUp(ngf * (num_inputs + 1) * 8, ngf * 8, mask_nc, dimension=dimension,
                       use_dropout=use_dropout, norm_layer=norm_layer, up_layer=up_layer, partial_conv=partial_conv))
        self.up3 = UnetUp(ngf * (num_inputs + 1) * 8, ngf * 4, mask_nc, dimension=dimension,
                          norm_layer=norm_layer, up_layer=up_layer, partial_conv=partial_conv)
        self.up2 = UnetUp(ngf * (num_inputs + 1) * 4, ngf * 2, mask_nc, dimension=dimension,
                          norm_layer=norm_layer, up_layer=up_layer, partial_conv=partial_conv)
        self.up1 = UnetUp(ngf * (num_inputs + 1) * 2, ngf, mask_nc, dimension=dimension,
                          norm_layer=norm_layer, up_layer=up_layer, partial_conv=partial_conv)
        self.up0 = UnetUp(ngf * (num_inputs + 1), output_nc, mask_nc, dimension=dimension,
                          up_layer=up_layer, final=True, partial_conv=partial_conv, use_tanh=use_tanh)
        self.num_ups = num_ups

    def forward(self, x, sides):
        y_up = x
        for i in range(self.num_ups - 1):
            up = getattr(self, "up{}".format(self.num_ups - 1 - i))
            y_up = up(y_up, sides[-2 - i])
        y_up = self.up0(y_up)
        return y_up


class UnetDown(nn.Module):
    def __init__(
            self, input_nc, output_nc, mask_nc=1, dimension="2d",
            norm_layer=nn.BatchNorm2d, partial_conv=False
    ):
        super(UnetDown, self).__init__()
        conv_layer = {"2d": nn.Conv2d, "3d": nn.Conv3d}[dimension]
        self.conv = nn.utils.spectral_norm(conv_layer(
            input_nc + mask_nc, output_nc, kernel_size=3, stride=2, padding=1))
        if norm_layer is not None:
            self.norm = norm_layer(output_nc, affine=True)
        self.mask_nc = mask_nc
        self.leaky_relu = nn.LeakyReLU(0.2, True)
        self.partial_conv = partial_conv

    def forward(self, x):
        if self.mask_nc > 0:
            if self.partial_conv:
                x, y = (
                    x[:, :-self.mask_nc, ...],
                    x[:, -self.mask_nc:, ...]
                )
                x = x * (1 - y)
            else:
                y = x[:, -self.mask_nc:, ...]
        if hasattr(self, "norm"): x0 = self.norm(self.conv(x))
        else: x0 = self.conv(x)
        x1 = self.leaky_relu(x0)
        if self.mask_nc == 0: return x0, x1
        else: return torch.cat([x0, y], 1), torch.cat([x1, y], 1)


class UnetNewDown(nn.Module):
    def __init__(
            self, input_nc, output_nc, mask_nc=1, dimension="2d",
            norm_layer=nn.BatchNorm2d, partial_conv=False
    ):
        super(UnetNewDown, self).__init__()
        conv_layer = {"2d": nn.Conv2d, "3d": nn.Conv3d}[dimension]
        self.conv = nn.utils.spectral_norm(conv_layer(
            input_nc + mask_nc, output_nc, kernel_size=3, stride=1, padding=1))
        if norm_layer is not None:
            self.norm = norm_layer(output_nc, affine=True)
        self.mask_nc = mask_nc
        self.leaky_relu = nn.LeakyReLU(0.2, True)
        self.partial_conv = partial_conv

    def forward(self, x):
        if self.mask_nc > 0:
            if self.partial_conv:
                x, y = (
                    x[:, :-self.mask_nc, ...],
                    x[:, -self.mask_nc:, ...]
                )
                x = x * (1 - y)
            else:
                y = x[:, -self.mask_nc:, ...]
        if hasattr(self, "norm"): x0 = self.norm(self.conv(x))
        else: x0 = self.conv(x)
        x1 = self.leaky_relu(x0)
        if self.mask_nc == 0: return x0, x1
        else: return torch.cat([x0, y], 1), torch.cat([x1, y], 1)


class UnetUp(nn.Module):
    def __init__(
            self, input_nc, output_nc, mask_nc=1, dimension="2d", final=False, use_dropout=False,
            norm_layer=nn.BatchNorm2d, up_layer="upsample2D", partial_conv=False, use_tanh=True):
        super(UnetUp, self).__init__()
        # print('output_nc: ', input_nc + mask_nc, output_nc)
        conv_layer = {"2d": nn.Conv2d, "3d": nn.Conv3d}[dimension]
        deconv_layer = {"2d": nn.ConvTranspose2d, "3d": nn.ConvTranspose3d}[dimension]
        # print(up_layer)
        self.deconv = {
            "deconv": nn.utils.spectral_norm(deconv_layer(
                input_nc + mask_nc, output_nc, kernel_size=3,
                stride=2, padding=1)),
            "upsample2D": nn.Sequential(
                nn.Upsample(scale_factor=2, mode='bilinear'),
                nn.utils.spectral_norm(conv_layer(
                    input_nc + mask_nc, output_nc, kernel_size=3,
                    stride=1, padding=1))),
            "upsample3D": nn.Sequential(
                nn.Upsample(scale_factor=2, mode='trilinear'),
                nn.utils.spectral_norm(conv_layer(
                    input_nc + mask_nc, output_nc, kernel_size=3,
                    stride=1, padding=1)))
        }[up_layer]
        if final:
            self.tanh = nn.Tanh() if use_tanh else nn.Identity()
        else:
            if norm_layer is not None:
                self.norm = norm_layer(output_nc, affine=True)
            if use_dropout: self.dropout = nn.Dropout(0.5)
            self.relu = nn.ReLU(True)
        self.partial_conv = partial_conv
        self.mask_nc = mask_nc

    def forward(self, x1, x2=None):
        if self.partial_conv and self.mask_nc > 0: x1 = x1[:, :-self.mask_nc, ...]
        y = self.deconv(x1)
        if hasattr(self, "tanh"):
            y = self.tanh(y)
        else:
            if hasattr(self, "norm"): y = self.norm(y)
            if hasattr(self, "dropout"): y = self.dropout(y)
            y = torch.cat([y, x2], 1)
            y = self.relu(y)
        return y
```

Columns avg_line_length through hits (schema order):

43.602484 | 114 | 0.589601 | 1,871 | 14,040 | 4.178514 | 0.068947 | 0.121003 | 0.08749 | 0.106933 | 0.831926 | 0.811205 | 0.793681 | 0.773599 | 0.759273 | 0.739575 | 0 | 0.029218 | 0.285755 | 14,040 | 321 | 115 | 43.738318 | 0.750399 | 0.004772 | 0 | 0.619565 | 0 | 0 | 0.021331 | 0 | 0 | 0 | 0 | 0 | 0.014493 | 1 | 0.050725 | false | 0 | 0.007246 | 0 | 0.105072 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6
Columns hexsha through max_forks_repo_forks_event_max_datetime (schema order):

2b5515021276f9874be9a16affec276b9a150040 | 14,891 | py | Python | test/sorting/test_comparison_sorting.py | KentWangYQ/py-algorithms | 3de7df52cd6ce82ce8ef9bbb76b693ffc69cef76 | ["MIT"] | 5 | 2020-10-12T04:42:21.000Z | 2022-03-30T03:32:34.000Z | test/sorting/test_comparison_sorting.py | KentWangYQ/py_algorithms | 3de7df52cd6ce82ce8ef9bbb76b693ffc69cef76 | ["MIT"] | null | null | null | test/sorting/test_comparison_sorting.py | KentWangYQ/py_algorithms | 3de7df52cd6ce82ce8ef9bbb76b693ffc69cef76 | ["MIT"] | 3 | 2020-12-07T06:18:49.000Z | 2022-03-10T15:20:59.000Z

content (Chinese comments and docstrings translated to English):

```python
# -*- coding: utf-8 -*-
import unittest
import random
import copy

from source.sorting import comparison_sorting
from source.sorting.comparison_sorting import SLNode


class ComparisionSortTest(unittest.TestCase):
    def setUp(self):
        self.a = [3, 2, -20, 309, -987, 2, 487, -20, 90, -5, 0, 98]
        self.b = copy.deepcopy(self.a)

    # region INSERTION SORT TEST

    # Straight insertion sort test
    def test_straight_insertion_sort(self):
        """
        Straight insertion sort test
        :return:
        """
        comparison_sorting.straight_insertion_sort(self.b)
        list.sort(self.a)
        self.assertEqual(self.a, self.b, 'The list is NOT sorted after straight insertion sort!')

    # Straight insertion sort reverse test
    def test_straight_insertion_sort_reverse(self):
        """
        Straight insertion sort reverse test
        :return:
        """
        comparison_sorting.straight_insertion_sort(a=self.b, reverse=True)
        list.sort(self.a, reverse=True)
        self.assertEqual(self.a, self.b, 'The list is NOT sorted after straight insertion sort reverse!')

    # Binary insertion sort test
    def test_binary_insertion_sort(self):
        """
        Binary insertion sort test
        :return:
        """
        comparison_sorting.binary_insertion_sort(self.b)
        list.sort(self.a)
        self.assertEqual(self.a, self.b, 'The list is NOT sorted after binary insertion sort!')

    # Binary insertion sort reverse test
    def test_binary_insertion_sort_reverse(self):
        """
        Binary insertion sort reverse test
        :return:
        """
        comparison_sorting.binary_insertion_sort(self.b, True)
        list.sort(self.a, reverse=True)
        self.assertEqual(self.a, self.b, 'The list is NOT sorted after binary insertion sort reverse!')

    # Two-way insertion sort test
    def test_two_way_insertion_sort(self):
        """
        Two-way insertion sort test
        :return:
        """
        comparison_sorting.two_way_insertion_sort(self.b)
        list.sort(self.a)
        self.assertEqual(self.a, self.b, 'The list is NOT sorted after two way insertion sort!')

    # Two-way insertion sort reverse test
    def test_two_way_insertion_sort_reverse(self):
        """
        Two-way insertion sort reverse test
        :return:
        """
        comparison_sorting.two_way_insertion_sort(self.b, True)
        list.sort(self.a, reverse=True)
        self.assertEqual(self.a, self.b, 'The list is NOT sorted after two way insertion sort reverse!')

    # List insertion sort test
    def test_list_insertion_sort(self):
        """
        List insertion sort test
        :return:
        """
        comparison_sorting.list_insertion_sort(self.b)
        list.sort(self.a)
        self.assertEqual(self.a, self.b, 'The list is NOT sorted after link list insertion sort!')

    # List insertion sort reverse test
    def test_list_insertion_sort_reverse(self):
        """
        List insertion sort reverse test
        :return:
        """
        comparison_sorting.list_insertion_sort(self.b, True)
        list.sort(self.a, reverse=True)
        self.assertEqual(self.a, self.b, 'The list is NOT sorted after link list insertion sort reverse!')

    # Linked-list rearrangement test
    def test_arrange(self):
        """
        Linked-list rearrangement test
        :return:
        """
        _keys, _next = [float('inf'), 49, 38, 76, 13, 27], [4, 3, 1, 0, 5, 2]
        sl = [SLNode(k, _next[i]) for i, k in enumerate(_keys)]
        comparison_sorting._arrange(sl)
        expect = _keys[1:]
        list.sort(expect)
        actual = [sln.rc for sln in sl[1:]]
        self.assertEqual(expect, actual, 'The link list is NOT sorted after _arrange!')

    # Shell sort test
    def test_shell_sort(self):
        """
        Shell sort test
        :return:
        """
        comparison_sorting.shell_sort(self.b, [5, 3, 1])
        list.sort(self.a)
        self.assertEqual(self.a, self.b, 'The list is NOT sorted after shell sort!')

    # Shell sort reverse test
    def test_shell_sort_reverse(self):
        """
        Shell sort reverse test
        :return:
        """
        comparison_sorting.shell_sort(self.b, [6, 4, 2, 1], True)
        list.sort(self.a, reverse=True)
        self.assertEqual(self.a, self.b, 'The list is NOT sorted after shell sort reverse!')

    # endregion

    # region QUICK SORT TEST

    # Bubble sort test
    def test_bubble_sort(self):
        """
        Bubble sort test
        :return:
        """
        comparison_sorting.bubble_sort(self.b)
        list.sort(self.a)
        self.assertEqual(self.a, self.b, 'The list is NOT sorted after bubble sort!')

    # Bubble sort reverse test
    def test_bubble_sort_reverse(self):
        """
        Bubble sort reverse test
        :return:
        """
        comparison_sorting.bubble_sort(self.b, True)
        list.sort(self.a, reverse=True)
        self.assertEqual(self.a, self.b, 'The list is NOT sorted after bubble sort reverse!')

    # Quick sort test
    def test_quick_sort(self):
        """
        Quick sort test
        :return:
        """
        comparison_sorting.quick_sort(self.b)
        list.sort(self.a)
        self.assertEqual(self.a, self.b, 'The list is NOT sorted after quick sort!')

    # Quick sort reverse test
    def test_quick_sort_reverse(self):
        """
        Quick sort reverse test
        :return:
        """
        comparison_sorting.quick_sort(self.b, reverse=True)
        list.sort(self.a, reverse=True)
        self.assertEqual(self.a, self.b, 'The list is NOT sorted after quick sort reverse!')

    # Quick sort with randomized-partition test
    def test_quick_sort_rd(self):
        """
        Quick sort with randomized-partition test
        :return:
        """
        comparison_sorting.quick_sort(self.b, randomized_partition=True)
        list.sort(self.a)
        self.assertEqual(self.a, self.b, 'The list is NOT sorted after quick sort!')

    # Quick sort with randomized-partition reverse test
    def test_quick_sort_reverse_rd(self):
        """
        Quick sort with randomized-partition reverse test
        :return:
        """
        comparison_sorting.quick_sort(self.b, reverse=True, randomized_partition=True)
        list.sort(self.a, reverse=True)
        self.assertEqual(self.a, self.b, 'The list is NOT sorted after quick sort reverse!')

    # endregion

    # region SELECT SORT TEST
    def test_simple_selection_sort(self):
        comparison_sorting.simple_selection_sort(self.b)
        list.sort(self.a)
        self.assertEqual(self.a, self.b, 'The list is NOT sorted after simple selection sort!')

    def test_simple_selection_sort_reverse(self):
        comparison_sorting.simple_selection_sort(self.b, True)
        list.sort(self.a, reverse=True)
        self.assertEqual(self.a, self.b, 'The list is NOT sorted after simple selection sort reverse!')

    def test_tree_selection_sort(self):
        a_tree_selection_sort_result = comparison_sorting.tree_selection_sort(self.a)
        list.sort(self.a)
        self.assertEqual(self.a, a_tree_selection_sort_result, 'The list is NOT sorted after tree selection sort!')

    def test_heap_sort(self):
        comparison_sorting.heap_sort(self.b)
        list.sort(self.a)
        self.assertEqual(self.a, self.b, 'The list is NOT sorted after heap sort!')

    def test_heap_sort_reverse(self):
        comparison_sorting.heap_sort(self.b, reverse=True)
        list.sort(self.a, reverse=True)
        self.assertEqual(self.a, self.b, 'The list is NOT sorted after heap sort reverse!')
    # endregion

    # region MERGE SORT

    # Merge of two sorted runs test
    def test_merge(self):
        """
        Merge of two sorted runs test
        :return:
        """
        p, q, r = 1, 5, 10
        a = [3, 2, -20, 309, -987, 2, 487, -20, 90, -5, 0, 98]
        lv = a[p:q + 1]
        rv = a[q + 1:r + 1]
        list.sort(lv)
        list.sort(rv)
        a[p:q + 1] = lv
        a[q + 1:r + 1] = rv
        actual = copy.deepcopy(a)
        comparison_sorting._merge(actual, p, q, r)
        expect = a[p:r + 1]
        list.sort(expect)
        self.assertEqual(expect, actual[p:r + 1], 'The %d to %d items in list is NOT sorted after merge!' % (p, r + 1))

    # Merge sort helper method test
    def test__merge(self):
        """
        Merge sort helper method test
        :return:
        """
        p, q, r = 1, 5, 10
        a = [3, 2, -20, 309, -987, 2, 487, -20, 90, -5, 0, 98]
        lv = a[p:q + 1]
        rv = a[q + 1:r + 1]
        list.sort(lv, reverse=True)
        list.sort(rv, reverse=True)
        a[p:q + 1] = lv
        a[q + 1:r + 1] = rv
        actual = copy.deepcopy(a)
        comparison_sorting._merge(actual, p, q, r, reverse=True)
        expect = a[p:r + 1]
        list.sort(expect, reverse=True)
        self.assertEqual(expect, actual[p:r + 1],
                         'The %d to %d items in list is NOT sorted after merge reverse!' % (p, r + 1))

    # Merge sort test
    def test_merge_sort(self):
        """
        Merge sort test
        :return:
        """
        comparison_sorting.merge_sort(self.b)
        list.sort(self.a)
        self.assertEqual(self.a, self.b, 'The list is NOT sorted after merge sort!')

    # Merge sort reverse test
    def test_merge_sort_reverse(self):
        """
        Merge sort reverse test
        :return:
        """
        comparison_sorting.merge_sort(a=self.b, reverse=True)
        list.sort(self.a, reverse=True)
        self.assertEqual(self.a, self.b, 'The list is NOT sorted after merge sort reverse!')

    # endregion

    # region SORTING RANDOM TEST
    def test_sort_random_list(self):
        t = 1000
        a = [random.randint(t * -1, t) for _ in range(t)]

        # region INSERTION SORT
        a_straight_insertion_sort = copy.deepcopy(a)
        comparison_sorting.straight_insertion_sort(a_straight_insertion_sort)

        a_binary_insertion_sort = copy.deepcopy(a)
        comparison_sorting.binary_insertion_sort(a_binary_insertion_sort)

        a_two_way_insertion_sort = copy.deepcopy(a)
        comparison_sorting.two_way_insertion_sort(a_two_way_insertion_sort)

        a_list_insertion_sort = copy.deepcopy(a)
        comparison_sorting.list_insertion_sort(a_list_insertion_sort)

        a_shell_sort = copy.deepcopy(a)
        comparison_sorting.shell_sort(a_shell_sort, [10, 6, 3, 1])
        # endregion

        # region QUICK SORT
        a_bubble_sort = copy.deepcopy(a)
        comparison_sorting.bubble_sort(a_bubble_sort)

        a_quick_sort = copy.deepcopy(a)
        comparison_sorting.quick_sort(a_quick_sort)

        a_quick_sort_rd = copy.deepcopy(a)
        comparison_sorting.quick_sort(a_quick_sort_rd, randomized_partition=True)
        # endregion

        # region SELECTION SORT
        a_simple_selection_sort = copy.deepcopy(a)
        comparison_sorting.simple_selection_sort(a_simple_selection_sort)

        a_tree_selection_sort_result = comparison_sorting.tree_selection_sort(a)

        a_heap_sort = copy.deepcopy(a)
        comparison_sorting.heap_sort(a_heap_sort)
        # endregion

        # region MERGE SORT
        a_merge_sort = copy.deepcopy(a)
        comparison_sorting.merge_sort(a_merge_sort)
        # endregion

        list.sort(a)

        # INSERTION SORT
        self.assertEqual(a, a_straight_insertion_sort, 'The list is NOT sorted after straight insertion sort!')
        self.assertEqual(a, a_binary_insertion_sort, 'The list is NOT sorted after binary insertion sort!')
        self.assertEqual(a, a_two_way_insertion_sort, 'The list is NOT sorted after two way insertion sort!')
        self.assertEqual(a, a_list_insertion_sort, 'The list is NOT sorted after list insertion sort!')
        self.assertEqual(a, a_shell_sort, 'The list is NOT sorted after shell sort!')

        # QUICK SORT
        self.assertEqual(a, a_bubble_sort, 'The list is NOT sorted after bubble sort!')
        self.assertEqual(a, a_quick_sort, 'The list is NOT sorted after quick sort!')
        self.assertEqual(a, a_quick_sort_rd, 'The list is NOT sorted after quick sort rd!')

        # SELECTION SORT
        self.assertEqual(a, a_simple_selection_sort, 'The list is NOT sorted after simple selection sort!')
        self.assertEqual(a, a_tree_selection_sort_result, 'The list is NOT sorted after tree selection sort!')
        self.assertEqual(a, a_heap_sort, 'The list is NOT sorted after heap sort!')

        # MERGE SORT
        self.assertEqual(a, a_merge_sort, 'The list is NOT sorted after merge sort!')

    def test_sort_random_list_reverse(self):
        t = 1000
        a = [random.randint(t * -1, t) for _ in range(t)]

        # region INSERTION SORT
        a_straight_insertion_sort = copy.deepcopy(a)
        comparison_sorting.straight_insertion_sort(a_straight_insertion_sort, reverse=True)

        a_binary_insertion_sort = copy.deepcopy(a)
        comparison_sorting.binary_insertion_sort(a_binary_insertion_sort, reverse=True)

        a_two_way_insertion_sort = copy.deepcopy(a)
        comparison_sorting.two_way_insertion_sort(a_two_way_insertion_sort, reverse=True)

        a_list_insertion_sort = copy.deepcopy(a)
        comparison_sorting.list_insertion_sort(a_list_insertion_sort, reverse=True)

        a_shell_sort = copy.deepcopy(a)
        comparison_sorting.shell_sort(a_shell_sort, [10, 6, 3, 1], reverse=True)
        # endregion

        # region QUICK SORT
        a_bubble_sort = copy.deepcopy(a)
        comparison_sorting.bubble_sort(a_bubble_sort, reverse=True)

        a_quick_sort = copy.deepcopy(a)
        comparison_sorting.quick_sort(a_quick_sort, reverse=True)

        a_quick_sort_rd = copy.deepcopy(a)
        comparison_sorting.quick_sort(a_quick_sort_rd, reverse=True, randomized_partition=True)
        # endregion

        # region SELECTION SORT
        a_simple_selection_sort = copy.deepcopy(a)
        comparison_sorting.simple_selection_sort(a_simple_selection_sort, reverse=True)

        a_heap_sort = copy.deepcopy(a)
        comparison_sorting.heap_sort(a_heap_sort, reverse=True)
        # endregion

        # region MERGE SORT
        a_merge_sort = copy.deepcopy(a)
        comparison_sorting.merge_sort(a_merge_sort, reverse=True)
        # endregion

        list.sort(a, reverse=True)

        # INSERTION SORT
        self.assertEqual(a, a_straight_insertion_sort, 'The list is NOT sorted after straight insertion sort reverse!')
        self.assertEqual(a, a_binary_insertion_sort, 'The list is NOT sorted after binary insertion sort reverse!')
        self.assertEqual(a, a_two_way_insertion_sort, 'The list is NOT sorted after two way insertion sort reverse!')
        self.assertEqual(a, a_list_insertion_sort, 'The list is NOT sorted after list insertion sort reverse!')
        self.assertEqual(a, a_shell_sort, 'The list is NOT sorted after shell sort reverse!')

        # QUICK SORT
        self.assertEqual(a, a_bubble_sort, 'The list is NOT sorted after bubble sort reverse!')
        self.assertEqual(a, a_quick_sort, 'The list is NOT sorted after quick sort reverse!')
        self.assertEqual(a, a_quick_sort_rd, 'The list is NOT sorted after quick sort rd reverse!')

        # SELECTION SORT
        self.assertEqual(a, a_simple_selection_sort, 'The list is NOT sorted after simple selection sort reverse!')
        self.assertEqual(a, a_heap_sort, 'The list is NOT sorted after heap sort reverse!')

        # MERGE SORT
        self.assertEqual(a, a_merge_sort, 'The list is NOT sorted after merge sort reverse!')
    # endregion
```

Columns avg_line_length through hits (schema order):

34.3903 | 119 | 0.638909 | 2,012 | 14,891 | 4.524354 | 0.060139 | 0.062397 | 0.048446 | 0.080743 | 0.853235 | 0.794573 | 0.775788 | 0.743271 | 0.71427 | 0.694167 | 0 | 0.012924 | 0.262172 | 14,891 | 432 | 120 | 34.469907 | 0.8156 | 0.075079 | 0 | 0.331776 | 0 | 0 | 0.186285 | 0 | 0 | 0 | 0 | 0 | 0.228972 | 1 | 0.135514 | false | 0 | 0.023364 | 0 | 0.163551 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6
Columns hexsha through max_forks_repo_forks_event_max_datetime (schema order):

2b61bdb022a74be250d9f3a670026e676ab6bd1a | 122 | py | Python | user-services/app/user_api/__init__.py | SalAlba/flask-microservices | 8625e8fb3352d3704a17796635e95bbef25f1d06 | ["MIT"] | null | null | null | user-services/app/user_api/__init__.py | SalAlba/flask-microservices | 8625e8fb3352d3704a17796635e95bbef25f1d06 | ["MIT"] | null | null | null | user-services/app/user_api/__init__.py | SalAlba/flask-microservices | 8625e8fb3352d3704a17796635e95bbef25f1d06 | ["MIT"] | null | null | null

content:

```python
from flask import Blueprint

user_blueprint = Blueprint('user', __name__, template_folder='templates')

from . import routes
```

Columns avg_line_length through hits (schema order):

40.666667 | 73 | 0.811475 | 15 | 122 | 6.2 | 0.666667 | 0.27957 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.098361 | 122 | 3 | 74 | 40.666667 | 0.845455 | 0 | 0 | 0 | 0 | 0 | 0.105691 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.666667 | 0 | 0.666667 | 0.666667 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 0 | 6
Columns hexsha through max_forks_repo_forks_event_max_datetime (schema order):

9919a629e013b4b51a66ef1dbf4fc2bbd7fe213b | 5,333 | py | Python | tests/api/v1/business/test_get_all.py | rogerokello/weConnect-api | e1fb136864842781063a60bae0764defb99e47c6 | ["MIT"] | 1 | 2019-04-18T19:56:31.000Z | 2019-04-18T19:56:31.000Z | tests/api/v1/business/test_get_all.py | rogerokello/weconnect-practice | e1fb136864842781063a60bae0764defb99e47c6 | ["MIT"] | 6 | 2018-02-19T14:17:00.000Z | 2018-07-08T08:38:02.000Z | tests/api/v1/business/test_get_all.py | rogerokello/weConnect-api | e1fb136864842781063a60bae0764defb99e47c6 | ["MIT"] | 1 | 2018-02-26T13:05:49.000Z | 2018-02-26T13:05:49.000Z

content:

```python
import unittest
import json
from app import create_app, db
from tests.api.v1 import BaseTestCase


class BusinessTestCase(BaseTestCase):
    """Test case for the business endpoint """

    def test_it_works(self):
        """Test the API can get all business registered businesses (GET request)"""
        # register a test user, then log them in
        self._register_user()
        result = self._login_user()

        # obtain the access token
        access_token = json.loads(result.data.decode())['access_token']

        # first add a business
        self.client().post('/businesses',
                           headers=dict(Authorization="Bearer " + access_token),
                           data=json.dumps(self.a_business),
                           content_type='application/json')
        # first add a business
        self.client().post('/businesses',
                           headers=dict(Authorization="Bearer " + access_token),
                           data=json.dumps(self.a_business2),
                           content_type='application/json')
        # first add a business
        self.client().post('/businesses',
                           headers=dict(Authorization="Bearer " + access_token),
                           data=json.dumps(self.a_business3),
                           content_type='application/json')
        # first add a business
        self.client().post('/businesses',
                           headers=dict(Authorization="Bearer " + access_token),
                           data=json.dumps(self.a_business4),
                           content_type='application/json')
        # first add a business
        self.client().post('/businesses',
                           headers=dict(Authorization="Bearer " + access_token),
                           data=json.dumps(self.a_business5),
                           content_type='application/json')

        response = self.client().get('/businesses',
                                     headers=dict(Authorization="Bearer " + access_token)
                                     )

        # check that a 201 response status code was returned
        self.assertEqual(response.status_code, 201)

        # check that XEDROX string in returned json response
        self.assertIn('Xedrox', str(response.data))

    def test_no_business(self):
        """Test the API works when no businesses are available (GET request)"""
        # register a test user, then log them in
        self._register_user()
        result = self._login_user()

        # obtain the access token
        access_token = json.loads(result.data.decode())['access_token']

        response = self.client().get('/businesses',
                                     headers=dict(Authorization="Bearer " + access_token)
                                     )

        # check that a 201 response status code was returned
        self.assertEqual(response.status_code, 201)

        # check that an empty list is in returned json response
        self.assertEqual([], json.loads(response.data.decode())["message"])

    def test_no_token(self):
        """Test the API can get all businesses works when no token is supplied (GET request)"""
        # register a test user, then log them in
        self._register_user()
        result = self._login_user()

        # obtain the access token
        access_token = json.loads(result.data.decode())['access_token']

        # first add a business
        self.client().post('/businesses',
                           headers=dict(Authorization="Bearer " + access_token),
                           data=json.dumps(self.a_business),
                           content_type='application/json')

        response = self.client().get('/businesses',
                                     # headers=dict(Authorization="Bearer " + access_token)
                                     )

        # check that a 404 response status code was returned
        self.assertEqual(response.status_code, 403)

        # check that Token required string in returned json response
        self.assertIn('Please provide an Authorisation header', str(response.data))

    def test_invalid_token(self):
        """Test the API can get all businesses works when invalid token is supplied (GET request)"""
        # register a test user, then log them in
        self._register_user()
        result = self._login_user()

        # obtain the access token
        access_token = json.loads(result.data.decode())['access_token']

        # first add a business
        self.client().post('/businesses',
                           headers=dict(Authorization="Bearer " + access_token),
                           data=json.dumps(self.a_business),
                           content_type='application/json')

        response = self.client().get('/businesses',
                                     headers=dict(Authorization="Bearer " + access_token + "5432fr")
                                     )

        # check that a 404 response status code was returned
        self.assertEqual(response.status_code, 403)

        # check that Token required string in returned json response
        self.assertIn('Invalid Token', str(response.data))
```

Columns avg_line_length through hits (schema order):

43.713115 | 100 | 0.557847 | 549 | 5,333 | 5.306011 | 0.167577 | 0.086852 | 0.0793 | 0.12839 | 0.848953 | 0.824923 | 0.824923 | 0.803296 | 0.803296 | 0.803296 | 0 | 0.00951 | 0.349334 | 5,333 | 122 | 101 | 43.713115 | 0.829971 | 0.228014 | 0 | 0.676471 | 0 | 0 | 0.103465 | 0 | 0 | 0 | 0 | 0 | 0.117647 | 1 | 0.058824 | false | 0 | 0.058824 | 0 | 0.132353 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6
Columns hexsha through max_forks_repo_forks_event_max_datetime (schema order):

99591db249e6b167fd953324aef347be9596408a | 50 | py | Python | genesis/optimizer/__init__.py | TrentBrick/genesis | d80725b51b4b97fb5cddde7b7f0dc1362c11b26b | ["MIT"] | 12 | 2020-02-02T14:29:15.000Z | 2021-09-12T08:05:43.000Z | genesis/optimizer/__init__.py | TrentBrick/genesis | d80725b51b4b97fb5cddde7b7f0dc1362c11b26b | ["MIT"] | 1 | 2022-01-04T08:04:00.000Z | 2022-01-10T08:49:04.000Z | genesis/optimizer/__init__.py | johli/genesis | 5424c1888d4330e505ad87412e7f1cc5dd828888 | ["MIT"] | 3 | 2020-03-10T22:24:05.000Z | 2021-05-05T13:23:01.000Z

content:

```python
from genesis.optimizer.genesis_optimizer import *
```

Columns avg_line_length through hits (schema order):

25 | 49 | 0.86 | 6 | 50 | 7 | 0.666667 | 0.761905 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.08 | 50 | 1 | 50 | 50 | 0.913043 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6
Columns hexsha through max_forks_repo_forks_event_max_datetime (schema order):

510e7cec26b44ee1e4ee62c1e7e904a78e016d64 | 36 | py | Python | bitjoy/utils/__init__.py | senavs/BitJoy | 347538d69ed38df2082192e7991f09e9f94d3d11 | ["MIT"] | null | null | null | bitjoy/utils/__init__.py | senavs/BitJoy | 347538d69ed38df2082192e7991f09e9f94d3d11 | ["MIT"] | null | null | null | bitjoy/utils/__init__.py | senavs/BitJoy | 347538d69ed38df2082192e7991f09e9f94d3d11 | ["MIT"] | null | null | null

content:

```python
from .functions import int_to_bytes
```

Columns avg_line_length through hits (schema order):

18 | 35 | 0.861111 | 6 | 36 | 4.833333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 36 | 1 | 36 | 36 | 0.90625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6
Columns hexsha through max_forks_repo_forks_event_max_datetime (schema order):

5129fd2b424491144943776019a4fd748777aac7 | 55 | py | Python | quantipy/core/tools/dp/__init__.py | encount/quantipy3 | 01fe350b79594ba162cd48ce91f6e547e74265fe | ["MIT"] | null | null | null | quantipy/core/tools/dp/__init__.py | encount/quantipy3 | 01fe350b79594ba162cd48ce91f6e547e74265fe | ["MIT"] | null | null | null | quantipy/core/tools/dp/__init__.py | encount/quantipy3 | 01fe350b79594ba162cd48ce91f6e547e74265fe | ["MIT"] | null | null | null

content:

```python
from . import io
from . import prep
from . import query
```

Columns avg_line_length through hits (schema order):

18.333333 | 19 | 0.745455 | 9 | 55 | 4.555556 | 0.555556 | 0.731707 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.2 | 55 | 3 | 19 | 18.333333 | 0.931818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6
Columns hexsha through max_forks_repo_forks_event_max_datetime (schema order):

51414e2381dcb7c0741d472b02377f0c91b085db | 72 | py | Python | calculator/standard_widgets/standard_label.py | restless-dreamer/awesome-calculator | 52c20d0f935cd6906b5020cbd69fb2d537b93efe | ["MIT"] | null | null | null | calculator/standard_widgets/standard_label.py | restless-dreamer/awesome-calculator | 52c20d0f935cd6906b5020cbd69fb2d537b93efe | ["MIT"] | 1 | 2021-07-27T21:08:10.000Z | 2021-07-28T11:22:24.000Z | calculator/standard_widgets/standard_label.py | restless-dreamer/awesome-calculator | 52c20d0f935cd6906b5020cbd69fb2d537b93efe | ["MIT"] | null | null | null

content:

```python
from kivy.uix.label import Label


class StandardLabel(Label):
    pass
```

Columns avg_line_length through hits (schema order):

12 | 32 | 0.75 | 10 | 72 | 5.4 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.180556 | 72 | 5 | 33 | 14.4 | 0.915254 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6
Columns hexsha through max_forks_repo_forks_event_max_datetime (schema order):

514c867eeb60b8de54fcd7df896e0bddba3c87fa | 34 | py | Python | naivebayes/__init__.py | sahitpj/MachineLearning | 2ce5a337ec432daff64a216df6847ef834bcb8d7 | ["MIT"] | 2 | 2019-01-23T15:51:29.000Z | 2019-02-01T16:50:33.000Z | naivebayes/__init__.py | sahitpj/MachineLearning | 2ce5a337ec432daff64a216df6847ef834bcb8d7 | ["MIT"] | null | null | null | naivebayes/__init__.py | sahitpj/MachineLearning | 2ce5a337ec432daff64a216df6847ef834bcb8d7 | ["MIT"] | null | null | null

content:

```python
from .gaussiannb import GaussianNB
```

Columns avg_line_length through hits (schema order):

34 | 34 | 0.882353 | 4 | 34 | 7.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.088235 | 34 | 1 | 34 | 34 | 0.967742 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6
Columns hexsha through max_forks_repo_forks_event_max_datetime (schema order):

515146bc16cc7264e0e2fa2146a5e58c9400ff3d | 16,178 | py | Python | tests/components/system_bridge/test_config_flow.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | ["Apache-2.0"] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | tests/components/system_bridge/test_config_flow.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | ["Apache-2.0"] | 24,710 | 2016-04-13T08:27:26.000Z | 2020-03-02T12:59:13.000Z | tests/components/system_bridge/test_config_flow.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | ["Apache-2.0"] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z

content (the record is cut off mid-file in the source; the remainder of this row is not available):

```python
"""Test the System Bridge config flow."""
import asyncio
from unittest.mock import patch

from systembridgeconnector.const import (
    EVENT_DATA,
    EVENT_MESSAGE,
    EVENT_MODULE,
    EVENT_SUBTYPE,
    EVENT_TYPE,
    SUBTYPE_BAD_API_KEY,
    TYPE_DATA_UPDATE,
    TYPE_ERROR,
)
from systembridgeconnector.exceptions import (
    AuthenticationException,
    ConnectionClosedException,
    ConnectionErrorException,
)

from homeassistant import config_entries, data_entry_flow
from homeassistant.components import zeroconf
from homeassistant.components.system_bridge.const import DOMAIN
from homeassistant.const import CONF_API_KEY, CONF_HOST, CONF_PORT
from homeassistant.core import HomeAssistant

from tests.common import MockConfigEntry

FIXTURE_MAC_ADDRESS = "aa:bb:cc:dd:ee:ff"
FIXTURE_UUID = "e91bf575-56f3-4c83-8f42-70ac17adcd33"

FIXTURE_AUTH_INPUT = {CONF_API_KEY: "abc-123-def-456-ghi"}

FIXTURE_USER_INPUT = {
    CONF_API_KEY: "abc-123-def-456-ghi",
    CONF_HOST: "test-bridge",
    CONF_PORT: "9170",
}

FIXTURE_ZEROCONF_INPUT = {
    CONF_API_KEY: "abc-123-def-456-ghi",
    CONF_HOST: "1.1.1.1",
    CONF_PORT: "9170",
}

FIXTURE_ZEROCONF = zeroconf.ZeroconfServiceInfo(
    host="test-bridge",
    addresses=["1.1.1.1"],
    port=9170,
    hostname="test-bridge.local.",
    type="_system-bridge._udp.local.",
    name="System Bridge - test-bridge._system-bridge._udp.local.",
    properties={
        "address": "http://test-bridge:9170",
        "fqdn": "test-bridge",
        "host": "test-bridge",
        "ip": "1.1.1.1",
        "mac": FIXTURE_MAC_ADDRESS,
        "port": "9170",
        "uuid": FIXTURE_UUID,
    },
)

FIXTURE_ZEROCONF_BAD = zeroconf.ZeroconfServiceInfo(
    host="1.1.1.1",
    addresses=["1.1.1.1"],
    port=9170,
    hostname="test-bridge.local.",
    type="_system-bridge._udp.local.",
    name="System Bridge - test-bridge._system-bridge._udp.local.",
    properties={
        "something": "bad",
    },
)

FIXTURE_DATA_SYSTEM = {
    EVENT_TYPE: TYPE_DATA_UPDATE,
    EVENT_MESSAGE: "Data changed",
    EVENT_MODULE: "system",
    EVENT_DATA: {
        "uuid": FIXTURE_UUID,
    },
}

FIXTURE_DATA_SYSTEM_BAD = {
    EVENT_TYPE: TYPE_DATA_UPDATE,
    EVENT_MESSAGE: "Data changed",
    EVENT_MODULE: "system",
    EVENT_DATA: {},
}

FIXTURE_DATA_AUTH_ERROR = {
    EVENT_TYPE: TYPE_ERROR,
    EVENT_SUBTYPE: SUBTYPE_BAD_API_KEY,
    EVENT_MESSAGE: "Invalid api-key",
}


async def test_show_user_form(hass: HomeAssistant) -> None:
    """Test that the setup form is served."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"


async def test_user_flow(hass: HomeAssistant) -> None:
    """Test full user flow."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] is None

    with patch(
        "homeassistant.components.system_bridge.config_flow.WebSocketClient.connect"
    ), patch("systembridgeconnector.websocket_client.WebSocketClient.get_data"), patch(
        "systembridgeconnector.websocket_client.WebSocketClient.receive_message",
        return_value=FIXTURE_DATA_SYSTEM,
    ), patch(
        "homeassistant.components.system_bridge.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"], FIXTURE_USER_INPUT
        )
        await hass.async_block_till_done()

    assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result2["title"] == "test-bridge"
    assert result2["data"] == FIXTURE_USER_INPUT

    assert len(mock_setup_entry.mock_calls) == 1


async def test_form_cannot_connect(hass: HomeAssistant) -> None:
    """Test we handle cannot connect error."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] is None

    with patch(
        "systembridgeconnector.websocket_client.WebSocketClient.connect",
        side_effect=ConnectionErrorException,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"], FIXTURE_USER_INPUT
        )
        await hass.async_block_till_done()

    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["step_id"] == "user"
    assert result2["errors"] == {"base": "cannot_connect"}


async def test_form_connection_closed_cannot_connect(hass: HomeAssistant) -> None:
    """Test we handle connection closed cannot connect error."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] is None

    with patch("systembridgeconnector.websocket_client.WebSocketClient.connect"), patch(
        "systembridgeconnector.websocket_client.WebSocketClient.get_data"
    ), patch(
        "systembridgeconnector.websocket_client.WebSocketClient.receive_message",
        side_effect=ConnectionClosedException,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"], FIXTURE_USER_INPUT
```
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["step_id"] == "user"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_timeout_cannot_connect(hass: HomeAssistant) -> None:
"""Test we handle timeout cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] is None
with patch("systembridgeconnector.websocket_client.WebSocketClient.connect"), patch(
"systembridgeconnector.websocket_client.WebSocketClient.get_data"
), patch(
"systembridgeconnector.websocket_client.WebSocketClient.receive_message",
side_effect=asyncio.TimeoutError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], FIXTURE_USER_INPUT
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["step_id"] == "user"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_invalid_auth(hass: HomeAssistant) -> None:
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] is None
with patch("systembridgeconnector.websocket_client.WebSocketClient.connect"), patch(
"systembridgeconnector.websocket_client.WebSocketClient.get_data"
), patch(
"systembridgeconnector.websocket_client.WebSocketClient.receive_message",
side_effect=AuthenticationException,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], FIXTURE_USER_INPUT
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["step_id"] == "user"
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_uuid_error(hass: HomeAssistant) -> None:
"""Test we handle error from bad uuid."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] is None
with patch("systembridgeconnector.websocket_client.WebSocketClient.connect"), patch(
"systembridgeconnector.websocket_client.WebSocketClient.get_data"
), patch(
"systembridgeconnector.websocket_client.WebSocketClient.receive_message",
return_value=FIXTURE_DATA_SYSTEM_BAD,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], FIXTURE_USER_INPUT
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["step_id"] == "user"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_unknown_error(hass: HomeAssistant) -> None:
"""Test we handle unknown errors."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] is None
with patch("systembridgeconnector.websocket_client.WebSocketClient.connect"), patch(
"systembridgeconnector.websocket_client.WebSocketClient.get_data"
), patch(
"systembridgeconnector.websocket_client.WebSocketClient.receive_message",
side_effect=Exception,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], FIXTURE_USER_INPUT
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["step_id"] == "user"
assert result2["errors"] == {"base": "unknown"}
async def test_reauth_authorization_error(hass: HomeAssistant) -> None:
"""Test we show user form on authorization error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "reauth"}, data=FIXTURE_USER_INPUT
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "authenticate"
with patch("systembridgeconnector.websocket_client.WebSocketClient.connect"), patch(
"systembridgeconnector.websocket_client.WebSocketClient.get_data"
), patch(
"systembridgeconnector.websocket_client.WebSocketClient.receive_message",
side_effect=AuthenticationException,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], FIXTURE_AUTH_INPUT
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["step_id"] == "authenticate"
assert result2["errors"] == {"base": "invalid_auth"}
async def test_reauth_connection_error(hass: HomeAssistant) -> None:
"""Test we show user form on connection error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "reauth"}, data=FIXTURE_USER_INPUT
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "authenticate"
with patch(
"systembridgeconnector.websocket_client.WebSocketClient.connect",
side_effect=ConnectionErrorException,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], FIXTURE_AUTH_INPUT
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["step_id"] == "authenticate"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_reauth_connection_closed_error(hass: HomeAssistant) -> None:
"""Test we show user form on connection error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "reauth"}, data=FIXTURE_USER_INPUT
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "authenticate"
with patch("systembridgeconnector.websocket_client.WebSocketClient.connect"), patch(
"systembridgeconnector.websocket_client.WebSocketClient.get_data"
), patch(
"systembridgeconnector.websocket_client.WebSocketClient.receive_message",
side_effect=ConnectionClosedException,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], FIXTURE_AUTH_INPUT
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["step_id"] == "authenticate"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_reauth_flow(hass: HomeAssistant) -> None:
"""Test reauth flow."""
mock_config = MockConfigEntry(
domain=DOMAIN, unique_id=FIXTURE_UUID, data=FIXTURE_USER_INPUT
)
mock_config.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "reauth"}, data=FIXTURE_USER_INPUT
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "authenticate"
with patch("systembridgeconnector.websocket_client.WebSocketClient.connect"), patch(
"systembridgeconnector.websocket_client.WebSocketClient.get_data"
), patch(
"systembridgeconnector.websocket_client.WebSocketClient.receive_message",
return_value=FIXTURE_DATA_SYSTEM,
), patch(
"homeassistant.components.system_bridge.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], FIXTURE_AUTH_INPUT
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result2["reason"] == "reauth_successful"
assert len(mock_setup_entry.mock_calls) == 1
async def test_zeroconf_flow(hass: HomeAssistant) -> None:
"""Test zeroconf flow."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=FIXTURE_ZEROCONF,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert not result["errors"]
with patch("systembridgeconnector.websocket_client.WebSocketClient.connect"), patch(
"systembridgeconnector.websocket_client.WebSocketClient.get_data"
), patch(
"systembridgeconnector.websocket_client.WebSocketClient.receive_message",
return_value=FIXTURE_DATA_SYSTEM,
), patch(
"homeassistant.components.system_bridge.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], FIXTURE_AUTH_INPUT
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result2["title"] == "1.1.1.1"
assert result2["data"] == FIXTURE_ZEROCONF_INPUT
assert len(mock_setup_entry.mock_calls) == 1
async def test_zeroconf_cannot_connect(hass: HomeAssistant) -> None:
"""Test zeroconf cannot connect flow."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=FIXTURE_ZEROCONF,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert not result["errors"]
with patch(
"systembridgeconnector.websocket_client.WebSocketClient.connect",
side_effect=ConnectionErrorException,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], FIXTURE_AUTH_INPUT
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["step_id"] == "authenticate"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_zeroconf_bad_zeroconf_info(hass: HomeAssistant) -> None:
"""Test zeroconf cannot connect flow."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=FIXTURE_ZEROCONF_BAD,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "unknown"
| 35.017316
| 88
| 0.707442
| 1,863
| 16,178
| 5.848631
| 0.076758
| 0.039464
| 0.10279
| 0.120411
| 0.854075
| 0.832324
| 0.828102
| 0.821127
| 0.802863
| 0.795888
| 0
| 0.010561
| 0.180616
| 16,178
| 461
| 89
| 35.093275
| 0.811406
| 0.002163
| 0
| 0.658263
| 0
| 0
| 0.240325
| 0.159701
| 0
| 0
| 0
| 0
| 0.19888
| 1
| 0
| false
| 0
| 0.028011
| 0
| 0.028011
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5ae27922af77c1be9a3ab74181fcb90437c87365
| 115
|
py
|
Python
|
shop/views/__init__.py
|
msfils/shareGit
|
3c0d219051c8d04137bf206b9e5b376358d7ba99
|
[
"Unlicense"
] | 2
|
2021-03-25T07:45:08.000Z
|
2021-11-11T15:44:27.000Z
|
shop/views/__init__.py
|
msfils/shareGit
|
3c0d219051c8d04137bf206b9e5b376358d7ba99
|
[
"Unlicense"
] | null | null | null |
shop/views/__init__.py
|
msfils/shareGit
|
3c0d219051c8d04137bf206b9e5b376358d7ba99
|
[
"Unlicense"
] | 3
|
2021-04-30T14:04:29.000Z
|
2022-03-31T14:34:59.000Z
|
from .basket import *
from .customers import *
from .general import *
from .orders import *
from .products import *
| 23
| 24
| 0.747826
| 15
| 115
| 5.733333
| 0.466667
| 0.465116
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.165217
| 115
| 5
| 25
| 23
| 0.895833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
850aa48b697fe0c280aed2af65fb66069750d348
| 29
|
py
|
Python
|
Scripts/stemmers/urd_stemmer.py
|
kavitharaju/AutoAligner
|
c890f0a74e1cc08e13d166c3b15a8d316359674a
|
[
"MIT"
] | null | null | null |
Scripts/stemmers/urd_stemmer.py
|
kavitharaju/AutoAligner
|
c890f0a74e1cc08e13d166c3b15a8d316359674a
|
[
"MIT"
] | null | null | null |
Scripts/stemmers/urd_stemmer.py
|
kavitharaju/AutoAligner
|
c890f0a74e1cc08e13d166c3b15a8d316359674a
|
[
"MIT"
] | null | null | null |
def stem(word):
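    # No-op stemmer: returns the input word unchanged.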
return word
| 9.666667
| 15
| 0.724138
| 5
| 29
| 4.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 29
| 2
| 16
| 14.5
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
5180c958d4be4b53f643486da8e388fc003bebfe
| 23
|
py
|
Python
|
iceplot/__init__.py
|
bainbrid/icenet
|
0b261dc97451fd7f896ed27f2b90dd2668e635ca
|
[
"MIT"
] | null | null | null |
iceplot/__init__.py
|
bainbrid/icenet
|
0b261dc97451fd7f896ed27f2b90dd2668e635ca
|
[
"MIT"
] | null | null | null |
iceplot/__init__.py
|
bainbrid/icenet
|
0b261dc97451fd7f896ed27f2b90dd2668e635ca
|
[
"MIT"
] | null | null | null |
from .iceplot import *
| 11.5
| 22
| 0.73913
| 3
| 23
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
51928474b5209eeb957c0991bb3ce4e44ce25618
| 32
|
py
|
Python
|
intelligencelayer/shared/scene/__init__.py
|
MaleNurse/DeepStack
|
c2b9a90a821209ca5d9caa4a12cc0e7bb81bd090
|
[
"Apache-2.0"
] | 353
|
2020-12-10T10:47:17.000Z
|
2022-03-31T23:08:29.000Z
|
deepstack/intelligencelayer/shared/scene/__init__.py
|
OlafenwaMoses/DeepStack-1
|
0315e48907c36c075da5aa558756786c0d76c1b8
|
[
"Apache-2.0"
] | 80
|
2020-12-10T09:54:22.000Z
|
2022-03-30T22:08:45.000Z
|
deepstack/intelligencelayer/shared/scene/__init__.py
|
OlafenwaMoses/DeepStack-1
|
0315e48907c36c075da5aa558756786c0d76c1b8
|
[
"Apache-2.0"
] | 63
|
2020-12-10T17:10:34.000Z
|
2022-03-28T16:27:07.000Z
|
from .process import SceneModel
| 16
| 31
| 0.84375
| 4
| 32
| 6.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 32
| 1
| 32
| 32
| 0.964286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
51cfe4577759f71b84d0ff9b868b50f61ee3872f
| 947
|
py
|
Python
|
tests/test_report_writers.py
|
agrc/reporter
|
277f14a477b9c68cec090a8a7f7f522c1dd719f0
|
[
"MIT"
] | null | null | null |
tests/test_report_writers.py
|
agrc/reporter
|
277f14a477b9c68cec090a8a7f7f522c1dd719f0
|
[
"MIT"
] | 8
|
2020-09-28T16:45:45.000Z
|
2020-10-22T14:53:17.000Z
|
tests/test_report_writers.py
|
agrc/reporter
|
277f14a477b9c68cec090a8a7f7f522c1dd719f0
|
[
"MIT"
] | null | null | null |
from reporter import report_writers
def test_list_of_dicts_to_csv_gets_columns_right(mocker, tmp_path):
test_data = [{'foo': 1, 'bar': 2}, {'bar': 4, 'foo': 3}]
out_path = tmp_path / 'test.csv'
mock_datetime = mocker.patch('datetime.datetime')
mock_datetime.now.return_value.strftime.return_value = 'foo_date'
report_writers.list_of_dicts_to_csv(test_data, out_path)
content = out_path.read_text()
assert content == 'foo_date,\nfoo,bar\n1,2\n3,4\n'
def test_list_of_dicts_to_rotating_logger_correct_output(mocker, tmp_path):
test_data = [{'foo': 1, 'bar': 2}, {'bar': 4, 'foo': 3}]
out_path = tmp_path / 'test.csv'
mock_datetime = mocker.patch('datetime.datetime')
mock_datetime.now.return_value.strftime.return_value = 'foo_date'
report_writers.list_of_dicts_to_rotating_logger(test_data, out_path)
content = out_path.read_text()
assert content == 'foo_date\nfoo|bar\n1|2\n3|4\n'
| 33.821429
| 75
| 0.720169
| 150
| 947
| 4.173333
| 0.306667
| 0.067093
| 0.070288
| 0.083067
| 0.904153
| 0.894569
| 0.785942
| 0.785942
| 0.785942
| 0.785942
| 0
| 0.019729
| 0.143611
| 947
| 27
| 76
| 35.074074
| 0.752158
| 0
| 0
| 0.588235
| 0
| 0
| 0.157339
| 0.062302
| 0
| 0
| 0
| 0
| 0.117647
| 1
| 0.117647
| false
| 0
| 0.058824
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
cfe8d8463c4e5ab2b70bdbeea471cf1700515bd4
| 248
|
py
|
Python
|
ptstructure/vqa/pytorchnlp/transformers/__init__.py
|
Amanda-Barbara/PaddleOCR2Pytorch
|
7f2c85f23b13981a48a37cb90160dcd69cf21260
|
[
"Apache-2.0"
] | null | null | null |
ptstructure/vqa/pytorchnlp/transformers/__init__.py
|
Amanda-Barbara/PaddleOCR2Pytorch
|
7f2c85f23b13981a48a37cb90160dcd69cf21260
|
[
"Apache-2.0"
] | null | null | null |
ptstructure/vqa/pytorchnlp/transformers/__init__.py
|
Amanda-Barbara/PaddleOCR2Pytorch
|
7f2c85f23b13981a48a37cb90160dcd69cf21260
|
[
"Apache-2.0"
] | null | null | null |
from .model_utils import PretrainedModel, register_base_model
from .tokenizer_utils import PretrainedTokenizer
from .layoutxlm.tokenizer import *
from .layoutxlm.modeling import *
from .layoutlm.modeling import *
from .layoutlm.tokenizer import *
| 31
| 61
| 0.834677
| 29
| 248
| 7
| 0.413793
| 0.147783
| 0.17734
| 0.256158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104839
| 248
| 8
| 62
| 31
| 0.914414
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cfecbe8fa2712476071202fe4b65eb2d2c03be83
| 25
|
py
|
Python
|
plugins/pelican-linkclass/__init__.py
|
mohnjahoney/website_source
|
edc86a869b90ae604f32e736d9d5ecd918088e6a
|
[
"MIT"
] | 13
|
2020-01-27T09:02:25.000Z
|
2022-01-20T07:45:26.000Z
|
plugins/pelican-linkclass/__init__.py
|
mohnjahoney/website_source
|
edc86a869b90ae604f32e736d9d5ecd918088e6a
|
[
"MIT"
] | 29
|
2020-03-22T06:57:57.000Z
|
2022-01-24T22:46:42.000Z
|
plugins/pelican-linkclass/__init__.py
|
mohnjahoney/website_source
|
edc86a869b90ae604f32e736d9d5ecd918088e6a
|
[
"MIT"
] | 6
|
2020-07-10T00:13:30.000Z
|
2022-01-26T08:22:33.000Z
|
from .linkclass import *
| 12.5
| 24
| 0.76
| 3
| 25
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3210d8f7f3e3e87ef270652e134db51aaa084e1e
| 167
|
py
|
Python
|
typeform_feedback/signals_define.py
|
exolever/django-typeform-feedback
|
5784523b880e4890172b9f61d848187f5c24237e
|
[
"MIT"
] | null | null | null |
typeform_feedback/signals_define.py
|
exolever/django-typeform-feedback
|
5784523b880e4890172b9f61d848187f5c24237e
|
[
"MIT"
] | 15
|
2019-03-22T09:04:53.000Z
|
2019-12-13T08:15:10.000Z
|
typeform_feedback/signals_define.py
|
exolever/django-typeform-feedback
|
5784523b880e4890172b9f61d848187f5c24237e
|
[
"MIT"
] | null | null | null |
from django.dispatch import Signal
new_user_typeform_response = Signal(providing_args=['uuid', 'response'])
user_response_approved = Signal(providing_args=['uuid'])
| 27.833333
| 72
| 0.802395
| 21
| 167
| 6.047619
| 0.619048
| 0.23622
| 0.299213
| 0.362205
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077844
| 167
| 5
| 73
| 33.4
| 0.824675
| 0
| 0
| 0
| 0
| 0
| 0.095808
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
321c740a45f8a7aae06477c15320191a493f6dad
| 17,043
|
py
|
Python
|
neural_models/modules/gnn_multi_head_attention.py
|
JasperGuo/MeaningRepresentationBenchmark
|
b61e8ed68fdbd934c195fa968445540bfa897f2f
|
[
"MIT"
] | 9
|
2020-11-11T08:54:05.000Z
|
2022-03-22T11:16:03.000Z
|
neural_models/modules/gnn_multi_head_attention.py
|
JasperGuo/MeaningRepresentationBenchmark
|
b61e8ed68fdbd934c195fa968445540bfa897f2f
|
[
"MIT"
] | null | null | null |
neural_models/modules/gnn_multi_head_attention.py
|
JasperGuo/MeaningRepresentationBenchmark
|
b61e8ed68fdbd934c195fa968445540bfa897f2f
|
[
"MIT"
] | 2
|
2021-01-14T08:25:25.000Z
|
2021-06-08T21:41:32.000Z
|
# coding=utf8
import math
import torch
import numpy as np
import torch.nn as nn
from allennlp.nn import util
from torch.nn import Parameter
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_
class GNNMatrixMultiHeadAttention(nn.Module):
def __init__(self, d_model: int, nhead: int, nlabels: int,
dropout: float = 0.1):
super().__init__()
assert d_model % nhead == 0
self._d_model = d_model
self._nhead = nhead
self._nlabels = nlabels
self._d_q = int(d_model / nhead)
self._w_q = nn.Linear(d_model, d_model)
self._attention_temperature = np.power(self._d_q, 0.5)
self._w_ks = Parameter(torch.Tensor(nlabels, d_model, d_model))
self._w_h = nn.Linear(d_model, d_model)
self._dropout = nn.Dropout(dropout)
self._attn_dropout = nn.Dropout(dropout)
self._reset_parameters()
def _reset_parameters(self):
xavier_uniform_(self._w_q.weight)
xavier_uniform_(self._w_h.weight)
xavier_uniform_(self._w_ks)
def forward(self, q: torch.Tensor, k: torch.Tensor, edge_mask: torch.Tensor,
padding_mask: torch.Tensor):
"""
q and k must have the same dimension
:param q: (batch_size, len_q, d_model)
:param k: (batch_size, len_k, d_model)
:param edge_mask: (batch_size, len_q, len_k, nlabels)
:param padding_mask: (batch_size, len_q, len_k)
:return:
shape: (batch_size, len_q, d_model)
"""
sz_b, len_q, _ = q.size()
sz_b, len_k, _ = k.size()
# shape: (nlabels, batch_size, len_q, len_k)
mask = edge_mask.permute(3, 0, 1, 2)
query = self._w_q(q).view(sz_b, len_q, self._nhead, self._d_q)
# shape: (nhead * sz_b, len_q, d_q)
query = query.permute(2, 0, 1, 3).contiguous().view(-1, len_q, self._d_q)
# shape: (nhead * sz_b, len_k, d_q)
edge_values = list()
attention_weights = list()
for i in range(self._nlabels):
w = self._w_ks[i]
ek = F.linear(k, w).view(sz_b, len_k, self._nhead, self._d_q)
# shape: (nhead * sz_b, len_k, d_q)
ek = ek.permute(2, 0, 1, 3).contiguous().view(-1, len_k, self._d_q)
edge_values.append(ek)
aw = query.bmm(ek.permute(0, 2, 1))
attention_weights.append(aw / self._attention_temperature)
# (nlabels, sz_b * nhead, len_q, len_k)
attention_weights = torch.stack(attention_weights, dim=0)
# (nlabels, sz_b * nhead, len_q, len_k)
attention_weights = attention_weights * mask.repeat(1, self._nhead, 1, 1)
attention_weights = attention_weights.sum(dim=0)
# shape: (nhead * sz_b, len_q, len_k)
attention_weights = attention_weights.masked_fill(
padding_mask.repeat(self._nhead, 1, 1).bool(),
float('-inf'),
)
attention_weights = F.softmax(attention_weights, dim=-1)
attention_weights = self._attn_dropout(attention_weights)
output = attention_weights.new_zeros((self._nhead * sz_b, len_q, self._d_q))
for i in range(self._nlabels):
v, m = edge_values[i], mask[i]
_m = m.repeat(self._nhead, 1, 1)
output += (attention_weights * _m).bmm(v)
output = output.view(self._nhead, sz_b, len_q, self._d_q)
output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1)
output = self._w_h(output)
return output
class GNNVectorMultiHeadAttention(nn.Module):
def __init__(self, d_model: int, nhead: int, nlabels: int,
dropout: float = 0.1):
super().__init__()
assert d_model % nhead == 0
self._d_model = d_model
self._nhead = nhead
self._nlabels = nlabels
self._d_q = int(d_model / nhead)
self._w_q = nn.Linear(d_model, d_model)
self._attention_temperature = np.power(self._d_q, 0.5)
self._w_k = Parameter(torch.Tensor(d_model, d_model))
self._w_v = Parameter(torch.Tensor(d_model, d_model))
self._b_ks = Parameter(torch.Tensor(self._nlabels, d_model))
self._b_vs = Parameter(torch.Tensor(self._nlabels, d_model))
self._w_h = nn.Linear(d_model, d_model)
self._dropout = nn.Dropout(dropout)
self._attn_dropout = nn.Dropout(dropout)
self._reset_parameters()
def _reset_parameters(self):
xavier_uniform_(self._w_q.weight)
xavier_uniform_(self._w_h.weight)
xavier_uniform_(self._w_k)
xavier_uniform_(self._w_v)
xavier_uniform_(self._b_ks)
xavier_uniform_(self._b_vs)
def forward(self, q: torch.Tensor, k: torch.Tensor, edge_mask: torch.Tensor,
padding_mask: torch.Tensor):
"""
q and k must have the same dimension
:param q: (batch_size, len_q, d_model)
:param k: (batch_size, len_k, d_model)
:param edge_mask: (batch_size, len_q, len_k, nlabels)
:param padding_mask: (batch_size, len_q, len_k), where True values are positions that should be masked
with float('-inf') and False values will be unchanged.
:return:
shape: (batch_size, len_q, d_model)
"""
sz_b, len_q, _ = q.size()
sz_b, len_k, _ = k.size()
self._w_k.to(k.device)
query = self._w_q(q).view(sz_b, len_q, self._nhead, self._d_q)
# shape: (nhead * sz_b, len_q, d_q)
query = query.permute(2, 0, 1, 3).contiguous().view(-1, len_q, self._d_q)
# key
edge_vectors = torch.mm(edge_mask.reshape(-1, self._nlabels), self._b_ks).reshape(sz_b, len_q, len_k,
self._d_model)
# shape: (sz_b, len_k, d_model)
key = F.linear(k, self._w_k)
# shape: (sz_b, len_q, len_k, d_model)
key = key.unsqueeze(1).repeat(1, len_q, 1, 1)
key = edge_vectors + key
key = key.view(sz_b, len_q, len_k, self._nhead, self._d_q).permute(3, 0, 1, 2, 4)
# shape: (nhead * sz_b, len_q, len_k, d_q)
key = key.contiguous().view(-1, len_q, len_k, self._d_q)
mask = (edge_mask.sum(-1) > 0).float().repeat(self._nhead, 1, 1)
# shape: (nhead * sz_b, len_q, len_k)
attention_weights = torch.mul(query.unsqueeze(2).repeat(1, 1, len_k, 1), key).sum(-1)
attention_weights = attention_weights / self._attention_temperature
attention_weights = attention_weights * mask
attention_weights = attention_weights.masked_fill(
padding_mask.repeat(self._nhead, 1, 1).bool(),
float('-inf'),
)
attention_weights = F.softmax(attention_weights, dim=-1)
attention_weights = self._attn_dropout(attention_weights)
# value
# shape: (sz_b, len_k, d_model)
# value = F.linear(k, self._w_v)
# # shape: (sz_b, len_q, len_k, d_model)
# value = value.unsqueeze(1).repeat(1, len_q, 1, 1)
# value = edge_vectors + value
# value = value.view(sz_b, len_q, len_k, self._nhead, self._d_q).permute(3, 0, 1, 2, 4)
# # shape: (nhead * sz_b, len_q, len_k, d_q)
# value = value.contiguous().view(-1, len_q, len_k, self._d_q)
value = key
output = ((attention_weights * mask).unsqueeze(-1) * value).sum(2)
output = output.view(self._nhead, sz_b, len_q, self._d_q)
output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1)
output = self._w_h(output)
return output
class GNNVectorMultiHeadAttention2(nn.Module):
"""
Implementation based on "Self-Attention with Relative Position Representations"
According to Tensor2Tensor
https://github.com/tensorflow/tensor2tensor/blob/ab918e0d9592394614aa2e10cfc8f23e8cb24dfc/tensor2tensor/layers/common_attention.py
"""
def __init__(self, d_model: int, nhead: int, nlabels: int,
dropout: float = 0.1):
super().__init__()
assert d_model % nhead == 0
self._d_model = d_model
self._nhead = nhead
self._nlabels = nlabels
self._d_q = int(d_model / nhead)
self._attention_temperature = np.power(self._d_q, 0.5)
self._w_q = nn.Linear(d_model, d_model)
self._w_k = Parameter(torch.Tensor(d_model, d_model))
self._w_v = Parameter(torch.Tensor(d_model, d_model))
self._w_h = nn.Linear(d_model, d_model)
self._b_ks = Parameter(torch.Tensor(self._nlabels, self._d_q))
self._b_vs = Parameter(torch.Tensor(self._nlabels, self._d_q))
self._dropout = nn.Dropout(dropout)
self._attn_dropout = nn.Dropout(dropout)
self._reset_parameters()
def _reset_parameters(self):
xavier_uniform_(self._w_q.weight)
xavier_uniform_(self._w_h.weight)
xavier_uniform_(self._w_k)
xavier_uniform_(self._w_v)
xavier_uniform_(self._b_ks)
xavier_uniform_(self._b_vs)
def forward(self, q: torch.Tensor, k: torch.Tensor, edge_mask: torch.Tensor,
padding_mask: torch.Tensor):
"""
q and k must have the same dimension
:param q: (batch_size, len_q, d_model)
:param k: (batch_size, len_k, d_model)
:param edge_mask: (batch_size, len_q, len_k, nlabels)
:param padding_mask:(batch_size, len_q, len_k), where True values are positions that should be masked
with float('-inf') and False values will be unchanged.
:return:
shape: (batch_size, len_q, d_model)
"""
sz_b, len_q, _ = q.size()
sz_b, len_k, _ = k.size()
self._w_k.to(k.device)
query = self._w_q(q).view(sz_b, len_q, self._nhead, self._d_q)
# shape: (nhead * sz_b, len_q, d_q)
query = query.permute(2, 0, 1, 3).contiguous().view(-1, len_q, self._d_q)
# shape: (nhead * sz_b, len_q, len_k, d_q)
expanded_query = query.unsqueeze(2).repeat(1, 1, len_k, 1)
# Relation Embeddings
# shape: (sz_b, len_q, len_k, d_q)
key_relation_embeded = torch.mm(edge_mask.reshape(-1, self._nlabels), self._b_ks).reshape(sz_b, len_q, len_k,
self._d_q)
# shape: (nhead * sz_b, len_q, len_k, d_q)
key_relation_embeded = key_relation_embeded.repeat(self._nhead, 1, 1, 1)
# shape: (sz_b, len_k, d_model)
key = F.linear(k, self._w_k)
# shape: (nhead * sz_b, len_k, d_q)
key = key.view(sz_b, len_k, self._nhead, self._d_q).permute(2, 0, 1, 3).contiguous().view(-1, len_k, self._d_q)
# shape: (nhead * sz_b, len_q, len_k)
qk_weights = query.bmm(key.permute(0, 2, 1))
# shape: (nhead * sz_b, len_q, len_k)
qkr_weights = torch.mul(expanded_query, key_relation_embeded).sum(-1)
attention_weights = qk_weights + qkr_weights
output_attention_weights = attention_weights / self._attention_temperature
# attention_weights = attention_weights.masked_fill(
# padding_mask.repeat(self._nhead, 1, 1).bool(),
# float('-inf'),
# )
# relation mask
# shape: (nhead * sz_b, len_q, len_k)
# Note that we need ensure that there are at least one relations for each position
# eye_mask = torch.eye(len_q).unsqueeze(0).repeat(sz_b, 1, 1).to(edge_mask.device)
# relation_mask = ((edge_mask.sum(-1) + eye_mask + (1 - padding_mask)) == 0).repeat(self._nhead, 1, 1)
relation_mask = ((edge_mask.sum(-1) + (1 - padding_mask)) == 0).repeat(self._nhead, 1, 1)
attention_weights = output_attention_weights.masked_fill(
relation_mask.bool(),
float('-inf'),
)
attention_weights = F.softmax(attention_weights, dim=-1)
attention_weights = attention_weights.masked_fill(
relation_mask.bool(),
0.0
)
# Remove nan
# attention_weights[attention_weights != attention_weights] = 0
attention_weights = self._attn_dropout(attention_weights)
# Value Relation Embeddings
# shape: (sz_b, len_q, len_k, d_q)
value_relation_embeded = torch.mm(edge_mask.reshape(-1, self._nlabels), self._b_vs).reshape(sz_b, len_q, len_k,
self._d_q)
# shape: (nhead * sz_b, len_q, len_k, d_q)
value_relation_embeded = value_relation_embeded.repeat(self._nhead, 1, 1, 1)
# shape: (sz_b, len_k, d_model)
value = F.linear(k, self._w_v)
# shape: (nhead * sz_b, len_k, d_q)
value = value.view(sz_b, len_k, self._nhead, self._d_q).permute(2, 0, 1, 3).contiguous().view(-1, len_k,
self._d_q)
# shape: (nhead * sz_b, len_q, d_q)
qv_output = attention_weights.bmm(value)
# shape: (nhead * sz_b, len_q, d_q)
qvr_output = torch.mul(attention_weights.unsqueeze(-1), value_relation_embeded).sum(2)
output = qv_output + qvr_output
output = output.view(self._nhead, sz_b, len_q, self._d_q)
output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1)
output = self._w_h(output)
return output, output_attention_weights
class GNNVectorContinuousMultiHeadAttention(nn.Module):
def __init__(self, d_model: int, nhead: int, dropout: float = 0.1):
super().__init__()
assert d_model % nhead == 0
self._d_model = d_model
self._nhead = nhead
self._d_q = int(d_model / nhead)
self._w_q = nn.Linear(d_model, d_model)
self._attention_temperature = np.power(self._d_q, 0.5)
self._w_k = Parameter(torch.Tensor(d_model, d_model))
self._w_v = Parameter(torch.Tensor(d_model, d_model))
self._w_h = nn.Linear(d_model, d_model)
self._dropout = nn.Dropout(dropout)
self._attn_dropout = nn.Dropout(dropout)
self._reset_parameters()
def _reset_parameters(self):
xavier_uniform_(self._w_q.weight)
xavier_uniform_(self._w_h.weight)
xavier_uniform_(self._w_k)
xavier_uniform_(self._w_v)
def forward(self, q: torch.Tensor, k: torch.Tensor, edge_mask: torch.Tensor,
padding_mask: torch.Tensor):
"""
q and k must have the same dimension
:param q: (batch_size, len_q, d_model)
:param k: (batch_size, len_k, d_model)
:param edge_mask: (batch_size, len_q, len_k, d_model)
:param padding_mask: (batch_size, len_q, len_k), where True values are positions that should be masked
with float('-inf') and False values will be unchanged.
:return:
shape: (batch_size, len_q, d_model)
"""
sz_b, len_q, _ = q.size()
sz_b, len_k, _ = k.size()
# query
query = self._w_q(q).view(sz_b, len_q, self._nhead, self._d_q)
# shape: (nhead * sz_b, len_q, d_q)
query = query.permute(2, 0, 1, 3).contiguous().view(-1, len_q, self._d_q)
# key
# shape: (sz_b, len_k, d_model)
key = F.linear(k, self._w_k)
# shape: (sz_b, len_q, len_k, d_model)
key = key.unsqueeze(1).repeat(1, len_q, 1, 1)
key = edge_mask + key
key = key.view(sz_b, len_q, len_k, self._nhead, self._d_q).permute(3, 0, 1, 2, 4)
# shape: (nhead * sz_b, len_q, len_k, d_q)
key = key.contiguous().view(-1, len_q, len_k, self._d_q)
# shape: (nhead * sz_b, len_q, len_k)
attention_weights = torch.mul(query.unsqueeze(2).repeat(1, 1, len_k, 1), key).sum(-1)
attention_weights = attention_weights / self._attention_temperature
attention_weights = attention_weights.masked_fill(
padding_mask.repeat(self._nhead, 1, 1).bool(),
float('-inf'),
)
attention_weights = F.softmax(attention_weights, dim=-1)
attention_weights = self._attn_dropout(attention_weights)
# value
# shape: (sz_b, len_k, d_model)
value = F.linear(k, self._w_v)
# shape: (sz_b, len_q, len_k, d_model)
value = value.unsqueeze(1).repeat(1, len_q, 1, 1)
value = edge_mask + value
value = value.view(sz_b, len_q, len_k, self._nhead, self._d_q).permute(3, 0, 1, 2, 4)
# shape: (nhead * sz_b, len_q, len_k, d_q)
value = value.contiguous().view(-1, len_q, len_k, self._d_q)
# shape: (nhead * sz_b, len_q, d_p)
output = (attention_weights.unsqueeze(-1) * value).sum(2)
output = output.view(self._nhead, sz_b, len_q, self._d_q)
output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1)
output = self._w_h(output)
return output
| 41.772059
| 134
| 0.600364
| 2,491
| 17,043
| 3.765957
| 0.065034
| 0.034964
| 0.042853
| 0.037309
| 0.854493
| 0.837544
| 0.818996
| 0.809935
| 0.790427
| 0.77124
| 0
| 0.018012
| 0.276829
| 17,043
| 407
| 135
| 41.874693
| 0.743124
| 0.228657
| 0
| 0.696581
| 0
| 0
| 0.001255
| 0
| 0
| 0
| 0
| 0
| 0.017094
| 1
| 0.051282
| false
| 0
| 0.034188
| 0
| 0.119658
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5c8e07ae4bf60ff25f1916c576b16ee0bb274313
| 141
|
py
|
Python
|
ppo/env_wrapper/__init__.py
|
emasquil/ppo
|
83b54926ea69244d382bfb958271718932894eb0
|
[
"MIT"
] | null | null | null |
ppo/env_wrapper/__init__.py
|
emasquil/ppo
|
83b54926ea69244d382bfb958271718932894eb0
|
[
"MIT"
] | 35
|
2022-03-01T10:05:50.000Z
|
2022-03-30T20:37:22.000Z
|
ppo/env_wrapper/__init__.py
|
emasquil/ppo
|
83b54926ea69244d382bfb958271718932894eb0
|
[
"MIT"
] | null | null | null |
from .pendulum_wrapper import PendulumEnv
from .reacher_wrapper import ReacherEnv
from .inverted_pendulum_wrapper import InvertedPendulumEnv
| 35.25
| 58
| 0.893617
| 16
| 141
| 7.625
| 0.5625
| 0.319672
| 0.344262
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 141
| 3
| 59
| 47
| 0.945736
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
7a26c8c29761e64ed21987655847f3072afc7230
| 77
|
py
|
Python
|
src/mdp/abstraction/__init__.py
|
rbankosegger/RLASP-core
|
fcd01b9da946e4d37ae9329cd1736bccde178a3b
|
[
"MIT"
] | null | null | null |
src/mdp/abstraction/__init__.py
|
rbankosegger/RLASP-core
|
fcd01b9da946e4d37ae9329cd1736bccde178a3b
|
[
"MIT"
] | null | null | null |
src/mdp/abstraction/__init__.py
|
rbankosegger/RLASP-core
|
fcd01b9da946e4d37ae9329cd1736bccde178a3b
|
[
"MIT"
] | null | null | null |
from .carcass import Carcass
from .carcass_mdp_builder import CarcassBuilder
| 25.666667
| 47
| 0.87013
| 10
| 77
| 6.5
| 0.6
| 0.338462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103896
| 77
| 2
| 48
| 38.5
| 0.942029
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7a3ab80470dbb92dcc4a3df65cc0af9cf3f7c6a5
| 4,519
|
py
|
Python
|
test/test_matcher/test_limit.py
|
Miksus/ecosys
|
f30b21b10340b5ac92acc8bb34eb3d4ead3bff51
|
[
"MIT"
] | 2
|
2022-02-28T16:23:23.000Z
|
2022-03-16T21:57:07.000Z
|
test/test_matcher/test_limit.py
|
Miksus/ecosys
|
f30b21b10340b5ac92acc8bb34eb3d4ead3bff51
|
[
"MIT"
] | null | null | null |
test/test_matcher/test_limit.py
|
Miksus/ecosys
|
f30b21b10340b5ac92acc8bb34eb3d4ead3bff51
|
[
"MIT"
] | null | null | null |
import pytest
import sys
sys.path.append('..')
from ecosys.trading_platform.matcher.stockmarket import StockMatcher
def test_fulfilling_equal():
market = StockMatcher()
market.place_bid(price=5.0, quantity=200, party="Bidder")
market.place_ask(price=5.0, quantity=200, party="Asker")
market.clear()
bid_quantity = market.order_book["limit"]["bid"]["quantity"].sum()
ask_quantity = market.order_book["limit"]["ask"]["quantity"].sum()
assert (5.0 == market.last_price) and (0 == bid_quantity) and (0 == ask_quantity)
def test_fulfilling_equal_decimals():
market = StockMatcher()
market.place_bid(price=5.55, quantity=200, party="Bidder")
market.place_ask(price=5.55, quantity=200, party="Asker")
market.clear()
bid_quantity = market.order_book["limit"]["bid"]["quantity"].sum()
ask_quantity = market.order_book["limit"]["ask"]["quantity"].sum()
assert (5.55 == market.last_price) and (0 == bid_quantity) and (0 == ask_quantity)
def test_fulfilling_equal_too_many_decimals():
# Ticks should be 2 decimals
market = StockMatcher()
market.place_bid(price=5.556, quantity=200, party="Bidder")
market.place_ask(price=5.556, quantity=200, party="Asker")
market.clear()
bid_quantity = market.order_book["limit"]["bid"]["quantity"].sum()
ask_quantity = market.order_book["limit"]["ask"]["quantity"].sum()
assert (5.56 == market.last_price) and (0 == bid_quantity) and (0 == ask_quantity)
def test_fulfilling_unequal():
market = StockMatcher()
market.place_bid(price=6.0, quantity=200, party="Bidder")
market.place_ask(price=4.0, quantity=200, party="Asker")
market.clear()
bid_quantity = market.order_book["limit"]["bid"]["quantity"].sum()
ask_quantity = market.order_book["limit"]["ask"]["quantity"].sum()
assert (5.0 == market.last_price) and (0 == bid_quantity) and (0 == ask_quantity)
def test_unfulfilling():
market = StockMatcher()
market.place_bid(price=4.0, quantity=200, party="Bidder")
market.place_ask(price=6.0, quantity=200, party="Asker")
market.clear()
bid_quantity = market.order_book["limit"]["bid"]["quantity"].sum()
ask_quantity = market.order_book["limit"]["ask"]["quantity"].sum()
assert (market.last_price is None) and (200 == bid_quantity) and (200 == ask_quantity)
def test_oversupply():
market = StockMatcher()
market.place_bid(price=6.0, quantity=200, party="Bidder")
market.place_ask(price=5.0, quantity=200, party="Asker")
market.place_ask(price=5.0, quantity=200, party="Asker")
market.clear()
bid_quantity = market.order_book["limit"]["bid"]["quantity"].sum()
ask_quantity = market.order_book["limit"]["ask"]["quantity"].sum()
assert (5.5 == market.last_price) and (0 == bid_quantity) and (200 == ask_quantity)
def test_overdemand():
market = StockMatcher()
market.place_ask(price=5.0, quantity=200, party="Asker")
market.place_bid(price=6.0, quantity=200, party="Bidder")
market.place_bid(price=6.0, quantity=200, party="Bidder")
market.clear()
bid_quantity = market.order_book["limit"]["bid"]["quantity"].sum()
ask_quantity = market.order_book["limit"]["ask"]["quantity"].sum()
assert (5.5 == market.last_price) and (200 == bid_quantity) and (0 == ask_quantity)
def test_bid_priority():
market = StockMatcher()
market.place_ask(price=5.0, quantity=500, party="Asker")
market.place_bid(price=1.0, quantity=100, party="Bidder")
market.place_bid(price=6.0, quantity=500, party="Best Bidder")
market.place_bid(price=1.0, quantity=100, party="Bidder")
market.clear()
bid_quantity = market.order_book["limit"]["bid"]["quantity"].sum()
ask_quantity = market.order_book["limit"]["ask"]["quantity"].sum()
assert (5.5 == market.last_price) and (200 == bid_quantity) and (0 == ask_quantity)
def test_partial_fill():
market = StockMatcher()
market.place_ask(price=2.0, quantity=300, party="Asker")
market.place_bid(price=5.0, quantity=100, party="Bidder")
market.place_bid(price=6.0, quantity=100, party="Bidder")
market.place_bid(price=3.0, quantity=100, party="Last Bidder")
market.place_bid(price=1.0, quantity=100, party="Unfilled Bidder")
market.clear()
bid_quantity = market.order_book["limit"]["bid"]["quantity"].sum()
ask_quantity = market.order_book["limit"]["ask"]["quantity"].sum()
assert (2.5 == market.last_price) and (100 == bid_quantity) and (0 == ask_quantity)
| 35.865079
| 90
| 0.681567
| 634
| 4,519
| 4.687697
| 0.100946
| 0.099933
| 0.115074
| 0.1393
| 0.901077
| 0.892665
| 0.847914
| 0.828398
| 0.773553
| 0.698183
| 0
| 0.046428
| 0.142067
| 4,519
| 126
| 91
| 35.865079
| 0.720144
| 0.005753
| 0
| 0.60241
| 0
| 0
| 0.099978
| 0
| 0
| 0
| 0
| 0
| 0.108434
| 1
| 0.108434
| false
| 0
| 0.036145
| 0
| 0.144578
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8fdaee2856c76c59d2bdba810a3795c94f6b0eab
| 42
|
py
|
Python
|
duimap/__init__.py
|
bildzeitung/duimap
|
1626da81d2d4e015778e6ac882fdfed589052cfe
|
[
"MIT"
] | 1
|
2016-04-14T15:16:34.000Z
|
2016-04-14T15:16:34.000Z
|
duimap/__init__.py
|
bildzeitung/duimap
|
1626da81d2d4e015778e6ac882fdfed589052cfe
|
[
"MIT"
] | null | null | null |
duimap/__init__.py
|
bildzeitung/duimap
|
1626da81d2d4e015778e6ac882fdfed589052cfe
|
[
"MIT"
] | null | null | null |
from _version import __version__, __sha__
| 21
| 41
| 0.857143
| 5
| 42
| 5.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119048
| 42
| 1
| 42
| 42
| 0.72973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
64ecc6740075a2463f1a7a1b5ae10a42a70ea5d6
| 11,043
|
py
|
Python
|
ExamAutoGraderProcessor/src/test_autoGrader.py
|
coder4520/automated-exam-grader
|
aa00e2a66c03597c0785037eb31f6e81bc064b0b
|
[
"MIT"
] | null | null | null |
ExamAutoGraderProcessor/src/test_autoGrader.py
|
coder4520/automated-exam-grader
|
aa00e2a66c03597c0785037eb31f6e81bc064b0b
|
[
"MIT"
] | 1
|
2021-03-09T23:36:37.000Z
|
2021-03-09T23:36:37.000Z
|
ExamAutoGraderProcessor/src/test_autoGrader.py
|
coder4520/automated-exam-grader
|
aa00e2a66c03597c0785037eb31f6e81bc064b0b
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from src.autograder.grader import AutoGrader
import cv2
import os.path
import numpy as np
class TestAutoGrader(TestCase):
HERE = os.path.dirname(os.path.abspath(__file__))
IMAGES_DIR = os.path.join(HERE, 'images/')
image_url = IMAGES_DIR + "11.jpeg"
grader = AutoGrader()
image = cv2.imread(image_url)
def test_valid_image_exists(self):
""" Checks if image is valid """
self.assertEqual(type(self.image), np.ndarray)
def test_write_nothing(self):
""" student writes nothing => points =0 """
exam_sheets = [
{
"matriculation_no": [108, 186, 926, 286],
"semester": [208, 187, 926, 286],
"course": [308, 186, 926, 286],
"answers": [
{
"question_no": 1,
"answer_coordinates": [108, 186, 926, 286],
"possible_answers": ["cashin"],
"points": 1
},
],
},
]
cropped_answer = ""
points = self.grader.process_exam_sheets(self.image, exam_sheets,cropped_answer)
assert (points == 0)
def test_write_wrong_answer(self):
""" student writes wrong keyword => points = 0 """
exam_sheets = [
{
"matriculation_no": [108, 186, 926, 286],
"semester": [208, 187, 926, 286],
"course": [308, 186, 926, 286],
"answers": [
{
"question_no": 1,
"answer_coordinates": [108, 186, 926, 286],
"possible_answers": ["cashin"],
"points": 1
},
],
},
]
cropped_answer = "123"
points = self.grader.process_exam_sheets(self.image, exam_sheets, cropped_answer)
assert (points == 0)
def test_write_exact_answer(self):
""" student writes exact keywords = full points """
exam_sheets = [
{
"matriculation_no": [108, 186, 926, 286],
"semester": [208, 187, 926, 286],
"course": [308, 186, 926, 286],
"answers": [
{
"question_no": 1,
"answer_coordinates": [108, 186, 926, 286],
"possible_answers": ["cashin"],
"points": 1
},
],
},
]
cropped_answer = "cashin"
points = self.grader.process_exam_sheets(self.image, exam_sheets, cropped_answer)
assert (points == 1)
def test_write_correct_but_extra(self):
""" student writes keywords but extra = full points """
exam_sheets = [
{
"matriculation_no": [108, 186, 926, 286],
"semester": [208, 187, 926, 286],
"course": [308, 186, 926, 286],
"answers": [
{
"question_no": 1,
"answer_coordinates": [108, 186, 926, 286],
"possible_answers": ["cashin"],
"points": 1
},
],
},
]
cropped_answer = "cashin cashout"
points = self.grader.process_exam_sheets(self.image, exam_sheets, cropped_answer)
assert (points == 1)
    def test_write_correct_but_reversed(self):
""" student writes correct keywords but reversed order """
exam_sheets = [
{
"matriculation_no": [108, 186, 926, 286],
"semester": [208, 187, 926, 286],
"course": [308, 186, 926, 286],
"answers": [
{
"question_no": 1,
"answer_coordinates": [108, 186, 926, 286],
"possible_answers": ["cashin cashout"],
"points": 2
},
],
},
]
cropped_answer = "cashout cashin"
points = self.grader.process_exam_sheets(self.image, exam_sheets, cropped_answer)
assert (points == 2)
def test_write_quarter_correct_keywords(self):
""" student writes quarter keywords of needed keywords => points /4 """
exam_sheets = [
{
"matriculation_no": [108, 186, 926, 286],
"semester": [208, 187, 926, 286],
"course": [308, 186, 926, 286],
"answers": [
{
"question_no": 1,
"answer_coordinates": [108, 186, 926, 286],
"possible_answers": ["cashin cashout cashfoo cash"],
"points": 2
},
],
},
]
cropped_answer = "cashout"
points = self.grader.process_exam_sheets(self.image, exam_sheets, cropped_answer)
assert (points == 0.5)
def test_write_half_correct_keywords(self):
""" student writes half keywords of needed keywords => points / 2 """
exam_sheets = [
{
"matriculation_no": [108, 186, 926, 286],
"semester": [208, 187, 926, 286],
"course": [308, 186, 926, 286],
"answers": [
{
"question_no": 1,
"answer_coordinates": [108, 186, 926, 286],
"possible_answers": ["cashin cashout cashfoo cash"],
"points": 2
},
],
},
]
cropped_answer = "cashout cash"
points = self.grader.process_exam_sheets(self.image, exam_sheets, cropped_answer)
assert (points == 1)
    def test_write_three_quarters_correct_keywords(self):
""" student writes 3/4 keywords of needed keywords => 3/4 points """
exam_sheets = [
{
"matriculation_no": [108, 186, 926, 286],
"semester": [208, 187, 926, 286],
"course": [308, 186, 926, 286],
"answers": [
{
"question_no": 1,
"answer_coordinates": [108, 186, 926, 286],
"possible_answers": ["cashin cashout cashfoo cash"],
"points": 2
},
],
},
]
cropped_answer = "cashout cash cashfoo"
points = self.grader.process_exam_sheets(self.image, exam_sheets, cropped_answer)
assert (points == 1.5)
def test_write_all_correct_keywords(self):
""" student writes all keywords of needed keywords => full points """
exam_sheets = [
{
"matriculation_no": [108, 186, 926, 286],
"semester": [208, 187, 926, 286],
"course": [308, 186, 926, 286],
"answers": [
{
"question_no": 1,
"answer_coordinates": [108, 186, 926, 286],
"possible_answers": ["cashin cashout cashfoo cash"],
"points": 2
},
],
},
]
cropped_answer = "cashout cash cashfoo cashin"
points = self.grader.process_exam_sheets(self.image, exam_sheets, cropped_answer)
assert (points == 2)
def test_write_random_correct_keywords(self):
""" student writes all keywords of needed keywords => full points """
exam_sheets = [
{
"matriculation_no": [108, 186, 926, 286],
"semester": [208, 187, 926, 286],
"course": [308, 186, 926, 286],
"answers": [
{
"question_no": 1,
"answer_coordinates": [108, 186, 926, 286],
"possible_answers": ["cashin cashout cashfoo cash 1 2 3 4 5 6"],
"points": 1
},
],
},
]
cropped_answer = "cashout"
points = self.grader.process_exam_sheets(self.image, exam_sheets, cropped_answer)
assert (points == 0.1)
def test_write_stopping_keywords(self):
""" student writes all keywords of needed keywords => full points """
exam_sheets = [
{
"matriculation_no": [108, 186, 926, 286],
"semester": [208, 187, 926, 286],
"course": [308, 186, 926, 286],
"answers": [
{
"question_no": 1,
"answer_coordinates": [108, 186, 926, 286],
"possible_answers": ["cashin cashout cashfoo cash 1 2 3 4 5 6"],
"points": 1
},
],
},
]
cropped_answer = "a an der die das , '' . ok notOK well very well cashout 1 ok not ok ...."
points = self.grader.process_exam_sheets(self.image, exam_sheets, cropped_answer)
assert (points == 0.2)
def test_process_exam_sheets(self):
exam_sheets = [
{
"matriculation_no": [108, 186, 926, 286],
"semester": [208, 187, 926, 286],
"course": [308, 186, 926, 286],
"answers": [
{
"question_no": 1,
"answer_coordinates": [108, 186, 926, 286],
"possible_answers": ["cashin"],
"points": 1
},
{
"question_no": 2,
"answer_coordinates": [108, 186, 926, 286],
"possible_answers": ["cashin cashout"],
"points": 2
}
],
},
{
"matriculation_no": [108, 186, 926, 286],
"semester": [208, 187, 926, 286],
"course": [308, 186, 926, 286],
"answers": [
{
"question_no": 3,
"answer_coordinates": [108, 186, 926, 286],
"possible_answers": ["cashin"],
"points": 3
},
{
"question_no": 4,
"answer_coordinates": [108, 186, 926, 286],
"possible_answers": ["cashin cashout"],
"points": 4
}
],
},
]
self.grader.process_exam_sheets(self.image, exam_sheets, cropped_answer="cashin cashout")
| 34.946203
| 99
| 0.435751
| 957
| 11,043
| 4.838036
| 0.107628
| 0.069978
| 0.079698
| 0.07257
| 0.8473
| 0.82311
| 0.82311
| 0.82311
| 0.82311
| 0.804968
| 0
| 0.118488
| 0.448972
| 11,043
| 315
| 100
| 35.057143
| 0.642399
| 0.05705
| 0
| 0.590909
| 0
| 0
| 0.16636
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 1
| 0.049242
| false
| 0
| 0.018939
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8f03097049dcb73523d77ef7302e1e7ac4cb3d35
| 58
|
py
|
Python
|
package_eg_test.py
|
myaTheingi/python-exercise
|
c348e17a35e19103e95a5f00e3980db05356d5be
|
[
"MIT"
] | null | null | null |
package_eg_test.py
|
myaTheingi/python-exercise
|
c348e17a35e19103e95a5f00e3980db05356d5be
|
[
"MIT"
] | null | null | null |
package_eg_test.py
|
myaTheingi/python-exercise
|
c348e17a35e19103e95a5f00e3980db05356d5be
|
[
"MIT"
] | null | null | null |
import importlib

ex1 = importlib.import_module("package-example.ex1")  # hyphenated package name needs importlib
ex1.convert()
| 14.5
| 29
| 0.810345
| 8
| 58
| 5.875
| 0.625
| 0.595745
| 0.723404
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 0.068966
| 58
| 3
| 30
| 19.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.5
| null | null | 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
8f59724fdb7c50ae8af7fa694c56215226fc3cc3
| 80
|
py
|
Python
|
dvdp/utils/apt_package/__init__.py
|
davidvdp/utils
|
58d91e0ff1608ecd2b518fe9f511ec43234c0f40
|
[
"MIT"
] | null | null | null |
dvdp/utils/apt_package/__init__.py
|
davidvdp/utils
|
58d91e0ff1608ecd2b518fe9f511ec43234c0f40
|
[
"MIT"
] | null | null | null |
dvdp/utils/apt_package/__init__.py
|
davidvdp/utils
|
58d91e0ff1608ecd2b518fe9f511ec43234c0f40
|
[
"MIT"
] | null | null | null |
from dvdp.utils.apt_package.apt_package import create_package as create_package
| 40
| 79
| 0.8875
| 13
| 80
| 5.153846
| 0.615385
| 0.298507
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075
| 80
| 1
| 80
| 80
| 0.905405
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
56b4ce552e820f242e79fd3c5efa9f4c12226a9f
| 22
|
py
|
Python
|
core/python/src/moveit/task_constructor/stages.py
|
gavanderhoorn/moveit_task_constructor
|
6eb8b0d64c82240c1a04149e01cd3a136c549232
|
[
"BSD-3-Clause"
] | null | null | null |
core/python/src/moveit/task_constructor/stages.py
|
gavanderhoorn/moveit_task_constructor
|
6eb8b0d64c82240c1a04149e01cd3a136c549232
|
[
"BSD-3-Clause"
] | null | null | null |
core/python/src/moveit/task_constructor/stages.py
|
gavanderhoorn/moveit_task_constructor
|
6eb8b0d64c82240c1a04149e01cd3a136c549232
|
[
"BSD-3-Clause"
] | null | null | null |
from _stages import *
| 11
| 21
| 0.772727
| 3
| 22
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
56c29d9b924248822f6b5a8a1b95c20649721f73
| 38
|
py
|
Python
|
__init__.py
|
fabiomix/odoo-reload-translations
|
4643518f4dd801e79d97052cc3abab4f5c606ec7
|
[
"MIT"
] | null | null | null |
__init__.py
|
fabiomix/odoo-reload-translations
|
4643518f4dd801e79d97052cc3abab4f5c606ec7
|
[
"MIT"
] | null | null | null |
__init__.py
|
fabiomix/odoo-reload-translations
|
4643518f4dd801e79d97052cc3abab4f5c606ec7
|
[
"MIT"
] | 1
|
2018-04-29T10:40:18.000Z
|
2018-04-29T10:40:18.000Z
|
# -*- coding: utf-8 -*-
import wizard
| 12.666667
| 23
| 0.578947
| 5
| 38
| 4.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032258
| 0.184211
| 38
| 2
| 24
| 19
| 0.677419
| 0.552632
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7129b6a5c1de43885997d4bd0a27d6b836d4650d
| 31
|
py
|
Python
|
model/tlnet/__init__.py
|
LK-Peng/CNN-based-Cloud-Detection-Methods
|
1393a6886e62f1ed5a612d57c5a725c763a6b2cc
|
[
"MIT"
] | 2
|
2022-02-16T03:30:19.000Z
|
2022-03-18T08:02:39.000Z
|
model/tlnet/__init__.py
|
LK-Peng/CNN-based-Cloud-Detection-Methods
|
1393a6886e62f1ed5a612d57c5a725c763a6b2cc
|
[
"MIT"
] | null | null | null |
model/tlnet/__init__.py
|
LK-Peng/CNN-based-Cloud-Detection-Methods
|
1393a6886e62f1ed5a612d57c5a725c763a6b2cc
|
[
"MIT"
] | 1
|
2022-02-16T03:30:20.000Z
|
2022-02-16T03:30:20.000Z
|
from .tlnet_model import TLNet
| 15.5
| 30
| 0.83871
| 5
| 31
| 5
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
853393b48604e1d7b59625a8b6910c0a3a30dfb7
| 155
|
py
|
Python
|
exapi/requesters/hitbtc/trading/__init__.py
|
astsu-dev/exapi
|
1ef39ccdd77e9ddb60ec6eaa16a2cc26e1ac3e12
|
[
"MIT"
] | null | null | null |
exapi/requesters/hitbtc/trading/__init__.py
|
astsu-dev/exapi
|
1ef39ccdd77e9ddb60ec6eaa16a2cc26e1ac3e12
|
[
"MIT"
] | null | null | null |
exapi/requesters/hitbtc/trading/__init__.py
|
astsu-dev/exapi
|
1ef39ccdd77e9ddb60ec6eaa16a2cc26e1ac3e12
|
[
"MIT"
] | null | null | null |
from exapi.requesters.hitbtc.trading.interface import IHitbtcTradingRequester
from exapi.requesters.hitbtc.trading.requester import HitbtcTradingRequester
| 51.666667
| 77
| 0.896774
| 16
| 155
| 8.6875
| 0.625
| 0.129496
| 0.273381
| 0.359712
| 0.460432
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051613
| 155
| 2
| 78
| 77.5
| 0.945578
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
8547f2b194d435aeac51c975ed7aa5ed54aa7c4d
| 39
|
py
|
Python
|
pguoauth/__init__.py
|
olekhov/pguoauth
|
4a333994d80884c27dfb68a55661fcb4a55ce2bf
|
[
"MIT"
] | null | null | null |
pguoauth/__init__.py
|
olekhov/pguoauth
|
4a333994d80884c27dfb68a55661fcb4a55ce2bf
|
[
"MIT"
] | 2
|
2019-09-05T20:29:52.000Z
|
2021-10-01T14:20:08.000Z
|
pguoauth/__init__.py
|
olekhov/pguoauth
|
4a333994d80884c27dfb68a55661fcb4a55ce2bf
|
[
"MIT"
] | null | null | null |
from .pguoauth import PGUAuthenticator
| 19.5
| 38
| 0.871795
| 4
| 39
| 8.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 1
| 39
| 39
| 0.971429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a4482cf17594d6a50319030d1fab9dda1aa17580
| 52
|
py
|
Python
|
src/__init__.py
|
RyanThomas/mta-bus-archive
|
525265ea0933c33ca8a0c59a16d9b6f73a32fc27
|
[
"Apache-1.1"
] | 9
|
2017-07-15T16:40:36.000Z
|
2020-10-15T12:50:31.000Z
|
src/__init__.py
|
RyanThomas/mta-bus-archive
|
525265ea0933c33ca8a0c59a16d9b6f73a32fc27
|
[
"Apache-1.1"
] | 5
|
2017-06-10T00:15:12.000Z
|
2021-03-04T02:40:42.000Z
|
src/__init__.py
|
RyanThomas/mta-bus-archive
|
525265ea0933c33ca8a0c59a16d9b6f73a32fc27
|
[
"Apache-1.1"
] | 2
|
2017-09-15T16:52:20.000Z
|
2021-03-04T02:25:08.000Z
|
from . import model
from . import gtfs_realtime_pb2
| 17.333333
| 31
| 0.807692
| 8
| 52
| 5
| 0.75
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022727
| 0.153846
| 52
| 2
| 32
| 26
| 0.886364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a476a158ea97ff6ec6294fa4ef9fb61ae942f32f
| 188
|
py
|
Python
|
reportingsquad/runscompare/admin.py
|
VoloBro/SimpleReporting
|
ced89864cd9e2838d8e44297d19de2d96fa5f0b1
|
[
"Apache-2.0"
] | null | null | null |
reportingsquad/runscompare/admin.py
|
VoloBro/SimpleReporting
|
ced89864cd9e2838d8e44297d19de2d96fa5f0b1
|
[
"Apache-2.0"
] | null | null | null |
reportingsquad/runscompare/admin.py
|
VoloBro/SimpleReporting
|
ced89864cd9e2838d8e44297d19de2d96fa5f0b1
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from .models import *
admin.site.register(TestCase)
admin.site.register(TestRun)
admin.site.register(TestCaseStatus)
admin.site.register(TestExecution)
| 18.8
| 35
| 0.819149
| 24
| 188
| 6.416667
| 0.5
| 0.233766
| 0.441558
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079787
| 188
| 9
| 36
| 20.888889
| 0.890173
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
8ef97bc9d523ac2b3f7e76b43a97e40bc24b734f
| 4,414
|
py
|
Python
|
test/t_rsa_plot.py
|
const7/NeuroRA
|
cdd344b1d7050c91b37a63e347b11345e3f0b193
|
[
"MIT"
] | 110
|
2019-04-30T03:52:48.000Z
|
2022-03-19T08:23:38.000Z
|
test/t_rsa_plot.py
|
const7/NeuroRA
|
cdd344b1d7050c91b37a63e347b11345e3f0b193
|
[
"MIT"
] | 3
|
2020-11-24T22:01:58.000Z
|
2021-11-26T02:09:52.000Z
|
test/t_rsa_plot.py
|
const7/NeuroRA
|
cdd344b1d7050c91b37a63e347b11345e3f0b193
|
[
"MIT"
] | 20
|
2020-03-02T11:58:30.000Z
|
2021-12-31T08:29:53.000Z
|
# -*- coding: utf-8 -*-
' a module for testing neurora.rsa_plot module '
__author__ = 'Zitong Lu'
import os
import numpy as np
import unittest
from neurora.rsa_plot import plot_rdm, plot_rdm_withvalue, plot_corrs_by_time, plot_tbytsim_withstats
from neurora.rsa_plot import plot_corrs_hotmap, plot_corrs_hotmap_stats, plot_nps_hotmap, plot_stats_hotmap
from neurora.rsa_plot import plot_brainrsa_regions, plot_brainrsa_montage, plot_brainrsa_glass, plot_brainrsa_surface, \
plot_brainrsa_rlts
class test_rsa_plot(unittest.TestCase):
def test_plot_rdm(self):
rdm = np.random.rand(8, 8)
output = plot_rdm(rdm)
self.assertEqual(output, 0)
rdm = np.random.rand(7, 8)
output = plot_rdm(rdm)
self.assertEqual(output, "Invalid input!")
def test_plot_rdm_withvalue(self):
rdm = np.random.rand(8, 8)
output = plot_rdm(rdm)
self.assertEqual(output, 0)
rdm = np.random.rand(7, 8)
output = plot_rdm_withvalue(rdm)
self.assertEqual(output, "Invalid input!")
def test_plot_corrs_by_time(self):
corrs = np.random.rand(100, 5, 2)
output = plot_corrs_by_time(corrs)
self.assertEqual(output, 0)
corrs = np.random.rand(100, 5)
output = plot_corrs_by_time(corrs)
self.assertEqual(output, 0)
corrs = np.random.rand(100, 5, 2, 2)
output = plot_corrs_by_time(corrs)
self.assertEqual(output, "Invalid input!")
def test_plot_tbytsim_withstats(self):
Similarities = np.random.rand(20, 10, 2)
output = plot_tbytsim_withstats(Similarities)
self.assertEqual(output, 0)
Similarities = np.random.rand(20, 10)
output = plot_tbytsim_withstats(Similarities)
self.assertEqual(output, 0)
Similarities = np.random.rand(20, 10, 2, 2)
output = plot_tbytsim_withstats(Similarities)
self.assertEqual(output, "Invalid input!")
def test_plot_corrs_hotmap(self):
corrs = np.random.rand(100, 5, 2)
output = plot_corrs_hotmap(corrs)
self.assertEqual(output, 0)
corrs = np.random.rand(100, 5)
output = plot_corrs_hotmap(corrs)
self.assertEqual(output, 0)
corrs = np.random.rand(100, 5, 2, 2)
output = plot_corrs_hotmap(corrs)
self.assertEqual(output, "Invalid input!")
def test_plot_corrs_hotmap_stats(self):
stats = np.random.rand(100, 5, 2)
corrs = np.random.rand(100, 5, 2)
output = plot_corrs_hotmap_stats(corrs, stats)
self.assertEqual(output, 0)
corrs = np.random.rand(100, 5)
output = plot_corrs_hotmap_stats(corrs, stats)
self.assertEqual(output, 0)
corrs = np.random.rand(100, 5, 2, 2)
output = plot_corrs_hotmap_stats(corrs, stats)
self.assertEqual(output, "Invalid input!")
def test_plot_nps_hotmap(self):
similarities = np.random.rand(10, 2)
output = plot_nps_hotmap(similarities)
self.assertEqual(output, 0)
similarities = np.random.rand(10, 2, 2)
output = plot_nps_hotmap(similarities)
self.assertEqual(output, "Invalid input!")
def test_plot_stats_hotmap(self):
similarities = np.random.rand(5, 10, 2)
output = plot_stats_hotmap(similarities)
self.assertEqual(output, 0)
similarities = np.random.rand(5, 10, 2, 2)
output = plot_stats_hotmap(similarities)
self.assertEqual(output, "Invalid input!")
def test_plot_brainrsa_regions(self):
img = '../neurora/template/ch2.nii.gz'
output = plot_brainrsa_regions(img)
self.assertEqual(output, 0)
def test_plot_brainrsa_montage(self):
img = '../neurora/template/ch2.nii.gz'
output = plot_brainrsa_montage(img)
self.assertEqual(output, 0)
def test_plot_brainrsa_glass(self):
img = '../neurora/template/ch2.nii.gz'
output = plot_brainrsa_glass(img)
self.assertEqual(output, 0)
def test_plot_brainrsa_surface(self):
img = '../neurora/template/ch2.nii.gz'
output = plot_brainrsa_surface(img)
self.assertEqual(output, 0)
def test_plot_brainrsa_rlts(self):
img = '../neurora/template/ch2.nii.gz'
output = plot_brainrsa_rlts(img)
self.assertEqual(output, 0)
if __name__ == '__main__':
unittest.main()
| 30.441379
| 120
| 0.656094
| 579
| 4,414
| 4.763385
| 0.108808
| 0.090645
| 0.190355
| 0.135606
| 0.819434
| 0.80892
| 0.754532
| 0.732777
| 0.724438
| 0.578318
| 0
| 0.033225
| 0.236294
| 4,414
| 145
| 121
| 30.441379
| 0.78493
| 0.015406
| 0
| 0.613861
| 0
| 0
| 0.073998
| 0.034153
| 0
| 0
| 0
| 0
| 0.247525
| 1
| 0.128713
| false
| 0
| 0.059406
| 0
| 0.19802
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f121aebf8065d14e484ee964e9f4320c51f83dd0
| 361
|
py
|
Python
|
gym_softrobot/utils/actuation/algorithms/__init__.py
|
nmnaughton/gym-softrobot
|
7b7eb9bfb97f2e3d2c3e2f7df50ca96426a2482f
|
[
"MIT"
] | 10
|
2022-01-11T19:49:02.000Z
|
2022-03-24T22:27:32.000Z
|
gym_softrobot/utils/actuation/algorithms/__init__.py
|
nmnaughton/gym-softrobot
|
7b7eb9bfb97f2e3d2c3e2f7df50ca96426a2482f
|
[
"MIT"
] | 7
|
2022-01-15T07:48:53.000Z
|
2022-03-07T17:43:44.000Z
|
gym_softrobot/utils/actuation/algorithms/__init__.py
|
nmnaughton/gym-softrobot
|
7b7eb9bfb97f2e3d2c3e2f7df50ca96426a2482f
|
[
"MIT"
] | 2
|
2022-03-06T19:43:06.000Z
|
2022-03-25T21:31:52.000Z
|
"""
Created on Oct. 19, 2020
@author: Heng-Sheng (Hanson) Chang
"""
from gym_softrobot.utils.actuation.algorithms.algorithm import *
from gym_softrobot.utils.actuation.algorithms.forward_backward import *
from gym_softrobot.utils.actuation.algorithms.forward_backward_muscle import *
from gym_softrobot.utils.actuation.algorithms.smoothing_algorithm2 import *
| 36.1
| 78
| 0.831025
| 46
| 361
| 6.347826
| 0.5
| 0.09589
| 0.219178
| 0.287671
| 0.712329
| 0.712329
| 0.575342
| 0.417808
| 0.417808
| 0
| 0
| 0.021021
| 0.077562
| 361
| 9
| 79
| 40.111111
| 0.855856
| 0.163435
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
f122abc58339b2da5da8c83bf6e49a4b3cbdd224
| 5,770
|
py
|
Python
|
tests/test_search_skill.py
|
OpenVoiceOS/ovos_skill_manager
|
20b9275cd929b250dd7e5c9b4700cb41b0f07c89
|
[
"Apache-2.0"
] | 4
|
2021-01-25T08:08:04.000Z
|
2022-03-06T01:58:41.000Z
|
tests/test_search_skill.py
|
OpenVoiceOS/ovos_skill_manager
|
20b9275cd929b250dd7e5c9b4700cb41b0f07c89
|
[
"Apache-2.0"
] | 70
|
2021-01-12T19:31:44.000Z
|
2022-03-15T16:45:57.000Z
|
tests/test_search_skill.py
|
OpenVoiceOS/ovos_skill_manager
|
20b9275cd929b250dd7e5c9b4700cb41b0f07c89
|
[
"Apache-2.0"
] | 1
|
2021-02-10T01:33:29.000Z
|
2021-02-10T01:33:29.000Z
|
import os
import sys
import unittest
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
# APPSTORE_OPTIONS = ["ovos", "mycroft", "pling", "andlo", "default", "all"]
class SearchTests(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
from ovos_skills_manager.appstores.ovos import OVOSstore
OVOSstore().sync_skills_list()
if os.environ.get("GITHUB_TOKEN"):
from ovos_skills_manager.session import set_github_token
set_github_token(os.environ.get("GITHUB_TOKEN"))
def test_get_skills_mycroft(self):
from ovos_skills_manager.appstores.mycroft_marketplace import get_mycroft_marketplace_skills
skills = get_mycroft_marketplace_skills()
self.assertTrue(any(skills))
def test_get_skills_ovos(self):
from ovos_skills_manager.appstores.ovos import get_ovos_skills
skills = get_ovos_skills()
self.assertTrue(any(skills))
# TODO: get_neon needs auth, use env var + GH secret DM
def test_search_mycroft_all(self):
from ovos_skills_manager.scripts.search import search_skill
# methods = ['all', 'name', 'url', 'category', 'author', 'tag', 'description']
query = "dismissal"
fuzzy = True
thresh = 80
results = search_skill(method="all", query=query, fuzzy=fuzzy, no_ignore_case=False,
thresh=thresh, appstore="mycroft")
self.assertIsInstance(results, list)
self.assertTrue(len(results) > 0)
def test_search_mycroft_name(self):
from ovos_skills_manager.scripts.search import search_skill
# methods = ['all', 'name', 'url', 'category', 'author', 'tag', 'description']
query = "dismiss"
fuzzy = True
thresh = 80
results = search_skill(method="name", query=query, fuzzy=fuzzy, no_ignore_case=False,
thresh=thresh, appstore="mycroft")
self.assertIsInstance(results, list)
self.assertTrue(len(results) > 0)
def test_search_mycroft_url(self):
from ovos_skills_manager.scripts.search import search_skill
# methods = ['all', 'name', 'url', 'category', 'author', 'tag', 'description']
query = "https://github.com/ChanceNCounter/dismissal-skill"
fuzzy = False
thresh = 80
results = search_skill(method="url", query=query, fuzzy=fuzzy, no_ignore_case=False,
thresh=thresh, appstore="mycroft")
self.assertIsInstance(results, list)
self.assertTrue(len(results) > 0)
def test_search_neon_all(self):
from ovos_skills_manager.scripts.search import search_skill
# methods = ['all', 'name', 'url', 'category', 'author', 'tag', 'description']
query = "caffeine"
fuzzy = True
thresh = 80
results = search_skill(method="all", query=query, fuzzy=fuzzy, no_ignore_case=False,
thresh=thresh, appstore="neon")
self.assertIsInstance(results, list)
self.assertTrue(len(results) > 0)
def test_search_neon_name(self):
from ovos_skills_manager.scripts.search import search_skill
# methods = ['all', 'name', 'url', 'category', 'author', 'tag', 'description']
query = "Caffeine Wiz"
fuzzy = True
thresh = 80
results = search_skill(method="name", query=query, fuzzy=fuzzy, no_ignore_case=False,
thresh=thresh, appstore="neon")
self.assertIsInstance(results, list)
self.assertTrue(len(results) > 0)
def test_search_neon_url(self):
from ovos_skills_manager.scripts.search import search_skill
# methods = ['all', 'name', 'url', 'category', 'author', 'tag', 'description']
query = "https://github.com/NeonGeckoCom/caffeinewiz.neon"
fuzzy = False
thresh = 80
results = search_skill(method="url", query=query, fuzzy=fuzzy, no_ignore_case=False,
thresh=thresh, appstore="neon")
self.assertIsInstance(results, list)
self.assertTrue(len(results) > 0)
def test_search_ovos_all(self):
from ovos_skills_manager.scripts.search import search_skill
# methods = ['all', 'name', 'url', 'category', 'author', 'tag', 'description']
query = "launcher"
fuzzy = True
thresh = 80
results = search_skill(method="all", query=query, fuzzy=fuzzy, no_ignore_case=False,
thresh=thresh, appstore="ovos")
self.assertIsInstance(results, list)
self.assertTrue(len(results) > 0)
def test_search_ovos_name(self):
from ovos_skills_manager.scripts.search import search_skill
# methods = ['all', 'name', 'url', 'category', 'author', 'tag', 'description']
query = "launcher"
fuzzy = True
thresh = 80
results = search_skill(method="name", query=query, fuzzy=fuzzy, no_ignore_case=False,
thresh=thresh, appstore="ovos")
self.assertIsInstance(results, list)
self.assertTrue(len(results) > 0)
def test_search_ovos_url(self):
from ovos_skills_manager.scripts.search import search_skill
# methods = ['all', 'name', 'url', 'category', 'author', 'tag', 'description']
query = "https://github.com/NeonGeckoCom/launcher.neon"
fuzzy = False
thresh = 80
results = search_skill(method="url", query=query, fuzzy=fuzzy, no_ignore_case=False,
thresh=thresh, appstore="ovos")
self.assertIsInstance(results, list)
self.assertTrue(len(results) > 0)
# TODO: Pling, andlo searches
if __name__ == '__main__':
unittest.main()
| 42.116788
| 100
| 0.629463
| 650
| 5,770
| 5.390769
| 0.136923
| 0.056507
| 0.051941
| 0.077911
| 0.830194
| 0.800514
| 0.789669
| 0.766838
| 0.766838
| 0.766838
| 0
| 0.006253
| 0.251646
| 5,770
| 136
| 101
| 42.426471
| 0.805234
| 0.14714
| 0
| 0.650485
| 0
| 0
| 0.061328
| 0
| 0
| 0
| 0
| 0.007353
| 0.194175
| 1
| 0.116505
| false
| 0
| 0.15534
| 0
| 0.281553
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f145fbdd1a58ed8e73a3bf8759c6e66287084b41
| 223
|
py
|
Python
|
cpy/parser/pygrammar.py
|
lodevil/cpy
|
bb3cc0dfc7d9ddfc20ea97d2721430a0a8029812
|
[
"MIT"
] | null | null | null |
cpy/parser/pygrammar.py
|
lodevil/cpy
|
bb3cc0dfc7d9ddfc20ea97d2721430a0a8029812
|
[
"MIT"
] | null | null | null |
cpy/parser/pygrammar.py
|
lodevil/cpy
|
bb3cc0dfc7d9ddfc20ea97d2721430a0a8029812
|
[
"MIT"
] | null | null | null |
from .grammar import Grammar
from .pystates import single_input, file_input, eval_input, symbols
grammar = Grammar(symbols, {
'single_input': single_input,
'file_input': file_input,
'eval_input': eval_input})
| 24.777778
| 67
| 0.744395
| 29
| 223
| 5.413793
| 0.310345
| 0.210191
| 0.267516
| 0.254777
| 0.292994
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156951
| 223
| 8
| 68
| 27.875
| 0.835106
| 0
| 0
| 0
| 0
| 0
| 0.143498
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
f17843d4ddf3c1087aedcca793074e2482db3e74
| 33
|
py
|
Python
|
bot/__init__.py
|
colorfuldisaster/adolf-scriptler
|
68b006e264e5d6f173f8a6b97b460fc43209d2ed
|
[
"MIT"
] | null | null | null |
bot/__init__.py
|
colorfuldisaster/adolf-scriptler
|
68b006e264e5d6f173f8a6b97b460fc43209d2ed
|
[
"MIT"
] | null | null | null |
bot/__init__.py
|
colorfuldisaster/adolf-scriptler
|
68b006e264e5d6f173f8a6b97b460fc43209d2ed
|
[
"MIT"
] | null | null | null |
from .discord_interface import *
| 16.5
| 32
| 0.818182
| 4
| 33
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
74c693143eac03429dd7e52e4481fdc5d0daa1ac
| 9,734
|
py
|
Python
|
src/tests/explainers/cnn_explainer_test.py
|
MANISH007700/explainable-cnn
|
c9f0346e137fdfce3160a779d57a05a70fb97b06
|
[
"MIT"
] | 106
|
2022-03-16T02:20:39.000Z
|
2022-03-31T21:58:30.000Z
|
src/tests/explainers/cnn_explainer_test.py
|
MANISH007700/explainable-cnn
|
c9f0346e137fdfce3160a779d57a05a70fb97b06
|
[
"MIT"
] | 2
|
2022-03-27T22:31:20.000Z
|
2022-03-29T14:28:57.000Z
|
src/tests/explainers/cnn_explainer_test.py
|
MANISH007700/explainable-cnn
|
c9f0346e137fdfce3160a779d57a05a70fb97b06
|
[
"MIT"
] | 8
|
2022-03-14T01:43:22.000Z
|
2022-03-31T14:41:15.000Z
|
import pytest
import torchvision
import torchvision.models as models
from explainable_cnn import CNNExplainer
class TestCNNExplainer:
obj = CNNExplainer(models.resnet18(),
{0: "Cat", 1: "Dog"},
"cpu")
def test_get_label_name_from_index(self):
cls = self.__class__
assert cls.obj.get_label_name_from_index(0) == "Cat"
assert cls.obj.get_label_name_from_index(1) == "Dog"
def test_get_label_index_from_name(self):
cls = self.__class__
assert cls.obj.get_label_index_from_name("Cat") == 0
assert cls.obj.get_label_index_from_name("Dog") == 1
def test_get_label_information(self):
cls = self.__class__
label_index, label_name = cls.obj.get_label_information("Cat")
assert label_index == 0
assert label_name == "Cat"
label_index, label_name = cls.obj.get_label_information(1)
assert label_index == 1
assert label_name == "Dog"
def test_get_grad_cam_image_label_type_fail(self, tmp_path):
cls = self.__class__
image_file = tmp_path / "sample.png"
with pytest.raises(TypeError):
cls.obj.get_grad_cam(image_file, 1.5, (224, 224), ["relu"])
@pytest.mark.parametrize("image_label", ["Tiger", 2])
def test_get_grad_cam_image_label_value_fail(self, image_label, tmp_path):
cls = self.__class__
image_file = tmp_path / "sample.png"
with pytest.raises(ValueError):
cls.obj.get_grad_cam(image_file, image_label, (224, 224), ["relu"])
@pytest.mark.parametrize("layers", [["random", "value"], [1, 2], [1.5]])
def test_get_grad_cam_layers_fail(self, layers, tmp_path):
cls = self.__class__
image_file = tmp_path / "sample.png"
with pytest.raises(ValueError):
cls.obj.get_grad_cam(image_file, 0, (224, 224), layers)
@pytest.mark.parametrize("input_shape",
[(1,), [1], (1, 2, 3, 4), [1, 2, 3, 4]])
def test_get_grad_cam_input_shape_value_fail(self, input_shape, tmp_path):
cls = self.__class__
image_file = tmp_path / "sample.png"
with pytest.raises(ValueError):
cls.obj.get_grad_cam(image_file, 0, input_shape, ["relu"])
@pytest.mark.parametrize("input_shape", [1, 1.5, "random"])
def test_get_grad_cam_input_shape_type_fail(self, input_shape, tmp_path):
cls = self.__class__
image_file = tmp_path / "sample.png"
with pytest.raises(TypeError):
cls.obj.get_grad_cam(image_file, 0, input_shape, ["relu"])
def test_get_guided_grad_cam_image_label_type_fail(self, tmp_path):
cls = self.__class__
image_file = tmp_path / "sample.png"
with pytest.raises(TypeError):
cls.obj.get_guided_grad_cam(image_file, 1.5, (224, 224), ["relu"])
@pytest.mark.parametrize("image_label", ["Tiger", 2])
def test_get_guided_grad_cam_image_label_value_fail(self,
image_label,
tmp_path):
cls = self.__class__
image_file = tmp_path / "sample.png"
with pytest.raises(ValueError):
cls.obj.get_guided_grad_cam(image_file,
image_label,
(224, 224),
["relu"])
@pytest.mark.parametrize("layers", [["random", "value"], [1, 2], [1.5]])
def test_get_guided_grad_cam_layers_fail(self, layers, tmp_path):
cls = self.__class__
image_file = tmp_path / "sample.png"
with pytest.raises(ValueError):
cls.obj.get_guided_grad_cam(image_file, 0, (224, 224), layers)
@pytest.mark.parametrize("input_shape",
[(1,), [1], (1, 2, 3, 4), [1, 2, 3, 4]])
def test_get_guided_grad_cam_input_shape_value_fail(self,
input_shape,
tmp_path):
cls = self.__class__
image_file = tmp_path / "sample.png"
with pytest.raises(ValueError):
cls.obj.get_guided_grad_cam(image_file, 0, input_shape, ["relu"])
@pytest.mark.parametrize("input_shape", [1, 1.5, "random"])
def test_get_guided_grad_cam_input_shape_type_fail(self,
input_shape,
tmp_path):
cls = self.__class__
image_file = tmp_path / "sample.png"
with pytest.raises(TypeError):
cls.obj.get_guided_grad_cam(image_file, 0, input_shape, ["relu"])
@pytest.mark.parametrize("transforms", [1, 1.5, "random value", [1, 2, 3]])
def test_get_guided_grad_cam_transforms_fail(self, transforms, tmp_path):
cls = self.__class__
image_file = tmp_path / "sample.png"
with pytest.raises(TypeError):
cls.obj.get_guided_grad_cam(image_file,
0,
(224, 224),
["relu"],
transforms)
def test_get_guided_back_propagation_image_label_type_fail(self, tmp_path):
cls = self.__class__
image_file = tmp_path / "sample.png"
with pytest.raises(TypeError):
cls.obj.get_guided_back_propagation(image_file, 1.5, (224, 224))
@pytest.mark.parametrize("image_label", ["Tiger", 2])
def test_get_guided_back_propagation_image_label_value_fail(self,
image_label,
tmp_path):
cls = self.__class__
image_file = tmp_path / "sample.png"
with pytest.raises(ValueError):
cls.obj.get_guided_back_propagation(image_file,
image_label,
(224, 224))
@pytest.mark.parametrize("input_shape",
[(1,), [1], (1, 2, 3, 4), [1, 2, 3, 4]])
def test_get_guided_back_propagation_input_shape_value_fail(self,
input_shape,
tmp_path):
cls = self.__class__
image_file = tmp_path / "sample.png"
with pytest.raises(ValueError):
cls.obj.get_guided_back_propagation(image_file, 0, input_shape)
@pytest.mark.parametrize("input_shape", [1, 1.5, "random"])
def test_get_guided_back_propagation_input_shape_type_fail(self,
input_shape,
tmp_path):
cls = self.__class__
image_file = tmp_path / "sample.png"
with pytest.raises(TypeError):
cls.obj.get_guided_back_propagation(image_file, 0, input_shape)
@pytest.mark.parametrize("transforms",
[1, 1.5, "random value", [1, 2, 3]])
def test_get_guided_back_propagation_transforms_fail(self,
transforms,
tmp_path):
cls = self.__class__
image_file = tmp_path / "sample.png"
with pytest.raises(TypeError):
cls.obj.get_guided_back_propagation(image_file,
0,
(224, 224),
transforms)
def test_get_saliency_map_image_label_type_fail(self, tmp_path):
cls = self.__class__
image_file = tmp_path / "sample.png"
with pytest.raises(TypeError):
cls.obj.get_saliency_map(image_file, 1.5, (224, 224))
@pytest.mark.parametrize("image_label", ["Tiger", 2])
def test_get_saliency_map_image_label_value_fail(self,
image_label,
tmp_path):
cls = self.__class__
image_file = tmp_path / "sample.png"
with pytest.raises(ValueError):
cls.obj.get_saliency_map(image_file, image_label, (224, 224))
@pytest.mark.parametrize("input_shape",
[(1,), [1], (1, 2, 3, 4), [1, 2, 3, 4]])
def test_get_saliency_map_input_shape_value_fail(self,
input_shape,
tmp_path):
cls = self.__class__
image_file = tmp_path / "sample.png"
with pytest.raises(ValueError):
cls.obj.get_saliency_map(image_file, 0, input_shape)
@pytest.mark.parametrize("input_shape", [1, 1.5, "random"])
def test_get_saliency_map_input_shape_type_fail(self,
input_shape,
tmp_path):
cls = self.__class__
image_file = tmp_path / "sample.png"
with pytest.raises(TypeError):
cls.obj.get_saliency_map(image_file, 0, input_shape)
@pytest.mark.parametrize("transforms",
[1, 1.5, "random value", [1, 2, 3]])
def test_get_saliency_map_transforms_fail(self, transforms, tmp_path):
cls = self.__class__
image_file = tmp_path / "sample.png"
with pytest.raises(TypeError):
cls.obj.get_saliency_map(image_file, 0, (224, 224), transforms)
| 46.132701
| 79
| 0.539347
| 1,110
| 9,734
| 4.316216
| 0.055856
| 0.061365
| 0.05072
| 0.061365
| 0.933834
| 0.922772
| 0.910248
| 0.89689
| 0.858485
| 0.825089
| 0
| 0.030824
| 0.360078
| 9,734
| 210
| 80
| 46.352381
| 0.738321
| 0
| 0
| 0.695652
| 0
| 0
| 0.056708
| 0
| 0
| 0
| 0
| 0
| 0.043478
| 1
| 0.130435
| false
| 0
| 0.021739
| 0
| 0.163043
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
74c967f1f486f9c78416fa133e2271aa2b0f7076
| 217
|
py
|
Python
|
tests/test_router.py
|
villekr/ocpp-asgi
|
032e3843b09c1b6a1c2a1d1accc1bea2b125e397
|
[
"MIT"
] | 2
|
2021-10-19T04:54:59.000Z
|
2021-12-11T21:57:17.000Z
|
tests/test_router.py
|
villekr/ocpp-asgi
|
032e3843b09c1b6a1c2a1d1accc1bea2b125e397
|
[
"MIT"
] | null | null | null |
tests/test_router.py
|
villekr/ocpp-asgi
|
032e3843b09c1b6a1c2a1d1accc1bea2b125e397
|
[
"MIT"
] | 1
|
2021-09-06T10:42:08.000Z
|
2021-09-06T10:42:08.000Z
|
from ocpp_asgi.router import Subprotocol, subprotocol_to_ocpp_version
def test_subprotocol_to_ocpp_version():
ocpp_version: str = subprotocol_to_ocpp_version(Subprotocol.ocpp16)
assert ocpp_version == "1.6"
| 31
| 71
| 0.815668
| 30
| 217
| 5.466667
| 0.5
| 0.335366
| 0.310976
| 0.439024
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020833
| 0.115207
| 217
| 6
| 72
| 36.166667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0.013825
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.25
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
74de4e2764192e1ef35b1262c6e2764f4a77f0ca
| 13,936
|
py
|
Python
|
smii/test/models_scalar.py
|
ar4/smii
|
b7eee03f2a4c8f56f6dde61738e8aa1090621ba3
|
[
"MIT"
] | 3
|
2018-07-02T15:50:34.000Z
|
2019-02-28T11:42:34.000Z
|
smii/test/models_scalar.py
|
ar4/smii
|
b7eee03f2a4c8f56f6dde61738e8aa1090621ba3
|
[
"MIT"
] | null | null | null |
smii/test/models_scalar.py
|
ar4/smii
|
b7eee03f2a4c8f56f6dde61738e8aa1090621ba3
|
[
"MIT"
] | null | null | null |
"""Create constant and point scatterer models."""
import numpy as np
import scipy.special
import scipy.integrate
from scipy.ndimage.interpolation import shift
from smii.modeling.propagators.propagators import (Scalar1D, Scalar2D)
from smii.modeling.wavelets.wavelets import ricker
from smii.modeling.forward_model import forward_model
from smii.inversion.fwi import costjac
def direct_1d(x, x_s, dx, dt, c, f):
"""Use the 1D Green's function to determine the wavefield at a given
location and time due to the given source.
"""
r = np.abs(x - x_s)
t_shift = (r/c) / dt + 1
u = dx * dt * c / 2 * np.cumsum(shift(f, t_shift))
return u
def direct_2d(x, t, x_s, dx, dt, c, f):
"""Use the 2D Green's function to determine the wavefield at a given
location and time due to the given source.
"""
r = np.linalg.norm(x - x_s)
t_max = np.maximum(0, int((t - r/c) / dt))
tmtp = t - np.arange(t_max) * dt
summation = np.sum(f[:t_max] / np.sqrt(c**2 * tmtp**2 - r**2))
u = dx**2 * dt * c / 2 / np.pi * summation
return u
def direct_2d2(x, x_s, dx, dt, c, f):
"""Use the 2D Green's function to determine the wavefield at a given
location and time due to the given source.
"""
r = np.linalg.norm(x - x_s)
nt = len(f)
def func(tp, t):
return f[int(tp / dt)] / np.sqrt(c**2 * (t - tp)**2 - r**2)
u = np.zeros_like(f)
t_max = int(r/c / dt)
for t_idx in range(t_max):
t = t_idx * dt
u[t_idx] = scipy.integrate.quad(func, 0, t, (t+dt))[0]
u *= dx**2 * dt * c / 2 / np.pi
return u
def direct_2d_approx(x, x_s, dx, dt, c, f):
"""Same as direct_2d, but using an approximation to calculate the result
for the whole time range of the source.
"""
r = np.linalg.norm(x - x_s)
nt = len(f)
w = np.fft.rfftfreq(nt, dt)
fw = np.fft.rfft(f)
G = 1j / 4 * scipy.special.hankel1(0, -2 * np.pi * w * r / c)
G[0] = 0
s = G * fw * dx**2
u = np.fft.irfft(s, nt)
return u
def direct_3d(x, x_s, dx, dt, c, f):
"""Use the 3D Green's function to determine the wavefield at a given
location and time due to the given source.
"""
r = np.linalg.norm(x - x_s)
t_shift = (r/c) / dt + 1
u = dx**3 * dt / 4 / np.pi / r * shift(f, t_shift)
return u
def scattered_1d(x, x_s, x_p, dx, dt, c, dc, f):
u_p = direct_1d(x_p, x_s, dx, dt, c, f)
du_pdt2 = np.gradient(np.gradient(u_p)) / dt**2
u = 2 * dc / c**3 * direct_1d(x, x_p, dx, dt, c, du_pdt2)
return u
def scattered_2d(x, x_s, x_p, dx, dt, c, dc, f):
u_p = direct_2d_approx(x_p, x_s, dx, dt, c, f)
du_pdt2 = np.gradient(np.gradient(u_p)) / dt**2
u = 2 * dc / c**3 * direct_2d_approx(x, x_p, dx, dt, c, du_pdt2)
return u
def scattered_3d(x, x_s, x_p, dx, dt, c, dc, f):
u_p = direct_3d(x_p, x_s, dx, dt, c, f)
du_pdt2 = np.gradient(np.gradient(u_p)) / dt**2
u = 2 * dc / c**3 * direct_3d(x, x_p, dx, dt, c, du_pdt2)
return u
def grad_1d(nx, x_r, x_s, x_p, dx, dt, c, dc, f):
d = -scattered_1d(x_r, x_s, x_p, dx, dt, c, dc, f)[::-1]
grad = np.zeros(nx, np.float32)
for x_idx in range(nx):
x = x_idx*dx
u_r = direct_1d(x, x_r, dx, dt, c, d)[::-1]
u_0 = direct_1d(x, x_s, dx, dt, c, f)
du_0dt2 = np.gradient(np.gradient(u_0)) / dt**2
grad[x_idx] = 2 * dt / c**3 * np.sum(u_r * du_0dt2)
return grad
def grad_2d(nx, x_r, x_s, x_p, dx, dt, c, dc, f):
d = -scattered_2d(x_r, x_s, x_p, dx, dt, c, dc, f)[::-1]
grad = np.zeros(nx, np.float32)
for z_idx in range(nx[0]):
for x_idx in range(nx[1]):
x = np.array([z_idx*dx, x_idx*dx])
u_r = direct_2d_approx(x, x_r, dx, dt, c, d)[::-1]
u_0 = direct_2d_approx(x, x_s, dx, dt, c, f)
du_0dt2 = np.gradient(np.gradient(u_0)) / dt**2
grad[z_idx, x_idx] = 2 * dt / c**3 * np.sum(u_r * du_0dt2)
return grad
def grad_1d_fd(model_true, model_init, x_r, x_s, dx, dt, dc, f,
propagator=None, prop_kwargs=None):
x_r_idx, x_s_idx = (np.array([x_r, x_s]) / dx).astype(np.int)
source, receiver_locations = _make_source_receiver(x_s_idx, x_r_idx, f)
if propagator is None:
propagator = Scalar1D
if prop_kwargs is None:
prop_kwargs = {}
prop = propagator(model_true, dx, dt, source, **prop_kwargs)
true_data, _ = forward_model(prop, receiver_locations)
receiver = {}
receiver['amplitude'] = true_data.receivers
receiver['locations'] = receiver_locations
dataset = [(source, receiver)]
init_cost, fwi_grad = costjac(model_init, dataset, dx, dt, propagator,
model_init.shape, compute_grad=True,
prop_kwargs=prop_kwargs)
nx = len(model_true)
true_grad = np.zeros(nx, np.float32)
for x_idx in range(nx):
tmp_model = model_init.copy()
tmp_model[x_idx] += dc
new_cost, _ = costjac(tmp_model, dataset, dx, dt, propagator,
model_init.shape, compute_grad=False,
prop_kwargs=prop_kwargs)
true_grad[x_idx] = (new_cost - init_cost) / dc
return fwi_grad, true_grad
def grad_2d_fd(model_true, model_init, x_r, x_s, dx, dt, dc, f,
propagator=None, prop_kwargs=None):
x_r_idx, x_s_idx = (np.array([x_r, x_s]) / dx).astype(np.int)
source, receiver_locations = _make_source_receiver(x_s_idx, x_r_idx, f)
if propagator is None:
propagator = Scalar2D
if prop_kwargs is None:
prop_kwargs = {}
prop = propagator(model_true, dx, dt, source, **prop_kwargs)
true_data, _ = forward_model(prop, receiver_locations)
receiver = {}
receiver['amplitude'] = true_data.receivers
receiver['locations'] = receiver_locations
dataset = [(source, receiver)]
init_cost, fwi_grad = costjac(model_init, dataset, dx, dt, propagator,
model_init.shape, compute_grad=True,
prop_kwargs=prop_kwargs)
true_grad = np.zeros_like(model_true)
for z_idx in range(model_true.shape[0]):
for x_idx in range(model_true.shape[1]):
tmp_model = model_init.copy()
tmp_model[z_idx, x_idx] += dc
new_cost, _ = costjac(tmp_model, dataset, dx, dt, propagator,
model_init.shape, compute_grad=False,
prop_kwargs=prop_kwargs)
true_grad[z_idx, x_idx] = (new_cost - init_cost) / dc
return fwi_grad, true_grad
def _make_source_receiver(x_s_idx, x_r_idx, f):
source = {}
source['amplitude'] = f.reshape(1, 1, -1)
source['locations'] = x_s_idx.reshape(1, 1, -1)
receiver_locations = x_r_idx.reshape(1, 1, -1)
return source, receiver_locations
def _set_coords(x, dx):
x_m = np.array(x) * dx
x_idx = np.array(x)
return x_m, x_idx
def model_direct_1d(c=1500, freq=25, dx=5, dt=0.0001, nx=80,
propagator=None, prop_kwargs=None):
"""Create a constant model, and the expected waveform at point,
and the forward propagated wave.
"""
model = np.ones(nx, dtype=np.float32) * c
nt = int(2*nx*dx/c/dt)
x_s, x_s_idx = _set_coords([[1]], dx)
x_r, x_r_idx = _set_coords([[nx-1]], dx)
f = ricker(freq, nt, dt, 0.05)
expected = direct_1d(x_r, x_s, dx, dt, c, f)
source, receiver_locations = _make_source_receiver(x_s_idx, x_r_idx, f)
if propagator is None:
propagator = Scalar1D
if prop_kwargs is None:
prop_kwargs = {}
prop = propagator(model, dx, dt, source, **prop_kwargs)
actual, _ = forward_model(prop, receiver_locations)
return expected, actual.receivers.ravel()
def model_direct_2d(c=1500, freq=25, dx=5, dt=0.0001, nx=[50, 50],
propagator=None, prop_kwargs=None):
"""Create a constant model, and the expected waveform at point,
and the forward propagated wave.
"""
model = np.ones(nx, dtype=np.float32) * c
nt = int(2*nx[0]*dx/c/dt)
middle = int(nx[1]/2)
x_s, x_s_idx = _set_coords([[1, middle]], dx)
x_r, x_r_idx = _set_coords([[nx[0]-1, middle]], dx)
#x_r, x_r_idx = _set_coords([[1, middle]], dx)
f = ricker(freq, nt, dt, 0.05)
expected = direct_2d_approx(x_r, x_s, dx, dt, c, f)
source, receiver_locations = _make_source_receiver(x_s_idx, x_r_idx, f)
if propagator is None:
propagator = Scalar2D
if prop_kwargs is None:
prop_kwargs = {}
prop = propagator(model, dx, dt, source, **prop_kwargs)
actual, _ = forward_model(prop, receiver_locations)
return expected, actual.receivers.ravel()
def model_scatter_1d(c=1500, dc=50, freq=25, dx=5, dt=0.0001, nx=100,
propagator=None, prop_kwargs=None):
"""Create a point scatterer model, and the expected waveform at point,
and the forward propagated wave.
"""
model = np.ones(nx, dtype=np.float32) * c
nt = int((3*nx*dx/c + 0.05)/dt)
x_s, x_s_idx = _set_coords([[1]], dx)
x_r, x_r_idx = _set_coords([[1]], dx)
x_p, x_p_idx = _set_coords([[nx-20]], dx)
f = ricker(freq, nt, dt, 0.05)
model[x_p_idx] += dc
expected = scattered_1d(x_r, x_s, x_p, dx, dt, c, dc, f)
source, receiver_locations = _make_source_receiver(x_s_idx, x_r_idx, f)
if propagator is None:
propagator = Scalar1D
if prop_kwargs is None:
prop_kwargs = {}
prop = propagator(model, dx, dt, source, **prop_kwargs)
actual, _ = forward_model(prop, receiver_locations)
return expected, actual.receivers.ravel()
def model_scatter_2d(c=1500, dc=150, freq=25, dx=5, dt=0.0001, nx=[50, 50],
propagator=None, prop_kwargs=None):
"""Create a point scatterer model, and the expected waveform at point,
and the forward propagated wave.
"""
nx = np.array(nx)
model = np.ones(nx, dtype=np.float32) * c
nt = int((3*nx[0]*dx/c + 0.05)/dt)
middle = int(nx[1]/2)
x_s, x_s_idx = _set_coords([[1, middle]], dx)
x_r, x_r_idx = _set_coords([[1, middle]], dx)
x_p, x_p_idx = _set_coords([[nx[0]-10, middle]], dx)
f = ricker(freq, nt, dt, 0.05)
model[x_p_idx[0, 0], x_p_idx[0, 1]] += dc
expected = scattered_2d(x_r, x_s, x_p, dx, dt, c, dc, f)
source, receiver_locations = _make_source_receiver(x_s_idx, x_r_idx, f)
if propagator is None:
propagator = Scalar2D
if prop_kwargs is None:
prop_kwargs = {}
prop = propagator(model, dx, dt, source, **prop_kwargs)
actual, _ = forward_model(prop, receiver_locations)
return expected, actual.receivers.ravel()
def model_grad_const_1d(c=1500, dc=1, freq=25, dx=5, dt=0.0001, nx=100,
propagator=None, prop_kwargs=None):
"""Create a point scatterer model, and the gradient.
"""
nt = int((3*nx*dx/c + 0.1)/dt)
x_s, x_s_idx = _set_coords([[1]], dx)
x_r, x_r_idx = _set_coords([[1]], dx)
x_p, x_p_idx = _set_coords([[nx-20]], dx)
f = ricker(freq, nt, dt, 0.05)
model_init = np.ones(nx, dtype=np.float32) * c
model_true = model_init.copy()
model_true[x_p_idx] += dc
expected = grad_1d(nx, x_r, x_s, x_p, dx, dt, c, dc, f)
fwi_grad, true_grad = grad_1d_fd(model_true, model_init, x_r, x_s, dx, dt,
dc, f, propagator, prop_kwargs)
return expected, fwi_grad, true_grad
def model_grad_const_2d(c=1500, dc=1, freq=25, dx=5, dt=0.0001, nx=[20, 20],
propagator=None, prop_kwargs=None):
"""Create a point scatterer model, and the gradient.
"""
nt = int((3*nx[0]*dx/c + 0.1)/dt)
middle = int(nx[1]/2)
x_s, x_s_idx = _set_coords([[1, middle]], dx)
x_r, x_r_idx = _set_coords([[1, middle]], dx)
x_p, x_p_idx = _set_coords([[nx[0]-5, middle]], dx)
f = ricker(freq, nt, dt, 0.05)
model_init = np.ones(nx, dtype=np.float32) * c
model_true = model_init.copy()
model_true[x_p_idx[0, 0], x_p_idx[0, 1]] += dc
expected = grad_2d(nx, x_r, x_s, x_p, dx, dt, c, dc, f)
fwi_grad, true_grad = grad_2d_fd(model_true, model_init, x_r, x_s, dx, dt,
dc, f, propagator, prop_kwargs)
return expected, fwi_grad, true_grad
def model_grad_rand_1d(c=2000, randc=100, dc=1, freq=25, dx=5, dt=0.0001,
nx=100, propagator=None, prop_kwargs=None):
"""Create a point scatterer model, and the gradient.
"""
nt = int((3*nx*dx/c + 0.1)/dt)
x_s, x_s_idx = _set_coords([[1]], dx)
x_r, x_r_idx = _set_coords([[1]], dx)
x_p, x_p_idx = _set_coords([[nx-20]], dx)
f = ricker(freq, nt, dt, 0.05)
model_init = (np.random.rand(nx).astype(np.float32) * randc) + c
model_true = model_init.copy()
model_true += np.random.rand(nx).astype(np.float32) * dc
fwi_grad, true_grad = grad_1d_fd(model_true, model_init, x_r, x_s, dx, dt,
dc, f, propagator, prop_kwargs)
return fwi_grad, true_grad
def model_grad_rand_2d(c=2000, randc=100, dc=1, freq=25, dx=5, dt=0.0001,
nx=[20, 20], propagator=None, prop_kwargs=None):
"""Create a point scatterer model, and the gradient.
"""
nt = int((3*nx[0]*dx/c + 0.1)/dt)
middle = int(nx[1]/2)
x_s, x_s_idx = _set_coords([[1, middle]], dx)
x_r, x_r_idx = _set_coords([[1, middle]], dx)
x_p, x_p_idx = _set_coords([[nx[0]-5, middle]], dx)
f = ricker(freq, nt, dt, 0.05)
model_init = (np.random.rand(nx[0], nx[1]).astype(np.float32) * randc) + c
model_true = model_init.copy()
model_true += np.random.rand(nx[0], nx[1]).astype(np.float32) * dc
fwi_grad, true_grad = grad_2d_fd(model_true, model_init, x_r, x_s, dx, dt,
dc, f, propagator, prop_kwargs)
return fwi_grad, true_grad
| 35.191919
| 78
| 0.601751
| 2,399
| 13,936
| 3.265527
| 0.073364
| 0.015828
| 0.018509
| 0.013786
| 0.85831
| 0.843758
| 0.830227
| 0.816696
| 0.810442
| 0.80074
| 0
| 0.03832
| 0.256602
| 13,936
| 395
| 79
| 35.281013
| 0.717857
| 0.089337
| 0
| 0.587591
| 0
| 0
| 0.004312
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083942
| false
| 0
| 0.029197
| 0.00365
| 0.19708
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
74ff03ae535259dbf715c2c0ec461ede2eff54a5
| 111
|
py
|
Python
|
resnest/d2/__init__.py
|
mohitktanwr/Improved-Inverse-ResNest-Isprs
|
8463d7be0f67c398c91241f47cd7d9e0d235d799
|
[
"Apache-2.0"
] | 3,168
|
2020-04-04T01:22:28.000Z
|
2022-03-31T12:14:50.000Z
|
resnest/d2/__init__.py
|
mohitktanwr/Improved-Inverse-ResNest-Isprs
|
8463d7be0f67c398c91241f47cd7d9e0d235d799
|
[
"Apache-2.0"
] | 138
|
2020-04-04T02:12:30.000Z
|
2022-03-21T03:20:52.000Z
|
resnest/d2/__init__.py
|
mohitktanwr/Improved-Inverse-ResNest-Isprs
|
8463d7be0f67c398c91241f47cd7d9e0d235d799
|
[
"Apache-2.0"
] | 527
|
2020-04-04T05:17:26.000Z
|
2022-03-31T06:15:34.000Z
|
from .resnest import build_resnest_backbone, build_resnest_fpn_backbone
from .config import add_resnest_config
| 37
| 71
| 0.891892
| 16
| 111
| 5.75
| 0.5
| 0.26087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081081
| 111
| 2
| 72
| 55.5
| 0.901961
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2d219c8f0eb906d3cd9ff27384a6b02c74ed3c6c
| 94
|
py
|
Python
|
build/lib.linux-x86_64-2.7_ucs4/mx/Proxy/mxProxy/testvlad.py
|
mkubux/egenix-mx-base
|
3e6f9186334d9d73743b0219ae857564c7208247
|
[
"eGenix"
] | null | null | null |
build/lib.linux-x86_64-2.7_ucs4/mx/Proxy/mxProxy/testvlad.py
|
mkubux/egenix-mx-base
|
3e6f9186334d9d73743b0219ae857564c7208247
|
[
"eGenix"
] | null | null | null |
build/lib.linux-x86_64-2.7_ucs4/mx/Proxy/mxProxy/testvlad.py
|
mkubux/egenix-mx-base
|
3e6f9186334d9d73743b0219ae857564c7208247
|
[
"eGenix"
] | null | null | null |
from mx.Proxy import WeakProxy
o = []
p = q = WeakProxy(o)
p = q = WeakProxy(o)
del o
print p
| 13.428571
| 30
| 0.648936
| 18
| 94
| 3.388889
| 0.555556
| 0.491803
| 0.360656
| 0.393443
| 0.557377
| 0.557377
| 0
| 0
| 0
| 0
| 0
| 0
| 0.223404
| 94
| 6
| 31
| 15.666667
| 0.835616
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.166667
| null | null | 0.166667
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
749363556bb426fd959f87762b68430fed9c471b
| 46
|
py
|
Python
|
protfasta/tests/conftest.py
|
holehouse-lab/protfasta
|
9737ed5f65a957bd9ce4727d31e52492ca68dd06
|
[
"MIT"
] | 1
|
2020-10-17T15:46:54.000Z
|
2020-10-17T15:46:54.000Z
|
protfasta/tests/conftest.py
|
holehouse-lab/protfasta
|
9737ed5f65a957bd9ce4727d31e52492ca68dd06
|
[
"MIT"
] | null | null | null |
protfasta/tests/conftest.py
|
holehouse-lab/protfasta
|
9737ed5f65a957bd9ce4727d31e52492ca68dd06
|
[
"MIT"
] | null | null | null |
import protfasta
import pytest
import sys
| 5.75
| 16
| 0.782609
| 6
| 46
| 6
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.217391
| 46
| 7
| 17
| 6.571429
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7499b19a7d5bfb3795251c8a3afcd64e01e78496
| 18,814
|
py
|
Python
|
tests/shared/test_validation.py
|
StephenGill/govuk-shielded-vulnerable-people-service
|
fbb74de933ffd3080c84611de067ba336bfa5518
|
[
"MIT"
] | null | null | null |
tests/shared/test_validation.py
|
StephenGill/govuk-shielded-vulnerable-people-service
|
fbb74de933ffd3080c84611de067ba336bfa5518
|
[
"MIT"
] | null | null | null |
tests/shared/test_validation.py
|
StephenGill/govuk-shielded-vulnerable-people-service
|
fbb74de933ffd3080c84611de067ba336bfa5518
|
[
"MIT"
] | null | null | null |
import pytest
from unittest.mock import patch
from vulnerable_people_form.form_pages.shared import validation
from flask import Flask
from vulnerable_people_form.form_pages.shared.answers_enums import (
ApplyingOnOwnBehalfAnswers,
MedicalConditionsAnswers,
NHSLetterAnswers,
ViewOrSetupAnswers,
YesNoAnswers,
PrioritySuperMarketDeliveriesAnswers)
_FORM_ANSWERS_FUNCTION_FULLY_QUALIFIED_NAME = \
"vulnerable_people_form.form_pages.shared.validation.form_answers"
_current_app = Flask(__name__)
_current_app.secret_key = 'test_secret'
_radio_button_negative_test_data = ["148", 99, "test_invalid_enum_value"]
_yes_no_radio_button_positive_test_data = [e.value for e in YesNoAnswers]
def test_validate_name_should_return_true_when_first_name_and_surname_entered():
def create_form_answers_with_first_name_and_surname():
return {
"name": {"first_name": "jon", "middle_name": "", "last_name": "smith"}
}
with patch(
_FORM_ANSWERS_FUNCTION_FULLY_QUALIFIED_NAME,
create_form_answers_with_first_name_and_surname), \
_current_app.test_request_context() as test_request_ctx:
test_request_ctx.session["form_answers"] = create_form_answers_with_first_name_and_surname()
is_valid = validation.validate_name()
assert len(test_request_ctx.session) == 1
assert is_valid is True
def test_validate_name_should_return_false_when_only_first_name_entered():
def create_form_answers_with_first_name_only():
return {'name': {'first_name': 'jon', 'middle_name': '', 'last_name': ''}}
with patch(
_FORM_ANSWERS_FUNCTION_FULLY_QUALIFIED_NAME,
create_form_answers_with_first_name_only), \
_current_app.test_request_context() as test_request_ctx:
test_request_ctx.session["form_answers"] = create_form_answers_with_first_name_only()
is_valid = validation.validate_name()
assert is_valid is False
assert len(test_request_ctx.session["error_items"]) == 1
assert test_request_ctx.session["error_items"]["name"]["last_name"] == "Enter your last name"
def test_validate_name_should_return_false_when_only_last_name_entered():
def create_form_answers_with_last_name_only():
return {'name': {'first_name': '', 'middle_name': '', 'last_name': 'Smith'}}
with patch(
_FORM_ANSWERS_FUNCTION_FULLY_QUALIFIED_NAME,
create_form_answers_with_last_name_only), \
_current_app.test_request_context() as test_request_ctx:
test_request_ctx.session["form_answers"] = create_form_answers_with_last_name_only()
is_valid = validation.validate_name()
assert is_valid is False
assert len(test_request_ctx.session["error_items"]) == 1
assert test_request_ctx.session["error_items"]["name"]["first_name"] == "Enter your first name"
@pytest.mark.parametrize("form_field_value", _radio_button_negative_test_data)
def test_validate_applying_on_own_behalf_should_return_false_when_invalid_answer_selected(form_field_value):
_execute_input_validation_test_and_assert_validation_failed(
validation.validate_applying_on_own_behalf,
form_field_value,
"applying_on_own_behalf",
"Select yes if you are applying on your own behalf"
)
@pytest.mark.parametrize("form_field_value", [e.value for e in ApplyingOnOwnBehalfAnswers])
def test_validate_applying_on_own_behalf_should_return_true_when_valid_answer_selected(form_field_value):
_execute_input_validation_test_and_assert_validation_passed(
validation.validate_applying_on_own_behalf,
form_field_value,
"applying_on_own_behalf"
)
@pytest.mark.parametrize("form_field_value", _radio_button_negative_test_data)
def test_validate_nhs_letter_should_return_false_when_invalid_answer_selected(form_field_value):
_execute_input_validation_test_and_assert_validation_failed(
validation.validate_nhs_letter,
form_field_value,
"nhs_letter",
"Select if you received the letter from the NHS"
)
@pytest.mark.parametrize("form_field_value", [e.value for e in NHSLetterAnswers])
def test_validate_nhs_letter_should_return_true_when_valid_answer_selected(form_field_value):
_execute_input_validation_test_and_assert_validation_passed(
validation.validate_nhs_letter,
form_field_value,
"nhs_letter"
)
@pytest.mark.parametrize("form_field_value", _radio_button_negative_test_data)
def test_validate_nhs_login_should_return_false_when_invalid_answer_selected(form_field_value):
_execute_input_validation_test_and_assert_validation_failed(
validation.validate_nhs_login,
form_field_value,
"nhs_login",
"Select yes if you want log in with you NHS details"
)
@pytest.mark.parametrize("form_field_value", _yes_no_radio_button_positive_test_data)
def test_validate_nhs_login_should_return_true_when_valid_answer_selected(form_field_value):
_execute_input_validation_test_and_assert_validation_passed(
validation.validate_nhs_login,
form_field_value,
"nhs_login"
)
@pytest.mark.parametrize("form_field_value", _radio_button_negative_test_data)
def test_validate_register_with_nhs_should_return_false_when_invalid_answer_selected(form_field_value):
_populate_request_form_and_execute_input_validation_test_and_assert_validation_failed(
validation.validate_register_with_nhs,
form_field_value,
"nhs_registration",
"You need to select if you want to register an account with the NHS"
+ " in order to retrieve your answers at a alater point."
)
@pytest.mark.parametrize("form_field_value", _yes_no_radio_button_positive_test_data)
def test_validate_register_with_nhs_should_return_true_when_valid_answer_selected(form_field_value):
_populate_request_form_and_execute_input_validation_test_and_assert_validation_passed(
validation.validate_register_with_nhs,
form_field_value,
"nhs_registration"
)
@pytest.mark.parametrize("form_field_value", [e.value for e in ViewOrSetupAnswers])
def test_validate_view_or_setup_should_return_true_when_valid_answer_selected(form_field_value):
_populate_request_form_and_execute_input_validation_test_and_assert_validation_passed(
validation.validate_view_or_setup,
form_field_value,
"view_or_setup"
)
@pytest.mark.parametrize("form_field_value", _radio_button_negative_test_data)
def test_validate_view_or_setup_should_return_false_when_invalid_answer_selected(form_field_value):
_populate_request_form_and_execute_input_validation_test_and_assert_validation_failed(
validation.validate_view_or_setup,
form_field_value,
"view_or_setup",
"You must select if you would like to set up an account, or access an account via your NHS Login."
)
@pytest.mark.parametrize("form_field_value", _radio_button_negative_test_data)
def test_validate_medical_conditions_should_return_false_when_invalid_answer_selected(form_field_value):
_execute_input_validation_test_and_assert_validation_failed(
validation.validate_medical_conditions,
form_field_value,
"medical_conditions",
"Select yes if you have one of the medical conditions on the list"
)
@pytest.mark.parametrize("form_field_value", [e.value for e in MedicalConditionsAnswers])
def test_validate_medical_conditions_should_return_true_when_valid_answer_selected(form_field_value):
_execute_input_validation_test_and_assert_validation_passed(
validation.validate_medical_conditions,
form_field_value,
"medical_conditions"
)
@pytest.mark.parametrize("form_field_value", [e.value for e in PrioritySuperMarketDeliveriesAnswers])
def test_validate_priority_supermarket_deliveries_should_return_true_when_valid_answer_selected(form_field_value):
_execute_input_validation_test_and_assert_validation_passed(
validation.validate_priority_supermarket_deliveries,
form_field_value,
"priority_supermarket_deliveries"
)
@pytest.mark.parametrize("form_field_value", _radio_button_negative_test_data)
def test_validate_priority_supermarket_deliveries_should_return_false_when_invalid_answer_selected(
form_field_value):
_execute_input_validation_test_and_assert_validation_failed(
validation.validate_priority_supermarket_deliveries,
form_field_value,
"priority_supermarket_deliveries",
"Select if you want priority supermarket deliveries"
)
@pytest.mark.parametrize("form_field_value", _yes_no_radio_button_positive_test_data)
def test_validate_do_you_have_someone_to_go_shopping_for_you_should_return_true_when_valid_answer_selected(
form_field_value):
_execute_input_validation_test_and_assert_validation_passed(
validation.validate_do_you_have_someone_to_go_shopping_for_you,
form_field_value,
"do_you_have_someone_to_go_shopping_for_you"
)
@pytest.mark.parametrize("form_field_value", _radio_button_negative_test_data)
def test_validate_do_you_have_someone_to_go_shopping_for_you_should_return_false_when_invalid_answer_selected(
form_field_value):
_execute_input_validation_test_and_assert_validation_failed(
validation.validate_do_you_have_someone_to_go_shopping_for_you,
form_field_value,
"do_you_have_someone_to_go_shopping_for_you",
"Select yes if you have someone who can go shopping for you"
)
@pytest.mark.parametrize("form_field_value", ["", None])
def test_validate_address_lookup_should_return_false_when_no_address_present(form_field_value):
_populate_request_form_and_execute_input_validation_test_and_assert_validation_failed(
validation.validate_address_lookup,
form_field_value,
"address",
"You must select an address",
"address_lookup"
)
def test_validate_address_lookup_should_return_true_when_address_present():
_populate_request_form_and_execute_input_validation_test_and_assert_validation_passed(
validation.validate_address_lookup,
"{"uprn": 72277644, "town_city": "Pudsey", " +
""postcode": "LS28 8JR", "building_and_street_line_1": " +
""2 Galloway Lane", "building_and_street_line_2": ""}",
"address"
)
def test_validate_postcode_should_return_true_when_valid_postcode_present():
with _current_app.test_request_context() as test_request_ctx:
is_valid = validation.validate_postcode("LS1 6AE", "postcode")
assert is_valid is True
assert len(test_request_ctx.session) == 0
@pytest.mark.parametrize("postcode", [""])
def test_validate_postcode_should_return_false_when_no_postcode_present(postcode):
with _current_app.test_request_context() as test_request_ctx:
is_valid = validation.validate_postcode(postcode, "postcode")
assert is_valid is False
assert len(test_request_ctx.session) == 1
assert test_request_ctx.session["error_items"]["postcode"]["postcode"] \
== "What is the postcode where you need support?"
@pytest.mark.parametrize("postcode", [" ", "invalid_post_code", "ssss 12345"])
def test_validate_postcode_should_return_false_when_invalid_postcode_present(postcode):
with _current_app.test_request_context() as test_request_ctx:
is_valid = validation.validate_postcode(postcode, "postcode")
assert is_valid is False
assert len(test_request_ctx.session) == 1
assert test_request_ctx.session["error_items"]["postcode"]["postcode"] == "Enter a real postcode"
@pytest.mark.parametrize("form_field_value", _radio_button_negative_test_data)
def test_validate_basic_care_needs_should_return_false_when_invalid_answer_selected(form_field_value):
_execute_input_validation_test_and_assert_validation_failed(
validation.validate_basic_care_needs,
form_field_value,
"basic_care_needs",
"Select yes if your basic care needs are being met at the moment"
)
@pytest.mark.parametrize("form_field_value", _yes_no_radio_button_positive_test_data)
def test_validate_basic_care_needs_should_return_true_when_valid_answer_selected(form_field_value):
_execute_input_validation_test_and_assert_validation_passed(
validation.validate_basic_care_needs,
form_field_value,
"basic_care_needs"
)
@pytest.mark.parametrize("form_field_value", ["", None, "123"])
def test_validate_nhs_number_should_return_false_when_empty_or_invalid_length_nhs_number_entered(
form_field_value):
_execute_input_validation_test_and_assert_validation_failed(
validation.validate_nhs_number,
form_field_value,
"nhs_number",
"Enter your 10-digit NHS number"
)
@pytest.mark.parametrize("form_field_value", ["1234567891", "abcd123456"])
def test_validate_nhs_number_should_return_false_when_invalid_nhs_number_entered(form_field_value):
_execute_input_validation_test_and_assert_validation_failed(
validation.validate_nhs_number,
form_field_value,
"nhs_number",
"Enter a real NHS number"
)
def test_validate_nhs_number_should_return_true_when_valid_nhs_number_entered():
_execute_input_validation_test_and_assert_validation_passed(
validation.validate_nhs_number,
"9686368604",
"nhs_number"
)
@pytest.mark.parametrize("form_field_value, expected_error_msg", [
("sfsdf-sfdsfsd", "Enter an email address in the correct format, like name@example.com"),
("invalid@email", "Enter an email address in the correct format, like name@example.com")])
def test_validate_email_if_present_should_return_false_when_invalid_email_entered(form_field_value, expected_error_msg):
def create_form_answers():
return {"contact_details": {"email": form_field_value}}
with patch(
_FORM_ANSWERS_FUNCTION_FULLY_QUALIFIED_NAME,
create_form_answers), \
_current_app.test_request_context() as test_request_ctx:
is_valid = validation.validate_email_if_present("contact_details", "email")
_make_validation_failure_assertions(
is_valid,
test_request_ctx.session,
"email",
expected_error_msg,
"contact_details")
def test_validate_email_if_present_should_return_true_when_valid_email_entered():
def create_form_answers():
return {"contact_details": {"email": "my-valid.email@gmail.com"}}
with patch(
_FORM_ANSWERS_FUNCTION_FULLY_QUALIFIED_NAME,
create_form_answers), \
_current_app.test_request_context() as test_request_ctx:
is_valid = validation.validate_email_if_present("contact_details", "email")
assert is_valid is True
assert len(test_request_ctx.session) == 0
def test_validate_email_if_present_should_return_true_when_no_email_entered():
def create_form_answers():
return {"contact_details": {"email": ""}}
with patch(
_FORM_ANSWERS_FUNCTION_FULLY_QUALIFIED_NAME,
create_form_answers), \
_current_app.test_request_context() as test_request_ctx:
is_valid = validation.validate_email_if_present("contact_details", "email")
assert is_valid is True
assert len(test_request_ctx.session) == 0
@pytest.mark.parametrize("form_field", [None, ""])
def test_validate_phone_number_if_present_should_return_true_when_no_phone_number_entered(form_field):
def create_form_answers():
return {"contact_details": {"phone_number_calls": ""}}
with patch(
_FORM_ANSWERS_FUNCTION_FULLY_QUALIFIED_NAME,
create_form_answers), \
_current_app.test_request_context() as test_request_ctx:
is_valid = validation.validate_phone_number_if_present("contact_details", "phone_number_calls")
assert is_valid is True
assert len(test_request_ctx.session) == 0
def _populate_request_form_and_execute_input_validation_test_and_assert_validation_failed(
validation_function, form_field_value, form_field, validation_error_msg, session_error_items_key=None):
with _current_app.test_request_context(
"any-test-url",
data={form_field: form_field_value}) as test_request_ctx:
is_valid = validation_function()
_make_validation_failure_assertions(is_valid, test_request_ctx.session,
form_field, validation_error_msg, session_error_items_key)
def _populate_request_form_and_execute_input_validation_test_and_assert_validation_passed(
validation_function, form_field_value, form_field):
with _current_app.test_request_context(
"any-test-url",
data={form_field: form_field_value}) as test_request_ctx:
is_valid = validation_function()
assert is_valid is True
assert len(test_request_ctx.session) == 0
def _execute_input_validation_test_and_assert_validation_passed(validation_function, form_field_value, form_field):
def create_form_answers():
return {form_field: form_field_value}
with patch(
_FORM_ANSWERS_FUNCTION_FULLY_QUALIFIED_NAME,
create_form_answers), \
_current_app.test_request_context() as test_request_ctx:
is_valid = validation_function()
assert is_valid is True
assert len(test_request_ctx.session) == 0
def _execute_input_validation_test_and_assert_validation_failed(validation_function, form_field_value, form_field,
validation_error_msg, session_error_items_key=None):
def create_form_answers():
return {} if form_field_value is None else {form_field: form_field_value}
with patch(
_FORM_ANSWERS_FUNCTION_FULLY_QUALIFIED_NAME,
create_form_answers), \
_current_app.test_request_context() as test_request_ctx:
is_valid = validation_function()
_make_validation_failure_assertions(is_valid, test_request_ctx.session,
form_field, validation_error_msg, session_error_items_key)
def _make_validation_failure_assertions(is_valid, session, form_field,
validation_error_msg, session_error_items_key=None):
assert is_valid is False
assert len(session["error_items"]) == 1
error_items_key = session_error_items_key if session_error_items_key else form_field
assert session["error_items"][error_items_key][form_field] == validation_error_msg
| 42.469526
| 120
| 0.76459
| 2,423
| 18,814
| 5.34544
| 0.084606
| 0.062539
| 0.081069
| 0.0542
| 0.873301
| 0.848826
| 0.823502
| 0.786211
| 0.742588
| 0.697807
| 0
| 0.004405
| 0.167429
| 18,814
| 442
| 121
| 42.565611
| 0.822459
| 0
| 0
| 0.508721
| 0
| 0.011628
| 0.145424
| 0.02243
| 0
| 0
| 0
| 0
| 0.174419
| 1
| 0.136628
| false
| 0.037791
| 0.014535
| 0.026163
| 0.177326
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
77726687d353f7cf8dff983b28c1e2f452c8c055
| 15,157
|
py
|
Python
|
src/CardActions/Renaissance.py
|
cevirici/woodcutter
|
f775e002475e80662faffeeed966306c36916da1
|
[
"MIT"
] | null | null | null |
src/CardActions/Renaissance.py
|
cevirici/woodcutter
|
f775e002475e80662faffeeed966306c36916da1
|
[
"MIT"
] | 6
|
2021-03-19T10:48:21.000Z
|
2022-02-10T10:34:24.000Z
|
woodcutter/src/CardActions/Renaissance.py
|
cevirici/dominion-woodcutter
|
3eea6081a180499bf5e370877146f3ca2eb1c068
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from copy import deepcopy
from .CardInfo import CardInfo
from woodcutter.src.Card import *
from woodcutter.src.Action import Action
class ACTING_TROUPE(CardInfo):
names = ["Acting Troupe", "Acting Troupes", "an Acting Troupe"]
types = [Types.ACTION]
cost = [3, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class BORDER_GUARD(CardInfo):
names = ["Border Guard", "Border Guards", "a Border Guard"]
types = [Types.ACTION]
cost = [2, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class CARGO_SHIP(CardInfo):
names = ["Cargo Ship", "Cargo Ships", "a Cargo Ship"]
types = [Types.ACTION, Types.DURATION]
cost = [3, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class DUCAT(CardInfo):
names = ["Ducat", "Ducats", "a Ducat"]
types = [Types.TREASURE]
cost = [2, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class EXPERIMENT(CardInfo):
names = ["Experiment", "Experiments", "an Experiment"]
types = [Types.ACTION]
cost = [3, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class FLAG_BEARER(CardInfo):
names = ["Flag Bearer", "Flag Bearers", "a Flag Bearer"]
types = [Types.ACTION]
cost = [4, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class HIDEOUT(CardInfo):
names = ["Hideout", "Hideouts", "a Hideout"]
types = [Types.ACTION]
cost = [4, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class INVENTOR(CardInfo):
names = ["Inventor", "Inventors", "an Inventor"]
types = [Types.ACTION]
cost = [4, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class IMPROVE(CardInfo):
names = ["Improve", "Improves", "an Improve"]
types = [Types.ACTION]
cost = [3, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class LACKEYS(CardInfo):
names = ["Lackeys", "Lackeys", "a Lackeys"]
types = [Types.ACTION]
cost = [2, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class MOUNTAIN_VILLAGE(CardInfo):
names = ["Mountain Village", "Mountain Villages", "a Mountain Village"]
types = [Types.ACTION]
cost = [4, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class PATRON(CardInfo):
names = ["Patron", "Patrons", "a Patron"]
types = [Types.ACTION, Types.REACTION]
cost = [4, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class PRIEST(CardInfo):
names = ["Priest", "Priests", "a Priest"]
types = [Types.ACTION]
cost = [4, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class RESEARCH(CardInfo):
names = ["Research", "Researches", "a Research"]
types = [Types.ACTION, Types.DURATION]
cost = [4, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class SILK_MERCHANT(CardInfo):
names = ["Silk Merchant", "Silk Merchants", "a Silk Merchant"]
types = [Types.ACTION]
cost = [4, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class OLD_WITCH(CardInfo):
names = ["Old Witch", "Old Witches", "an Old Witch"]
types = [Types.ACTION, Types.ATTACK]
cost = [5, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class RECRUITER(CardInfo):
names = ["Recruiter", "Recruiters", "a Recruiter"]
types = [Types.ACTION]
cost = [5, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class SCEPTER(CardInfo):
names = ["Scepter", "Scepters", "a Scepter"]
types = [Types.TREASURE]
cost = [5, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class SCHOLAR(CardInfo):
names = ["Scholar", "Scholars", "a Scholar"]
types = [Types.ACTION]
cost = [5, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class SCULPTOR(CardInfo):
names = ["Sculptor", "Sculptors", "a Sculptor"]
types = [Types.ACTION]
cost = [5, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class SEER(CardInfo):
names = ["Seer", "Seers", "a Seer"]
types = [Types.ACTION]
cost = [5, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class SPICES(CardInfo):
names = ["Spices", "Spices", "a Spices"]
types = [Types.TREASURE]
cost = [5, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class SWASHBUCKLER(CardInfo):
names = ["Swashbuckler", "Swashbucklers", "a Swashbuckler"]
types = [Types.ACTION]
cost = [5, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class TREASURER(CardInfo):
names = ["Treasurer", "Treasurers", "a Treasurer"]
types = [Types.ACTION]
cost = [5, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class VILLAIN(CardInfo):
names = ["Villain", "Villains", "a Villain"]
types = [Types.ACTION, Types.ATTACK]
cost = [5, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class FLAG(CardInfo):
names = ["Flag", "Flags", "the Flag"]
types = [Types.ARTIFACT]
cost = [0, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class HORN(CardInfo):
names = ["Horn", "Horns", "the Horn"]
types = [Types.ARTIFACT]
cost = [0, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class KEY(CardInfo):
names = ["Key", "Keys", "the Key"]
types = [Types.ARTIFACT]
cost = [0, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class LANTERN(CardInfo):
names = ["Lantern", "Lanterns", "the Lantern"]
types = [Types.ARTIFACT]
cost = [0, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class TREASURE_CHEST(CardInfo):
names = ["Treasure Chest", "Treasure Chests", "the Treasure Chest"]
types = [Types.ARTIFACT]
cost = [0, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class ACADEMY(CardInfo):
names = ["Academy", "Academy", "Academy"]
types = [Types.PROJECT]
cost = [5, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class BARRACKS(CardInfo):
names = ["Barracks", "Barracks", "Barracks"]
types = [Types.PROJECT]
cost = [6, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class CANAL(CardInfo):
names = ["Canal", "Canal", "Canal"]
types = [Types.PROJECT]
cost = [7, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class CAPITALISM(CardInfo):
names = ["Capitalism", "Capitalism", "Capitalism"]
types = [Types.PROJECT]
cost = [5, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class CATHEDRAL(CardInfo):
names = ["Cathedral", "Cathedral", "Cathedral"]
types = [Types.PROJECT]
cost = [3, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class CITADEL(CardInfo):
names = ["Citadel", "Citadel", "Citadel"]
types = [Types.PROJECT]
cost = [8, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class CITY_GATE(CardInfo):
names = ["City Gate", "City Gate", "City Gate"]
types = [Types.PROJECT]
cost = [3, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class CROP_ROTATION(CardInfo):
names = ["Crop Rotation", "Crop Rotation", "Crop Rotation"]
types = [Types.PROJECT]
cost = [6, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class EXPLORATION(CardInfo):
names = ["Exploration", "Exploration", "Exploration"]
types = [Types.PROJECT]
cost = [4, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class FAIR(CardInfo):
names = ["Fair", "Fair", "Fair"]
types = [Types.PROJECT]
cost = [4, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class FLEET(CardInfo):
names = ["Fleet", "Fleet", "Fleet"]
types = [Types.PROJECT]
cost = [5, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class GUILDHALL(CardInfo):
names = ["Guildhall", "Guildhall", "Guildhall"]
types = [Types.PROJECT]
cost = [5, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class INNOVATION(CardInfo):
names = ["Innovation", "Innovation", "Innovation"]
types = [Types.PROJECT]
cost = [6, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class PAGEANT(CardInfo):
names = ["Pageant", "Pageant", "Pageant"]
types = [Types.PROJECT]
cost = [3, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class PIAZZA(CardInfo):
names = ["Piazza", "Piazza", "Piazza"]
types = [Types.PROJECT]
cost = [5, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class ROAD_NETWORK(CardInfo):
names = ["Road Network", "Road Network", "Road Network"]
types = [Types.PROJECT]
cost = [5, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class SEWERS(CardInfo):
names = ["Sewers", "Sewers", "Sewers"]
types = [Types.PROJECT]
cost = [3, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class SILOS(CardInfo):
names = ["Silos", "Silos", "Silos"]
types = [Types.PROJECT]
cost = [4, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class SINISTER_PLOT(CardInfo):
names = ["Sinister Plot", "Sinister Plot", "Sinister Plot"]
types = [Types.PROJECT]
cost = [4, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
class STAR_CHART(CardInfo):
names = ["Star Chart", "Star Chart", "Star Chart"]
types = [Types.PROJECT]
cost = [3, 0, 0]
def onPlay(self, state, log, cardIndex):
state = deepcopy(state)
state.stack += []
state.candidates = state.stack.pop()
return state
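# Sketch only, not part of the original module: every card above repeats the
# identical onPlay placeholder, so a shared default (written here as a
# hypothetical mixin, since CardInfo itself is defined elsewhere) could remove
# the duplication; cards would then override onPlay only when their effect
# actually differs.
class _DefaultOnPlayMixin:
    def onPlay(self, state, log, cardIndex):
        state = deepcopy(state)
        state.stack += []                      # placeholder: nothing pushed yet
        state.candidates = state.stack.pop()
        return state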
| 25.052893
| 75
| 0.579996
| 1,710
| 15,157
| 5.133333
| 0.076023
| 0.113921
| 0.02848
| 0.062657
| 0.713033
| 0.713033
| 0.706425
| 0.706425
| 0.706425
| 0.706425
| 0
| 0.01377
| 0.276506
| 15,157
| 604
| 76
| 25.094371
| 0.786704
| 0.001386
| 0
| 0.766004
| 0
| 0
| 0.089203
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.110375
| false
| 0
| 0.006623
| 0
| 0.668874
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
779b9225ab3c366d85d1607a29ea5e5a19e3cc7d
| 44
|
py
|
Python
|
qcodes/instrument_drivers/sqdlab/dsp/__init__.py
|
sqdlab/Qcodes
|
82a4706028cd8eaef8669ff978c704419debc447
|
[
"MIT"
] | null | null | null |
qcodes/instrument_drivers/sqdlab/dsp/__init__.py
|
sqdlab/Qcodes
|
82a4706028cd8eaef8669ff978c704419debc447
|
[
"MIT"
] | null | null | null |
qcodes/instrument_drivers/sqdlab/dsp/__init__.py
|
sqdlab/Qcodes
|
82a4706028cd8eaef8669ff978c704419debc447
|
[
"MIT"
] | 1
|
2020-04-24T01:15:44.000Z
|
2020-04-24T01:15:44.000Z
|
from .pyopencl_ import *
from . import fft
| 11
| 24
| 0.727273
| 6
| 44
| 5.166667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.204545
| 44
| 3
| 25
| 14.666667
| 0.885714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
77d21c695a173b6318dc0c522f4acb46bbeb8012
| 6,113
|
py
|
Python
|
test/test_match.py
|
gbenetz/pvcheck
|
e165939dc6b9ba75ee70a79aa7a2ef4637356bae
|
[
"MIT"
] | 3
|
2016-04-12T21:42:52.000Z
|
2020-04-20T10:58:02.000Z
|
test/test_match.py
|
gbenetz/pvcheck
|
e165939dc6b9ba75ee70a79aa7a2ef4637356bae
|
[
"MIT"
] | 6
|
2015-10-16T13:13:50.000Z
|
2020-05-30T20:42:22.000Z
|
test/test_match.py
|
gbenetz/pvcheck
|
e165939dc6b9ba75ee70a79aa7a2ef4637356bae
|
[
"MIT"
] | 1
|
2019-06-10T08:51:54.000Z
|
2019-06-10T08:51:54.000Z
|
import unittest
import sys
sys.path.insert(0, '../src')
from match import *
class TestOrderedComparisons(unittest.TestCase):
def test_compare_sections1(self):
exp = ['a b c d', 'efg hij']
diffs, matches = compare_sections(['a b c d', 'efg hij'], exp)
self.assertEqual(diffs, [0.0, 0.0])
self.assertEqual(matches, ['a b c d', 'efg hij'])
def test_compare_sections2(self):
exp = ['a b c d', 'efg hij']
diffs, matches = compare_sections(['b c a d', 'efg hij'], exp)
self.assertEqual(diffs, [0.75, 0.0])
self.assertEqual(matches, ['a b c d', 'efg hij'])
def test_compare_sections3(self):
exp = ['a b c d', 'efg hij']
diffs, matches = compare_sections(['a b c d'], exp)
self.assertEqual(diffs, [0.0, 1.0])
self.assertEqual(matches, ['a b c d', 'efg hij'])
def test_compare_sections4(self):
exp = ['a b c d', 'efg hij']
diffs, matches = compare_sections(['efg hij'], exp)
self.assertEqual(diffs, [1.0, 1.0])
self.assertEqual(matches, ['a b c d', 'efg hij'])
def test_compare_sections5(self):
exp = ['a b c d', 'efg hij']
diffs, matches = compare_sections(['a b c d', 'efg hij',
'extra'], exp)
self.assertEqual(diffs, [0.0, 0.0, 1.0])
self.assertEqual(matches, ['a b c d', 'efg hij', None])
def test_compare_sections6(self):
exp = ['a b c d', 'efg hij']
diffs, matches = compare_sections([], exp)
self.assertEqual(diffs, [1.0, 1.0])
self.assertEqual(matches, ['a b c d', 'efg hij'])
def test_compare_sections7(self):
diffs, matches = compare_sections(['a b c d', 'efg hij'], [])
self.assertEqual(diffs, [1.0, 1.0])
self.assertEqual(matches, [None, None])
    def test_compare_sections8(self):
diffs, matches = compare_sections([], [])
self.assertEqual(diffs, [])
self.assertEqual(matches, [])
class TestUnorderedComparisons(unittest.TestCase):
def test_compare_sections1(self):
exp = ['a', 'b', 'c']
diffs, matches = compare_sections(['a', 'b', 'c'], exp, False)
self.assertEqual(diffs, [0.0, 0.0, 0.0])
self.assertEqual(matches, ['a', 'b', 'c'])
def test_compare_sections2(self):
exp = ['a', 'b', 'c']
diffs, matches = compare_sections(['b', 'a', 'c'], exp, False)
self.assertEqual(diffs, [0.0, 0.0, 0.0])
self.assertEqual(matches, ['b', 'a', 'c'])
def test_compare_sections3(self):
exp = ['a', 'b', 'c']
diffs, matches = compare_sections(['b', 'c', 'a'], exp, False)
self.assertEqual(diffs, [0.0, 0.0, 0.0])
self.assertEqual(matches, ['b', 'c', 'a'])
def test_compare_sections4(self):
exp = ['a', 'b', 'c']
diffs, matches = compare_sections(['a', 'c', 'x'], exp, False)
self.assertEqual(diffs, [0.0, 0.0, 1.0, 1.0])
self.assertEqual(matches, ['a', 'c', None, 'b'])
def test_compare_sections5(self):
exp = ['a', 'b', 'c']
diffs, matches = compare_sections(['aa', 'c', 'b'], exp, False)
self.assertEqual(diffs, [1.0, 0.0, 0.0, 1.0])
self.assertEqual(matches, [None, 'c', 'b', 'a'])
def test_compare_sections6(self):
exp = ['a', 'b', 'c']
diffs, matches = compare_sections(['x', 'y'], exp, False)
self.assertEqual(diffs, [1.0, 1.0, 1.0, 1.0, 1.0])
self.assertEqual(matches, [None, None, 'a', 'b', 'c'])
def test_compare_sections7(self):
exp = ['a', 'b', 'c']
diffs, matches = compare_sections([], exp, False)
self.assertEqual(diffs, [1.0, 1.0, 1.0])
self.assertEqual(matches, ['a', 'b', 'c'])
def test_compare_sections8(self):
diffs, matches = compare_sections(['x', 'y'], [], False)
self.assertEqual(diffs, [1.0, 1.0])
self.assertEqual(matches, [None, None])
def test_compare_sections9(self):
diffs, matches = compare_sections([], [], False)
self.assertEqual(diffs, [])
self.assertEqual(matches, [])
class TestFieldComparisons(unittest.TestCase):
def test_compare_text1(self):
diffs, matches = compare_sections([' abc\t\tdef '],
[' abc def'])
self.assertEqual(diffs, [0.0])
self.assertEqual(matches, [' abc def'])
def test_compare_text2(self):
diffs, matches = compare_sections(['ABC'], ['abc'])
self.assertEqual(diffs, [1.0])
self.assertEqual(matches, ['abc'])
def test_compare_int1(self):
diffs, matches = compare_sections(['42', '+42', '042'],
['42', '42', '42'])
self.assertEqual(diffs, [0.0] * 3)
self.assertEqual(matches, ['42'] * 3)
def test_compare_int2(self):
diffs, matches = compare_sections(['0', '+0', '-0', '00'],
['0'] * 4)
self.assertEqual(diffs, [0.0] * 4)
self.assertEqual(matches, ['0'] * 4)
def test_compare_float1(self):
exp = '3. 3.1 3.14 3.141 31.1415'.split()
diffs, matches = compare_sections(['3.14'] * 5, exp)
self.assertEqual(diffs, [0.0, 0.0, 0.0, 1.0, 1.0])
self.assertEqual(matches, exp)
def test_compare_float2(self):
diffs, matches = compare_sections(['3.5'], ['+4.'])
self.assertEqual(diffs, [0.0])
self.assertEqual(matches, ['+4.'])
def test_compare_float3(self):
diffs, matches = compare_sections(['3.4999999'], ['+4.'])
self.assertEqual(diffs, [1.0])
self.assertEqual(matches, ['+4.'])
def test_compare_float4(self):
diffs, matches = compare_sections(['-3.5'], ['-4.'])
self.assertEqual(diffs, [0.0])
self.assertEqual(matches, ['-4.'])
def test_compare_float5(self):
diffs, matches = compare_sections(['-3.4999999'], ['-4.'])
self.assertEqual(diffs, [1.0])
self.assertEqual(matches, ['-4.'])
if __name__ == '__main__':
unittest.main()
| 37.734568
| 71
| 0.55292
| 797
| 6,113
| 4.132999
| 0.091593
| 0.236794
| 0.02459
| 0.213115
| 0.867942
| 0.811475
| 0.756831
| 0.716151
| 0.665452
| 0.526108
| 0
| 0.047218
| 0.262064
| 6,113
| 161
| 72
| 37.968944
| 0.682997
| 0
| 0
| 0.442748
| 0
| 0
| 0.076722
| 0
| 0
| 0
| 0
| 0
| 0.396947
| 1
| 0.198473
| false
| 0
| 0.022901
| 0
| 0.244275
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
77ee3fa6a880e9e12ba39f50a9af2b3e0e4b1c44
| 34
|
py
|
Python
|
Lib/fonext/utLib/__init__.py
|
derwind/fonext
|
bcc93acb1f31a658b49f44e19497390503042d16
|
[
"MIT"
] | null | null | null |
Lib/fonext/utLib/__init__.py
|
derwind/fonext
|
bcc93acb1f31a658b49f44e19497390503042d16
|
[
"MIT"
] | null | null | null |
Lib/fonext/utLib/__init__.py
|
derwind/fonext
|
bcc93acb1f31a658b49f44e19497390503042d16
|
[
"MIT"
] | null | null | null |
from fonext.utLib.utFont import *
| 17
| 33
| 0.794118
| 5
| 34
| 5.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bb29ddec3c5571c0ce8002b9eb84e4e3b3c58c92
| 1,942
|
py
|
Python
|
template_conf.py
|
mcrrobinson/Zoom-Automation
|
81728376372ba51e8105227c1b1400f1db6f6e2f
|
[
"Apache-2.0"
] | null | null | null |
template_conf.py
|
mcrrobinson/Zoom-Automation
|
81728376372ba51e8105227c1b1400f1db6f6e2f
|
[
"Apache-2.0"
] | null | null | null |
template_conf.py
|
mcrrobinson/Zoom-Automation
|
81728376372ba51e8105227c1b1400f1db6f6e2f
|
[
"Apache-2.0"
] | null | null | null |
# THIS IS STATIC
WEEKDAYS = {
0: "Monday",
1: "Tuesday",
2: "Wednesday",
3: "Thursday",
4: "Friday",
5: "Saturday",
6: "Sunday"
}
CLASS_MAP = {
1: "Art",
2: "Geography",
3: "Science"
}
ENTRIES = {
"Monday": {
1: {
"class_name": CLASS_MAP[1],
"meeting_time": [9,10],
"meeting_link": "zoommtg://port-ac-uk.zoom.us/join?action=join&confno=5453452341&pwd=c8j88912u391n8m2d98sumd98u1"
},
2: {
"class_name": CLASS_MAP[2],
"meeting_time": [10,12],
"meeting_link": "zoommtg://port-ac-uk.zoom.us/join?action=join&confno=5453452341&pwd=c8j88912u391n8m2d98sumd98u1"
},
3: {
"class_name": CLASS_MAP[3],
"meeting_time": [12,13],
"meeting_link": "zoommtg://port-ac-uk.zoom.us/join?action=join&confno=5453452341&pwd=c8j88912u391n8m2d98sumd98u1"
}
},
"Tuesday": {
1: {
"class_name": CLASS_MAP[2],
"meeting_time": [11,12],
"meeting_link": "zoommtg://port-ac-uk.zoom.us/join?action=join&confno=5453452341&pwd=c8j88912u391n8m2d98sumd98u1"
},
},
"Wednesday": {
1: {
"class_name": CLASS_MAP[3],
"meeting_time": [12,13],
"meeting_link": "zoommtg://port-ac-uk.zoom.us/join?action=join&confno=5453452341&pwd=c8j88912u391n8m2d98sumd98u1"
}
},
"Thursday": {
1: {
"class_name": CLASS_MAP[1],
"meeting_time": [14,15],
"meeting_link": "zoommtg://port-ac-uk.zoom.us/join?action=join&confno=5453452341&pwd=c8j88912u391n8m2d98sumd98u1"
},
2: {
"class_name": CLASS_MAP[2],
"meeting_time": [17,18],
"meeting_link": "zoommtg://port-ac-uk.zoom.us/join?action=join&confno=5453452341&pwd=c8j88912u391n8m2d98sumd98u1"
}
},
"Friday": {},
}
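# Illustrative sketch only: the automation script that consumes this config is
# not part of this record, so find_current_meeting is a hypothetical helper
# showing how WEEKDAYS/ENTRIES could be queried for the meeting to open now.
from datetime import datetime


def find_current_meeting(now=None):
    now = now or datetime.now()
    day = WEEKDAYS[now.weekday()]          # datetime.weekday(): Monday == 0
    for entry in ENTRIES.get(day, {}).values():
        start, end = entry["meeting_time"]
        if start <= now.hour < end:
            return entry["class_name"], entry["meeting_link"]
    return None                            # no class scheduled right now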
| 31.322581
| 125
| 0.545829
| 207
| 1,942
| 4.980676
| 0.236715
| 0.062076
| 0.095053
| 0.115422
| 0.838991
| 0.837051
| 0.837051
| 0.808923
| 0.750727
| 0.750727
| 0
| 0.167988
| 0.285788
| 1,942
| 62
| 126
| 31.322581
| 0.575342
| 0.007209
| 0
| 0.366667
| 0
| 0.116667
| 0.523093
| 0.345096
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bb2afbf182f7fea7f1b9d7e037412342971d7a87
| 181
|
py
|
Python
|
try2.py
|
faisalarkan21/mbti-test-qt
|
06d701ba9f95ee278b029d122908b18cac07033d
|
[
"Apache-2.0"
] | null | null | null |
try2.py
|
faisalarkan21/mbti-test-qt
|
06d701ba9f95ee278b029d122908b18cac07033d
|
[
"Apache-2.0"
] | null | null | null |
try2.py
|
faisalarkan21/mbti-test-qt
|
06d701ba9f95ee278b029d122908b18cac07033d
|
[
"Apache-2.0"
] | null | null | null |
for x in range (5,1,-1):
print x * " " + (6-x)* '*'
for z in range (1,11,10):
print z * " " + (11-z) * '*'
for w in range(1,5,+1):
print " "+ (5-w) * '*'
| 20.111111
| 33
| 0.375691
| 32
| 181
| 2.125
| 0.375
| 0.308824
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128205
| 0.353591
| 181
| 8
| 34
| 22.625
| 0.452991
| 0
| 0
| 0
| 0
| 0
| 0.063584
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.5
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
bb5c6d61a0e5953be08f7dfb36dce556a364d306
| 32
|
py
|
Python
|
angr/analyses/typehoon/__init__.py
|
Kyle-Kyle/angr
|
345b2131a7a67e3a6ffc7d9fd475146a3e12f837
|
[
"BSD-2-Clause"
] | 6,132
|
2015-08-06T23:24:47.000Z
|
2022-03-31T21:49:34.000Z
|
angr/analyses/typehoon/__init__.py
|
Kyle-Kyle/angr
|
345b2131a7a67e3a6ffc7d9fd475146a3e12f837
|
[
"BSD-2-Clause"
] | 2,272
|
2015-08-10T08:40:07.000Z
|
2022-03-31T23:46:44.000Z
|
angr/analyses/typehoon/__init__.py
|
Kyle-Kyle/angr
|
345b2131a7a67e3a6ffc7d9fd475146a3e12f837
|
[
"BSD-2-Clause"
] | 1,155
|
2015-08-06T23:37:39.000Z
|
2022-03-31T05:54:11.000Z
|
from .typehoon import Typehoon
| 10.666667
| 30
| 0.8125
| 4
| 32
| 6.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15625
| 32
| 2
| 31
| 16
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
247e061159b8c87990b6e76f6816772bf4d1cba7
| 10,790
|
py
|
Python
|
models/measure.py
|
ChiragCD/NR-GAN
|
fc455c6219b09bc8bf605715504b78b2bb801e48
|
[
"MIT"
] | 54
|
2020-04-17T03:05:50.000Z
|
2022-03-07T20:30:35.000Z
|
models/measure.py
|
ChiragCD/NR-GAN
|
fc455c6219b09bc8bf605715504b78b2bb801e48
|
[
"MIT"
] | 8
|
2020-08-24T03:42:42.000Z
|
2022-03-12T00:21:33.000Z
|
models/measure.py
|
ChiragCD/NR-GAN
|
fc455c6219b09bc8bf605715504b78b2bb801e48
|
[
"MIT"
] | 14
|
2020-06-01T10:21:08.000Z
|
2021-12-30T07:24:22.000Z
|
import cv2
import numpy as np
import torch
import torch.nn.functional as F
def additive_gaussian_noise_measure(input,
noise_scale,
noise_scale_high=None,
image_range=(-1, 1),
with_noise=False):
if noise_scale_high is None:
_noise_scale = noise_scale
else:
_noise_scale = torch.empty(input.size(0)).uniform_(
noise_scale,
noise_scale_high)[:, None, None, None].to(input.device)
eps = torch.randn_like(input)
noise = eps * _noise_scale / 255. * (image_range[1] - image_range[0])
output = input + noise
if with_noise:
return output, noise
else:
return output
def local_gaussian_noise_measure(input,
noise_scale,
patch_size,
noise_scale_high=None,
patch_max_size=None,
image_range=(-1, 1),
with_noise=False):
batch_size, _, height, width = input.shape
patch = torch.zeros((batch_size, 1, height, width))
for i in range(batch_size):
if patch_max_size is None:
patch_width = patch_size
patch_height = patch_size
else:
patch_width = torch.randint(patch_size, patch_max_size + 1,
(1, )).item()
patch_height = torch.randint(patch_size, patch_max_size + 1,
(1, )).item()
x = torch.randint(0, width - patch_width + 1, (1, )).item()
y = torch.randint(0, height - patch_height + 1, (1, )).item()
patch[i][:, y:y + patch_height, x:x + patch_width] = 1
patch = patch.to(input.device)
noise = additive_gaussian_noise_measure(input,
noise_scale,
noise_scale_high,
image_range=image_range,
with_noise=True)[1]
noise = noise * patch
output = input + noise
if with_noise:
return output, noise
else:
return output
def uniform_noise_measure(input,
noise_scale,
noise_scale_high=None,
image_range=(-1, 1),
with_noise=False):
if noise_scale_high is None:
_noise_scale = noise_scale
else:
_noise_scale = torch.empty(input.size(0)).uniform_(
noise_scale,
noise_scale_high)[:, None, None, None].to(input.device)
eps = (torch.rand_like(input) * 2.) - 1.
noise = eps * _noise_scale / 255. * (image_range[1] - image_range[0])
output = input + noise
if with_noise:
return output, noise
else:
return output
def mixture_noise_measure(input,
noise_scale_list,
mixture_rate_list,
image_range=(-1, 1),
with_noise=False):
batch_size, channel, height, width = input.shape
noise = [None] * batch_size
for i in range(batch_size):
noise[i] = torch.zeros((channel, height * width))
perm = torch.randperm(height * width)
rand = torch.rand(height * width)
cumsum = np.cumsum([0] + mixture_rate_list)
for j, noise_scale in enumerate(noise_scale_list):
inds = (rand >= cumsum[j]) * (rand < cumsum[j + 1])
if j == len(noise_scale_list) - 1:
noise[i][:, perm[inds]] = (
(torch.rand(channel, torch.sum(inds)) * 2) -
1) * noise_scale / 255. * (image_range[1] - image_range[0])
else:
noise[i][:, perm[inds]] = torch.randn(
channel, torch.sum(inds)) * noise_scale / 255. * (
image_range[1] - image_range[0])
noise[i] = noise[i].view(channel, height, width).to(input.device)
noise = torch.stack(noise)
output = input + noise
if with_noise:
return output, noise
else:
return output
def brown_gaussian_noise_measure(input,
noise_scale,
noise_scale_high=None,
kernel_size=5,
image_range=(-1, 1),
with_noise=False):
noise = additive_gaussian_noise_measure(input,
noise_scale,
noise_scale_high,
image_range=image_range,
with_noise=True)[1]
padding = int((kernel_size - 1) / 2)
kernel = torch.Tensor(
cv2.getGaussianKernel(kernel_size, 0) *
cv2.getGaussianKernel(kernel_size, 0).transpose()).to(input.device)
kernel = kernel / torch.sqrt(torch.sum(kernel**2))
kernel = kernel[None, None]
kernel = kernel.expand(input.size(1), -1, -1, -1)
noise = F.conv2d(noise,
kernel,
stride=1,
padding=padding,
groups=input.size(1))
output = input + noise
if with_noise:
return output, noise
else:
return output
def additive_brown_gaussian_noise_measure(input,
noise_scale,
noise_scale_high=None,
kernel_size=5,
image_range=(-1, 1),
with_noise=False):
noise = additive_gaussian_noise_measure(input,
noise_scale,
noise_scale_high,
image_range=image_range,
with_noise=True)[1]
padding = int((kernel_size - 1) / 2)
kernel = torch.Tensor(
cv2.getGaussianKernel(kernel_size, 0) *
cv2.getGaussianKernel(kernel_size, 0).transpose()).to(input.device)
kernel = kernel / torch.sqrt(torch.sum(kernel**2))
kernel = kernel[None, None]
kernel = kernel.expand(input.size(1), -1, -1, -1)
noise = noise + F.conv2d(
noise, kernel, stride=1, padding=padding, groups=input.size(1))
output = input + noise
if with_noise:
return output, noise
else:
return output
def multiplicative_gaussian_noise_measure(input,
multi_noise_scale,
multi_noise_scale_high=None,
image_range=(-1, 1),
with_noise=False):
mean = np.mean(image_range)
scale = image_range[1] - image_range[0]
if multi_noise_scale_high is None:
_multi_noise_scale = multi_noise_scale
else:
_multi_noise_scale = torch.empty(input.size(0)).uniform_(
multi_noise_scale,
multi_noise_scale_high)[:, None, None, None].to(input.device)
eps = torch.randn_like(input)
noise = eps * _multi_noise_scale / 255. * (
(input.detach() - mean) / scale + 0.5) * scale
output = input + noise
if with_noise:
return output, noise
else:
return output
def additive_multiplicative_gaussian_noise_measure(input,
noise_scale,
multi_noise_scale,
noise_scale_high=None,
multi_noise_scale_high=None,
image_range=(-1, 1),
with_noise=False):
noise_mg = multiplicative_gaussian_noise_measure(input,
multi_noise_scale,
multi_noise_scale_high,
image_range=image_range,
with_noise=True)[1]
noise_ag = additive_gaussian_noise_measure(input,
noise_scale,
noise_scale_high,
image_range=image_range,
with_noise=True)[1]
noise = noise_mg + noise_ag
output = input + noise
if with_noise:
return output, noise
else:
return output
def poisson_noise_measure(input,
noise_lam,
noise_lam_high=None,
image_range=(-1, 1),
with_noise=False):
mean = np.mean(image_range)
scale = image_range[1] - image_range[0]
if noise_lam_high is None:
_noise_scale = np.sqrt(1. / noise_lam)
else:
_noise_lam = torch.empty(input.size(0)).uniform_(
noise_lam, noise_lam_high)[:, None, None, None].to(input.device)
_noise_scale = torch.sqrt(1. / _noise_lam)
eps = torch.randn_like(input)
noise = (eps * _noise_scale *
torch.sqrt((input.detach() - mean) / scale + 0.5)) * scale
output = input + noise
if with_noise:
return output, noise
else:
return output
def poisson_gaussian_noise_measure(input,
noise_lam,
noise_scale,
noise_lam_high=None,
noise_scale_high=None,
image_range=(-1, 1),
with_noise=False):
noise_p = poisson_noise_measure(input,
noise_lam,
noise_lam_high,
image_range=image_range,
with_noise=True)[1]
noise_ag = additive_gaussian_noise_measure(input,
noise_scale,
noise_scale_high,
image_range=image_range,
with_noise=True)[1]
noise = noise_p + noise_ag
output = input + noise
if with_noise:
return output, noise
else:
return output
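# Usage sketch, not part of the original module: apply the additive Gaussian
# measure to a dummy batch and recover the noise term; the batch shape and
# noise_scale below are arbitrary example values.
if __name__ == "__main__":
    x = torch.rand(4, 3, 32, 32) * 2 - 1   # batch scaled into the (-1, 1) range
    y, n = additive_gaussian_noise_measure(x, noise_scale=25, with_noise=True)
    assert y.shape == x.shape and torch.allclose(y, x + n)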
| 40.56391
| 79
| 0.47127
| 1,063
| 10,790
| 4.503293
| 0.086548
| 0.125339
| 0.061416
| 0.068937
| 0.805933
| 0.778358
| 0.734698
| 0.721955
| 0.709212
| 0.656361
| 0
| 0.019156
| 0.448471
| 10,790
| 265
| 80
| 40.716981
| 0.785246
| 0
| 0
| 0.695473
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041152
| false
| 0
| 0.016461
| 0
| 0.139918
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
703f27bc66f95268fbe1099f4210f739ac6c66ae
| 4,562
|
py
|
Python
|
error_solver/data/_wire_load.py
|
line-mind/error_solver
|
472d086157e49cbe3e7d5b3278ddee6792cf676b
|
[
"BSD-3-Clause"
] | null | null | null |
error_solver/data/_wire_load.py
|
line-mind/error_solver
|
472d086157e49cbe3e7d5b3278ddee6792cf676b
|
[
"BSD-3-Clause"
] | 5
|
2018-12-22T20:59:42.000Z
|
2019-06-04T22:10:08.000Z
|
error_solver/data/_wire_load.py
|
mpewsey/error_solver
|
472d086157e49cbe3e7d5b3278ddee6792cf676b
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Created by Error Solver on 2019-04-14 22:39:06
0: wind_pressure - wind_pressure_coeff*wind_velocity**2
1: horz_unit_load - wind_pressure*(diameter + 2*ice_thickness)*sin(azimuth - wind_azimuth)**2
2: -pi*ice_density*ice_thickness*(diameter + ice_thickness) - unit_weight + vert_unit_load
3: -horz_unit_load**2 - vert_unit_load**2 + (-k_factor + unit_load)**2
"""
from math import *
# Equation 0
def eq0(wind_velocity, wind_pressure_coeff, wind_pressure, **kwargs):
return wind_pressure - wind_pressure_coeff*wind_velocity**2
def eq0_wind_velocity(wind_velocity, wind_pressure_coeff, wind_pressure, **kwargs):
return -2*wind_pressure_coeff*wind_velocity
def eq0_wind_pressure_coeff(wind_velocity, wind_pressure_coeff, wind_pressure, **kwargs):
return -wind_velocity**2
def eq0_wind_pressure(wind_velocity, wind_pressure_coeff, wind_pressure, **kwargs):
return 1
# Equation 1
def eq1(horz_unit_load, azimuth, ice_thickness, diameter, wind_azimuth, wind_pressure, **kwargs):
return horz_unit_load - wind_pressure*(diameter + 2*ice_thickness)*sin(azimuth - wind_azimuth)**2
def eq1_horz_unit_load(horz_unit_load, azimuth, ice_thickness, diameter, wind_azimuth, wind_pressure, **kwargs):
return 1
def eq1_azimuth(horz_unit_load, azimuth, ice_thickness, diameter, wind_azimuth, wind_pressure, **kwargs):
return -2*wind_pressure*(diameter + 2*ice_thickness)*sin(azimuth - wind_azimuth)*cos(azimuth - wind_azimuth)
def eq1_ice_thickness(horz_unit_load, azimuth, ice_thickness, diameter, wind_azimuth, wind_pressure, **kwargs):
return -2*wind_pressure*sin(azimuth - wind_azimuth)**2
def eq1_diameter(horz_unit_load, azimuth, ice_thickness, diameter, wind_azimuth, wind_pressure, **kwargs):
return -wind_pressure*sin(azimuth - wind_azimuth)**2
def eq1_wind_azimuth(horz_unit_load, azimuth, ice_thickness, diameter, wind_azimuth, wind_pressure, **kwargs):
return 2*wind_pressure*(diameter + 2*ice_thickness)*sin(azimuth - wind_azimuth)*cos(azimuth - wind_azimuth)
def eq1_wind_pressure(horz_unit_load, azimuth, ice_thickness, diameter, wind_azimuth, wind_pressure, **kwargs):
return -(diameter + 2*ice_thickness)*sin(azimuth - wind_azimuth)**2
# Equation 2
def eq2(unit_weight, vert_unit_load, diameter, ice_thickness, ice_density, **kwargs):
return -pi*ice_density*ice_thickness*(diameter + ice_thickness) - unit_weight + vert_unit_load
def eq2_unit_weight(unit_weight, vert_unit_load, diameter, ice_thickness, ice_density, **kwargs):
return -1
def eq2_vert_unit_load(unit_weight, vert_unit_load, diameter, ice_thickness, ice_density, **kwargs):
return 1
def eq2_diameter(unit_weight, vert_unit_load, diameter, ice_thickness, ice_density, **kwargs):
return -pi*ice_density*ice_thickness
def eq2_ice_thickness(unit_weight, vert_unit_load, diameter, ice_thickness, ice_density, **kwargs):
return -pi*ice_density*ice_thickness - pi*ice_density*(diameter + ice_thickness)
def eq2_ice_density(unit_weight, vert_unit_load, diameter, ice_thickness, ice_density, **kwargs):
return -pi*ice_thickness*(diameter + ice_thickness)
# Equation 3
def eq3(horz_unit_load, unit_load, vert_unit_load, k_factor, **kwargs):
return -horz_unit_load**2 - vert_unit_load**2 + (-k_factor + unit_load)**2
def eq3_horz_unit_load(horz_unit_load, unit_load, vert_unit_load, k_factor, **kwargs):
return -2*horz_unit_load
def eq3_unit_load(horz_unit_load, unit_load, vert_unit_load, k_factor, **kwargs):
return -2*k_factor + 2*unit_load
def eq3_vert_unit_load(horz_unit_load, unit_load, vert_unit_load, k_factor, **kwargs):
return -2*vert_unit_load
def eq3_k_factor(horz_unit_load, unit_load, vert_unit_load, k_factor, **kwargs):
return 2*k_factor - 2*unit_load
# Assembled functions
EQUATIONS = [
eq0,
eq1,
eq2,
eq3
]
PARTIALS = [
{'wind_velocity': eq0_wind_velocity, 'wind_pressure_coeff': eq0_wind_pressure_coeff, 'wind_pressure': eq0_wind_pressure},
{'horz_unit_load': eq1_horz_unit_load, 'azimuth': eq1_azimuth, 'ice_thickness': eq1_ice_thickness, 'diameter': eq1_diameter, 'wind_azimuth': eq1_wind_azimuth, 'wind_pressure': eq1_wind_pressure},
{'unit_weight': eq2_unit_weight, 'vert_unit_load': eq2_vert_unit_load, 'diameter': eq2_diameter, 'ice_thickness': eq2_ice_thickness, 'ice_density': eq2_ice_density},
{'horz_unit_load': eq3_horz_unit_load, 'unit_load': eq3_unit_load, 'vert_unit_load': eq3_vert_unit_load, 'k_factor': eq3_k_factor}
]
COMBOS = {
'wind_velocity': [0, 1, 2, 3],
'wind_pressure': [1, 2, 3]
}
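# Sketch only, not part of the generated module: a quick finite-difference
# check that the hand-written partials for Equation 0 agree with eq0; the
# sample values are arbitrary.
if __name__ == "__main__":
    vals = dict(wind_velocity=30.0, wind_pressure_coeff=0.00256, wind_pressure=2.3)
    h = 1e-6
    for name, partial in PARTIALS[0].items():
        bumped = dict(vals, **{name: vals[name] + h})
        numeric = (eq0(**bumped) - eq0(**vals)) / h
        assert abs(numeric - partial(**vals)) < 1e-3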
| 36.206349
| 199
| 0.770495
| 686
| 4,562
| 4.708455
| 0.072886
| 0.141176
| 0.085449
| 0.081734
| 0.820743
| 0.743344
| 0.701858
| 0.69226
| 0.667492
| 0.60031
| 0
| 0.026349
| 0.11815
| 4,562
| 125
| 200
| 36.496
| 0.776535
| 0.092942
| 0
| 0.04918
| 0
| 0
| 0.058168
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.360656
| false
| 0
| 0.016393
| 0.360656
| 0.737705
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
704bc58f6a857252c01cb8bb554fc24010296c45
| 4,786
|
py
|
Python
|
tests/smolder_tests.py
|
bskyb-shiny/smolder
|
385df80e83c8370bfe285948242f39e20e99db24
|
[
"BSD-3-Clause"
] | 103
|
2015-04-09T15:36:39.000Z
|
2020-04-23T12:16:51.000Z
|
tests/smolder_tests.py
|
bskyb-shiny/smolder
|
385df80e83c8370bfe285948242f39e20e99db24
|
[
"BSD-3-Clause"
] | 41
|
2015-04-23T10:56:55.000Z
|
2017-11-16T17:55:47.000Z
|
tests/smolder_tests.py
|
bskyb-shiny/smolder
|
385df80e83c8370bfe285948242f39e20e99db24
|
[
"BSD-3-Clause"
] | 12
|
2015-05-30T23:07:17.000Z
|
2017-11-15T14:43:42.000Z
|
#!/usr/bin/env python
import charcoal
from charcoal import Charcoal
import yaml
import os
import logging
import json
import socket
from nose.tools import raises
import requests
import httpretty
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
LOG = logging.getLogger('smolder')
LOG.setLevel(logging.DEBUG)
def test_github_status():
total_failed_tests = 0
total_passed_tests = 0
myfile = open(THIS_DIR + '/fixtures/github_status.json')
test_json = yaml.load(myfile)
for test in test_json['tests']:
test_obj = Charcoal(test=test, host='status.github.com')
total_failed_tests += test_obj.failed
total_passed_tests += test_obj.passed
assert total_failed_tests == 0
def test_github_status_expect_fail():
total_failed_tests = 0
total_passed_tests = 0
myfile = open(THIS_DIR + '/fixtures/harsh_github_status.json')
test_json = yaml.load(myfile)
for test in test_json['tests']:
test_obj = Charcoal(test=test, host='status.github.com')
total_failed_tests += test_obj.failed
total_passed_tests += test_obj.passed
assert total_failed_tests > 0
def test_tcp_tests():
total_failed_tests = 0
total_passed_tests = 0
myfile = open(THIS_DIR + '/fixtures/tcp_test.yaml')
test_json = yaml.load(myfile)
for test in test_json['tests']:
test_obj = Charcoal(test=test, host='status.github.com')
total_failed_tests += test_obj.failed
total_passed_tests += test_obj.passed
assert total_failed_tests == 0
@httpretty.activate
def test_validate_json():
total_failed_tests = 0
total_passed_tests = 0
mytest = open(THIS_DIR + '/mocks/validate_json_response.json')
httpretty.register_uri(httpretty.GET, "http://fakehost111.com/somejson", body=json.dumps(yaml.load(mytest)),
content_type="application/json")
validate_httpretty = requests.get("http://fakehost111.com/somejson")
LOG.debug("Expected response: {0}".format(validate_httpretty.json()))
myfile = open(THIS_DIR + '/fixtures/validate_json.yaml')
test_json = yaml.load(myfile)
for test in test_json['tests']:
test_obj = Charcoal(test=test, host='fakehost111.com')
total_failed_tests += test_obj.failed
total_passed_tests += test_obj.passed
assert total_failed_tests == 0
@httpretty.activate
def test_validate_json_fail():
total_failed_tests = 0
total_passed_tests = 0
mytest = open(THIS_DIR + '/mocks/validate_json_response_fail.json')
json_response = yaml.load(mytest)
httpretty.register_uri(httpretty.GET, "http://fakehost111.com/somejson", body=str(json_response),
content_type="application/json")
myfile = open(THIS_DIR + '/fixtures/validate_json.yaml')
test_json = yaml.load(myfile)
for test in test_json['tests']:
test_obj = Charcoal(test=test, host='fakehost111.com')
if test_obj.failed > 0:
LOG.debug(str(test_obj))
total_failed_tests += test_obj.failed
total_passed_tests += test_obj.passed
assert total_failed_tests > 0
@raises(yaml.parser.ParserError)
def test_invalid_yaml_format():
total_failed_tests = 0
total_passed_tests = 0
myfile = open(THIS_DIR + '/fixtures/invalid_yaml.yaml')
test_json = yaml.load(myfile)
for test in test_json['tests']:
test_obj = Charcoal(test=test, host='status.github.com')
if test_obj.failed > 0:
LOG.debug(str(test_obj))
total_failed_tests += test_obj.failed
total_passed_tests += test_obj.passed
assert total_failed_tests == 0
def test_tcp():
charcoal.tcp_test('8.8.8.8', 53)
def test_tcp_local():
total_failed_tests = 0
total_passed_tests = 0
myfile = open(THIS_DIR + '/fixtures/tcp_test_local.yaml')
test_json = yaml.load(myfile)
for test in test_json['tests']:
test_obj = Charcoal(test=test, host='8.8.8.8')
if test_obj.failed > 0:
LOG.debug(str(test_obj))
total_failed_tests += test_obj.failed
total_passed_tests += test_obj.passed
assert total_failed_tests == 0
def test_tcp_local_fail():
total_failed_tests = 0
total_passed_tests = 0
myfile = open(THIS_DIR + '/fixtures/tcp_test_local_fail.yaml')
test_json = yaml.load(myfile)
for test in test_json['tests']:
test_obj = Charcoal(test=test, host='localhost')
if test_obj.failed > 0:
LOG.debug(str(test_obj))
total_failed_tests += test_obj.failed
total_passed_tests += test_obj.passed
assert total_failed_tests > 0
def test_tcp_test():
charcoal.tcp_test('127.0.0.1', 22) # Are you running an ssh server?
@raises(socket.error)
def test_fail_tcp_test():
charcoal.tcp_test('127.0.0.1', 4242)
| 33.236111
| 112
| 0.693899
| 678
| 4,786
| 4.606195
| 0.131268
| 0.071726
| 0.122959
| 0.087096
| 0.785142
| 0.775857
| 0.775857
| 0.775857
| 0.775857
| 0.757605
| 0
| 0.01873
| 0.196824
| 4,786
| 143
| 113
| 33.468531
| 0.793704
| 0.010656
| 0
| 0.633333
| 0
| 0
| 0.134587
| 0.06423
| 0
| 0
| 0
| 0
| 0.066667
| 1
| 0.091667
| false
| 0.133333
| 0.083333
| 0
| 0.175
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
7058ee6d23d0022f208ad979fa2a8173a1f167d2
| 251
|
py
|
Python
|
icarus/scenarios/__init__.py
|
oascigil/icarus_edge_comp
|
b7bb9f9b8d0f27b4b01469dcba9cfc0c4949d64b
|
[
"MIT"
] | 5
|
2021-03-20T09:22:55.000Z
|
2021-12-20T17:01:33.000Z
|
icarus/scenarios/__init__.py
|
oascigil/icarus_edge_comp
|
b7bb9f9b8d0f27b4b01469dcba9cfc0c4949d64b
|
[
"MIT"
] | 1
|
2021-12-13T07:40:46.000Z
|
2021-12-20T16:59:08.000Z
|
icarus/scenarios/__init__.py
|
oascigil/icarus_edge_comp
|
b7bb9f9b8d0f27b4b01469dcba9cfc0c4949d64b
|
[
"MIT"
] | 1
|
2021-11-25T05:42:20.000Z
|
2021-11-25T05:42:20.000Z
|
# -*- coding: utf-8 -*-
"""This package contains the code for generating simulation scenarios.
"""
from icarus.scenarios.algorithms import *
from .cacheplacement import *
from .contentplacement import *
from .topology import *
from .workload import *
| 27.888889
| 70
| 0.752988
| 29
| 251
| 6.517241
| 0.689655
| 0.21164
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00463
| 0.139442
| 251
| 8
| 71
| 31.375
| 0.87037
| 0.358566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7088a8a4bdaaeb1edae39e3ee96c7085b6273da6
| 94
|
py
|
Python
|
tests/__init__.py
|
methane/fluent-logger-python
|
c63361536a236c7063ceb05d4a0012b331c06225
|
[
"Apache-1.1"
] | null | null | null |
tests/__init__.py
|
methane/fluent-logger-python
|
c63361536a236c7063ceb05d4a0012b331c06225
|
[
"Apache-1.1"
] | null | null | null |
tests/__init__.py
|
methane/fluent-logger-python
|
c63361536a236c7063ceb05d4a0012b331c06225
|
[
"Apache-1.1"
] | null | null | null |
import sys
sys.path = ['..'] + sys.path
from test_handler import *
from test_sender import *
| 15.666667
| 28
| 0.702128
| 14
| 94
| 4.571429
| 0.5
| 0.21875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170213
| 94
| 5
| 29
| 18.8
| 0.820513
| 0
| 0
| 0
| 0
| 0
| 0.021277
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
560d846298d162afbe41109636ea96e42ef47298
| 521
|
py
|
Python
|
crits/locations/urls.py
|
frbapolkosnik/crits
|
1278c034f2238e2fe34e65e32ce241128a014df2
|
[
"MIT"
] | 22
|
2015-01-14T19:49:32.000Z
|
2022-01-26T12:18:52.000Z
|
crits/locations/urls.py
|
deadbits/crits
|
154097a1892e9d3960d6faaed4bd2e912a196a47
|
[
"MIT"
] | null | null | null |
crits/locations/urls.py
|
deadbits/crits
|
154097a1892e9d3960d6faaed4bd2e912a196a47
|
[
"MIT"
] | 6
|
2015-01-22T21:25:52.000Z
|
2021-04-12T23:24:14.000Z
|
from django.conf.urls import url
urlpatterns = [
url(r'^add/(?P<type_>\w+)/(?P<id_>\w+)/$', 'add_location', prefix='crits.locations.views'),
url(r'^edit/(?P<type_>\w+)/(?P<id_>\w+)/$', 'edit_location', prefix='crits.locations.views'),
url(r'^remove/(?P<type_>\w+)/(?P<id_>\w+)/$', 'remove_location', prefix='crits.locations.views'),
url(r'^name_list/$', 'location_names', prefix='crits.locations.views'),
url(r'^name_list/(?P<active_only>\S+)/$', 'location_names', prefix='crits.locations.views'),
]
| 52.1
| 101
| 0.635317
| 76
| 521
| 4.171053
| 0.342105
| 0.063091
| 0.315457
| 0.394322
| 0.747634
| 0.747634
| 0.492114
| 0.233438
| 0
| 0
| 0
| 0
| 0.084453
| 521
| 9
| 102
| 57.888889
| 0.66457
| 0
| 0
| 0
| 0
| 0
| 0.621881
| 0.46833
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
567154d837e7548487eecece34f3b156561756d4
| 17,846
|
py
|
Python
|
test/test_markdown_extra.py
|
jackdewinter/pymarkdown
|
7ae408ba0b24506fa07552ffe520750bbff38c53
|
[
"MIT"
] | 20
|
2021-01-14T17:39:09.000Z
|
2022-03-14T08:35:22.000Z
|
test/test_markdown_extra.py
|
jackdewinter/pymarkdown
|
7ae408ba0b24506fa07552ffe520750bbff38c53
|
[
"MIT"
] | 304
|
2020-08-15T23:24:00.000Z
|
2022-03-31T23:34:03.000Z
|
test/test_markdown_extra.py
|
jackdewinter/pymarkdown
|
7ae408ba0b24506fa07552ffe520750bbff38c53
|
[
"MIT"
] | 3
|
2021-08-11T10:26:26.000Z
|
2021-11-02T20:41:27.000Z
|
"""
Extra tests.
"""
import pytest
from .utils import act_and_assert
@pytest.mark.gfm
def test_extra_001():
"""
Test a totally blank input.
"""
# Arrange
source_markdown = ""
expected_tokens = ["[BLANK(1,1):]"]
expected_gfm = ""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_extra_002():
"""
Test a blank input with only whitespace.
"""
# Arrange
source_markdown = " "
expected_tokens = ["[BLANK(1,1): ]"]
expected_gfm = ""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_extra_003():
"""
    Test to make sure the wide range of characters meets the GFM/CommonMark encodings.
    Note that since % is not followed by a 2-digit hex value, it is encoded per
    the CommonMark libraries.
"""
# Arrange
source_markdown = "[link](!\"#$%&'\\(\\)*+,-./0123456789:;<=>?@A-Z[\\\\]^_`a-z{|}~)"
expected_tokens = [
"[para(1,1):]",
"[link(1,1):inline:!%22#$%25&'()*+,-./0123456789:;%3C=%3E?@A-Z%5B%5C%5D%5E_%60a-z%7B%7C%7D~::!\"#$%&'\\(\\)*+,-./0123456789:;<=>?@A-Z[\\\\]^_`a-z{|}~:::link:False::::]",
"[text(1,2):link:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = '<p><a href="!%22#$%25&\'()*+,-./0123456789:;%3C=%3E?@A-Z%5B%5C%5D%5E_%60a-z%7B%7C%7D~">link</a></p>'
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_extra_004():
"""
    Test to make sure the wide range of characters meets the GFM/CommonMark encodings.
    Note that since % is followed by a 2-digit hex value, it is encoded per the CommonMark
    libraries except for the % and the 2-digit hex value following it.
Another example of this is example 511:
https://github.github.com/gfm/#example-511
"""
# Arrange
source_markdown = (
"[link](!\"#$%12&'\\(\\)*+,-./0123456789:;<=>?@A-Z[\\\\]^_`a-z{|}~)"
)
expected_tokens = [
"[para(1,1):]",
"[link(1,1):inline:!%22#$%12&'()*+,-./0123456789:;%3C=%3E?@A-Z%5B%5C%5D%5E_%60a-z%7B%7C%7D~::!\"#$%12&'\\(\\)*+,-./0123456789:;<=>?@A-Z[\\\\]^_`a-z{|}~:::link:False::::]",
"[text(1,2):link:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = '<p><a href="!%22#$%12&\'()*+,-./0123456789:;%3C=%3E?@A-Z%5B%5C%5D%5E_%60a-z%7B%7C%7D~">link</a></p>'
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_extra_005():
"""
When encoding link characters, special attention is used for the % characters as
the CommonMark parser treats "%<hex-char><hex-char>" as non-encodable. Make sure
this is tested at the end of the link.
"""
# Arrange
source_markdown = "[link](http://google.com/search%)"
expected_tokens = [
"[para(1,1):]",
"[link(1,1):inline:http://google.com/search%25::http://google.com/search%:::link:False::::]",
"[text(1,2):link:]",
"[end-link::]",
"[end-para:::True]",
]
expected_gfm = '<p><a href="http://google.com/search%25">link</a></p>'
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_extra_006():
"""
    Lists and fenced code blocks within a block quote.
"""
# Arrange
source_markdown = """> + list
> ```block
> A code block
> ```
> 1. another list
"""
expected_tokens = [
"[block-quote(1,1)::> \n> \n> \n> \n> ]",
"[ulist(1,3):+::4: ]",
"[para(1,5):]",
"[text(1,5):list:]",
"[end-para:::False]",
"[end-ulist:::True]",
"[fcode-block(2,3):`:3:block:::::]",
"[text(3,3):A code block:]",
"[end-fcode-block::3:False]",
"[olist(5,3):.:1:5: ]",
"[para(5,6):]",
"[text(5,6):another list:]",
"[end-para:::True]",
"[BLANK(6,1):]",
"[end-olist:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<ul>
<li>list</li>
</ul>
<pre><code class="language-block">A code block
</code></pre>
<ol>
<li>another list</li>
</ol>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_extra_007a():
"""
Text and a link reference definition within a block quote.
"""
# Arrange
source_markdown = """> this is text
> [a not so
> simple](/link
> "a title")
> a real test
"""
expected_tokens = [
"[block-quote(1,1)::> \n> \n> \n> \n> \n]",
"[para(1,3):\n\n \n\n ]",
"[text(1,3):this is text\n::\n]",
'[link(2,3):inline:/link:a title::::a not so\nsimple:False:"::\n:]',
"[text(2,4):a not so\nsimple::\n]",
"[end-link::]",
"[text(4,13):\na real test::\n]",
"[end-para:::True]",
"[end-block-quote:::True]",
"[BLANK(6,1):]",
]
expected_gfm = """<blockquote>
<p>this is text
<a href="/link" title="a title">a not so
simple</a>
a real test</p>
</blockquote>"""
# Act & Assert
act_and_assert(
source_markdown,
expected_gfm,
expected_tokens,
disable_consistency_checks=True,
)
@pytest.mark.gfm
def test_extra_007b():
"""
Variation on 7a with more spacing
"""
# Arrange
source_markdown = """> this is text
> [a not
> so simple](/link
> "a
> title"
> )
> a real test
"""
expected_tokens = [
"[block-quote(1,1)::> \n> \n> \n> \n> \n> \n> \n]",
"[para(1,3):\n\n \n\n \n \n]",
"[text(1,3):this is text\n::\n]",
'[link(2,3):inline:/link:a\ntitle::::a not\nso simple:False:"::\n:\n]',
"[text(2,4):a not\nso simple::\n]",
"[end-link::]",
"[text(6,5):\na real test::\n]",
"[end-para:::True]",
"[end-block-quote:::True]",
"[BLANK(8,1):]",
]
expected_gfm = """<blockquote>
<p>this is text
<a href="/link" title="a
title">a not
so simple</a>
a real test</p>
</blockquote>"""
# Act & Assert
act_and_assert(
source_markdown,
expected_gfm,
expected_tokens,
disable_consistency_checks=True,
)
@pytest.mark.gfm
def test_extra_007c():
"""
Variation on 7a with more spacing
"""
# Arrange
source_markdown = """> this is text
> [a
> not
> so simple](/link
> "a
> title"
> )
> a real test
"""
expected_tokens = [
"[block-quote(1,1)::> \n> \n> \n> \n> \n> \n> \n> \n]",
"[para(1,3):\n\n \n \n\n \n \n]",
"[text(1,3):this is text\n::\n]",
'[link(2,3):inline:/link:a\ntitle::::a\nnot\nso simple:False:"::\n:\n]',
"[text(2,4):a\nnot\nso simple:: \n\n]",
"[end-link::]",
"[text(7,5):\na real test::\n]",
"[end-para:::True]",
"[end-block-quote:::True]",
"[BLANK(9,1):]",
]
expected_gfm = """<blockquote>
<p>this is text
<a href="/link" title="a
title">a
not
so simple</a>
a real test</p>
</blockquote>"""
# Act & Assert
act_and_assert(
source_markdown, expected_gfm, expected_tokens, disable_consistency_checks=True
)
@pytest.mark.gfm
def test_extra_007d():
"""
Variation on 7a with more spacing
"""
# Arrange
source_markdown = """> this is text
> [a
> not
> so simple](/link
> "a
> title"
> )
> a real test
"""
expected_tokens = [
"[block-quote(1,1)::> \n> \n> \n> \n> \n> \n> \n> \n]",
"[para(1,3):\n\n \n \n\n \n \n]",
"[text(1,3):this is text\n::\n]",
'[link(2,3):inline:/link:a\ntitle::::a\nnot\nso simple:False:"::\n:\n]',
"[text(2,4):a\nnot\nso simple:: \n\n]",
"[end-link::]",
"[text(7,5):\na real test::\n]",
"[end-para:::True]",
"[end-block-quote:::True]",
"[BLANK(9,1):]",
]
expected_gfm = """<blockquote>
<p>this is text
<a href="/link" title="a
title">a
not
so simple</a>
a real test</p>
</blockquote>"""
# Act & Assert
act_and_assert(
source_markdown, expected_gfm, expected_tokens, disable_consistency_checks=True
)
@pytest.mark.gfm
def test_extra_007e():
"""
Almost looks like a fenced code block, but is really a code span.
"""
# Arrange
source_markdown = """> this is text
> ``
> foo
> bar
> baz
> ``
> a real test
"""
expected_tokens = [
"[block-quote(1,1)::> \n> \n> \n> \n> \n> \n> \n]",
"[para(1,3):\n\n\n\n\n\n]",
"[text(1,3):this is text\n::\n]",
"[icode-span(2,3):foo\a\n\a \abar \a\n\a \abaz:``:\a\n\a \a:\a\n\a \a]",
"[text(6,5):\na real test::\n]",
"[end-para:::True]",
"[end-block-quote:::True]",
"[BLANK(8,1):]",
]
expected_gfm = """<blockquote>
<p>this is text
<code>foo bar baz</code>
a real test</p>
</blockquote>"""
# Act & Assert
act_and_assert(
source_markdown, expected_gfm, expected_tokens, disable_consistency_checks=True
)
@pytest.mark.gfm
def test_extra_008x():
"""
Simple unordered list with increasing indent in a block quote.
"""
# Arrange
source_markdown = """> * this is level 1
> * this is level 2
> * this is level 3
"""
expected_tokens = [
"[block-quote(1,1)::> \n> \n> ]",
"[ulist(1,3):*::4: ]",
"[para(1,5):]",
"[text(1,5):this is level 1:]",
"[end-para:::True]",
"[ulist(2,5):*::6: ]",
"[para(2,7):]",
"[text(2,7):this is level 2:]",
"[end-para:::True]",
"[ulist(3,7):*::8: ]",
"[para(3,9):]",
"[text(3,9):this is level 3:]",
"[end-para:::True]",
"[BLANK(4,1):]",
"[end-ulist:::True]",
"[end-ulist:::True]",
"[end-ulist:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<ul>
<li>
<p>this is level 1</p>
<ul>
<li>
<p>this is level 2</p>
<ul>
<li>this is level 3</li>
</ul>
</li>
</ul>
</li>
</ul>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_extra_008a():
"""
Variation on 8 with no block quote.
"""
# Arrange
source_markdown = """* this is level 1
* this is level 2
* this is level 3
"""
expected_tokens = [
"[ulist(1,1):*::2:]",
"[para(1,3):]",
"[text(1,3):this is level 1:]",
"[end-para:::True]",
"[ulist(2,3):*::4: ]",
"[para(2,5):]",
"[text(2,5):this is level 2:]",
"[end-para:::True]",
"[ulist(3,5):*::6: ]",
"[para(3,7):]",
"[text(3,7):this is level 3:]",
"[end-para:::True]",
"[BLANK(4,1):]",
"[end-ulist:::True]",
"[end-ulist:::True]",
"[end-ulist:::True]",
]
expected_gfm = """<ul>
<li>this is level 1
<ul>
<li>this is level 2
<ul>
<li>this is level 3</li>
</ul>
</li>
</ul>
</li>
</ul>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_extra_009():
"""
Simple block quote within an unordered list.
"""
# Arrange
source_markdown = """- > This is one section of a block quote
"""
expected_tokens = [
"[ulist(1,1):-::2:]",
"[block-quote(1,3): : > \n\n]",
"[para(1,5):]",
"[text(1,5):This is one section of a block quote:]",
"[end-para:::True]",
"[end-block-quote:::True]",
"[BLANK(2,1):]",
"[end-ulist:::True]",
]
expected_gfm = """<ul>
<li>
<blockquote>
<p>This is one section of a block quote</p>
</blockquote>
</li>
</ul>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_extra_009a():
"""
Simple block quote within an ordered list.
"""
# Arrange
source_markdown = """1. > This is one section of a block quote
"""
expected_tokens = [
"[olist(1,1):.:1:3:]",
"[block-quote(1,4): : > \n\n]",
"[para(1,6):]",
"[text(1,6):This is one section of a block quote:]",
"[end-para:::True]",
"[end-block-quote:::True]",
"[BLANK(2,1):]",
"[end-olist:::True]",
]
expected_gfm = """<ol>
<li>
<blockquote>
<p>This is one section of a block quote</p>
</blockquote>
</li>
</ol>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_extra_010x():
"""
List item with weird progression.
"""
# Arrange
source_markdown = """* First Item
* First-First
* First-Second
* First-Third
* Second Item
"""
expected_tokens = [
"[ulist(1,1):*::2:]",
"[para(1,3):]",
"[text(1,3):First Item:]",
"[end-para:::True]",
"[ulist(2,3):*::4: ]",
"[para(2,5):]",
"[text(2,5):First-First:]",
"[end-para:::True]",
"[li(3,4):5: :]",
"[para(3,6):]",
"[text(3,6):First-Second:]",
"[end-para:::True]",
"[li(4,5):6: :]",
"[para(4,7):]",
"[text(4,7):First-Third:]",
"[end-para:::True]",
"[end-ulist:::True]",
"[li(5,1):2::]",
"[para(5,3):]",
"[text(5,3):Second Item:]",
"[end-para:::True]",
"[BLANK(6,1):]",
"[end-ulist:::True]",
]
expected_gfm = """<ul>
<li>First Item
<ul>
<li>First-First</li>
<li>First-Second</li>
<li>First-Third</li>
</ul>
</li>
<li>Second Item</li>
</ul>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_extra_010a():
"""
List item with weird progression.
"""
# Arrange
source_markdown = """* First Item
* Second Item
* Third Item
"""
expected_tokens = [
"[ulist(1,1):*::2:]",
"[para(1,3):]",
"[text(1,3):First Item:]",
"[end-para:::True]",
"[li(2,2):3: :]",
"[para(2,4):: ]",
"[text(2,4):Second Item:]",
"[end-para:::True]",
"[li(3,3):4: :]",
"[para(3,5):]",
"[text(3,5):Third Item:]",
"[end-para:::True]",
"[BLANK(4,1):]",
"[end-ulist:::True]",
]
expected_gfm = """<ul>
<li>First Item</li>
<li>Second Item</li>
<li>Third Item</li>
</ul>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_extra_010b():
"""
List item with weird progression.
"""
# Arrange
source_markdown = """1. First Item
1. First-First
1. First-Second
1. First-Third
1. First-Four
1. Second Item
"""
expected_tokens = [
"[olist(1,1):.:1:3:]",
"[para(1,4):]",
"[text(1,4):First Item:]",
"[end-para:::True]",
"[olist(2,4):.:1:6: ]",
"[para(2,7):]",
"[text(2,7):First-First:]",
"[end-para:::True]",
"[li(3,5):7: :1]",
"[para(3,8):]",
"[text(3,8):First-Second:]",
"[end-para:::True]",
"[li(4,6):8: :1]",
"[para(4,9):]",
"[text(4,9):First-Third:]",
"[end-para:::True]",
"[li(5,7):9: :1]",
"[para(5,10):]",
"[text(5,10):First-Four:]",
"[end-para:::True]",
"[end-olist:::True]",
"[li(6,1):3::1]",
"[para(6,4):]",
"[text(6,4):Second Item:]",
"[end-para:::True]",
"[BLANK(7,1):]",
"[end-olist:::True]",
]
expected_gfm = """<ol>
<li>First Item
<ol>
<li>First-First</li>
<li>First-Second</li>
<li>First-Third</li>
<li>First-Four</li>
</ol>
</li>
<li>Second Item</li>
</ol>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_extra_011x():
"""
Block quote followed directly by Atx Heading.
"""
# Arrange
source_markdown = """> simple text
> dd
> dd
# a
"""
expected_tokens = [
"[block-quote(1,1)::> \n> \n> ]",
"[para(1,3):\n\n]",
"[text(1,3):simple text\ndd\ndd::\n\n]",
"[end-para:::True]",
"[end-block-quote:::True]",
"[atx(4,1):1:0:]",
"[text(4,3):a: ]",
"[end-atx::]",
"[BLANK(5,1):]",
]
expected_gfm = """<blockquote>
<p>simple text
dd
dd</p>
</blockquote>
<h1>a</h1>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_extra_011a():
"""
Variation of 11 with no newline after Atx Heading
"""
# Arrange
source_markdown = """> simple text
> dd
> dd
# a"""
expected_tokens = [
"[block-quote(1,1)::> \n> \n> ]",
"[para(1,3):\n\n]",
"[text(1,3):simple text\ndd\ndd::\n\n]",
"[end-para:::True]",
"[end-block-quote:::True]",
"[atx(4,1):1:0:]",
"[text(4,3):a: ]",
"[end-atx::]",
]
expected_gfm = """<blockquote>
<p>simple text
dd
dd</p>
</blockquote>
<h1>a</h1>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_extra_011b():
"""
Variation of 11 with newline after Block Quote and before Atx Heading
"""
# Arrange
source_markdown = """> simple text
> dd
> dd
# a"""
expected_tokens = [
"[block-quote(1,1)::> \n> \n> \n]",
"[para(1,3):\n\n]",
"[text(1,3):simple text\ndd\ndd::\n\n]",
"[end-para:::True]",
"[end-block-quote:::True]",
"[BLANK(4,1):]",
"[atx(5,1):1:0:]",
"[text(5,3):a: ]",
"[end-atx::]",
]
expected_gfm = """<blockquote>
<p>simple text
dd
dd</p>
</blockquote>
<h1>a</h1>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
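# For context, a sketch of what a helper like act_and_assert presumably does:
# parse the markdown into tokens, render those tokens to HTML, and compare
# both against the expectations. The tokenize/render entry points named here
# are hypothetical; the real helper lives in test/utils.py.
#
#     def act_and_assert_sketch(source_markdown, expected_gfm, expected_tokens):
#         tokens = tokenize(source_markdown)  # assumed tokenizer entry point
#         actual_gfm = render(tokens)         # assumed HTML renderer
#         assert [str(token) for token in tokens] == expected_tokens
#         assert actual_gfm == expected_gfm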
| 23.11658
| 183
| 0.511039
| 2,455
| 17,846
| 3.621181
| 0.086762
| 0.018223
| 0.016198
| 0.016198
| 0.811586
| 0.78234
| 0.76333
| 0.734871
| 0.71721
| 0.697413
| 0
| 0.045031
| 0.254623
| 17,846
| 771
| 184
| 23.146563
| 0.62329
| 0.110109
| 0
| 0.68838
| 0
| 0.026408
| 0.498418
| 0.074717
| 0
| 0
| 0
| 0
| 0.038732
| 1
| 0.036972
| false
| 0
| 0.003521
| 0
| 0.040493
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
56a3d8cd707be0b0aee50003dc6b43c94cc699b6
| 42,767
|
py
|
Python
|
pyrosetta/api.py
|
blockjoe/rosetta-api-client-python
|
707f325f7560ffa6d5dfe361aff4779cc0b7182f
|
[
"Apache-2.0"
] | null | null | null |
pyrosetta/api.py
|
blockjoe/rosetta-api-client-python
|
707f325f7560ffa6d5dfe361aff4779cc0b7182f
|
[
"Apache-2.0"
] | null | null | null |
pyrosetta/api.py
|
blockjoe/rosetta-api-client-python
|
707f325f7560ffa6d5dfe361aff4779cc0b7182f
|
[
"Apache-2.0"
] | null | null | null |
from typing import Any, Dict, List, Iterable, Optional, Union
import requests
from .models import (
AccountBalanceResponse,
AccountCoinsResponse,
AccountIdentifier,
BlockIdentifier,
BlockResponse,
BlockTransactionResponse,
Currency,
NetworkIdentifier,
NetworkOptionsResponse,
NetworkStatusResponse,
MempoolTransactionResponse,
PartialBlockIdentifier,
Transaction,
TransactionIdentifier
)
from .utils import (
make_AccountIdentifier,
make_Currencies,
make_NetworkIdentifier,
make_PartialBlockIdentifier
)
from . import network as net
from . import account as acnt
from . import block as blk
from . import mempool as memp
from . import construction as cnst
from . import events as evnt
from . import search as srch
class RosettaAPI(object):
def __init__(self, api_url: str, session : Optional[requests.Session] = None) -> None:
"""
Parameters
----------
api_url: str
The url where the node is located.
session: requests.Session, optional
            An existing requests session. If none is passed,
            a session will be created for this object.
"""
self._api_url = api_url
if session is None:
session = requests.Session()
self._session = session
self._network_identifier = None
@property
def session(self) -> requests.Session:
return self._session
@property
def url(self) -> str:
return self._api_url
def list_supported_networks(self, **kwargs) -> List[NetworkIdentifier]:
"""
Get a list of supported networks.
Parameters
----------
**kwargs
Any additional metadata to be passed along to the /network/list request.
See the individual node implementation to verify if additional
metadata is needed.
Returns
-------
list[NetworkIdentifier]
blockchain : str
network : str
subnetwork_id : SubNetworkIdentifier, optional
"""
return net.list_supported(self.url, self.session, **kwargs)
@property
def current_network(self) -> Optional[NetworkIdentifier]:
return self._network_identifier
@current_network.setter
def current_network(self, network_id: NetworkIdentifier) -> None:
if not isinstance(network_id, NetworkIdentifier):
            raise ValueError("`current_network` must explicitly be a NetworkIdentifier. These are returned by the `list_supported_networks` method. If trying to set `current_network` by strings, see the `select_network` method.")
self._network_identifier = network_id
def select_network(self, blockchain : str, network : str, subnetwork : Optional[str] = None, subnetwork_metadata : Optional[Dict[str, Any]] = None) -> None:
"""
Select the `current_network` by known string values.
Parameters
----------
blockchain: str
The name of the blockchain. Ex: 'bitcoin'
network: str
The chain-id or network identifier. Ex: 'mainnet' or 'testnet'
subnetwork: str, optional
The name or identifier of the subnetwork if needed. Ex: 'shard-1'
subnetwork_metadata: dict[str, Any], optional
Any additional metadata needed to identify the subnetwork. See the
            individual node implementation to verify if additional metadata is needed.
"""
self.current_network = make_NetworkIdentifier(blockchain, network, subnetwork, subnetwork_metadata)
def _network_status(self, network_id : NetworkIdentifier, **kwargs) -> NetworkStatusResponse:
"""
        Private method for `network_status` to provide an interface that
supports calls with existing objects.
"""
return net.status(self.url, network_id, self.session, **kwargs)
def current_network_status(self, **kwargs) -> NetworkStatusResponse:
"""
Get the status of the current network.
Parameters
----------
**kwargs
Any additional metadata to be passed along to the /network/status.
See the individual node implementation to verify if additional
metadata is needed.
Returns
-------
NetworkStatusResponse
current_block_identifier: BlockIdentifier
current_block_timestamp: Timestamp
genesis_block_identifier: BlockIdentifier
oldest_block_identifier: BlockIdentifier, optional
sync_status: SyncStatus, optional
peers: List[Peer]
See Also
--------
select_network: For selecting a current_network.
Raises
------
        RuntimeError: If no current network has been selected.
"""
if self.current_network is None:
raise RuntimeError("No `current_network` has been selected. See `select_network` for selecting a current network.")
return self._network_status(self.current_network, **kwargs)
def network_status(self, blockchain : str, network : str, subnetwork : Optional[str] = None, subnetwork_metadata : Optional[Dict[str, Any]] = None, **kwargs) -> NetworkStatusResponse:
"""
Get the status of a desired network.
Parameters
----------
blockchain: str
The name of the blockchain. Ex: 'bitcoin'
network: str
The chain-id or network identifier. Ex: 'mainnet' or 'testnet'
subnetwork: str, optional
The name or identifier of the subnetwork if needed. Ex: 'shard-1'
subnetwork_metadata: dict[str, Any], optional
Any additional metadata needed to identify the subnetwork. See the
            individual node implementation to verify if additional metadata is needed.
**kwargs
Any additional metadata to be passed along to the /network/status.
See the individual node implementation to verify if additional
metadata is needed.
Returns
-------
NetworkStatusResponse
current_block_identifier: BlockIdentifier
current_block_timestamp: Timestamp
genesis_block_identifier: BlockIdentifier
oldest_block_identifier: BlockIdentifier, optional
sync_status: SyncStatus, optional
peers: List[Peer]
"""
network_id = make_NetworkIdentifier(blockchain, network, subnetwork, subnetwork_metadata)
return self._network_status(network_id, **kwargs)
def _network_supported_options(self, network_id : NetworkIdentifier, **kwargs) -> NetworkOptionsResponse:
"""
        Private method for `network_supported_options` to provide an interface that
supports calls with existing objects.
"""
return net.supported_options(self.url, network_id, self.session, **kwargs)
def current_network_supported_options(self, **kwargs) -> NetworkOptionsResponse:
"""
Get the supported options of the current network.
Parameters
----------
**kwargs
Any additional metadata to be passed along to the /network/options.
See the individual node implementation to verify if additional
metadata is needed.
Returns
        -------
NetworkOptionsResponse
version: Version
allow: Allow
See Also
--------
select_network: For selecting a current_network.
Raises
------
        RuntimeError: If no current network has been selected.
"""
if self.current_network is None:
raise RuntimeError("No `current_network` has been selected. See `select_network` for selecting a current network.")
return self._network_supported_options(self.current_network, **kwargs)
def network_supported_options(self, blockchain : str, network : str, subnetwork : Optional[str] = None, subnetwork_metadata : Optional[Dict[str, Any]] = None, **kwargs) -> NetworkOptionsResponse:
"""
Get the supported options of a desired network.
Parameters
----------
blockchain: str
The name of the blockchain. Ex: 'bitcoin'
network: str
The chain-id or network identifier. Ex: 'mainnet' or 'testnet'
subnetwork: str, optional
The name or identifier of the subnetwork if needed. Ex: 'shard-1'
subnetwork_metadata: dict[str, Any], optional
Any additional metadata needed to identify the subnetwork. See the
            individual node implementation to verify if additional metadata is needed.
**kwargs
Any additional metadata to be passed along to the /network/options.
See the individual node implementation to verify if additional
metadata is needed.
Returns
-------
NetworkOptionsResponse
version: Version
allow: Allow
"""
network_id = make_NetworkIdentifier(blockchain, network, subnetwork, subnetwork_metadata)
return self._network_supported_options(network_id, **kwargs)
def _balance(self, network_id : NetworkIdentifier, account_id : AccountIdentifier,
block_id : Optional[PartialBlockIdentifier] = None, currencies : Optional[List[Currency]] = None) -> AccountBalanceResponse:
"""
        Private method for the account balance method to provide an interface that
supports calls with existing objects.
"""
        return acnt.balance(self.url, network_id, account_id, block_id, currencies, self.session)
def current_network_balance_of_account(self, account_address : str, account_metadata : Optional[Dict[str, Any]] = None,
subaccount_address : Optional[str] = None, subaccount_metadata : Optional[Dict[str, Any]] = None,
block_height : Optional[int] = None, block_hash : Optional[str] = None,
selected_currency_symbols : Optional[Union[str, Iterable[str]]] = None,
selected_currency_decimals : Optional[Union[int, Iterable[int]]] = None,
selected_currency_metadata : Optional[Union[Dict[str, Any], Iterable[Union[Dict[str, Any], None]]]] = None) -> AccountBalanceResponse:
"""
Get the balance of a specified account on the current network.
Parameters
----------
account_address: str
Either a cryptographic key or a username identifying the account.
account_metadata: dict[str, Any], optional
Any additional metadata to identify the Account. Any blockchains that utilize a username
for the address over a public key should specify the public keys here.
subaccount_address: str, optional
Either a cryptographic value or another unique identifier for the SubAccount
subaccount_metadata: dict[str, Any], optional
Any additional metadata needed to uniquely identify a SubAccount. NOTE: Two
SubAccounts with the same address but different metadata are considered different
SubAccounts.
block_height: int, optional
The index of the desired block.
block_hash: str, optional
The hash of the desired block.
selected_currency_symbols: str, Iterable[str], optional
            A single str, or an iterable of strings, of the symbols of the desired currencies to filter
the results upon. If this is specified, `selected_currency_decimals` must also be specified
and of equal length.
selected_currency_decimals: int, Iterable[int], optional
A single int, or an iterable of ints, representing the number of decimals
in the atomic unit of the desired currencies to filter the results upon. If this is specified,
`selected_currency_symbols` must also be specified and of equal length.
selected_currency_metadata: dict[str, Any], Iterable[Union[dict[str, Any], None]], optional
A single dict, or an iterable of dicts, representing the metadata of the
currencies. If this is specified, both `selected_currency_symbols` and `selected_currency_decimals`
            must also be specified and of equal length.
Returns
-------
AccountBalanceResponse
block_identifier: BlockIdentifier
balances: list[Amount]
metadata: dict[str, Any], optional
Raises
------
        ValueError: If the currency parameters are inconsistent.
"""
if account_metadata is None:
account_metadata = {}
account_id = make_AccountIdentifier(account_address, subaccount_address, subaccount_metadata, **account_metadata)
try:
block_id = make_PartialBlockIdentifier(block_height, block_hash)
except ValueError:
block_id = None
        if selected_currency_symbols is None and selected_currency_decimals is not None:
            raise ValueError("Both `selected_currency_symbols` and `selected_currency_decimals` must be provided if either is.")
        if selected_currency_decimals is None and selected_currency_symbols is not None:
            raise ValueError("Both `selected_currency_symbols` and `selected_currency_decimals` must be provided if either is.")
        if selected_currency_metadata is not None and (selected_currency_symbols is None or selected_currency_decimals is None):
            raise ValueError("If `selected_currency_metadata` is provided, both `selected_currency_symbols` and `selected_currency_decimals` must be provided.")
        if selected_currency_symbols is None and selected_currency_decimals is None and selected_currency_metadata is None:
            currencies = None
        else:
            currencies = make_Currencies(selected_currency_symbols, selected_currency_decimals, selected_currency_metadata)
return self._balance(self.current_network, account_id, block_id, currencies)
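    # Example (sketch): filter a balance lookup to a single currency. The
    # address and the BTC/8-decimals pair below are placeholders, not values
    # required by any particular node:
    #
    #     api.current_network_balance_of_account(
    #         "bc1q...",                        # placeholder account address
    #         selected_currency_symbols="BTC",
    #         selected_currency_decimals=8,
    #     )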
def balance_of_account_on_network(self, blockchain : str, network : str, account_address : str,
subnetwork : Optional[str] = None, subnetwork_metadata : Optional[Dict[str, Any]] = None,
account_metadata : Optional[Dict[str, Any]] = None,
subaccount_address : Optional[str] = None, subaccount_metadata : Optional[Dict[str, Any]] = None,
block_height : Optional[int] = None, block_hash : Optional[str] = None,
selected_currency_symbols : Optional[Union[str, Iterable[str]]] = None,
selected_currency_decimals : Optional[Union[int, Iterable[int]]] = None,
selected_currency_metadata : Optional[Union[Dict[str, Any], Iterable[Union[Dict[str, Any], None]]]] = None) -> AccountBalanceResponse:
"""
Get the balance of a specified account on the specified network.
Parameters
----------
blockchain: str
The name of the blockchain. Ex: 'bitcoin'
network: str
The chain-id or network identifier. Ex: 'mainnet' or 'testnet'
account_address: str
Either a cryptographic key or a username identifying the account.
subnetwork: str, optional
The name or identifier of the subnetwork if needed. Ex: 'shard-1'
subnetwork_metadata: dict[str, Any], optional
Any additional metadata needed to identify the subnetwork. See the
            individual node implementation to verify if additional metadata is needed.
account_metadata: dict[str, Any], optional
Any additional metadata to identify the Account. Any blockchains that utilize a username
for the address over a public key should specify the public keys here.
subaccount_address: str, optional
Either a cryptographic value or another unique identifier for the SubAccount
subaccount_metadata: dict[str, Any], optional
Any additional metadata needed to uniquely identify a SubAccount. NOTE: Two
SubAccounts with the same address but different metadata are considered different
SubAccounts.
block_height: int, optional
The index of the desired block.
block_hash: str, optional
The hash of the desired block.
selected_currency_symbols: str, Iterable[str], optional
            A single str, or an iterable of strings, of the symbols of the desired currencies to filter
the results upon. If this is specified, `selected_currency_decimals` must also be specified
and of equal length.
selected_currency_decimals: int, Iterable[int], optional
A single int, or an iterable of ints, representing the number of decimals
in the atomic unit of the desired currencies to filter the results upon. If this is specified,
`selected_currency_symbols` must also be specified and of equal length.
selected_currency_metadata: dict[str, Any], Iterable[Union[dict[str, Any], None]], optional
A single dict, or an iterable of dicts, representing the metadata of the
currencies. If this is specified, both `selected_currency_symbols` and `selected_currency_decimals`
            must also be specified and of equal length.
Returns
-------
AccountBalanceResponse
block_identifier: BlockIdentifier
balances: list[Amount]
metadata: dict[str, Any], optional
Raises
------
        ValueError: If the currency parameters are inconsistent.
"""
network_id = make_NetworkIdentifier(blockchain, network, subnetwork, subnetwork_metadata)
if account_metadata is None:
account_metadata = {}
account_id = make_AccountIdentifier(account_address, subaccount_address, subaccount_metadata, **account_metadata)
try:
block_id = make_PartialBlockIdentifier(block_height, block_hash)
except ValueError:
block_id = None
        if selected_currency_symbols is None and selected_currency_decimals is not None:
            raise ValueError("Both `selected_currency_symbols` and `selected_currency_decimals` must be provided if either is.")
        if selected_currency_decimals is None and selected_currency_symbols is not None:
            raise ValueError("Both `selected_currency_symbols` and `selected_currency_decimals` must be provided if either is.")
        if selected_currency_metadata is not None and (selected_currency_symbols is None or selected_currency_decimals is None):
            raise ValueError("If `selected_currency_metadata` is provided, both `selected_currency_symbols` and `selected_currency_decimals` must be provided.")
        if selected_currency_symbols is None and selected_currency_decimals is None and selected_currency_metadata is None:
            currencies = None
        else:
            currencies = make_Currencies(selected_currency_symbols, selected_currency_decimals, selected_currency_metadata)
return self._balance(network_id, account_id, block_id, currencies)
def _unspent_coins(self, network_id : NetworkIdentifier, account_id : AccountIdentifier, include_mempool : Optional[bool] = False, currencies : Optional[List[Currency]] = None) -> AccountCoinsResponse:
"""
        Private method for the account unspent coins method to provide an interface that
supports calls with existing objects.
"""
return acnt.unspent_coins(self.url, network_id, account_id, include_mempool, currencies, self.session)
def unspent_coins_of_account_on_current_network(self, account_address : str, account_metadata : Optional[Dict[str, Any]] = None,
subaccount_address : Optional[str] = None, subaccount_metadata : Optional[Dict[str, Any]] = None,
include_mempool : Optional[bool] = False,
selected_currency_symbols : Optional[Union[str, Iterable[str]]] = None,
selected_currency_decimals : Optional[Union[int, Iterable[int]]] = None,
selected_currency_metadata : Optional[Union[Dict[str, Any], Iterable[Union[Dict[str, Any], None]]]] = None) -> AccountCoinsResponse:
"""
Get the unspent coins of a specified account on the current network.
Parameters
----------
account_address: str
Either a cryptographic key or a username identifying the account.
account_metadata: dict[str, Any], optional
Any additional metadata to identify the Account. Any blockchains that utilize a username
for the address over a public key should specify the public keys here.
subaccount_address: str, optional
Either a cryptographic value or another unique identifier for the SubAccount
subaccount_metadata: dict[str, Any], optional
Any additional metadata needed to uniquely identify a SubAccount. NOTE: Two
SubAccounts with the same address but different metadata are considered different
SubAccounts.
include_mempool: bool, optional
Include the state from the mempool when looking up an account's unspent coins. NOTE:
using this functionality breaks any guarantee of idempotency. Defaults to False.
selected_currency_symbols: str, Iterable[str], optional
            A single str, or an iterable of strings, of the symbols of the desired currencies to filter
the results upon. If this is specified, `selected_currency_decimals` must also be specified
and of equal length.
selected_currency_decimals: int, Iterable[int], optional
A single int, or an iterable of ints, representing the number of decimals
in the atomic unit of the desired currencies to filter the results upon. If this is specified,
`selected_currency_symbols` must also be specified and of equal length.
selected_currency_metadata: dict[str, Any], Iterable[Union[dict[str, Any], None]], optional
A single dict, or an iterable of dicts, representing the metadata of the
currencies. If this is specified, both `selected_currency_symbols` and `selected_currency_decimals`
            must also be specified and of equal length.
Returns
-------
AccountCoinsResponse
            account_identifier: AccountIdentifier
coins: list[Coin]
metadata: dict[str, Any], optional
Raises
------
        ValueError: If the currency parameters are inconsistent.
"""
if account_metadata is None:
account_metadata = {}
account_id = make_AccountIdentifier(account_address, subaccount_address, subaccount_metadata, **account_metadata)
        if selected_currency_symbols is None and selected_currency_decimals is not None:
            raise ValueError("Both `selected_currency_symbols` and `selected_currency_decimals` must be provided if either is.")
        if selected_currency_decimals is None and selected_currency_symbols is not None:
            raise ValueError("Both `selected_currency_symbols` and `selected_currency_decimals` must be provided if either is.")
        if selected_currency_metadata is not None and (selected_currency_symbols is None or selected_currency_decimals is None):
            raise ValueError("If `selected_currency_metadata` is provided, both `selected_currency_symbols` and `selected_currency_decimals` must be provided.")
        if selected_currency_symbols is None and selected_currency_decimals is None and selected_currency_metadata is None:
            currencies = None
        else:
            currencies = make_Currencies(selected_currency_symbols, selected_currency_decimals, selected_currency_metadata)
return self._unspent_coins(self.current_network, account_id, include_mempool, currencies)
def unspent_coins_of_account_on_network(self, blockchain : str, network : str, account_address : str,
subnetwork : Optional[str] = None, subnetwork_metadata : Optional[Dict[str, Any]] = None,
account_metadata : Optional[Dict[str, Any]] = None,
subaccount_address : Optional[str] = None, subaccount_metadata : Optional[Dict[str, Any]] = None,
include_mempool : Optional[bool] = False,
selected_currency_symbols : Optional[Union[str, Iterable[str]]] = None,
selected_currency_decimals : Optional[Union[int, Iterable[int]]] = None,
selected_currency_metadata : Optional[Union[Dict[str, Any], Iterable[Union[Dict[str, Any], None]]]] = None) -> AccountBalanceResponse:
"""
Get the unspent coins of a specified account on the specified network.
Parameters
----------
blockchain: str
The name of the blockchain. Ex: 'bitcoin'
network: str
The chain-id or network identifier. Ex: 'mainnet' or 'testnet'
account_address: str
Either a cryptographic key or a username identifying the account.
subnetwork: str, optional
The name or identifier of the subnetwork if needed. Ex: 'shard-1'
subnetwork_metadata: dict[str, Any], optional
Any additional metadata needed to identify the subnetwork. See the
            individual node implementation to verify if additional metadata is needed.
account_metadata: dict[str, Any], optional
Any additional metadata to identify the Account. Any blockchains that utilize a username
for the address over a public key should specify the public keys here.
subaccount_address: str, optional
Either a cryptographic value or another unique identifier for the SubAccount
subaccount_metadata: dict[str, Any], optional
Any additional metadata needed to uniquely identify a SubAccount. NOTE: Two
SubAccounts with the same address but different metadata are considered different
SubAccounts.
include_mempool: bool, optional
Include the state from the mempool when looking up an account's unspent coins. NOTE:
using this functionality breaks any guarantee of idempotency. Defaults to False.
selected_currency_symbols: str, Iterable[str], optional
            A single str, or an iterable of strings, of the symbols of the desired currencies to filter
the results upon. If this is specified, `selected_currency_decimals` must also be specified
and of equal length.
selected_currency_decimals: int, Iterable[int], optional
A single int, or an iterable of ints, representing the number of decimals
in the atomic unit of the desired currencies to filter the results upon. If this is specified,
`selected_currency_symbols` must also be specified and of equal length.
selected_currency_metadata: dict[str, Any], Iterable[Union[dict[str, Any], None]], optional
A single dict, or an iterable of dicts, representing the metadata of the
currencies. If this is specified, both `selected_currency_symbols` and `selected_currency_decimals`
            must also be specified and of equal length.
Returns
-------
AccountCoinsResponse
            account_identifier: AccountIdentifier
coins: list[Coin]
metadata: dict[str, Any], optional
Raises
------
        ValueError: If the currency parameters are inconsistent.
"""
network_id = make_NetworkIdentifier(blockchain, network, subnetwork, subnetwork_metadata)
if account_metadata is None:
account_metadata = {}
account_id = make_AccountIdentifier(account_address, subaccount_address, subaccount_metadata, **account_metadata)
        if selected_currency_symbols is None and selected_currency_decimals is not None:
            raise ValueError("Both `selected_currency_symbols` and `selected_currency_decimals` must be provided if either is.")
        if selected_currency_decimals is None and selected_currency_symbols is not None:
            raise ValueError("Both `selected_currency_symbols` and `selected_currency_decimals` must be provided if either is.")
        if selected_currency_metadata is not None and (selected_currency_symbols is None or selected_currency_decimals is None):
            raise ValueError("If `selected_currency_metadata` is provided, both `selected_currency_symbols` and `selected_currency_decimals` must be provided.")
        if selected_currency_symbols is None and selected_currency_decimals is None and selected_currency_metadata is None:
            currencies = None
        else:
            currencies = make_Currencies(selected_currency_symbols, selected_currency_decimals, selected_currency_metadata)
return self._unspent_coins(network_id, account_id, include_mempool, currencies)
def _block(self, network_id : NetworkIdentifier, block_id : PartialBlockIdentifier) -> BlockResponse:
"""
        Private method for the get block method to provide an interface that
supports calls with existing objects.
"""
return blk.block(self.url, network_id, block_id, self.session)
def block_on_current_network(self, block_height : Optional[int] = None, block_hash : Optional[str] = None) -> BlockResponse:
"""
Get a block on the current network by either block height, or its hash.
        NOTE: At least one of `block_height` or `block_hash` must be specified.
Parameters
----------
block_height: int, optional
The index of the block
block_hash: str, optional
The hash of the block.
Returns
-------
BlockResponse
block: Block
other_transactions: list[TransactionIdentifier], optional
Raises
------
        ValueError: If neither `block_height` nor `block_hash` is specified.
"""
try:
block_id = make_PartialBlockIdentifier(block_height, block_hash)
except ValueError:
raise ValueError("Either the `block_height` or the `block_hash` must be specified.")
return self._block(self.current_network, block_id)
def block_on_network(self, blockchain : str, network : str,
block_height : Optional[int] = None, block_hash : Optional[str] = None,
subnetwork : Optional[str] = None, subnetwork_metadata : Optional[Dict[str, Any]] = None) -> BlockResponse:
"""
Get a block on the specified network by either block height, or its hash.
        NOTE: At least one of `block_height` or `block_hash` must be specified.
Parameters
----------
blockchain: str
The name of the blockchain. Ex: 'bitcoin'
network: str
The chain-id or network identifier. Ex: 'mainnet' or 'testnet'
block_height: int, optional
The index of the block
block_hash: str, optional
The hash of the block.
subnetwork: str, optional
The name or identifier of the subnetwork if needed. Ex: 'shard-1'
subnetwork_metadata: dict[str, Any], optional
Any additional metadata needed to identify the subnetwork. See the
            individual node implementation to verify if additional metadata is needed.
Returns
-------
BlockResponse
block: Block
other_transactions: list[TransactionIdentifier], optional
"""
network_id = make_NetworkIdentifier(blockchain, network, subnetwork, subnetwork_metadata)
try:
block_id = make_PartialBlockIdentifier(block_height, block_hash)
except ValueError:
raise ValueError("Either the `block_height` or the `block_hash` must be specified.")
return self._block(network_id, block_id)
def _block_transaction(self, network_id : NetworkIdentifier, block_id : BlockIdentifier, transaction_id : TransactionIdentifier) -> BlockTransactionResponse:
"""
        Private method for the get block transaction method to provide an interface that
supports calls with existing objects.
"""
return blk.transaction(self.url, network_id, block_id, transaction_id, self.session)
def block_transaction_on_current_network(self, block_height : int, block_hash : str, transaction_hash : str) -> Transaction:
"""
        Get the specified transaction on the given block for the current network.
Parameters
----------
block_height: int
The index of the block.
block_hash: str
The hash of the block.
transaction_hash: str
The hash of the transaction.
Returns
-------
Transaction
transaction_identifier: TransactionIdentifier
operations: list[Operation]
related_transactions: list[RelatedTransaction], optional
metadata: dict[str, Any], optional
"""
block_id = BlockIdentifier(index=block_height, hash=block_hash)
transaction_id = TransactionIdentifier(hash=transaction_hash)
return self._block_transaction(self.current_network, block_id, transaction_id)
def block_transaction_on_network(self, blockchain : str, network : str,
block_height: int, block_hash : str, transaction_hash : str,
subnetwork : Optional[str] = None, subnetwork_metadata : Optional[Dict[str, Any]] = None) -> Transaction:
"""
Get the specified transaction on the given block for the specified network.
Parameters
----------
blockchain: str
The name of the blockchain. Ex: 'bitcoin'
network: str
The chain-id or network identifier. Ex: 'mainnet' or 'testnet'
block_height: int
The index of the block.
block_hash: str
The hash of the block.
transaction_hash: str
The hash of the transaction.
subnetwork: str, optional
The name or identifier of the subnetwork if needed. Ex: 'shard-1'
subnetwork_metadata: dict[str, Any], optional
Any additional metadata needed to identify the subnetwork. See the
            individual node implementation to verify if additional metadata is needed.
Returns
-------
Transaction
transaction_identifier: TransactionIdentifier
operations: list[Operation]
related_transactions: list[RelatedTransaction], optional
metadata: dict[str, Any], optional
"""
network_id = make_NetworkIdentifier(blockchain, network, subnetwork, subnetwork_metadata)
block_id = BlockIdentifier(index=block_height, hash=block_hash)
transaction_id = TransactionIdentifier(hash=transaction_hash)
return self._block_transaction(network_id, block_id, transaction_id)
def _all_mempool_transactions(self, network_id : NetworkIdentifier, **kwargs) -> List[TransactionIdentifier]:
"""
        Private method for the get all mempool transactions method to provide an interface that
supports calls with existing objects.
"""
return memp.all_transactions(self.url, network_id, self.session, **kwargs)
def all_mempool_transactions_on_current_network(self, **kwargs) -> List[TransactionIdentifier]:
"""
Get all the transactions in the mempool of the current network.
Parameters
----------
**kwargs
Any additional metadata to be passed along to the /mempool request.
See the individual node implementation to verify if additional
metadata is needed.
Returns
-------
list[TransactionIdentifier]
"""
return self._all_mempool_transactions(self.current_network, **kwargs)
def all_mempool_transactions_on_network(self, blockchain : str, network : str, subnetwork :
Optional[str] = None, subnetwork_metadata : Optional[Dict[str, Any]] = None,
**kwargs) -> List[TransactionIdentifier]:
"""
Get all the transactions in the mempool of the specified network.
Parameters
----------
blockchain: str
The name of the blockchain. Ex: 'bitcoin'
network: str
The chain-id or network identifier. Ex: 'mainnet' or 'testnet'
subnetwork: str, optional
The name or identifier of the subnetwork if needed. Ex: 'shard-1'
subnetwork_metadata: dict[str, Any], optional
Any additional metadata needed to identify the subnetwork. See the
            individual node implementation to verify if additional metadata is needed.
**kwargs
Any additional metadata to be passed along to the /mempool request.
See the individual node implementation to verify if additional
metadata is needed.
"""
network_id = make_NetworkIdentifier(blockchain, network, subnetwork, subnetwork_metadata)
return self._all_mempool_transactions(network_id, **kwargs)
def _mempool_transaction(self, network_id : NetworkIdentifier, transaction_id : TransactionIdentifier) -> MempoolTransactionResponse:
"""
        Private method for the get transaction from mempool method to provide an interface that
supports calls with existing objects.
"""
return memp.transaction(self.url, network_id, transaction_id, self.session)
def mempool_transaction_on_current_network(self, transaction_hash : str) -> MempoolTransactionResponse:
"""
Get the specified transaction from the mempool of the current network.
Parameters
----------
transaction_hash: str
The hash of the transaction.
Returns
-------
MempoolTransactionResponse
transaction_identifier: TransactionIdentifier
metadata: dict[str, Any], optional
"""
transaction_id = TransactionIdentifier(hash=transaction_hash)
return self._mempool_transaction(self.current_network, transaction_id)
def mempool_transaction_on_network(self, blockchain : str, network : str, transaction_hash : str,
subnetwork : Optional[str] = None,
subnetwork_metadata : Optional[Dict[str, Any]] = None) -> MempoolTransactionResponse:
"""
Get the specified transaction from the mempool on the specified network.
Parameters
----------
blockchain: str
The name of the blockchain. Ex: 'bitcoin'
network: str
The chain-id or network identifier. Ex: 'mainnet' or 'testnet'
transaction_hash: str
The hash of the transaction
subnetwork: str, optional
The name or identifier of the subnetwork if needed. Ex: 'shard-1'
subnetwork_metadata: dict[str, Any], optional
Any additional metadata needed to identify the subnetwork. See the
            individual node implementation to verify if additional metadata is needed.
Returns
-------
MempoolTransactionResponse
transaction_identifier: TransactionIdentifier
metadata: dict[str, Any], optional
"""
        network_id = make_NetworkIdentifier(blockchain, network, subnetwork, subnetwork_metadata)
transaction_id = TransactionIdentifier(hash=transaction_hash)
return self._mempool_transaction(network_id, transaction_id)
class RosettaAPIExt(RosettaAPI):
"""
    This API object includes some generalized helper methods that can't be
    guaranteed to work for all Rosetta implementations but should prove
    helpful in the general case.
"""
def discover_networks(self, network_metadata : Optional[Dict[str, Any]] = None, **kwargs) -> List[net.NetworkOverview]:
"""
Discover available networks and get the supported options and status for each.
Parameters
----------
network_metadata: dict[str, Any], optional:
Any additional metadata to be passed along to the /network/options
and /network/status routes. See the individual node implementation
to verify if additional metadata is needed.
**kwargs
Any additional metadata to be passed along to the /network/list request.
See the individual node implementation to verify if additional
metadata is needed.
Returns
-------
list[NetworkOverview]
network: NetworkIdentifier
options: NetworkOptionsResponse
status: NetworkStatusResponse
Fails When
        ----------
        The /network/options and /network/status endpoints need additional, but
        different, metadata.
"""
return net.discover(self.url, self.session, network_metadata, **kwargs)
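# A short usage sketch of the client above. The node URL is a placeholder and
# assumes a Rosetta-compatible endpoint is reachable there:
#
#     api = RosettaAPI("http://localhost:8080")  # placeholder node URL
#     networks = api.list_supported_networks()
#     api.current_network = networks[0]
#     status = api.current_network_status()
#     block = api.block_on_current_network(
#         block_height=status.current_block_identifier.index
#     )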
| 48.709567
| 224
| 0.649496
| 4,638
| 42,767
| 5.840233
| 0.05843
| 0.063794
| 0.022151
| 0.019936
| 0.881751
| 0.855798
| 0.826079
| 0.809466
| 0.796508
| 0.783549
| 0
| 0.000295
| 0.285617
| 42,767
| 877
| 225
| 48.765108
| 0.886292
| 0.453995
| 0
| 0.528926
| 0
| 0.004132
| 0.093657
| 0.040929
| 0
| 0
| 0
| 0
| 0
| 1
| 0.132231
| false
| 0
| 0.045455
| 0.012397
| 0.305785
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8e657bd801f9bb6977169eef4ce64d46e826ea72
| 102
|
py
|
Python
|
booster/models/__init__.py
|
zknight/booster
|
7b335e9206c3ec2b46314becb381e266b72aedcb
|
[
"MIT"
] | null | null | null |
booster/models/__init__.py
|
zknight/booster
|
7b335e9206c3ec2b46314becb381e266b72aedcb
|
[
"MIT"
] | null | null | null |
booster/models/__init__.py
|
zknight/booster
|
7b335e9206c3ec2b46314becb381e266b72aedcb
|
[
"MIT"
] | null | null | null |
from user import *
from event import *
from news import *
from picture import *
from product import *
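# NOTE: The implicit relative imports above only resolve under Python 2. On
# Python 3 the equivalent would use explicit relative imports, assuming the
# sibling modules (user.py, event.py, ...) live in this same package:
#
#     from .user import *
#     from .event import *
#     from .news import *
#     from .picture import *
#     from .product import *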
| 17
| 21
| 0.754902
| 15
| 102
| 5.133333
| 0.466667
| 0.519481
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.196078
| 102
| 5
| 22
| 20.4
| 0.939024
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d910c9eb856b8902b4692fd9afbaa1c4c2a20f9c
| 5,406
|
py
|
Python
|
model/vae.py
|
cheonbok94/Pytorch-Latent-Constraints-Learning-to-Generate-Conditionally-from-Unconditional-Generative-Models
|
0dbd182b294e0c6d3ad0deda3be1dd855fd57617
|
[
"MIT"
] | 10
|
2018-07-13T06:09:59.000Z
|
2021-03-02T13:40:41.000Z
|
model/vae.py
|
cheonbok94/Pytorch-Latent-Constraints-Learning-to-Generate-Conditionally-from-Unconditional-Generative-Models
|
0dbd182b294e0c6d3ad0deda3be1dd855fd57617
|
[
"MIT"
] | 1
|
2021-08-12T08:43:08.000Z
|
2021-08-12T08:43:08.000Z
|
model/vae.py
|
cheonbok94/Pytorch-Latent-Constraints-Learning-to-Generate-Conditionally-from-Unconditional-Generative-Models
|
0dbd182b294e0c6d3ad0deda3be1dd855fd57617
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from torch.autograd import Variable
from .sub_layer import Linear,View
import pdb
class Celeba_VAE(nn.Module):
def __init__(self,input_size=128,d_model=1024,layer_num=3):
super(Celeba_VAE,self).__init__()
self.d_model = d_model
self.layer_num = layer_num
self.encoder = self.build_encoder(self.d_model,self.layer_num)
self.sig_layer = nn.Softplus()
self.decoder = self.build_decoder(self.d_model,self.layer_num)
self.sigmoid = nn.Sigmoid()
def build_encoder(self,d_model,layer_num):
encoder_layerList = []
encoder_layerList.append(nn.Conv2d(in_channels = 3,out_channels = 256,kernel_size = 5,stride =2,padding=1))
#encoder_layerList.append(nn.BatchNorm2d(256))
encoder_layerList.append(nn.ReLU())
encoder_layerList.append(nn.Conv2d(in_channels = 256 ,out_channels = 256*2 , kernel_size = 5,stride =2,padding=1))
#encoder_layerList.append(nn.BatchNorm2d(256*2))
encoder_layerList.append(nn.ReLU())
encoder_layerList.append(nn.Conv2d(in_channels=512 , out_channels = 1024, kernel_size = 3,stride =2,padding=1))
#encoder_layerList.append(nn.BatchNorm2d(512*2))
encoder_layerList.append(nn.ReLU())
encoder_layerList.append(nn.Conv2d(in_channels = 1024,out_channels = 2048,kernel_size = 3,stride =2,padding=1))
#encoder_layerList.append(nn.BatchNorm2d(1024*2))
encoder_layerList.append(nn.ReLU())
encoder_layerList.append(View())
encoder_layerList.append(nn.Linear(4*4*2048,2048))
return nn.Sequential(*encoder_layerList)
def build_decoder(self,d_model,layer_num):
decoder_layerList = []
decoder_layerList.append(nn.Linear(d_model,2048*4*4))
decoder_layerList.append(View([2048,4,4]))
decoder_layerList.append(nn.ConvTranspose2d(2048,1024,3,stride=2 ,padding =1 ,output_padding=1))
#decoder_layerList.append(nn.BatchNorm2d(1024))
decoder_layerList.append(nn.ReLU())
decoder_layerList.append(nn.ConvTranspose2d(1024,512,3,stride=2 ,padding =1 ,output_padding=0))
#decoder_layerList.append(nn.BatchNorm2d(512))
decoder_layerList.append(nn.ReLU())
decoder_layerList.append(nn.ConvTranspose2d(512,256,5,stride=2,padding=1 ,output_padding=0))
#decoder_layerList.append(nn.BatchNorm2d(256))
decoder_layerList.append(nn.ReLU())
decoder_layerList.append(nn.ConvTranspose2d(256,3,5,stride=2 ,padding = 1 ,output_padding =1))
return nn.Sequential(*decoder_layerList)
def reparameterize(self,mu,sig_var):
        # Reparameterization trick: z = mu + std * eps, with eps ~ N(0, 1).
if self.training:
            std = sig_var  # sig_var is the standard deviation (Softplus output), not log(sigma^2)
eps = std.data.new(std.size()).normal_(std=1)
return eps.mul(std).add_(mu)
else:
return mu
def encode(self,x):
encoder_out = self.encoder(x)
sig_var , mu_var = encoder_out.chunk(2,dim=-1)
sig_var = self.sig_layer(sig_var)
z = self.reparameterize(mu_var,sig_var)
return sig_var,mu_var,z
def decode(self,z):
output = self.decoder(z)
output = self.sigmoid(output)
return output
def forward(self,x):
encoder_out = self.encoder(x)
sig_var , mu_var = encoder_out.chunk(2,dim=-1)
sig_var = self.sig_layer(sig_var)
z = self.reparameterize(mu_var,sig_var)
output = self.decoder(z)
output = self.sigmoid(output)
return output,z,mu_var,sig_var
class Mnist_VAE(nn.Module):
def __init__(self,input_dim=28*28,d_model=1024,layer_num=3):
super(Mnist_VAE,self).__init__()
self.d_model = d_model
self.layer_num = layer_num
self.input_dim = input_dim
self.encoder = self.build_encoder(self.input_dim,self.d_model,self.layer_num)
self.sig_layer = nn.Softplus()
self.decoder = self.build_decoder(self.input_dim,self.d_model,self.layer_num)
self.sigmoid = nn.Sigmoid()
def build_encoder(self,input_dim,d_model,layer_num):
encoder_layerList = []
for i in range(layer_num):
if i == 0 :
encoder_layerList.append(nn.Linear(input_dim,d_model))
else:
encoder_layerList.append(nn.Linear(d_model,d_model))
encoder_layerList.append(nn.ReLU())
encoder_layerList.append(nn.Linear(d_model,2*d_model))
return nn.Sequential(*encoder_layerList)
def build_decoder(self,input_dim,d_model,layer_num):
decoder_layerList = []
for i in range(layer_num):
decoder_layerList.append(nn.Linear(d_model,d_model))
decoder_layerList.append(nn.ReLU())
decoder_layerList.append(nn.Linear(d_model,input_dim))
return nn.Sequential(*decoder_layerList)
def reparameterize(self,mu,sig_var):
        # Reparameterization trick: z = mu + std * eps, with eps ~ N(0, 1).
if self.training:
            std = sig_var  # sig_var is the standard deviation (Softplus output), not log(sigma^2)
eps = std.data.new(std.size()).normal_(std=1)
return eps.mul(std).add_(mu)
else:
return mu
def encode(self,x):
x = x.view(-1,28*28)
encoder_out = self.encoder(x)
sig_var , mu_var = encoder_out.chunk(2,dim=-1)
sig_var = self.sig_layer(sig_var)
z = self.reparameterize(mu_var,sig_var)
return sig_var,mu_var,z
def decode(self,z):
output = self.decoder(z)
output = self.sigmoid(output)
return output
def forward(self,x):
x = x.view(-1,28*28)
encoder_out = self.encoder(x)
sig_var , mu_var = encoder_out.chunk(2,dim=-1)
sig_var = self.sig_layer(sig_var)
z = self.reparameterize(mu_var,sig_var)
output = self.decoder(z)
output = self.sigmoid(output)
return output,z,mu_var,sig_var
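# A minimal smoke-test sketch for the MNIST model above (standard PyTorch
# only). The KL term treats sig_var as a standard deviation, matching
# reparameterize; batch size and hyperparameters are illustrative.
if __name__ == "__main__":
    model = Mnist_VAE(input_dim=28 * 28, d_model=1024, layer_num=3)
    x = torch.rand(16, 1, 28, 28)  # fake batch of MNIST-sized images
    output, z, mu, std = model(x)
    # ELBO: Bernoulli reconstruction loss plus KL(N(mu, std) || N(0, 1)).
    recon = nn.functional.binary_cross_entropy(output, x.view(-1, 28 * 28), reduction="sum")
    kl = -0.5 * torch.sum(1 + torch.log(std.pow(2) + 1e-8) - mu.pow(2) - std.pow(2))
    (recon + kl).backward()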
| 33.57764
| 117
| 0.721606
| 837
| 5,406
| 4.431302
| 0.106332
| 0.133459
| 0.142087
| 0.110003
| 0.912645
| 0.877865
| 0.829334
| 0.72742
| 0.708547
| 0.638177
| 0
| 0.039861
| 0.150758
| 5,406
| 160
| 118
| 33.7875
| 0.768024
| 0.081206
| 0
| 0.672269
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.042017
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d96e8249519af582022fb08bfb38f12920e0b4fb
| 19
|
py
|
Python
|
plugins/pelican-toc/__init__.py
|
mohnjahoney/website_source
|
edc86a869b90ae604f32e736d9d5ecd918088e6a
|
[
"MIT"
] | 13
|
2020-01-27T09:02:25.000Z
|
2022-01-20T07:45:26.000Z
|
plugins/pelican-toc/__init__.py
|
mohnjahoney/website_source
|
edc86a869b90ae604f32e736d9d5ecd918088e6a
|
[
"MIT"
] | 110
|
2017-08-11T12:54:00.000Z
|
2022-03-20T22:04:20.000Z
|
plugins/pelican-toc/__init__.py
|
mohnjahoney/website_source
|
edc86a869b90ae604f32e736d9d5ecd918088e6a
|
[
"MIT"
] | 59
|
2017-11-07T05:04:42.000Z
|
2022-03-22T19:39:23.000Z
|
from .toc import *
| 9.5
| 18
| 0.684211
| 3
| 19
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 19
| 1
| 19
| 19
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
794a2b93e64430465d077ba6e09c84da1dafe8dc
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/poetry/core/packages/utils/utils.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/poetry/core/packages/utils/utils.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/poetry/core/packages/utils/utils.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/0c/1f/be/81c5f1985c122a8a1b09de03082e9c77a993fa809dc5855c6cc1a9137a
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.416667
| 0
| 96
| 1
| 96
| 96
| 0.479167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7997b051ab8fb84c8959a51929714e3537a0cbc4
| 35
|
py
|
Python
|
test/test_adapter.py
|
EarnestResearch/dbt-athena
|
2409993aade3791bb5e889feb99e014613e8d12a
|
[
"Apache-2.0"
] | 43
|
2020-01-14T18:55:42.000Z
|
2022-03-23T12:16:59.000Z
|
test/test_adapter.py
|
EarnestResearch/dbt-athena
|
2409993aade3791bb5e889feb99e014613e8d12a
|
[
"Apache-2.0"
] | 19
|
2020-01-17T10:02:07.000Z
|
2021-08-05T21:41:25.000Z
|
test/test_adapter.py
|
EarnestResearch/dbt-athena
|
2409993aade3791bb5e889feb99e014613e8d12a
|
[
"Apache-2.0"
] | 14
|
2020-01-18T17:49:48.000Z
|
2020-12-16T09:44:17.000Z
|
def test_config():
assert True
| 11.666667
| 18
| 0.685714
| 5
| 35
| 4.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.228571
| 35
| 2
| 19
| 17.5
| 0.851852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5c391bdca6c76bf7f294fb844bee38c0c1b6a128
| 52
|
py
|
Python
|
zmq_cache_client/__init__.py
|
yunluyl/zmq_cache_client_py
|
8ab691f9b871f1b84beee66a8e59d0d2a18db17e
|
[
"MIT"
] | null | null | null |
zmq_cache_client/__init__.py
|
yunluyl/zmq_cache_client_py
|
8ab691f9b871f1b84beee66a8e59d0d2a18db17e
|
[
"MIT"
] | null | null | null |
zmq_cache_client/__init__.py
|
yunluyl/zmq_cache_client_py
|
8ab691f9b871f1b84beee66a8e59d0d2a18db17e
|
[
"MIT"
] | null | null | null |
from zmq_cache_client.zmq_cache import Cache, Table
| 26
| 51
| 0.865385
| 9
| 52
| 4.666667
| 0.666667
| 0.380952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096154
| 52
| 1
| 52
| 52
| 0.893617
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5c3a13208748c22a52df95efe1836670d2e90288
| 24,332
|
py
|
Python
|
tests/test_reportseff.py
|
troycomi/reportseff
|
0bf03c9ad180cb5cd2714b454520cb7824736347
|
[
"MIT"
] | 12
|
2020-09-23T15:03:06.000Z
|
2022-03-25T23:19:26.000Z
|
tests/test_reportseff.py
|
troycomi/reportseff
|
0bf03c9ad180cb5cd2714b454520cb7824736347
|
[
"MIT"
] | 3
|
2021-06-08T13:13:19.000Z
|
2021-10-13T15:54:32.000Z
|
tests/test_reportseff.py
|
troycomi/reportseff
|
0bf03c9ad180cb5cd2714b454520cb7824736347
|
[
"MIT"
] | 2
|
2021-04-20T10:57:07.000Z
|
2022-02-23T19:14:47.000Z
|
"""Test cli usage."""
from click.testing import CliRunner
import pytest
from reportseff import console
from reportseff.db_inquirer import SacctInquirer
from reportseff.job_collection import JobCollection
from reportseff.output_renderer import OutputRenderer
@pytest.fixture
def mock_inquirer(mocker):
"""Override valid formats to prevent calls to shell."""
def mock_valid(self):
return (
"JobID,State,Elapsed,JobIDRaw,State,TotalCPU,AllocCPUS,"
"REQMEM,NNodes,MaxRSS,Timelimit"
).split(",")
mocker.patch.object(SacctInquirer, "get_valid_formats", new=mock_valid)
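The fixture above swaps SacctInquirer.get_valid_formats for a plain function via mocker.patch.object(..., new=...). A minimal, self-contained sketch of that pytest-mock pattern, with a hypothetical Greeter class that is not part of reportseff:

class Greeter:
    def greet(self):
        return "hello"

def test_patched_greet(mocker):
    # the replacement method is in effect only for the duration of this test
    def fake_greet(self):
        return "patched"
    mocker.patch.object(Greeter, "greet", new=fake_greet)
    assert Greeter().greet() == "patched"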
def test_directory_input(mocker, mock_inquirer):
"""Able to get jobs from directory calls."""
mocker.patch("reportseff.console.which", return_value=True)
runner = CliRunner()
sub_result = mocker.MagicMock()
sub_result.returncode = 0
sub_result.stdout = (
"1|01:27:42|24418435|24418435||1|1Gn|"
"COMPLETED|03:00:00|01:27:29\n"
"1|01:27:42|24418435.batch|24418435.batch|499092K|1|1Gn|"
"COMPLETED||01:27:29\n"
"1|01:27:42|24418435.extern|24418435.extern|1376K|1|1Gn|"
"COMPLETED||00:00:00\n"
)
mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result)
def set_jobs(self, directory):
self.set_jobs(("24418435",))
mocker.patch.object(JobCollection, "set_out_dir", new=set_jobs)
result = runner.invoke(
console.main,
"--no-color",
)
assert result.exit_code == 0
# remove header
output = result.output.split("\n")[1:]
assert output[0].split() == [
"24418435",
"COMPLETED",
"01:27:42",
"48.7%",
"99.8%",
"47.7%",
]
def test_directory_input_exception(mocker, mock_inquirer):
"""Catch exceptions in setting jobs from directory."""
mocker.patch("reportseff.console.which", return_value=True)
runner = CliRunner()
sub_result = mocker.MagicMock()
sub_result.returncode = 0
sub_result.stdout = (
"24418435|24418435|COMPLETED|1|"
"01:27:29|01:27:42|03:00:00|1Gn||1|\n"
"24418435.batch|24418435.batch|COMPLETED|1|"
"01:27:29|01:27:42||1Gn|499092K|1|1\n"
"24418435.extern|24418435.extern|COMPLETED|1|"
"00:00:00|01:27:42||1Gn|1376K|1|1\n"
)
mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result)
def set_jobs(self, directory):
raise ValueError("Testing EXCEPTION")
mocker.patch.object(JobCollection, "set_out_dir", new=set_jobs)
result = runner.invoke(console.main, "--no-color")
assert result.exit_code == 1
assert "Testing EXCEPTION" in result.output
def test_debug_option(mocker, mock_inquirer):
"""Setting debug prints subprocess result."""
mocker.patch("reportseff.console.which", return_value=True)
runner = CliRunner()
sub_result = mocker.MagicMock()
sub_result.returncode = 0
sub_result.stdout = (
"16|00:00:00|23000233|23000233||1|4000Mc|"
"CANCELLED by 129319|6-00:00:00|00:00:00\n"
)
mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result)
result = runner.invoke(
console.main,
"--no-color --debug 23000233",
)
assert result.exit_code == 0
# remove header
output = result.output.split("\n")
assert output[0] == (
"16|00:00:00|23000233|23000233||1|4000Mc|"
"CANCELLED by 129319|6-00:00:00|00:00:00"
)
assert output[3].split() == [
"23000233",
"CANCELLED",
"00:00:00",
"0.0%",
"---",
"0.0%",
]
def test_process_failure(mocker, mock_inquirer):
"""Catch exceptions in process_entry by printing the offending entry."""
mocker.patch("reportseff.console.which", return_value=True)
runner = CliRunner()
sub_result = mocker.MagicMock()
sub_result.returncode = 0
sub_result.stdout = (
"16|00:00:00|23000233|23000233||1|4000Mc|"
"CANCELLED by 129319|6-00:00:00|00:00:00\n"
)
mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result)
mocker.patch.object(
JobCollection, "process_entry", side_effect=Exception("TESTING")
)
result = runner.invoke(
console.main,
"--no-color 23000233 --format JobID%>,State,Elapsed%>,CPUEff,MemEff",
)
assert result.exit_code != 0
# remove header
output = result.output.split("\n")
assert output[0] == "Error processing entry: " + (
"{'AllocCPUS': '16', 'Elapsed': '00:00:00', 'JobID': '23000233', "
"'JobIDRaw': '23000233', 'MaxRSS': '', 'NNodes': '1', "
"'REQMEM': '4000Mc', 'State': 'CANCELLED by 129319', "
"'TotalCPU': '6-00:00:00'}"
)
def test_short_output(mocker, mock_inquirer):
"""Outputs with 20 or fewer entries are directly printed."""
mocker.patch("reportseff.console.which", return_value=True)
runner = CliRunner()
sub_result = mocker.MagicMock()
sub_result.returncode = 0
sub_result.stdout = (
"23000233|23000233|CANCELLED by 129319|16|"
"00:00:00|00:00:00|6-00:00:00|4000Mc||1|\n"
)
mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result)
mocker.patch("reportseff.console.len", return_value=20)
mocker.patch.object(OutputRenderer, "format_jobs", return_value="output")
mock_click = mocker.patch("reportseff.console.click.echo")
result = runner.invoke(console.main, "--no-color 23000233")
assert result.exit_code == 0
mock_click.assert_called_once_with("output", color=False)
def test_long_output(mocker, mock_inquirer):
"""Outputs with more than 20 entries are echoed via pager."""
mocker.patch("reportseff.console.which", return_value=True)
runner = CliRunner()
sub_result = mocker.MagicMock()
sub_result.returncode = 0
sub_result.stdout = (
"16|00:00:00|23000233|23000233||1|4000Mc|CANCELLED by 129319|00:00:00\n"
)
mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result)
mocker.patch("reportseff.console.len", return_value=21)
mocker.patch.object(OutputRenderer, "format_jobs", return_value="output")
mock_click = mocker.patch("reportseff.console.click.echo_via_pager")
result = runner.invoke(console.main, "--no-color 23000233")
assert result.exit_code == 0
mock_click.assert_called_once_with("output", color=False)
def test_simple_job(mocker, mock_inquirer):
"""Can get efficiency from a single job."""
mocker.patch("reportseff.console.which", return_value=True)
runner = CliRunner()
sub_result = mocker.MagicMock()
sub_result.returncode = 0
sub_result.stdout = (
"1|01:27:42|24418435|24418435||1|1Gn|"
"COMPLETED|01:27:29\n"
"1|01:27:42|24418435.batch|24418435.batch|499092K|1|1Gn|"
"COMPLETED|01:27:29\n"
"1|01:27:42|24418435.extern|24418435.extern|1376K|1|1Gn|"
"COMPLETED|00:00:00\n"
)
mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result)
result = runner.invoke(
console.main,
"--no-color 24418435 --format JobID%>,State,Elapsed%>,CPUEff,MemEff",
)
assert result.exit_code == 0
# remove header
output = result.output.split("\n")[1:]
assert output[0].split() == ["24418435", "COMPLETED", "01:27:42", "99.8%", "47.7%"]
def test_simple_user(mocker, mock_inquirer):
"""Can limit outputs by user."""
mocker.patch("reportseff.console.which", return_value=True)
runner = CliRunner()
sub_result = mocker.MagicMock()
sub_result.returncode = 0
sub_result.stdout = (
"1|01:27:42|24418435|24418435||1|1Gn|"
"COMPLETED|01:27:29\n"
"1|01:27:42|24418435.batch|24418435.batch|499092K|1|1Gn|"
"COMPLETED|01:27:29\n"
"1|01:27:42|24418435.extern|24418435.extern|1376K|1|1Gn|"
"COMPLETED|00:00:00\n"
"1|21:14:48|25569410|25569410||1|4000Mc|COMPLETED|19:28:36\n"
"1|21:14:49|25569410.extern|25569410.extern|1548K|1|4000Mc|"
"COMPLETED|00:00:00\n"
"1|21:14:43|25569410.0|25569410.0|62328K|1|4000Mc|COMPLETED|19:28:36\n"
)
mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result)
result = runner.invoke(
console.main,
"--no-color --user test --format JobID%>,State,Elapsed%>,CPUEff,MemEff",
)
assert result.exit_code == 0
# remove header
output = result.output.split("\n")[1:]
assert output[0].split() == ["24418435", "COMPLETED", "01:27:42", "99.8%", "47.7%"]
assert output[1].split() == ["25569410", "COMPLETED", "21:14:48", "91.7%", "1.6%"]
def test_format_add(mocker, mock_inquirer):
"""Can add to format specifier."""
mocker.patch("reportseff.console.which", return_value=True)
runner = CliRunner()
mock_jobs = mocker.patch("reportseff.console.get_jobs", return_value=("Testing", 1))
result = runner.invoke(console.main, "--no-color --format=test")
assert result.exit_code == 0
assert mock_jobs.call_args[1]["format_str"] == "test"
# test adding onto end
result = runner.invoke(console.main, "--no-color --format=+test")
assert result.exit_code == 0
assert (
mock_jobs.call_args[1]["format_str"]
== "JobID%>,State,Elapsed%>,TimeEff,CPUEff,MemEff,test"
)
def test_since(mocker, mock_inquirer):
"""Can limit outputs by time since argument."""
mocker.patch("reportseff.console.which", return_value=True)
runner = CliRunner()
sub_result = mocker.MagicMock()
sub_result.returncode = 0
sub_result.stdout = (
"1|01:27:42|24418435|24418435||1|1Gn|"
"COMPLETED|01:27:29\n"
"1|01:27:42|24418435.batch|24418435.batch|499092K|1|1Gn|"
"COMPLETED|01:27:29\n"
"1|01:27:42|24418435.extern|24418435.extern|1376K|1|1Gn|"
"COMPLETED|00:00:00\n"
"1|21:14:48|25569410|25569410||1|4000Mc|COMPLETED|19:28:36\n"
"1|21:14:49|25569410.extern|25569410.extern|1548K|1|4000Mc|"
"COMPLETED|00:00:00\n"
"1|21:14:43|25569410.0|25569410.0|62328K|1|4000Mc|COMPLETED|19:28:36\n"
)
mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result)
result = runner.invoke(
console.main,
"--no-color --since 200406 24418435 25569410 "
"--format JobID%>,State,Elapsed%>,CPUEff,MemEff",
)
assert result.exit_code == 0
# remove header
output = result.output.split("\n")[1:]
assert output[0].split() == ["24418435", "COMPLETED", "01:27:42", "99.8%", "47.7%"]
assert output[1].split() == ["25569410", "COMPLETED", "21:14:48", "91.7%", "1.6%"]
def test_simple_state(mocker, mock_inquirer):
"""Can limit outputs by filtering state."""
mocker.patch("reportseff.console.which", return_value=True)
runner = CliRunner()
sub_result = mocker.MagicMock()
sub_result.returncode = 0
sub_result.stdout = (
"1|01:27:42|24418435|24418435||1|1Gn|"
"COMPLETED|01:27:29\n"
"1|01:27:42|24418435.batch|24418435.batch|499092K|1|1Gn|"
"COMPLETED|01:27:29\n"
"1|01:27:42|24418435.extern|24418435.extern|1376K|1|1Gn|"
"COMPLETED|00:00:00\n"
"1|21:14:48|25569410|25569410||1|4000Mc|RUNNING|19:28:36\n"
"1|21:14:49|25569410.extern|25569410.extern|1548K|1|4000Mc|"
"RUNNING|00:00:00\n"
"1|21:14:43|25569410.0|25569410.0|62328K|1|4000Mc|RUNNING|19:28:36\n"
)
mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result)
result = runner.invoke(
console.main,
"--no-color --state completed "
"25569410 24418435 --format JobID%>,State,Elapsed%>,CPUEff,MemEff",
)
assert result.exit_code == 0
# remove header
output = result.output.split("\n")[1:]
assert output[0].split() == ["24418435", "COMPLETED", "01:27:42", "99.8%", "47.7%"]
# other is suppressed by state filter
assert output[1].split() == []
def test_simple_not_state(mocker, mock_inquirer):
"""Can limit outputs by removing state."""
mocker.patch("reportseff.console.which", return_value=True)
runner = CliRunner()
sub_result = mocker.MagicMock()
sub_result.returncode = 0
sub_result.stdout = (
"1|01:27:42|24418435|24418435||1|1Gn|"
"COMPLETED|01:27:29\n"
"1|01:27:42|24418435.batch|24418435.batch|499092K|1|1Gn|"
"COMPLETED|01:27:29\n"
"1|01:27:42|24418435.extern|24418435.extern|1376K|1|1Gn|"
"COMPLETED|00:00:00\n"
"1|21:14:48|25569410|25569410||1|4000Mc|RUNNING|19:28:36\n"
"1|21:14:49|25569410.extern|25569410.extern|1548K|1|4000Mc|"
"RUNNING|00:00:00\n"
"1|21:14:43|25569410.0|25569410.0|62328K|1|4000Mc|RUNNING|19:28:36\n"
)
mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result)
result = runner.invoke(
console.main,
"--no-color --not-state Running "
"25569410 24418435 --format JobID%>,State,Elapsed%>,CPUEff,MemEff",
)
assert result.exit_code == 0
# remove header
output = result.output.split("\n")[1:]
assert output[0].split() == ["24418435", "COMPLETED", "01:27:42", "99.8%", "47.7%"]
# other is suppressed by state filter
assert output[1].split() == []
def test_invalid_not_state(mocker, mock_inquirer):
"""When not state isn't found, return all jobs."""
mocker.patch("reportseff.console.which", return_value=True)
runner = CliRunner()
sub_result = mocker.MagicMock()
sub_result.returncode = 0
sub_result.stdout = (
"1|01:27:42|24418435|24418435||1|1Gn|"
"COMPLETED|01:27:29\n"
"1|01:27:42|24418435.batch|24418435.batch|499092K|1|1Gn|"
"COMPLETED|01:27:29\n"
"1|01:27:42|24418435.extern|24418435.extern|1376K|1|1Gn|"
"COMPLETED|00:00:00\n"
"1|21:14:48|25569410|25569410||1|4000Mc|RUNNING|19:28:36\n"
"1|21:14:49|25569410.extern|25569410.extern|1548K|1|4000Mc|"
"RUNNING|00:00:00\n"
"1|21:14:43|25569410.0|25569410.0|62328K|1|4000Mc|RUNNING|19:28:36\n"
)
mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result)
result = runner.invoke(
console.main,
"--no-color --not-state unning "
"25569410 24418435 --format JobID%>,State,Elapsed%>,CPUEff,MemEff",
)
assert result.exit_code == 0
# remove header
output = result.output.split("\n")
print(output)
assert output[0] == "Unknown state UNNING"
assert output[1] == "No valid states provided to exclude"
# output 2 is header
assert output[3].split() == ["24418435", "COMPLETED", "01:27:42", "99.8%", "47.7%"]
assert output[4].split() == ["25569410", "RUNNING", "21:14:48", "---", "---"]
assert output[5].split() == []
def test_no_state(mocker, mock_inquirer):
"""Unknown states produce empty output."""
mocker.patch("reportseff.console.which", return_value=True)
runner = CliRunner()
sub_result = mocker.MagicMock()
sub_result.returncode = 0
sub_result.stdout = (
"1|01:27:42|24418435|24418435||1|1Gn|"
"COMPLETED|01:27:29\n"
"1|01:27:42|24418435.batch|24418435.batch|499092K|1|1Gn|"
"COMPLETED|01:27:29\n"
"1|01:27:42|24418435.extern|24418435.extern|1376K|1|1Gn|"
"COMPLETED|00:00:00\n"
"1|21:14:48|25569410|25569410||1|4000Mc|RUNNING|19:28:36\n"
"1|21:14:49|25569410.extern|25569410.extern|1548K|1|4000Mc|"
"RUNNING|00:00:00\n"
"1|21:14:43|25569410.0|25569410.0|62328K|1|4000Mc|RUNNING|19:28:36\n"
)
mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result)
result = runner.invoke(console.main, "--no-color --state ZZ 25569410 24418435")
assert result.exit_code == 0
# remove header
output = result.output.split("\n")
assert output[0] == "Unknown state ZZ"
assert output[1] == "No valid states provided to include"
assert output[2].split() == [
"JobID",
"State",
"Elapsed",
"TimeEff",
"CPUEff",
"MemEff",
]
assert output[3] == ""
def test_array_job_raw_id(mocker, mock_inquirer):
"""Can find job array by base id."""
mocker.patch("reportseff.console.which", return_value=True)
runner = CliRunner()
sub_result = mocker.MagicMock()
sub_result.returncode = 0
sub_result.stdout = (
"1|00:09:34|24220929_421|24221219||1|16000Mn|"
"COMPLETED|09:28.052\n"
"1|00:09:34|24220929_421.batch|24221219.batch|5664932K|1|16000Mn|"
"COMPLETED|09:28.051\n"
"1|00:09:34|24220929_421.extern|24221219.extern|1404K|1|16000Mn|"
"COMPLETED|00:00:00\n"
)
mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result)
result = runner.invoke(
console.main,
"--no-color 24221219 --format JobID%>,State,Elapsed%>,CPUEff,MemEff",
)
assert result.exit_code == 0
# remove header
output = result.output.split("\n")[1:-1]
assert output[0].split() == [
"24220929_421",
"COMPLETED",
"00:09:34",
"99.0%",
"34.6%",
]
assert len(output) == 1
def test_array_job_single(mocker, mock_inquirer):
"""Can get single array job element."""
mocker.patch("reportseff.console.which", return_value=True)
runner = CliRunner()
sub_result = mocker.MagicMock()
sub_result.returncode = 0
sub_result.stdout = (
"1|00:09:34|24220929_421|24221219||1|16000Mn|"
"COMPLETED|09:28.052\n"
"1|00:09:34|24220929_421.batch|24221219.batch|5664932K|1|16000Mn|"
"COMPLETED|09:28.051\n"
"1|00:09:34|24220929_421.extern|24221219.extern|1404K|1|16000Mn|"
"COMPLETED|00:00:00\n"
"1|00:09:33|24220929_431|24221220||1|16000Mn|"
"PENDING|09:27.460\n"
"1|00:09:33|24220929_431.batch|24221220.batch|5518572K|1|16000Mn|"
"PENDING|09:27.459\n"
"1|00:09:33|24220929_431.extern|24221220.extern|1400K|1|16000Mn|"
"PENDING|00:00:00\n"
)
mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result)
result = runner.invoke(
console.main,
"--no-color 24220929_421 --format JobID%>,State,Elapsed%>,CPUEff,MemEff",
)
assert result.exit_code == 0
# remove header
output = result.output.split("\n")[1:-1]
assert output[0].split() == [
"24220929_421",
"COMPLETED",
"00:09:34",
"99.0%",
"34.6%",
]
assert len(output) == 1
def test_array_job_base(mocker, mock_inquirer):
"""Base array job id gets all elements."""
mocker.patch("reportseff.console.which", return_value=True)
runner = CliRunner()
sub_result = mocker.MagicMock()
sub_result.returncode = 0
sub_result.stdout = (
"1|00:09:34|24220929_421|24221219||1|16000Mn|"
"COMPLETED|09:28.052\n"
"1|00:09:34|24220929_421.batch|24221219.batch|5664932K|1|16000Mn|"
"COMPLETED|09:28.051\n"
"1|00:09:34|24220929_421.extern|24221219.extern|1404K|1|16000Mn|"
"COMPLETED|00:00:00\n"
"1|00:09:33|24220929_431|24221220||1|16000Mn|"
"PENDING|09:27.460\n"
"1|00:09:33|24220929_431.batch|24221220.batch|5518572K|1|16000Mn|"
"PENDING|09:27.459\n"
"1|00:09:33|24220929_431.extern|24221220.extern|1400K|1|16000Mn|"
"PENDING|00:00:00\n"
)
mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result)
result = runner.invoke(
console.main,
"--no-color 24220929 --format JobID%>,State,Elapsed%>,CPUEff,MemEff",
)
assert result.exit_code == 0
# remove header
output = result.output.split("\n")[1:-1]
assert output[0].split() == [
"24220929_421",
"COMPLETED",
"00:09:34",
"99.0%",
"34.6%",
]
assert output[1].split() == ["24220929_431", "PENDING", "---", "---", "---"]
assert len(output) == 2
def test_sacct_error(mocker, mock_inquirer):
"""Subprocess errors in sacct are reported."""
mocker.patch("reportseff.console.which", return_value=True)
runner = CliRunner()
sub_result = mocker.MagicMock()
sub_result.returncode = 1
sub_result.stdout = ""
mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result)
result = runner.invoke(console.main, "--no-color 9999999")
assert result.exit_code == 1
assert "Error running sacct!" in result.output
def test_empty_sacct(mocker, mock_inquirer):
"""Emtpy sacct results produce just the header line."""
mocker.patch("reportseff.console.which", return_value=True)
runner = CliRunner()
sub_result = mocker.MagicMock()
sub_result.returncode = 0
sub_result.stdout = ""
mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result)
result = runner.invoke(console.main, "--no-color 9999999")
assert result.exit_code == 0
output = result.output.split("\n")[:-1]
assert output[0].split() == [
"JobID",
"State",
"Elapsed",
"TimeEff",
"CPUEff",
"MemEff",
]
assert len(output) == 1
def test_failed_no_mem(mocker, mock_inquirer):
"""Empty memory entries produce valid output."""
mocker.patch("reportseff.console.which", return_value=True)
runner = CliRunner()
sub_result = mocker.MagicMock()
sub_result.returncode = 0
sub_result.stdout = (
"8|00:00:12|23000381|23000381||1|4000Mc|FAILED|00:00:00\n"
"8|00:00:12|23000381.batch|23000381.batch||1|4000Mc|"
"FAILED|00:00:00\n"
"8|00:00:12|23000381.extern|23000381.extern|1592K|1|4000Mc|"
"COMPLETED|00:00:00\n"
)
mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result)
result = runner.invoke(console.main, "--no-color 23000381")
assert result.exit_code == 0
# remove header
output = result.output.split("\n")[1:-1]
assert output[0].split() == ["23000381", "FAILED", "00:00:12", "---", "---", "0.0%"]
assert len(output) == 1
def test_canceled_by_other(mocker, mock_inquirer):
"""Canceled states are correctly handled."""
mocker.patch("reportseff.console.which", return_value=True)
runner = CliRunner()
sub_result = mocker.MagicMock()
sub_result.returncode = 0
sub_result.stdout = (
"16|00:00:00|23000233|23000233||1|4000Mc|CANCELLED by 129319|00:00:00\n"
)
mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result)
result = runner.invoke(console.main, "--no-color 23000233 --state CA")
assert result.exit_code == 0
# remove header
output = result.output.split("\n")[1:-1]
assert output[0].split() == [
"23000233",
"CANCELLED",
"00:00:00",
"---",
"---",
"0.0%",
]
assert len(output) == 1
def test_zero_runtime(mocker, mock_inquirer):
"""Entries with zero runtime produce reasonable timeeff."""
mocker.patch("reportseff.console.which", return_value=True)
runner = CliRunner()
sub_result = mocker.MagicMock()
sub_result.returncode = 0
sub_result.stdout = (
"8|00:00:00|23000210|23000210||1|20000Mn|"
"FAILED|00:00.007\n"
"8|00:00:00|23000210.batch|23000210.batch|1988K|1|20000Mn|"
"FAILED|00:00.006\n"
"8|00:00:00|23000210.extern|23000210.extern|1556K|1|20000Mn|"
"COMPLETED|00:00:00\n"
)
mocker.patch("reportseff.db_inquirer.subprocess.run", return_value=sub_result)
result = runner.invoke(console.main, "--no-color 23000210")
assert result.exit_code == 0
# remove header
output = result.output.split("\n")[1:-1]
assert output[0].split() == ["23000210", "FAILED", "00:00:00", "---", "---", "0.0%"]
assert len(output) == 1
def test_no_systems(mocker, mock_inquirer):
"""When no scheduling system is found, raise error."""
mocker.patch("reportseff.console.which", return_value=None)
runner = CliRunner()
result = runner.invoke(console.main, "--no-color 23000210")
assert result.exit_code == 1
# remove header
output = result.output.split("\n")
assert output[0] == "No supported scheduling systems found!"
| 36.208333
| 88
| 0.641337
| 3,292
| 24,332
| 4.643985
| 0.080194
| 0.028257
| 0.021978
| 0.051282
| 0.830782
| 0.814037
| 0.795788
| 0.787873
| 0.76112
| 0.76112
| 0
| 0.176236
| 0.194764
| 24,332
| 671
| 89
| 36.262295
| 0.604042
| 0.05725
| 0
| 0.694853
| 0
| 0.101103
| 0.398894
| 0.272145
| 0
| 0
| 0
| 0
| 0.125
| 1
| 0.049632
| false
| 0
| 0.011029
| 0.001838
| 0.0625
| 0.001838
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
30c67f04ab108c99992072a8348926a8f311530d
| 197
|
py
|
Python
|
events/admin.py
|
kobihk/lets-meet
|
b5449b98529dbc80c65a238c6fb415c54b2798b9
|
[
"MIT"
] | null | null | null |
events/admin.py
|
kobihk/lets-meet
|
b5449b98529dbc80c65a238c6fb415c54b2798b9
|
[
"MIT"
] | null | null | null |
events/admin.py
|
kobihk/lets-meet
|
b5449b98529dbc80c65a238c6fb415c54b2798b9
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import EventParticipant, Event, PossibleMeeting
admin.site.register(PossibleMeeting)
admin.site.register(EventParticipant)
admin.site.register(Event)
| 28.142857
| 60
| 0.847716
| 23
| 197
| 7.26087
| 0.478261
| 0.161677
| 0.305389
| 0.383234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071066
| 197
| 6
| 61
| 32.833333
| 0.912568
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
ebdc3238b0a454f297fd3a5e6564ca963f00a857
| 47
|
py
|
Python
|
x7/x7/__init__.py
|
gribbg/x7
|
f3ff60d1891f828ff48e6c006a0cb0f0fd678414
|
[
"BSD-2-Clause"
] | null | null | null |
x7/x7/__init__.py
|
gribbg/x7
|
f3ff60d1891f828ff48e6c006a0cb0f0fd678414
|
[
"BSD-2-Clause"
] | null | null | null |
x7/x7/__init__.py
|
gribbg/x7
|
f3ff60d1891f828ff48e6c006a0cb0f0fd678414
|
[
"BSD-2-Clause"
] | null | null | null |
from .__version__ import __version__ # noqa
| 23.5
| 46
| 0.765957
| 5
| 47
| 5.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.191489
| 47
| 1
| 47
| 47
| 0.736842
| 0.085106
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ebe0cc714b4960584072235a5f5b1c0ef7ca62d9
| 18
|
py
|
Python
|
src/ros_say/__init__.py
|
tsaoyu/ROS-SAY
|
fb187e6aa88e46bf0dbac2a7a79231d08a82bf5c
|
[
"MIT"
] | null | null | null |
src/ros_say/__init__.py
|
tsaoyu/ROS-SAY
|
fb187e6aa88e46bf0dbac2a7a79231d08a82bf5c
|
[
"MIT"
] | null | null | null |
src/ros_say/__init__.py
|
tsaoyu/ROS-SAY
|
fb187e6aa88e46bf0dbac2a7a79231d08a82bf5c
|
[
"MIT"
] | null | null | null |
from .say import *
| 18
| 18
| 0.722222
| 3
| 18
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 18
| 1
| 18
| 18
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ccd922586da033400699bb60f6770199b7dc6989
| 208
|
py
|
Python
|
packages/watchmen-pipeline-kernel/src/watchmen_pipeline_kernel/pipeline_schema_interface/create_queue_pipeline.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-pipeline-kernel/src/watchmen_pipeline_kernel/pipeline_schema_interface/create_queue_pipeline.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-pipeline-kernel/src/watchmen_pipeline_kernel/pipeline_schema_interface/create_queue_pipeline.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
from typing import Callable
from watchmen_data_kernel.storage import TopicTrigger
from watchmen_data_kernel.topic_schema import TopicSchema
CreateQueuePipeline = Callable[[TopicSchema, TopicTrigger], None]
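CreateQueuePipeline is a Callable alias: any function taking a TopicSchema and a TopicTrigger and returning None satisfies it. A hedged sketch of how such an alias is consumed; the enqueue_trigger function below is illustrative, not from the watchmen codebase.

def enqueue_trigger(schema: TopicSchema, trigger: TopicTrigger) -> None:
    # a type checker accepts this function wherever CreateQueuePipeline is expected
    print(f"queueing trigger for schema {schema}")

create_pipeline: CreateQueuePipeline = enqueue_trigger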
| 29.714286
| 65
| 0.865385
| 24
| 208
| 7.291667
| 0.583333
| 0.137143
| 0.182857
| 0.251429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091346
| 208
| 6
| 66
| 34.666667
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ccea23bb1001b3773412135eabfb6a577f1387e2
| 177
|
py
|
Python
|
synergetic/__init__.py
|
mitchr1598/synergetic
|
1017b0d2fa1256153007be5251941f31a29c5206
|
[
"MIT"
] | null | null | null |
synergetic/__init__.py
|
mitchr1598/synergetic
|
1017b0d2fa1256153007be5251941f31a29c5206
|
[
"MIT"
] | null | null | null |
synergetic/__init__.py
|
mitchr1598/synergetic
|
1017b0d2fa1256153007be5251941f31a29c5206
|
[
"MIT"
] | null | null | null |
from synergetic.synergetic_session import Synergetic
from synergetic.School import school
from synergetic.Attendance import Attendance
from synergetic.Schedule import Schedule
| 29.5
| 52
| 0.881356
| 21
| 177
| 7.380952
| 0.333333
| 0.36129
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096045
| 177
| 5
| 53
| 35.4
| 0.96875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
693a7d43c683b4a5c9eb8641b6ef2c27a67dfb87
| 27
|
py
|
Python
|
dictim/__init__.py
|
jamesabel/dictim
|
de8da78a76cba9d76098f57dfed62adc9581702f
|
[
"MIT"
] | 2
|
2020-09-13T06:12:51.000Z
|
2021-07-10T22:38:19.000Z
|
dictim/__init__.py
|
jamesabel/dictim
|
de8da78a76cba9d76098f57dfed62adc9581702f
|
[
"MIT"
] | null | null | null |
dictim/__init__.py
|
jamesabel/dictim
|
de8da78a76cba9d76098f57dfed62adc9581702f
|
[
"MIT"
] | null | null | null |
from .dictim import dictim
| 13.5
| 26
| 0.814815
| 4
| 27
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
15ca1cf05576eb2abed9fe44ab8062e7fc386232
| 9,648
|
py
|
Python
|
UTILS/check_similarity.py
|
emersonrafaels/ocr_tables
|
11e696422f6fd8508fdc92ffe9a7d14be319e51f
|
[
"MIT"
] | null | null | null |
UTILS/check_similarity.py
|
emersonrafaels/ocr_tables
|
11e696422f6fd8508fdc92ffe9a7d14be319e51f
|
[
"MIT"
] | null | null | null |
UTILS/check_similarity.py
|
emersonrafaels/ocr_tables
|
11e696422f6fd8508fdc92ffe9a7d14be319e51f
|
[
"MIT"
] | null | null | null |
"""
MICROSERVIÇO PARA COMPARAÇÃO DE STRINGS USANDO
A DISTÂNCIA DE LEVENSHTEIN: MÉTRICA PARA MEDIR A DISTÂNCIA
ENTRE DUAS SEQUÊNCIAS DE PALAVRAS.
EM OUTRAS PALAVRAS,
MEDE-SE O NÚMERO MÍNIMO DE EDIÇÕES QUE VOCÊ PRECISA FAZER
PARA ALTERAR UMA SEQUÊNCIA DE UMA PALAVRA NA OUTRA.
ESSAS EDIÇÕES PODEM SER INSERÇÕES, EXCLUSÕES OU SUBSTITUIÇÕES.
ESSE MICROSERVIÇO CONTÉM UMA SÉRIE DE FUNÇÕES
PARA LIDAR COM A MEDIDA DE DISTÂNCIA:
DISPONIBILIZANDO COMO OPÇÕES:
1) PRÉ PROCESSAMENTO DAS STRINGS, ÚTIL PARA QUANDO ALGO TEM UMA VARIAÇÃO CONSIDERÁVEL DE GRAFIA
EX: "EMERSON V. RAFAEL" COMPARADO COM "Emerson v. Rafael"
2) EM UMA LISTA DE ESCOLHAS POSSÍVEIS (AQUI DEFINIDA COMO CHOICES),
OBTER O VALOR DE MÁXIMA SIMILARIDADE
A UMA DETERMINADA PALAVRA (AQUI DEFINIDA COMO QUERY)
3) EM UMA LISTA DE ESCOLHAS POSSÍVEIS (AQUI DEFINIDA COMO CHOICES),
OBTER TODOS OS PERCENTUAIS DE SIMILARIDADE.
A UMA DETERMINADA PALAVRA (AQUI DEFINIDA COMO QUERY)
# Arguments
query - Required : Palavra a ser comparada
ou utilizada como base para obter
as similaridades
dentre as possibilidades (String)
choices - Required : Palavra ser comparada com a query ou a lista
de palavras a serem comparadas
com a query (Str | List)
pre_processing - Optional : Definindo se deve haver
pré processamento (Boolean)
# Returns
percentual_similarity - Required : Percentual de similaridade (String | List)
"""
__version__ = "1.0"
__author__ = """Emerson V. Rafael (EMERVIN)"""
__data_atualizacao__ = "06/10/2021"
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
from typing import Union
from pydantic import validate_arguments
class Check_Similarity():
def __init__(self):
"""
MICROSERVIÇO PARA COMPARAÇÃO DE STRINGS USANDO
A DISTÂNCIA DE LEVENSHTEIN: MÉTRICA PARA MEDIR A DISTÂNCIA
ENTRE DUAS SEQUÊNCIAS DE PALAVRAS.
EM OUTRAS PALAVRAS,
MEDE-SE O NÚMERO MÍNIMO DE EDIÇÕES QUE VOCÊ PRECISA FAZER
PARA ALTERAR UMA SEQUÊNCIA DE UMA PALAVRA NA OUTRA.
ESSAS EDIÇÕES PODEM SER INSERÇÕES, EXCLUSÕES OU SUBSTITUIÇÕES.
ESSE MICROSERVIÇO CONTÉM UMA SÉRIE DE FUNÇÕES
PARA LIDAR COM A MEDIDA DE DISTÂNCIA:
DISPONIBILIZANDO COMO OPÇÕES:
1) PRÉ PROCESSAMENTO DAS STRINGS, ÚTIL PARA QUANDO ALGO TEM UMA VARIAÇÃO CONSIDERÁVEL DE GRAFIA
EX: "EMERSON V. RAFAEL" COMPARADO COM "Emerson v. Rafael"
2) EM UMA LISTA DE ESCOLHAS POSSÍVEIS (AQUI DEFINIDA COMO CHOICES),
OBTER O VALOR DE MÁXIMA SIMILARIDADE
A UMA DETERMINADA PALAVRA (AQUI DEFINIDA COMO QUERY)
3) EM UMA LISTA DE ESCOLHAS POSSÍVEIS (AQUI DEFINIDA COMO CHOICES),
OBTER TODOS OS PERCENTUAIS DE SIMILARIDADE.
A UMA DETERMINADA PALAVRA (AQUI DEFINIDA COMO QUERY)
# Arguments
query - Required : Palavra a ser comparada
ou utilizada como base para obter
as similaridades
dentre as possibilidades (String)
choices - Required : Palavra ser comparada com a query ou a lista
de palavras a serem comparadas
com a query (String | List)
pre_processing - Optional : Definindo se deve haver
pré processamento (Boolean)
limit - Optional : Limite de resultados
de similaridade (Integer)
# Returns
percentual_similarity - Required : Percentual de similaridade (String | List)
"""
pass
@staticmethod
def pre_processing_string(value_to_processing):
"""
REALIZA O PRÉ PROCESSAMENTO DAS STRINGS.
PARA LISTAS ENVIADAS, UTILIZA LIST COMPREHESION
PARA ATUALIZAR CADA UMA DAS STRINGS DA LISTA
1) CONVERTE PARA LOWER CASE
2) RETIRA ESPAÇOS EM BRANCO ANTES E DEPOIS DA STRING
# Arguments
value_to_processing - Required : Valores para realizar
o pré processamento (String | List)
# Returns
value_processing - Required : Valores após processamento (String | List)
"""
if isinstance(value_to_processing, str):
value_processing = value_to_processing.lower().strip()
return value_processing
elif isinstance(value_to_processing, list):
value_processing = [str(value).lower().strip() for value in value_to_processing]
return value_processing
else:
return value_to_processing
@staticmethod
@validate_arguments
def get_values_similarity(query: str, choices: Union[str, list],
pre_processing=False, limit=5):
"""
OBTÉM OS VALORES DE SIMILARIDADE PARA TODOS OS ITENS DE CHOICES.
1) COMPARA QUERY COM CADA ITEM DE CHOICES
2) OBTÉM O VALOR DE SIMILARIDADE EM CADA COMPARAÇÃO
3) RETORNA UMA LISTA DE TUPLAS CONTENDO ITEM E PERCENTUAL DE SIMILARIDADE.
# Arguments
query - Required : Palavra a ser comparada
ou utilizada como base para obter
as similaridades
dentre as possibilidades (String)
choices - Required : Palavra ser comparada com a query ou a lista
de palavras a serem comparadas
com a query (String | List)
pre_processing - Optional : Definindo se deve haver
pré processamento (Boolean)
limit - Optional : Limite de resultados
de similaridade (Integer)
# Returns
percentual_similarity - Required : Percentual de similaridade (String | List)
"""
# CHECKING WHETHER PRE-PROCESSING IS NEEDED
if pre_processing:
# PERFORMING THE PRE-PROCESSING
query = Check_Similarity.pre_processing_string(query)
choices = Check_Similarity.pre_processing_string(choices)
if isinstance(choices, str):
choices = choices.split(",")
# RETURNING THE LIST OF TUPLES
# (VALUE, SIMILARITY_PERCENTAGE)
return process.extract(query=query, choices=choices, limit=limit)
@staticmethod
def get_value_max_similarity(query: str, choices: Union[str, list],
pre_processing=False, limit=5):
"""
OBTÉM O ITEM QUE POSSUI MAIOR SIMILARIDADE À QUERY.
1) COMPARA QUERY COM CADA ITEM DE CHOICES
2) OBTÉM O VALOR DE SIMILARIDADE EM CADA COMPARAÇÃO
3) SELECIONA A MAIOR SIMILARIDADE
4) RETORNA UMA LISTA DE ÚNICO VALOR CONTENDO ITEM E
PERCENTUAL DE MÁXIMA SIMILARIDADE.
# Arguments
query - Required : Palavra a ser comparada
ou utilizada como base para obter
as similaridades
dentre as possibilidades (String)
choices - Required : Palavra ser comparada com a query ou a lista
de palavras a serem comparadas
com a query (String | List)
pre_processing - Optional : Definindo se deve haver
pré processamento (Boolean)
limit - Optional : Limite de resultados
de similaridade (Integer)
# Returns
percentual_similarity - Required : Percentual de similaridade (String | List)
"""
# CHECKING WHETHER PRE-PROCESSING IS NEEDED
if pre_processing:
query = Check_Similarity.pre_processing_string(query)
choices = Check_Similarity.pre_processing_string(choices)
if isinstance(choices, str):
choices = choices.split(",")
# RETURNING THE SINGLE (VALUE, SIMILARITY_PERCENTAGE) TUPLE
# WITH THE MAXIMUM SIMILARITY (process.extractOne TAKES NO limit ARGUMENT)
return process.extractOne(query=query, choices=choices)
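A short usage sketch of the class above; the choices list and the scores in the comments are illustrative. process.extract returns (choice, score) tuples sorted by similarity, while extractOne returns only the best match.

checker = Check_Similarity()
choices = ["Emerson V. Rafael", "Emerson Rafael", "Everton Rafael"]
# all similarity percentages, after lowercasing/stripping both sides
print(checker.get_values_similarity("EMERSON V. RAFAEL ", choices, pre_processing=True))
# e.g. [('emerson v. rafael', 100), ('emerson rafael', 95), ...]
# only the single best match
print(checker.get_value_max_similarity("EMERSON V. RAFAEL ", choices, pre_processing=True))
# e.g. ('emerson v. rafael', 100)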
| 39.379592
| 107
| 0.534826
| 914
| 9,648
| 5.56674
| 0.210066
| 0.03577
| 0.025157
| 0.009434
| 0.778695
| 0.753145
| 0.73978
| 0.73978
| 0.73978
| 0.726219
| 0
| 0.004887
| 0.427342
| 9,648
| 244
| 108
| 39.540984
| 0.916018
| 0.68864
| 0
| 0.435897
| 0
| 0
| 0.022826
| 0
| 0
| 0
| 0
| 0.012295
| 0
| 1
| 0.102564
| false
| 0.025641
| 0.102564
| 0
| 0.358974
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
15e1a5dd4f5f4d836965a3fa081b9914007f5127
| 123
|
py
|
Python
|
blaze/__init__.py
|
henry1jin/alohamora
|
e51e2488ecdf3e9692d5bb6b25ebc88622087c20
|
[
"MIT"
] | 5
|
2020-12-16T03:13:59.000Z
|
2022-03-06T07:16:39.000Z
|
blaze/__init__.py
|
henry1jin/alohamora
|
e51e2488ecdf3e9692d5bb6b25ebc88622087c20
|
[
"MIT"
] | 9
|
2020-09-25T23:25:59.000Z
|
2022-03-11T23:45:14.000Z
|
blaze/__init__.py
|
henry1jin/alohamora
|
e51e2488ecdf3e9692d5bb6b25ebc88622087c20
|
[
"MIT"
] | 3
|
2019-10-16T21:22:07.000Z
|
2020-07-21T13:38:22.000Z
|
""" Initialize the blaze package and import __version__ into the global namespace """
from .__version__ import __version__
| 41
| 85
| 0.804878
| 15
| 123
| 5.8
| 0.733333
| 0.298851
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138211
| 123
| 2
| 86
| 61.5
| 0.820755
| 0.626016
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
15e9c489b3017d100ab4b9d772279ddcd41fc394
| 3,524
|
py
|
Python
|
firecares/firecares_core/autocomplete_light_registry.py
|
FireCARES/firecares
|
aa708d441790263206dd3a0a480eb6ca9031439d
|
[
"MIT"
] | 12
|
2016-01-30T02:28:35.000Z
|
2019-05-29T15:49:56.000Z
|
firecares/firecares_core/autocomplete_light_registry.py
|
FireCARES/firecares
|
aa708d441790263206dd3a0a480eb6ca9031439d
|
[
"MIT"
] | 455
|
2015-07-27T20:21:56.000Z
|
2022-03-11T23:26:20.000Z
|
firecares/firecares_core/autocomplete_light_registry.py
|
FireCARES/firecares
|
aa708d441790263206dd3a0a480eb6ca9031439d
|
[
"MIT"
] | 14
|
2015-07-29T09:45:53.000Z
|
2020-10-21T20:03:17.000Z
|
import autocomplete_light.shortcuts as al
from .models import Address
from django.contrib.auth import get_user_model
from firecares.firestation.models import FireDepartment, FireStation
User = get_user_model()
al.register(Address,
# Just like in ModelAdmin.search_fields
search_fields=['address_line1', 'city', 'state_province', 'postal_code'],
attrs={
# This will set the input placeholder attribute:
'placeholder': 'Address',
# This will set the yourlabs.Autocomplete.minimumCharacters
# options, the naming conversion is handled by jQuery
'data-autocomplete-minimum-characters': 3,
},
# This will set the data-widget-maximum-values attribute on the
# widget container element, and will be set to
# yourlabs.Widget.maximumValues (jQuery handles the naming
# conversion).
widget_attrs={
'data-widget-maximum-values': 100,
# Enable modern-style widget !
'class': 'modern-style',
},)
al.register(FireDepartment,
# Just like in ModelAdmin.search_fields
search_fields=['name', 'fdid', 'id'],
attrs={
# This will set the input placeholder attribute:
'placeholder': 'Fire Department',
# This will set the yourlabs.Autocomplete.minimumCharacters
# options, the naming conversion is handled by jQuery
'data-autocomplete-minimum-characters': 1,
},
# This will set the data-widget-maximum-values attribute on the
# widget container element, and will be set to
# yourlabs.Widget.maximumValues (jQuery handles the naming
# conversion).
widget_attrs={
'data-widget-maximum-values': 100,
# Enable modern-style widget !
'class': 'modern-style',
},)
al.register(FireStation,
# Just like in ModelAdmin.search_fields
search_fields=['name'],
attrs={
# This will set the input placeholder attribute:
'placeholder': 'Fire Station',
# This will set the yourlabs.Autocomplete.minimumCharacters
# options, the naming conversion is handled by jQuery
'data-autocomplete-minimum-characters': 1,
},
# This will set the data-widget-maximum-values attribute on the
# widget container element, and will be set to
# yourlabs.Widget.maximumValues (jQuery handles the naming
# conversion).
widget_attrs={
'data-widget-maximum-values': 100,
# Enable modern-style widget !
'class': 'modern-style',
},)
# TODO: Check if this autocomplete is still needed
al.register(User,
search_fields=['username'],
attrs={
'data-autocomplete-minimum-characters': 1,
},
choices=User.objects.filter(is_active=True))
al.register(User,
name='UserEmailAutocomplete',
search_fields=['email'],
attrs={
'data-autocomplete-minimum-characters': 1,
},
choices=User.objects.filter(is_active=True).exclude(username='AnonymousUser'),
choice_value=lambda self, choice: choice.email)
| 40.045455
| 90
| 0.578036
| 346
| 3,524
| 5.823699
| 0.271676
| 0.035732
| 0.049132
| 0.062531
| 0.760298
| 0.760298
| 0.760298
| 0.760298
| 0.738462
| 0.663524
| 0
| 0.006399
| 0.334847
| 3,524
| 87
| 91
| 40.505747
| 0.853242
| 0.354994
| 0
| 0.469388
| 0
| 0
| 0.213458
| 0.124332
| 0
| 0
| 0
| 0.011494
| 0
| 1
| 0
| false
| 0
| 0.081633
| 0
| 0.081633
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
15f90e96b745652cbbbe1d2e9091faf642a6fd1a
| 39
|
py
|
Python
|
facecreator.py
|
rafitricker/facecreator
|
53d297a8c99b55aafaebaf80367f8e0811382406
|
[
"Apache-2.0"
] | null | null | null |
facecreator.py
|
rafitricker/facecreator
|
53d297a8c99b55aafaebaf80367f8e0811382406
|
[
"Apache-2.0"
] | null | null | null |
facecreator.py
|
rafitricker/facecreator
|
53d297a8c99b55aafaebaf80367f8e0811382406
|
[
"Apache-2.0"
] | 1
|
2020-07-11T21:17:02.000Z
|
2020-07-11T21:17:02.000Z
|
print("Marshal Rafi Facebook Tricker.")
| 39
| 39
| 0.794872
| 5
| 39
| 6.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 39
| 1
| 39
| 39
| 0.861111
| 0
| 0
| 0
| 0
| 0
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
c63ed880909c0875542fb8e5e6101f0aead44ab9
| 108
|
py
|
Python
|
thglibs/THG_extra/thg_exemplo/darkcode.py
|
darkcode357/thg_lib
|
c1052bcd85f705ff8be404b7a28964eabef2ed45
|
[
"MIT"
] | null | null | null |
thglibs/THG_extra/thg_exemplo/darkcode.py
|
darkcode357/thg_lib
|
c1052bcd85f705ff8be404b7a28964eabef2ed45
|
[
"MIT"
] | 52
|
2018-10-25T20:29:17.000Z
|
2018-10-25T20:45:02.000Z
|
thglibs/THG_extra/thg_exemplo/darkcode.py
|
darkcode357/thg_lib
|
c1052bcd85f705ff8be404b7a28964eabef2ed45
|
[
"MIT"
] | null | null | null |
class exemplo():
def __init__(self):
pass
def thg_print(self, darkcode):
print(darkcode)
| 15.428571
| 28
| 0.592593
| 12
| 108
| 4.916667
| 0.75
| 0.440678
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.305556
| 108
| 6
| 29
| 18
| 0.786667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0.2
| 0
| 0
| 0.6
| 0.4
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
d66236a33cd70b49cef4f0480bc07c605b0db1f4
| 20
|
py
|
Python
|
const/__init__.py
|
SkyZH/verbose-adventure
|
98ee76b589c166e1b3492d3710c06cdc7d995e6f
|
[
"MIT"
] | null | null | null |
const/__init__.py
|
SkyZH/verbose-adventure
|
98ee76b589c166e1b3492d3710c06cdc7d995e6f
|
[
"MIT"
] | null | null | null |
const/__init__.py
|
SkyZH/verbose-adventure
|
98ee76b589c166e1b3492d3710c06cdc7d995e6f
|
[
"MIT"
] | null | null | null |
from . import words
| 10
| 19
| 0.75
| 3
| 20
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 20
| 1
| 20
| 20
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d68730989cbe9e697fa40ecfc6c3497b617fe8bc
| 4,648
|
py
|
Python
|
tests/test_image.py
|
hackerYM/text-inside-box
|
824696a678a85271469570034f394b4715d1c8fb
|
[
"MIT"
] | null | null | null |
tests/test_image.py
|
hackerYM/text-inside-box
|
824696a678a85271469570034f394b4715d1c8fb
|
[
"MIT"
] | null | null | null |
tests/test_image.py
|
hackerYM/text-inside-box
|
824696a678a85271469570034f394b4715d1c8fb
|
[
"MIT"
] | null | null | null |
"""
Testing module for Api Image
"""
import pytest
from flask import current_app
from http import HTTPStatus
@pytest.fixture
def req_data():
"""
Method to build the request data
"""
return {
"font_url": "https://storage.googleapis.com/dipp-massimo-development-fonts/4f2cf2b6b99d96ca.ttf",
"image_url": "https://storage.googleapis.com/dipp-massimo-development-images/1f1282fef735f349.jpg",
"text": {
"content": "Dipp inc, thinking out of how to draw a text on the box.",
"text_color": "#000000",
"border_color": "#000000"
},
"box": {
"x": 40,
"y": 100,
"width": 500,
"height": 500
}
}
def test_api_image_with_square_box(snapshot, client, req_data):
"""
HAPPY: Should draw a text box with a square box
"""
response = client.post(f"{current_app.config['API_BASE_PATH']}draw", json=req_data)
assert response.status_code == HTTPStatus.OK
snapshot.assert_match(response.get_json()["splits"])
def test_api_image_with_vertical_rectangle(snapshot, client, req_data):
"""
HAPPY: Should draw a text box with a vertical rectangle box
"""
req_data["box"]["width"] = 100
req_data["box"]["height"] = 1000
response = client.post(f"{current_app.config['API_BASE_PATH']}draw", json=req_data)
assert response.status_code == HTTPStatus.OK
snapshot.assert_match(response.get_json()["splits"])
def test_api_image_with_horizontal_rectangle(snapshot, client, req_data):
"""
HAPPY: Should draw a text box with a horizontal rectangle box
"""
req_data["box"]["width"] = 1000
req_data["box"]["height"] = 100
response = client.post(f"{current_app.config['API_BASE_PATH']}draw", json=req_data)
assert response.status_code == HTTPStatus.OK
snapshot.assert_match(response.get_json()["splits"])
def test_api_image_with_super_long_content(snapshot, client, req_data):
"""
HAPPY: Should draw a text box with a super long content
"""
req_data["text"]["content"] = "draw the text box with a super long content " * 10
response = client.post(f"{current_app.config['API_BASE_PATH']}draw", json=req_data)
assert response.status_code == HTTPStatus.OK
snapshot.assert_match(response.get_json()["splits"])
def test_400_by_small_box(snapshot, client, req_data):
"""
SAD: Should get the 400 error by the small box size
"""
req_data["box"]["width"] = 10
req_data["box"]["height"] = 10
response = client.post(f"{current_app.config['API_BASE_PATH']}draw", json=req_data)
assert response.status_code == HTTPStatus.BAD_REQUEST
snapshot.assert_match(response.get_json())
def test_400_by_wrong_width(snapshot, client, req_data):
"""
SAD: Should get the 400 error by the wrong width
"""
req_data["box"]["width"] = -100
response = client.post(f"{current_app.config['API_BASE_PATH']}draw", json=req_data)
assert response.status_code == HTTPStatus.BAD_REQUEST
snapshot.assert_match(response.get_json())
def test_400_by_wrong_height(snapshot, client, req_data):
"""
SAD: Should get the 400 error by the wrong height
"""
req_data["box"]["height"] = -100
response = client.post(f"{current_app.config['API_BASE_PATH']}draw", json=req_data)
assert response.status_code == HTTPStatus.BAD_REQUEST
snapshot.assert_match(response.get_json())
def test_400_by_wrong_text_color(snapshot, client, req_data):
"""
SAD: Should get the 400 error by the wrong text color
"""
req_data["text"]["text_color"] = "no-hex-code"
response = client.post(f"{current_app.config['API_BASE_PATH']}draw", json=req_data)
assert response.status_code == HTTPStatus.BAD_REQUEST
snapshot.assert_match(response.get_json())
def test_400_by_wrong_image_url(snapshot, client, req_data):
"""
SAD: Should get the 400 error by the wrong image url
"""
req_data["image_url"] = "no-image-url"
response = client.post(f"{current_app.config['API_BASE_PATH']}draw", json=req_data)
assert response.status_code == HTTPStatus.BAD_REQUEST
snapshot.assert_match(response.get_json())
def test_400_by_ghost_image_url(snapshot, client, req_data):
"""
SAD: Should get the 400 error by the ghost image url
"""
req_data["image_url"] = "https://storage.googleapis.com/dipp-massimo-development-images/no-found.jpg"
response = client.post(f"{current_app.config['API_BASE_PATH']}draw", json=req_data)
assert response.status_code == HTTPStatus.BAD_REQUEST
snapshot.assert_match(response.get_json())
| 32.732394
| 107
| 0.686532
| 651
| 4,648
| 4.663594
| 0.152074
| 0.076087
| 0.055995
| 0.06917
| 0.812253
| 0.79809
| 0.767787
| 0.753294
| 0.736825
| 0.736825
| 0
| 0.02734
| 0.181583
| 4,648
| 141
| 108
| 32.964539
| 0.770768
| 0.128657
| 0
| 0.416667
| 0
| 0.013889
| 0.254836
| 0.105752
| 0
| 0
| 0
| 0
| 0.277778
| 1
| 0.152778
| false
| 0
| 0.041667
| 0
| 0.208333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d68cf365b877853016c9d396f39ce3cf6750d5cf
| 98
|
py
|
Python
|
Chapter 2/Exersises/(2-2) Simple_Messages.py
|
3GamersStudios/SHSPythonWork
|
6f98ad3a25d30f2670dc48ca4f9b4cf75eb37a61
|
[
"MIT"
] | null | null | null |
Chapter 2/Exersises/(2-2) Simple_Messages.py
|
3GamersStudios/SHSPythonWork
|
6f98ad3a25d30f2670dc48ca4f9b4cf75eb37a61
|
[
"MIT"
] | null | null | null |
Chapter 2/Exersises/(2-2) Simple_Messages.py
|
3GamersStudios/SHSPythonWork
|
6f98ad3a25d30f2670dc48ca4f9b4cf75eb37a61
|
[
"MIT"
] | null | null | null |
message = "This is a message!"
print(message)
message = "This is a new message!"
print(message)
| 14
| 34
| 0.704082
| 15
| 98
| 4.6
| 0.4
| 0.318841
| 0.376812
| 0.405797
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173469
| 98
| 7
| 35
| 14
| 0.851852
| 0
| 0
| 0.5
| 0
| 0
| 0.40404
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
d697db7621b1001827d5a730d2fa4e0b7aaf6c9f
| 9,790
|
py
|
Python
|
boa3_test/tests/compiler_tests/test_python_operation.py
|
DanPopa46/neo3-boa
|
e4ef340744b5bd25ade26f847eac50789b97f3e9
|
[
"Apache-2.0"
] | null | null | null |
boa3_test/tests/compiler_tests/test_python_operation.py
|
DanPopa46/neo3-boa
|
e4ef340744b5bd25ade26f847eac50789b97f3e9
|
[
"Apache-2.0"
] | null | null | null |
boa3_test/tests/compiler_tests/test_python_operation.py
|
DanPopa46/neo3-boa
|
e4ef340744b5bd25ade26f847eac50789b97f3e9
|
[
"Apache-2.0"
] | null | null | null |
from boa3.exception.CompilerError import MismatchedTypes
from boa3_test.tests.boa_test import BoaTest
from boa3_test.tests.test_classes.testengine import TestEngine


class TestPythonOperation(BoaTest):
    default_folder: str = 'test_sc/python_operation_test'

    def test_in_str(self):
        path = self.get_contract_path('StringIn.py')
        self.compile_and_save(path)
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'main', '123', '1234')
        self.assertEqual('123' in '1234', result)
        result = self.run_smart_contract(engine, path, 'main', '42', '1234')
        self.assertEqual('42' in '1234', result)

    def test_not_in_str(self):
        path = self.get_contract_path('StringNotIn.py')
        self.compile_and_save(path)
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'main', '123', '1234')
        self.assertEqual('123' not in '1234', result)
        result = self.run_smart_contract(engine, path, 'main', '42', '1234')
        self.assertEqual('42' not in '1234', result)

    def test_str_membership_mismatched_type(self):
        path = self.get_contract_path('StringMembershipMismatchedType.py')
        self.assertCompilerLogs(MismatchedTypes, path)

    def test_in_bytes(self):
        path = self.get_contract_path('BytesIn.py')
        self.compile_and_save(path)
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'main', b'123', b'1234')
        self.assertEqual(b'123' in b'1234', result)
        result = self.run_smart_contract(engine, path, 'main', b'42', b'1234')
        self.assertEqual(b'42' in b'1234', result)
        result = self.run_smart_contract(engine, path, 'main', b'34', b'1234')
        self.assertEqual(b'34' in b'1234', result)

    def test_not_in_bytes(self):
        path = self.get_contract_path('BytesNotIn.py')
        self.compile_and_save(path)
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'main', b'123', b'1234')
        self.assertEqual(b'123' not in b'1234', result)
        result = self.run_smart_contract(engine, path, 'main', b'42', b'1234')
        self.assertEqual(b'42' not in b'1234', result)

    def test_int_in_bytes(self):
        path = self.get_contract_path('BytesMembershipWithInt.py')
        self.compile_and_save(path)
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'main', 1, b'1234')
        self.assertEqual(1 in b'1234', result)
        result = self.run_smart_contract(engine, path, 'main', 50, b'1234')
        self.assertEqual(50 in b'1234', result)

    def test_bytes_membership_mismatched_type(self):
        path = self.get_contract_path('BytesMembershipMismatchedType.py')
        self.assertCompilerLogs(MismatchedTypes, path)

    def test_in_list(self):
        path = self.get_contract_path('ListIn.py')
        self.compile_and_save(path)
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'main', 1, [1, 2, '3', '4'])
        self.assertEqual(1 in [1, 2, '3', '4'], result)
        result = self.run_smart_contract(engine, path, 'main', 3, [1, 2, '3', '4'])
        self.assertEqual(3 in [1, 2, '3', '4'], result)
        result = self.run_smart_contract(engine, path, 'main', '4', [1, 2, '3', '4'])
        self.assertEqual('4' in [1, 2, '3', '4'], result)

    def test_in_typed_list(self):
        path = self.get_contract_path('TypedListIn.py')
        self.compile_and_save(path)
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'main', 1, [1, 2, 3, 4])
        self.assertEqual(1 in [1, 2, 3, 4], result)
        result = self.run_smart_contract(engine, path, 'main', 6, [1, 2, 3, 4])
        self.assertEqual(6 in [1, 2, 3, 4], result)

    def test_not_in_list(self):
        path = self.get_contract_path('ListNotIn.py')
        self.compile_and_save(path)
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'main', 1, [1, 2, '3', '4'])
        self.assertEqual(1 not in [1, 2, '3', '4'], result)
        result = self.run_smart_contract(engine, path, 'main', 3, [1, 2, '3', '4'])
        self.assertEqual(3 not in [1, 2, '3', '4'], result)
        result = self.run_smart_contract(engine, path, 'main', '4', [1, 2, '3', '4'])
        self.assertEqual('4' not in [1, 2, '3', '4'], result)

    def test_not_in_typed_list(self):
        path = self.get_contract_path('TypedListNotIn.py')
        self.compile_and_save(path)
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'main', 1, [1, 2, 3, 4])
        self.assertEqual(1 not in [1, 2, 3, 4], result)
        result = self.run_smart_contract(engine, path, 'main', 6, [1, 2, 3, 4])
        self.assertEqual(6 not in [1, 2, 3, 4], result)

    def test_list_membership_mismatched_type(self):
        path = self.get_contract_path('ListMembershipMismatchedType.py')
        self.assertCompilerLogs(MismatchedTypes, path)

    def test_in_tuple(self):
        path = self.get_contract_path('TupleIn.py')
        self.compile_and_save(path)
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'main', 1, (1, 2, '3', '4'))
        self.assertEqual(1 in (1, 2, '3', '4'), result)
        result = self.run_smart_contract(engine, path, 'main', 3, (1, 2, '3', '4'))
        self.assertEqual(3 in (1, 2, '3', '4'), result)
        result = self.run_smart_contract(engine, path, 'main', '4', (1, 2, '3', '4'))
        self.assertEqual('4' in (1, 2, '3', '4'), result)

    def test_in_typed_tuple(self):
        path = self.get_contract_path('TypedTupleIn.py')
        self.compile_and_save(path)
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'main', 1, (1, 2, 3, 4))
        self.assertEqual(1 in (1, 2, 3, 4), result)
        result = self.run_smart_contract(engine, path, 'main', 6, (1, 2, 3, 4))
        self.assertEqual(6 in (1, 2, 3, 4), result)

    def test_not_in_tuple(self):
        path = self.get_contract_path('TupleNotIn.py')
        self.compile_and_save(path)
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'main', 1, (1, 2, '3', '4'))
        self.assertEqual(1 not in (1, 2, '3', '4'), result)
        result = self.run_smart_contract(engine, path, 'main', 3, (1, 2, '3', '4'))
        self.assertEqual(3 not in (1, 2, '3', '4'), result)
        result = self.run_smart_contract(engine, path, 'main', '4', (1, 2, '3', '4'))
        self.assertEqual('4' not in (1, 2, '3', '4'), result)

    def test_not_in_typed_tuple(self):
        path = self.get_contract_path('TypedTupleNotIn.py')
        self.compile_and_save(path)
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'main', 1, (1, 2, 3, 4))
        self.assertEqual(1 not in (1, 2, 3, 4), result)
        result = self.run_smart_contract(engine, path, 'main', 6, (1, 2, 3, 4))
        self.assertEqual(6 not in (1, 2, 3, 4), result)

    def test_tuple_membership_mismatched_type(self):
        path = self.get_contract_path('TupleMembershipMismatchedType.py')
        self.assertCompilerLogs(MismatchedTypes, path)

    def test_in_dict(self):
        path = self.get_contract_path('DictIn.py')
        self.compile_and_save(path)
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'main', 1, {1: '2', '4': 8})
        self.assertEqual(1 in {1: '2', '4': 8}, result)
        result = self.run_smart_contract(engine, path, 'main', '1', {1: '2', '4': 8})
        self.assertEqual('1' in {1: '2', '4': 8}, result)
        result = self.run_smart_contract(engine, path, 'main', 8, {1: '2', '4': 8})
        self.assertEqual(8 in {1: '2', '4': 8}, result)
        result = self.run_smart_contract(engine, path, 'main', '4', {1: '2', '4': 8})
        self.assertEqual('4' in {1: '2', '4': 8}, result)

    def test_in_typed_dict(self):
        path = self.get_contract_path('TypedDictIn.py')
        self.compile_and_save(path)
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'main', 1, {1: '2', 4: '8'})
        self.assertEqual(1 in {1: '2', 4: '8'}, result)
        result = self.run_smart_contract(engine, path, 'main', 3, {1: '2', 4: '8'})
        self.assertEqual(3 in {1: '2', 4: '8'}, result)

    def test_not_in_dict(self):
        path = self.get_contract_path('DictNotIn.py')
        self.compile_and_save(path)
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'main', 1, {1: '2', '4': 8})
        self.assertEqual(1 not in {1: '2', '4': 8}, result)
        result = self.run_smart_contract(engine, path, 'main', '1', {1: '2', '4': 8})
        self.assertEqual('1' not in {1: '2', '4': 8}, result)
        result = self.run_smart_contract(engine, path, 'main', 8, {1: '2', '4': 8})
        self.assertEqual(8 not in {1: '2', '4': 8}, result)
        result = self.run_smart_contract(engine, path, 'main', '4', {1: '2', '4': 8})
        self.assertEqual('4' not in {1: '2', '4': 8}, result)

    def test_not_in_typed_dict(self):
        path = self.get_contract_path('TypedDictNotIn.py')
        self.compile_and_save(path)
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'main', 1, {1: '2', 4: '8'})
        self.assertEqual(1 not in {1: '2', 4: '8'}, result)
        result = self.run_smart_contract(engine, path, 'main', 3, {1: '2', 4: '8'})
        self.assertEqual(3 not in {1: '2', 4: '8'}, result)

    def test_dict_membership_mismatched_type(self):
        path = self.get_contract_path('DictMembershipMismatchedType.py')
        self.assertCompilerLogs(MismatchedTypes, path)
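Each test above compiles a small contract (e.g. StringIn.py) and checks that its on-chain result matches native Python semantics. As a minimal sketch of what such a contract might contain (an assumption for illustration, not the repository's actual file), StringIn.py would just expose the membership test:

from boa3.builtin import public


@public
def main(substring: str, string: str) -> bool:
    # Mirrors Python's `in` operator so the test can compare against '123' in '1234'.
    return substring in string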
| 40.288066
| 85
| 0.610827
| 1,386
| 9,790
| 4.140693
| 0.058442
| 0.022304
| 0.097404
| 0.134867
| 0.890922
| 0.875065
| 0.860603
| 0.860603
| 0.759714
| 0.676076
| 0
| 0.060955
| 0.227477
| 9,790
| 242
| 86
| 40.454545
| 0.697871
| 0
| 0
| 0.454023
| 0
| 0
| 0.085495
| 0.021757
| 0
| 0
| 0
| 0
| 0.275862
| 1
| 0.126437
| false
| 0
| 0.017241
| 0
| 0.155172
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d697e37b78caa74127b345829585f93972d0cc4a
| 6,054
|
py
|
Python
|
model_utils/model.py
|
phecda-xu/FullyCNNSpeechEnhancement
|
77ff39c87263a7303f3e1d4f879a728e91b574d5
|
[
"Apache-2.0"
] | 3
|
2020-06-21T11:40:44.000Z
|
2021-04-20T01:43:54.000Z
|
model_utils/model.py
|
phecda-xu/FullyCNNSpeechEnhancement
|
77ff39c87263a7303f3e1d4f879a728e91b574d5
|
[
"Apache-2.0"
] | 1
|
2021-01-16T08:29:22.000Z
|
2021-01-23T11:26:32.000Z
|
model_utils/model.py
|
phecda-xu/FullyCNNSpeechEnhancement
|
77ff39c87263a7303f3e1d4f879a728e91b574d5
|
[
"Apache-2.0"
] | 2
|
2020-12-25T07:08:36.000Z
|
2021-03-18T12:25:29.000Z
|
# coding: utf-8
from model_utils.module import *


class FullyCNNSEModel(object):
    def __init__(self, is_training):
        self.is_training = is_training

    def encode(self, x):
        self.encode_1 = conv_bn_relu(x, 12, kernel_size=(8, 13), stride=(1, 1), is_training=self.is_training, scope="encode_1")
        self.encode_2 = conv_bn_relu(self.encode_1, 16, kernel_size=(1, 11), stride=(1, 1), is_training=self.is_training, scope="encode_2")
        self.encode_3 = conv_bn_relu(self.encode_2, 20, kernel_size=(1, 9), stride=(1, 1), is_training=self.is_training, scope="encode_3")
        self.encode_4 = conv_bn_relu(self.encode_3, 24, kernel_size=(1, 7), stride=(1, 1), is_training=self.is_training, scope="encode_4")
        encode_5 = conv_bn_relu(self.encode_4, 32, kernel_size=(1, 7), stride=(1, 1), is_training=self.is_training, scope="encode_5")  # scope renamed from "encode_8", an apparent copy-paste slip
        return encode_5

    def decode(self, x):
        x = conv_bn_relu(x, 24, kernel_size=(1, 7), stride=(1, 1), is_training=self.is_training, scope="decode_1", skip_input=self.encode_4)
        x = conv_bn_relu(x, 20, kernel_size=(1, 9), stride=(1, 1), is_training=self.is_training, scope="decode_2", skip_input=self.encode_3)
        x = conv_bn_relu(x, 16, kernel_size=(1, 11), stride=(1, 1), is_training=self.is_training, scope="decode_3", skip_input=self.encode_2)
        x = conv_bn_relu(x, 12, kernel_size=(1, 13), stride=(1, 1), is_training=self.is_training, scope="decode_4", skip_input=self.encode_1)
        x = conv_bn_relu(x, 1, kernel_size=(1, 129), stride=(1, 1), is_training=self.is_training, scope="decode_5", use_norm=False, use_act=False)
        return x

    def __call__(self, x):
        encode_out = self.encode(x)
        decode_out = self.decode(encode_out)
        return decode_out


class FullyCNNSEModelV2(object):
    def __init__(self, is_training):
        self.is_training = is_training

    def encode(self, x):
        self.encode_1 = conv_bn_relu(x, 10, kernel_size=(8, 11), stride=(1, 1), is_training=self.is_training, scope="encode_1")
        self.encode_2 = conv_bn_relu(self.encode_1, 12, kernel_size=(1, 7), stride=(1, 1), is_training=self.is_training, scope="encode_2")
        self.encode_3 = conv_bn_relu(self.encode_2, 14, kernel_size=(1, 5), stride=(1, 1), is_training=self.is_training, scope="encode_3")
        self.encode_4 = conv_bn_relu(self.encode_3, 15, kernel_size=(1, 5), stride=(1, 1), is_training=self.is_training, scope="encode_4")
        self.encode_5 = conv_bn_relu(self.encode_4, 19, kernel_size=(1, 5), stride=(1, 1), is_training=self.is_training, scope="encode_5")
        self.encode_6 = conv_bn_relu(self.encode_5, 21, kernel_size=(1, 5), stride=(1, 1), is_training=self.is_training, scope="encode_6")
        self.encode_7 = conv_bn_relu(self.encode_6, 23, kernel_size=(1, 7), stride=(1, 1), is_training=self.is_training, scope="encode_7")
        encode_8 = conv_bn_relu(self.encode_7, 25, kernel_size=(1, 11), stride=(1, 1), is_training=self.is_training, scope="encode_8")
        return encode_8

    def decode(self, x):
        x = conv_bn_relu(x, 23, kernel_size=(1, 7), stride=(1, 1), is_training=self.is_training, scope="decode_1", skip_input=self.encode_7)
        x = conv_bn_relu(x, 21, kernel_size=(1, 5), stride=(1, 1), is_training=self.is_training, scope="decode_2", skip_input=self.encode_6)
        x = conv_bn_relu(x, 19, kernel_size=(1, 5), stride=(1, 1), is_training=self.is_training, scope="decode_3", skip_input=self.encode_5)
        x = conv_bn_relu(x, 15, kernel_size=(1, 5), stride=(1, 1), is_training=self.is_training, scope="decode_4", skip_input=self.encode_4)
        x = conv_bn_relu(x, 14, kernel_size=(1, 5), stride=(1, 1), is_training=self.is_training, scope="decode_5", skip_input=self.encode_3)
        x = conv_bn_relu(x, 12, kernel_size=(1, 7), stride=(1, 1), is_training=self.is_training, scope="decode_6", skip_input=self.encode_2)
        x = conv_bn_relu(x, 10, kernel_size=(1, 11), stride=(1, 1), is_training=self.is_training, scope="decode_7", skip_input=self.encode_1)
        x = conv_bn_relu(x, 1, kernel_size=(1, 129), stride=(1, 1), is_training=self.is_training, scope="decode_8", use_norm=False, use_act=False)
        return x

    def __call__(self, x):
        encode_out = self.encode(x)
        decode_out = self.decode(encode_out)
        return decode_out


class FullyCNNSEModelV3(object):
    def __init__(self, is_training):
        self.is_training = is_training

    def simple_RCED(self, x, first_kernel, name, skip_input=None):
        encode_1 = conv_bn_relu(x, 18, kernel_size=first_kernel, stride=(1, 1), is_training=self.is_training,
                                scope="{}_encode_1".format(name))
        encode_2 = conv_bn_relu(encode_1, 30, kernel_size=(1, 5), stride=(1, 1), is_training=self.is_training,
                                scope="{}_encode_2".format(name))
        encode_3 = conv_bn_relu(encode_2, 8, kernel_size=(1, 9), stride=(1, 1), is_training=self.is_training,
                                scope="{}_decode".format(name))
        if skip_input is not None:
            encode_3 = encode_3 + skip_input
        return encode_3

    def cascaded_encoder(self, x):
        self.c_encode_1 = self.simple_RCED(x, first_kernel=(8, 9), name="CE1")
        self.c_encode_2 = self.simple_RCED(self.c_encode_1, first_kernel=(1, 9), name="CE2")
        c_encode_3 = self.simple_RCED(self.c_encode_2, first_kernel=(1, 9), name="CE3")
        return c_encode_3

    def cascaded_decoder(self, x):
        x = self.simple_RCED(x, first_kernel=(1, 9), name="CD1", skip_input=self.c_encode_2)
        x = self.simple_RCED(x, first_kernel=(1, 9), name="CD2", skip_input=self.c_encode_1)
        x = conv_bn_relu(x, 1, kernel_size=(1, 129), stride=(1, 1), is_training=self.is_training, scope="decode_final",
                         use_norm=False, use_act=False)
        return x

    def __call__(self, x):
        encode_out = self.cascaded_encoder(x)
        decode_out = self.cascaded_decoder(encode_out)
        return decode_out
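All three model variants lean on a single conv_bn_relu helper imported from model_utils.module via the star import. Its real definition is not shown here; the following is a hypothetical reconstruction inferred from the call sites above, assuming a TensorFlow 1.x codebase (the scope/is_training/skip_input arguments point that way):

import tensorflow as tf


def conv_bn_relu(x, filters, kernel_size, stride, is_training, scope,
                 skip_input=None, use_norm=True, use_act=True):
    # Hypothetical sketch: conv2d -> optional batch norm -> optional ReLU,
    # with an optional additive skip connection from the encoder.
    with tf.variable_scope(scope):
        out = tf.layers.conv2d(x, filters, kernel_size, strides=stride, padding='same')
        if use_norm:
            out = tf.layers.batch_normalization(out, training=is_training)
        if use_act:
            out = tf.nn.relu(out)
        if skip_input is not None:
            out = out + skip_input
    return out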
| 62.412371
| 146
| 0.671292
| 1,011
| 6,054
| 3.695351
| 0.07913
| 0.18469
| 0.134904
| 0.141328
| 0.872323
| 0.812902
| 0.787741
| 0.780782
| 0.765792
| 0.748394
| 0
| 0.057766
| 0.182194
| 6,054
| 96
| 147
| 63.0625
| 0.696829
| 0.002147
| 0
| 0.298701
| 0
| 0
| 0.044047
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.168831
| false
| 0
| 0.012987
| 0
| 0.350649
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d6b3dc141d74447c14db182ff05558c7feda58fb
| 108
|
py
|
Python
|
onapi/athletics/__init__.py
|
Lugal-PCZ/bbapi-toolkit
|
7e0ef7b1843d8aad4ac31f21872a69655f6167f3
|
[
"MIT"
] | 4
|
2019-12-13T13:34:17.000Z
|
2022-03-28T20:17:41.000Z
|
onapi/athletics/__init__.py
|
Lugal-PCZ/bbapi-toolkit
|
7e0ef7b1843d8aad4ac31f21872a69655f6167f3
|
[
"MIT"
] | 1
|
2019-08-20T16:30:39.000Z
|
2019-09-23T16:32:12.000Z
|
onapi/athletics/__init__.py
|
Lugal-PCZ/bbapi-toolkit
|
7e0ef7b1843d8aad4ac31f21872a69655f6167f3
|
[
"MIT"
] | null | null | null |
from . import location
from . import opponent
from . import schedule
from . import sport
from . import team
| 18
| 22
| 0.768519
| 15
| 108
| 5.533333
| 0.466667
| 0.60241
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185185
| 108
| 5
| 23
| 21.6
| 0.943182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d6d42d7f575cfbffb4cf8bb83f4ca997749a1d07
| 153
|
py
|
Python
|
src/__init__.py
|
Thrimbda/Thrive-Compiler
|
dcbdacd129909f385d030312cd83b1dfb66e74b1
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
Thrimbda/Thrive-Compiler
|
dcbdacd129909f385d030312cd83b1dfb66e74b1
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
Thrimbda/Thrive-Compiler
|
dcbdacd129909f385d030312cd83b1dfb66e74b1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Author: Macsnow
# @Date: 2017-04-13 00:42:20
# @Last Modified by: Macsnow
# @Last Modified time: 2017-04-13 00:42:21
| 25.5
| 43
| 0.601307
| 25
| 153
| 3.68
| 0.68
| 0.130435
| 0.173913
| 0.217391
| 0.26087
| 0
| 0
| 0
| 0
| 0
| 0
| 0.239669
| 0.20915
| 153
| 5
| 44
| 30.6
| 0.520661
| 0.895425
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
baad871a451d013f7c3a447ea57f989f8cc6edd7
| 1,380
|
py
|
Python
|
QAS/Patient/serializers.py
|
jinimp/QAS
|
e2417e3ddde98e763d3a9ab7e147a82222d9080f
|
[
"MIT"
] | null | null | null |
QAS/Patient/serializers.py
|
jinimp/QAS
|
e2417e3ddde98e763d3a9ab7e147a82222d9080f
|
[
"MIT"
] | null | null | null |
QAS/Patient/serializers.py
|
jinimp/QAS
|
e2417e3ddde98e763d3a9ab7e147a82222d9080f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# @author: condi
# @file: serializers.py
# @time: 19-2-20 4:32 PM
from rest_framework import serializers

from .models import Patient


class SCPatientSerializer(serializers.ModelSerializer):
    """Retrieve and create."""
    report_time = serializers.DateTimeField(format='%Y-%m-%d %H:%M:%S')
    send_time = serializers.DateTimeField(format='%Y-%m-%d %H:%M:%S')

    class Meta:
        model = Patient
        fields = ('id', 'name', 'age', 'gender', 'specimen_source',
                  'num_no', 'report_time', 'send_time')

    def validate_age(self, value):
        """Validate the age."""
        if value:
            if int(value) > 100 or int(value) < 10:
                raise serializers.ValidationError('Invalid parameter')
        return value


class UPatientSerializer(serializers.ModelSerializer):
    """Update."""
    report_time = serializers.DateTimeField(format='%Y-%m-%d %H:%M:%S', read_only=True)
    send_time = serializers.DateTimeField(format='%Y-%m-%d %H:%M:%S', read_only=True)

    class Meta:
        model = Patient
        fields = ('id', 'name', 'age', 'gender', 'specimen_source',
                  'num_no', 'report_time', 'send_time')

    def validate_age(self, value):
        """Validate the age."""
        if value:
            if int(value) > 100 or int(value) < 10:
                raise serializers.ValidationError('Invalid parameter')
        return value
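A brief usage sketch of the create/retrieve serializer's validation path (the field values here are hypothetical, and running it requires the Django app context this file lives in):

serializer = SCPatientSerializer(data={'name': 'test', 'age': 42, 'gender': 1,
                                       'specimen_source': 'lung', 'num_no': 'N-001'})
# validate_age rejects ages outside (10, 100); 42 passes, 7 or 120 would not.
print(serializer.is_valid(), serializer.errors)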
| 29.361702
| 87
| 0.593478
| 165
| 1,380
| 4.860606
| 0.406061
| 0.049875
| 0.139651
| 0.169576
| 0.713217
| 0.713217
| 0.713217
| 0.713217
| 0.713217
| 0.713217
| 0
| 0.019212
| 0.245652
| 1,380
| 46
| 88
| 30
| 0.751201
| 0.087681
| 0
| 0.692308
| 0
| 0
| 0.15235
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.076923
| 0
| 0.538462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bab7fb96a8c7527f1bc69fdcb3a624160b4e6769
| 63
|
py
|
Python
|
tests/__init__.py
|
lukaszb/humanize
|
cd00a150e48b77d38e1b2a696a02c092b5767ee0
|
[
"MIT"
] | 1
|
2017-10-11T03:02:36.000Z
|
2017-10-11T03:02:36.000Z
|
tests/__init__.py
|
lukaszb/humanize
|
cd00a150e48b77d38e1b2a696a02c092b5767ee0
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
lukaszb/humanize
|
cd00a150e48b77d38e1b2a696a02c092b5767ee0
|
[
"MIT"
] | null | null | null |
from time import *
from number import *
from filesize import *
| 15.75
| 22
| 0.761905
| 9
| 63
| 5.333333
| 0.555556
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 63
| 3
| 23
| 21
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bacd706f00aa31d69142d8ff876de3f66db1f6e4
| 440,542
|
py
|
Python
|
logistic_regression_pipeline.py
|
malwash/Simulation_Calibration
|
fd0ebd54e78694aa0d256d3837fa67642a35c54b
|
[
"Apache-2.0"
] | null | null | null |
logistic_regression_pipeline.py
|
malwash/Simulation_Calibration
|
fd0ebd54e78694aa0d256d3837fa67642a35c54b
|
[
"Apache-2.0"
] | null | null | null |
logistic_regression_pipeline.py
|
malwash/Simulation_Calibration
|
fd0ebd54e78694aa0d256d3837fa67642a35c54b
|
[
"Apache-2.0"
] | null | null | null |
import random
#1 - compact the per-pipeline score variables into a dict (lines 414-436)
#3 - compact a single execution of a pipeline into a class (lines 445-764)
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import roc_curve, roc_auc_score, classification_report, accuracy_score, confusion_matrix
from sklearn.metrics import r2_score
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import GaussianNB, CategoricalNB, BernoulliNB, MultinomialNB, ComplementNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from statistics import mean
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import accuracy_score
from dagsim.baseDS import Graph, Generic
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import csv
import simulation_notears
import simulation_bnlearn
import simulation_dagsim
import simulation_models
import simulation_pgmpy
import simulation_pomegranate
from sklearn import metrics
from sklearn import svm
# Save the linear, nonlinear, sparse and dimensional training sets of the real world for reproducibility
global pipeline_type
global linear_training
global nonlinear_training
global sparse_training
global dimensional_training
# Attempt at globalising the training set of all pipelines from the real world
pipeline_type = 1
simulation_dagsim.setup_realworld(pipeline_type, 1000, 5000)
#pipeline_type = 2
#nonlinear_training = simulation_dagsim.setup_realworld(pipeline_type, 10000, 5000)
#pipeline_type = 3
#sparse_training = simulation_dagsim.setup_realworld(pipeline_type, 10000, 5000)
#pipeline_type = 4
#dimensional_training = simulation_dagsim.setup_realworld(pipeline_type, 10000, 5000)
# import the saved training and test data from DagSim's real world
def import_real_world_csv(pipeline_type):
    global train_data
    train_data = pd.read_csv("train.csv")
    global train_data_numpy
    train_data_numpy = train_data.to_numpy()
    global x_train
    global y_train
    if pipeline_type == 4:
        x_train = train_data.iloc[:, 0:10].to_numpy().reshape([-1, 10])  # num predictors
        y_train = train_data.iloc[:, 10].to_numpy().reshape([-1]).ravel()  # outcome
    elif pipeline_type == 1 or pipeline_type == 2 or pipeline_type == 3:
        x_train = train_data.iloc[:, 0:4].to_numpy().reshape([-1, 4])  # num predictors
        y_train = train_data.iloc[:, 4].to_numpy().reshape([-1]).ravel()  # outcome
    global test_data
    global x_test
    global y_test
    test_data = pd.read_csv("test.csv")
    if pipeline_type == 4:
        x_test = test_data.iloc[:, 0:10].to_numpy().reshape([-1, 10])
        y_test = test_data.iloc[:, 10].to_numpy().reshape([-1]).ravel()
    elif pipeline_type == 1 or pipeline_type == 2 or pipeline_type == 3:
        x_test = test_data.iloc[:, 0:4].to_numpy().reshape([-1, 4])
        y_test = test_data.iloc[:, 4].to_numpy().reshape([-1]).ravel()
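# The slicing convention above (first k columns are predictors, column k is the
# outcome; k = 10 for the high-dimensional pipeline 4, else 4) can be checked on
# a toy frame. A self-contained sketch with made-up data:
#
#     import pandas as pd
#     df = pd.DataFrame({'x1': [0, 1], 'x2': [1, 0], 'x3': [0, 0], 'x4': [1, 1], 'y': [0, 1]})
#     X = df.iloc[:, 0:4].to_numpy().reshape([-1, 4])    # predictors
#     y = df.iloc[:, 4].to_numpy().reshape([-1]).ravel()  # outcome
#     assert X.shape == (2, 4) and y.shape == (2,)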
# Evaluate function for all ML techniques in the real-world
def realworld_evaluate(pipeline_type):
    import_real_world_csv(pipeline_type)
    # Decision Tree
    clf = DecisionTreeClassifier(criterion='gini')
    clf = clf.fit(x_train, y_train)
    if pipeline_type == 1:
        global real_linear_dt_scores
        y_pred = clf.predict(x_test)
        real_linear_dt_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 2:
        global real_nonlinear_dt_scores
        y_pred = clf.predict(x_test)
        real_nonlinear_dt_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 3:
        global real_sparse_dt_scores
        y_pred = clf.predict(x_test)
        real_sparse_dt_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 4:
        global real_dimension_dt_scores
        y_pred = clf.predict(x_test)
        real_dimension_dt_scores = accuracy_score(y_test, y_pred)
    clf = DecisionTreeClassifier(criterion='entropy')
    clf = clf.fit(x_train, y_train)
    if pipeline_type == 1:
        global real_linear_dt_entropy_scores
        y_pred = clf.predict(x_test)
        real_linear_dt_entropy_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 2:
        global real_nonlinear_dt_entropy_scores
        y_pred = clf.predict(x_test)
        real_nonlinear_dt_entropy_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 3:
        global real_sparse_dt_entropy_scores
        y_pred = clf.predict(x_test)
        real_sparse_dt_entropy_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 4:
        global real_dimension_dt_entropy_scores
        y_pred = clf.predict(x_test)
        real_dimension_dt_entropy_scores = accuracy_score(y_test, y_pred)
    rf = RandomForestClassifier(criterion='gini')
    rf = rf.fit(x_train, y_train)
    if pipeline_type == 1:
        global real_linear_rf_scores
        y_pred = rf.predict(x_test)
        real_linear_rf_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 2:
        global real_nonlinear_rf_scores
        y_pred = rf.predict(x_test)
        real_nonlinear_rf_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 3:
        global real_sparse_rf_scores
        y_pred = rf.predict(x_test)
        real_sparse_rf_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 4:
        global real_dimension_rf_scores
        y_pred = rf.predict(x_test)
        real_dimension_rf_scores = accuracy_score(y_test, y_pred)
    rf = RandomForestClassifier(criterion='entropy')
    rf = rf.fit(x_train, y_train)
    if pipeline_type == 1:
        global real_linear_rf_entropy_scores
        y_pred = rf.predict(x_test)
        real_linear_rf_entropy_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 2:
        global real_nonlinear_rf_entropy_scores
        y_pred = rf.predict(x_test)
        real_nonlinear_rf_entropy_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 3:
        global real_sparse_rf_entropy_scores
        y_pred = rf.predict(x_test)
        real_sparse_rf_entropy_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 4:
        global real_dimension_rf_entropy_scores
        y_pred = rf.predict(x_test)
        real_dimension_rf_entropy_scores = accuracy_score(y_test, y_pred)
    lr = LogisticRegression(penalty='none')
    lr = lr.fit(x_train, y_train)
    if pipeline_type == 1:
        global real_linear_lr_scores
        y_pred = lr.predict(x_test)
        real_linear_lr_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 2:
        global real_nonlinear_lr_scores
        y_pred = lr.predict(x_test)
        real_nonlinear_lr_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 3:
        global real_sparse_lr_scores
        y_pred = lr.predict(x_test)
        real_sparse_lr_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 4:
        global real_dimension_lr_scores
        y_pred = lr.predict(x_test)
        real_dimension_lr_scores = accuracy_score(y_test, y_pred)
    lr = LogisticRegression(penalty='l1', solver='liblinear')  # fixed: l1_ratio only applies to the 'elasticnet' penalty
    lr = lr.fit(x_train, y_train)
    if pipeline_type == 1:
        global real_linear_lr_l1_scores
        y_pred = lr.predict(x_test)
        real_linear_lr_l1_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 2:
        global real_nonlinear_lr_l1_scores
        y_pred = lr.predict(x_test)
        real_nonlinear_lr_l1_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 3:
        global real_sparse_lr_l1_scores
        y_pred = lr.predict(x_test)
        real_sparse_lr_l1_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 4:
        global real_dimension_lr_l1_scores
        y_pred = lr.predict(x_test)
        real_dimension_lr_l1_scores = accuracy_score(y_test, y_pred)
    lr = LogisticRegression(penalty='l2')
    lr = lr.fit(x_train, y_train)
    coef = lr.coef_
    print("These are the coefficients:", coef)
    if pipeline_type == 1:
        global real_linear_lr_l2_scores
        y_pred = lr.predict(x_test)
        real_linear_lr_l2_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 2:
        global real_nonlinear_lr_l2_scores
        y_pred = lr.predict(x_test)
        real_nonlinear_lr_l2_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 3:
        global real_sparse_lr_l2_scores
        y_pred = lr.predict(x_test)
        real_sparse_lr_l2_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 4:
        global real_dimension_lr_l2_scores
        y_pred = lr.predict(x_test)
        real_dimension_lr_l2_scores = accuracy_score(y_test, y_pred)
    lr = LogisticRegression(penalty='elasticnet', solver='saga', l1_ratio=0.5)
    lr = lr.fit(x_train, y_train)
    if pipeline_type == 1:
        global real_linear_lr_elastic_scores
        y_pred = lr.predict(x_test)
        real_linear_lr_elastic_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 2:
        global real_nonlinear_lr_elastic_scores
        y_pred = lr.predict(x_test)
        real_nonlinear_lr_elastic_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 3:
        global real_sparse_lr_elastic_scores
        y_pred = lr.predict(x_test)
        real_sparse_lr_elastic_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 4:
        global real_dimension_lr_elastic_scores
        y_pred = lr.predict(x_test)
        real_dimension_lr_elastic_scores = accuracy_score(y_test, y_pred)
    gnb = BernoulliNB()
    gnb = gnb.fit(x_train, y_train)
    if pipeline_type == 1:
        global real_linear_gb_scores
        y_pred = gnb.predict(x_test)
        real_linear_gb_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 2:
        global real_nonlinear_gb_scores
        y_pred = gnb.predict(x_test)
        real_nonlinear_gb_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 3:
        global real_sparse_gb_scores
        y_pred = gnb.predict(x_test)
        real_sparse_gb_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 4:
        global real_dimension_gb_scores
        y_pred = gnb.predict(x_test)
        real_dimension_gb_scores = accuracy_score(y_test, y_pred)
    gnb = GaussianNB()
    gnb = gnb.fit(x_train, y_train)
    if pipeline_type == 1:
        global real_linear_gb_gaussian_scores
        y_pred = gnb.predict(x_test)
        real_linear_gb_gaussian_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 2:
        global real_nonlinear_gb_gaussian_scores
        y_pred = gnb.predict(x_test)
        real_nonlinear_gb_gaussian_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 3:
        global real_sparse_gb_gaussian_scores
        y_pred = gnb.predict(x_test)
        real_sparse_gb_gaussian_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 4:
        global real_dimension_gb_gaussian_scores
        y_pred = gnb.predict(x_test)
        real_dimension_gb_gaussian_scores = accuracy_score(y_test, y_pred)
    min_max_scaler = MinMaxScaler()
    X_train_minmax = min_max_scaler.fit_transform(x_train)
    X_test_minmax = min_max_scaler.transform(x_test)
    gnb = MultinomialNB()
    gnb = gnb.fit(X_train_minmax, y_train)
    if pipeline_type == 1:
        global real_linear_gb_multi_scores
        y_pred = gnb.predict(X_test_minmax)
        real_linear_gb_multi_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 2:
        global real_nonlinear_gb_multi_scores
        y_pred = gnb.predict(X_test_minmax)
        real_nonlinear_gb_multi_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 3:
        global real_sparse_gb_multi_scores
        y_pred = gnb.predict(X_test_minmax)
        real_sparse_gb_multi_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 4:
        global real_dimension_gb_multi_scores
        y_pred = gnb.predict(X_test_minmax)
        real_dimension_gb_multi_scores = accuracy_score(y_test, y_pred)
    min_max_scaler = MinMaxScaler()
    X_train_minmax = min_max_scaler.fit_transform(x_train)
    X_test_minmax = min_max_scaler.transform(x_test)
    gnb = ComplementNB()
    gnb = gnb.fit(X_train_minmax, y_train)
    if pipeline_type == 1:
        global real_linear_gb_complement_scores
        y_pred = gnb.predict(X_test_minmax)
        real_linear_gb_complement_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 2:
        global real_nonlinear_gb_complement_scores
        y_pred = gnb.predict(X_test_minmax)
        real_nonlinear_gb_complement_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 3:
        global real_sparse_gb_complement_scores
        y_pred = gnb.predict(X_test_minmax)
        real_sparse_gb_complement_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 4:
        global real_dimension_gb_complement_scores
        y_pred = gnb.predict(X_test_minmax)
        real_dimension_gb_complement_scores = accuracy_score(y_test, y_pred)
    clf = svm.SVC(kernel="sigmoid")
    clf = clf.fit(x_train, y_train)
    if pipeline_type == 1:
        global real_linear_svm_scores
        y_pred = clf.predict(x_test)
        real_linear_svm_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 2:
        global real_nonlinear_svm_scores
        y_pred = clf.predict(x_test)
        real_nonlinear_svm_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 3:
        global real_sparse_svm_scores
        y_pred = clf.predict(x_test)
        real_sparse_svm_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 4:
        global real_dimension_svm_scores
        y_pred = clf.predict(x_test)
        real_dimension_svm_scores = accuracy_score(y_test, y_pred)
    # clf = svm.SVC(kernel="linear")
    # clf = clf.fit(x_train, y_train)
    # y_pred = clf.predict(x_test)
    # if pipeline_type == 1:
    #     global real_linear_svm_linear_scores
    #     real_linear_svm_linear_scores = cross_val_score(clf, x_train, y_train, cv=10)
    # elif pipeline_type == 2:
    #     global real_nonlinear_svm_linear_scores
    #     real_nonlinear_svm_linear_scores = cross_val_score(clf, x_train, y_train, cv=10)
    # elif pipeline_type == 3:
    #     global real_sparse_svm_linear_scores
    #     real_sparse_svm_linear_scores = cross_val_score(clf, x_train, y_train, cv=10)
    # elif pipeline_type == 4:
    #     global real_dimension_svm_linear_scores
    #     real_dimension_svm_linear_scores = cross_val_score(clf, x_train, y_train, cv=10)
    clf = svm.SVC(kernel="poly")
    clf = clf.fit(x_train, y_train)
    if pipeline_type == 1:
        global real_linear_svm_poly_scores
        y_pred = clf.predict(x_test)
        real_linear_svm_poly_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 2:
        global real_nonlinear_svm_poly_scores
        y_pred = clf.predict(x_test)  # fixed: the prediction was computed but never assigned
        real_nonlinear_svm_poly_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 3:
        global real_sparse_svm_poly_scores
        y_pred = clf.predict(x_test)
        real_sparse_svm_poly_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 4:
        global real_dimension_svm_poly_scores
        y_pred = clf.predict(x_test)
        real_dimension_svm_poly_scores = accuracy_score(y_test, y_pred)
    clf = svm.SVC(kernel="rbf")
    clf = clf.fit(x_train, y_train)
    if pipeline_type == 1:
        global real_linear_svm_rbf_scores
        y_pred = clf.predict(x_test)
        real_linear_svm_rbf_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 2:
        global real_nonlinear_svm_rbf_scores
        y_pred = clf.predict(x_test)
        real_nonlinear_svm_rbf_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 3:
        global real_sparse_svm_rbf_scores
        y_pred = clf.predict(x_test)
        real_sparse_svm_rbf_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 4:
        global real_dimension_svm_rbf_scores
        y_pred = clf.predict(x_test)
        real_dimension_svm_rbf_scores = accuracy_score(y_test, y_pred)
    # clf = svm.SVC(kernel="precomputed")
    # clf = clf.fit(x_train, y_train)
    # y_pred = clf.predict(x_test)
    # if pipeline_type == 1:
    #     global real_linear_svm_precomputed_scores
    #     real_linear_svm_precomputed_scores = cross_val_score(clf, x_train, y_train, cv=10)
    # elif pipeline_type == 2:
    #     global real_nonlinear_svm_precomputed_scores
    #     real_nonlinear_svm_precomputed_scores = cross_val_score(clf, x_train, y_train, cv=10)
    # elif pipeline_type == 3:
    #     global real_sparse_svm_precomputed_scores
    #     real_sparse_svm_precomputed_scores = cross_val_score(clf, x_train, y_train, cv=10)
    # elif pipeline_type == 4:
    #     global real_dimension_svm_precomputed_scores
    #     real_dimension_svm_precomputed_scores = cross_val_score(clf, x_train, y_train, cv=10)
    clf = KNeighborsClassifier(weights='uniform')
    clf = clf.fit(x_train, y_train)
    if pipeline_type == 1:
        global real_linear_knn_scores
        y_pred = clf.predict(x_test)
        real_linear_knn_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 2:
        global real_nonlinear_knn_scores
        y_pred = clf.predict(x_test)
        real_nonlinear_knn_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 3:
        global real_sparse_knn_scores
        y_pred = clf.predict(x_test)
        real_sparse_knn_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 4:
        global real_dimension_knn_scores
        y_pred = clf.predict(x_test)
        real_dimension_knn_scores = accuracy_score(y_test, y_pred)
    clf = KNeighborsClassifier(weights='distance')
    clf = clf.fit(x_train, y_train)
    if pipeline_type == 1:
        global real_linear_knn_distance_scores
        y_pred = clf.predict(x_test)
        real_linear_knn_distance_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 2:
        global real_nonlinear_knn_distance_scores
        y_pred = clf.predict(x_test)
        real_nonlinear_knn_distance_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 3:
        global real_sparse_knn_distance_scores
        y_pred = clf.predict(x_test)
        real_sparse_knn_distance_scores = accuracy_score(y_test, y_pred)
    elif pipeline_type == 4:
        global real_dimension_knn_distance_scores
        y_pred = clf.predict(x_test)
        real_dimension_knn_distance_scores = accuracy_score(y_test, y_pred)
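# Every block in realworld_evaluate repeats the same fit/predict/score pattern
# once per pipeline type; the TODOs at the top of the file point at collapsing
# this. A compact refactoring sketch (an illustration, not code from this
# repository) that replaces the per-model global variables with keyed scores:
#
#     from sklearn.metrics import accuracy_score
#
#     def fit_and_score(model, x_train, y_train, x_test, y_test):
#         # Fit one classifier and return its held-out accuracy.
#         model.fit(x_train, y_train)
#         return accuracy_score(y_test, model.predict(x_test))
#
#     # e.g. real_scores[(pipeline_type, 'dt')] = fit_and_score(
#     #          DecisionTreeClassifier(criterion='gini'), x_train, y_train, x_test, y_test)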
print("This is the first occurance of the real-world benchmarks")
realworld_evaluate(pipeline_type)
pipeline_type = 2
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
realworld_evaluate(pipeline_type)
pipeline_type = 3
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
realworld_evaluate(pipeline_type)
pipeline_type = 4
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
realworld_evaluate(pipeline_type)
pipeline_type = 1
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
# Simulation library structure learning section
print("This is the first occurance of the simulated benchmarks")
simulated_data_train = simulation_notears.notears_setup(train_data_numpy[0:100], 1000, 1000)[0]
simulated_data_test = simulation_notears.notears_setup(train_data_numpy[0:100], 1000, 1000)[1]
#simulation_notears.notears_nonlinear_setup(train_data_numpy[0:100], 10000, 5000)
# import the saved training and test data from the simulation framework's learned world
#def import_simulated_csv():
# global no_tears_sample_train
# no_tears_sample_train= pd.read_csv('W_est_train.csv')
# #global no_tears_sample_test
# #no_tears_sample_test = pd.read_csv('W_est_test.csv')
# #global no_tears_nonlinear_sample_train
# #no_tears_nonlinear_sample_train = pd.read_csv('K_est_train.csv')
# #global no_tears_nonlinear_sample_test
# #no_tears_nonlinear_sample_test = pd.read_csv('K_est_test.csv')
# global bn_learn_sample_train
# bn_learn_sample_train = pd.read_csv('Z_est_train.csv')
# #global bn_learn_sample_test
# #bn_learn_sample_test = pd.read_csv('Z_est_test.csv')
# global pomegranate_sample_train
# pomegranate_sample_train = pd.read_csv('X_est_train.csv')
# global pgmpy_sample_train
# pgmpy_sample_train = pd.read_csv('V_est_train.csv')
#import_simulated_csv()
def run_learned_workflows(x_train, y_train, x_test, y_test, pipeline_type, alg):
    print("alg:" + alg + ", pipeline:" + str(pipeline_type))
    my_dict = {"alg": alg, "pl": pipeline_type, "dt": 0, "dt_e": 0, "rf": 0, "rf_e": 0,  # key fixed: was "rf_E", so "rf_e" below created a stray entry
               "lr": 0, "lr_l1": 0, "lr_l2": 0, "lr_e": 0, "nb": 0, "nb_g": 0, "nb_m": 0, "nb_c": 0,
               "svm": 0, "svm_l": 0, "svm_po": 0, "svm_r": 0, "svm_pr": 0, "knn": 0, "knn_d": 0}
    my_dict["dt"] = simulation_models.decision_tree(x_train, y_train, x_test, y_test)
    my_dict["dt_e"] = simulation_models.decision_tree_entropy(x_train, y_train, x_test, y_test)
    my_dict["rf"] = simulation_models.random_forest(x_train, y_train, x_test, y_test)
    my_dict["rf_e"] = simulation_models.random_forest_entropy(x_train, y_train, x_test, y_test)
    my_dict["lr"] = simulation_models.logistic_regression(x_train, y_train, x_test, y_test)
    my_dict["lr_l1"] = simulation_models.logistic_regression_l1(x_train, y_train, x_test, y_test)
    my_dict["lr_l2"] = simulation_models.logistic_regression_l2(x_train, y_train, x_test, y_test)
    my_dict["lr_e"] = simulation_models.logistic_regression_elastic(x_train, y_train, x_test, y_test)
    my_dict["nb"] = simulation_models.naive_bayes(x_train, y_train, x_test, y_test)
    my_dict["nb_g"] = simulation_models.naive_bayes_gaussian(x_train, y_train, x_test, y_test)
    my_dict["nb_m"] = simulation_models.naive_bayes_multinomial(x_train, y_train, x_test, y_test)
    my_dict["nb_c"] = simulation_models.naive_bayes_complement(x_train, y_train, x_test, y_test)
    my_dict["svm"] = simulation_models.support_vector_machines(x_train, y_train, x_test, y_test)
    # my_dict["svm_l"] = simulation_models.support_vector_machines_linear(x_train, y_train, x_test, y_test)
    my_dict["svm_po"] = simulation_models.support_vector_machines_poly(x_train, y_train, x_test, y_test)
    my_dict["svm_r"] = simulation_models.support_vector_machines_rbf(x_train, y_train, x_test, y_test)
    # my_dict["svm_pr"] = simulation_models.support_vector_machines_precomputed(x_train, y_train, x_test, y_test)
    my_dict["knn"] = simulation_models.k_nearest_neighbor(x_train, y_train, x_test, y_test)
    my_dict["knn_d"] = simulation_models.k_nearest_neighbor_distance(x_train, y_train, x_test, y_test)
    return my_dict
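# The returned dict mixes metadata ("alg", "pl") with float accuracy scores. A
# small usage sketch (assuming the simulation_models helpers are importable and
# the module-level train/test globals are populated) for pulling out the
# best-scoring model from one run:
#
#     scores = run_learned_workflows(x_train, y_train, x_test, y_test,
#                                    pipeline_type, "NO TEARS (Logistic)")
#     numeric = {k: v for k, v in scores.items() if isinstance(v, float)}
#     best = max(numeric, key=numeric.get)
#     print(best, numeric[best])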
#helper function to execute one workflow with parameterised setup
#def execute_pipeline(x_train, y_train, run_pipeline_type, pipeline_title):
# pipeline_type = 2
# simulation_dagsim.setup_realworld(pipeline_type, 1000, 5000)
# import_real_world_csv(pipeline_type)
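# The commented-out execute_pipeline stub above gestures at collapsing the
# repeated setup/simulate/score blocks that follow. One hedged way to finish it
# for the notears-style setup functions (the n_train/n_test defaults and the
# k-column rule are inferred from the call sites below, not taken from the repo):
#
#     def execute_pipeline(setup_fn, label, pipeline_type, n_train=1000, n_test=1000):
#         simulation_dagsim.setup_realworld(pipeline_type, n_train, n_test)
#         import_real_world_csv(pipeline_type)
#         sim_train, sim_test = setup_fn(train_data_numpy[0:100], n_train, n_test)
#         k = 10 if pipeline_type == 4 else 4  # pipeline 4 is the high-dimensional case
#         real = run_learned_workflows(sim_train[:, 0:k], sim_train[:, k],
#                                      x_test, y_test, pipeline_type, label)
#         sim = run_learned_workflows(sim_train[:, 0:k], sim_train[:, k],
#                                     sim_test[:, 0:k], sim_test[:, k], pipeline_type, label)
#         return real, sim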
#notears simulation scoring
notears_linear_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "NO TEARS (Logistic)")
notears_linear_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4], pipeline_type, "NO TEARS (Logistic)")
pipeline_type = 2
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train = simulation_notears.notears_setup(train_data_numpy[0:100], 1000, 1000)[0]
simulated_data_test = simulation_notears.notears_setup(train_data_numpy[0:100], 1000, 1000)[1]
notears_nonlinear_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "NO TEARS (Logistic)")
notears_nonlinear_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4],pipeline_type, "NO TEARS (Logistic)")
pipeline_type = 3
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train = simulation_notears.notears_setup(train_data_numpy[0:100], 1000, 1000)[0]
simulated_data_test = simulation_notears.notears_setup(train_data_numpy[0:100], 1000, 1000)[1]
notears_sparse_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "NO TEARS (Logistic)")
notears_sparse_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4], pipeline_type, "NO TEARS (Logistic)")
pipeline_type = 4
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train = simulation_notears.notears_setup(train_data_numpy[0:100], 1000, 1000)[0]
simulated_data_test = simulation_notears.notears_setup(train_data_numpy[0:100], 1000, 1000)[1]
notears_dimension_dict_scores = run_learned_workflows(simulated_data_train[:,0:10], simulated_data_train[:,10], x_test, y_test, pipeline_type, "NO TEARS (Logistic)")
notears_dimension_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:10], simulated_data_train[:,10], simulated_data_test[:,0:10], simulated_data_test[:,10], pipeline_type, "NO TEARS (Logistic)")
#notears hyperparameter loss function l2
pipeline_type = 1
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train = simulation_notears.notears_setup_b(train_data_numpy[0:100], 1000, 1000)[0]
simulated_data_test = simulation_notears.notears_setup_b(train_data_numpy[0:100], 1000, 1000)[1]
notears_l2_linear_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "NO TEARS (L2)")
notears_l2_linear_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4],pipeline_type, "NO TEARS (L2)")
pipeline_type = 2
simulation_dagsim.setup_realworld(pipeline_type, 1000, 5000)
import_real_world_csv(pipeline_type)
simulated_data_train = simulation_notears.notears_setup_b(train_data_numpy[0:100], 1000, 1000)[0]
simulated_data_test = simulation_notears.notears_setup_b(train_data_numpy[0:100], 1000, 1000)[1]
notears_l2_nonlinear_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "NO TEARS (L2)")
notears_l2_nonlinear_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4],pipeline_type, "NO TEARS (L2)")
pipeline_type = 3
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train = simulation_notears.notears_setup_b(train_data_numpy[0:100], 1000, 1000)[0]
simulated_data_test = simulation_notears.notears_setup_b(train_data_numpy[0:100], 1000, 1000)[1]
notears_l2_sparse_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "NO TEARS (L2)")
notears_l2_sparse_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4], pipeline_type, "NO TEARS (L2)")
pipeline_type = 4
simulation_dagsim.setup_realworld(pipeline_type, 1000, 5000)
import_real_world_csv(pipeline_type)
simulated_data_train = simulation_notears.notears_setup_b(train_data_numpy[0:100], 1000, 1000)[0]
simulated_data_test = simulation_notears.notears_setup_b(train_data_numpy[0:100], 1000, 1000)[1]
notears_l2_dimension_dict_scores = run_learned_workflows(simulated_data_train[:,0:10], simulated_data_train[:,10], x_test, y_test, pipeline_type, "NO TEARS (L2)")
notears_l2_dimension_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:10], simulated_data_train[:,10], simulated_data_test[:,0:10], simulated_data_test[:,10],pipeline_type, "NO TEARS (L2)")
#notears hyperparameter loss function poisson
pipeline_type = 1
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train = simulation_notears.notears_setup_c(train_data_numpy[0:100], 1000, 1000)[0]
simulated_data_test = simulation_notears.notears_setup_c(train_data_numpy[0:100], 1000, 1000)[1]
notears_poisson_linear_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "NO TEARS (Poisson)")
notears_poisson_linear_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4], pipeline_type, "NO TEARS (Poisson)")
pipeline_type = 2
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train = simulation_notears.notears_setup_c(train_data_numpy[0:100], 1000, 1000)[0]
simulated_data_test = simulation_notears.notears_setup_c(train_data_numpy[0:100], 1000, 1000)[1]
notears_poisson_nonlinear_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "NO TEARS (Poisson)")
notears_poisson_nonlinear_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4], pipeline_type, "NO TEARS (Poisson)")
pipeline_type = 3
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train = simulation_notears.notears_setup_c(train_data_numpy[0:100], 1000, 1000)[0]
simulated_data_test = simulation_notears.notears_setup_c(train_data_numpy[0:100], 1000, 1000)[1]
notears_poisson_sparse_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "NO TEARS (Poisson)")
notears_poisson_sparse_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4],pipeline_type, "NO TEARS (Poisson)")
pipeline_type = 4
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train = simulation_notears.notears_setup_c(train_data_numpy[0:100], 1000, 1000)[0]
simulated_data_test = simulation_notears.notears_setup_c(train_data_numpy[0:100], 1000, 1000)[1]
notears_poisson_dimension_dict_scores = run_learned_workflows(simulated_data_train[:,0:10], simulated_data_train[:,10], x_test, y_test, pipeline_type, "NO TEARS (Poisson)")
notears_poisson_dimension_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:10], simulated_data_train[:,10], simulated_data_test[:,0:10], simulated_data_test[:,10],pipeline_type, "NO TEARS (Poisson)")
#bnlearn simulation scoring
pipeline_type = 1
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train = simulation_bnlearn.bnlearn_setup_hc(train_data[0:100], pipeline_type)[0]
simulated_data_test = simulation_bnlearn.bnlearn_setup_hc(train_data[0:100], pipeline_type)[1]
bnlearn_linear_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "BN LEARN (HC)")
bnlearn_linear_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4], pipeline_type, "BN LEARN (HC)")
pipeline_type = 2
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train = simulation_bnlearn.bnlearn_setup_hc(train_data[0:100], pipeline_type)[0]
simulated_data_test = simulation_bnlearn.bnlearn_setup_hc(train_data[0:100], pipeline_type)[1]
bnlearn_nonlinear_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "BN LEARN (HC)")
bnlearn_nonlinear_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4], pipeline_type, "BN LEARN (HC)")
pipeline_type = 3
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train = simulation_bnlearn.bnlearn_setup_hc(train_data[0:100], pipeline_type)[0]
simulated_data_test = simulation_bnlearn.bnlearn_setup_hc(train_data[0:100], pipeline_type)[1]
bnlearn_sparse_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "BN LEARN (HC)")
bnlearn_sparse_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4], pipeline_type, "BN LEARN (HC)")
pipeline_type = 4
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train = simulation_bnlearn.bnlearn_setup_hc(train_data[0:100], pipeline_type)[0]
simulated_data_test = simulation_bnlearn.bnlearn_setup_hc(train_data[0:100], pipeline_type)[1]
bnlearn_dimension_dict_scores = run_learned_workflows(simulated_data_train[:,0:10], simulated_data_train[:,10], x_test, y_test, pipeline_type, "BN LEARN (HC)")
bnlearn_dimension_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:10], simulated_data_train[:,10], simulated_data_test[:,0:10], simulated_data_test[:,10], pipeline_type, "BN LEARN (HC)")
#Run hyperparameter of bnlearn - tabu
pipeline_type = 1
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train = simulation_bnlearn.bnlearn_setup_tabu(train_data[0:100], pipeline_type)[0]
simulated_data_test = simulation_bnlearn.bnlearn_setup_tabu(train_data[0:100], pipeline_type)[1]
bnlearn_tabu_linear_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "BN LEARN (TABU)")
bnlearn_tabu_linear_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4], pipeline_type, "BN LEARN (TABU)")
pipeline_type = 2
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train = simulation_bnlearn.bnlearn_setup_tabu(train_data[0:100], pipeline_type)[0]
simulated_data_test = simulation_bnlearn.bnlearn_setup_tabu(train_data[0:100], pipeline_type)[1]
bnlearn_tabu_nonlinear_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "BN LEARN (TABU)")
bnlearn_tabu_nonlinear_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4], pipeline_type, "BN LEARN (TABU)")
pipeline_type = 3
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train = simulation_bnlearn.bnlearn_setup_tabu(train_data[0:100], pipeline_type)[0]
simulated_data_test = simulation_bnlearn.bnlearn_setup_tabu(train_data[0:100], pipeline_type)[1]
bnlearn_tabu_sparse_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "BN LEARN (TABU)")
bnlearn_tabu_sparse_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4], pipeline_type, "BN LEARN (TABU)")
pipeline_type = 4
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train = simulation_bnlearn.bnlearn_setup_tabu(train_data[0:100], pipeline_type)[0]
simulated_data_test = simulation_bnlearn.bnlearn_setup_tabu(train_data[0:100], pipeline_type)[1]
bnlearn_tabu_dimension_dict_scores = run_learned_workflows(simulated_data_train[:,0:10], simulated_data_train[:,10], x_test, y_test, pipeline_type, "BN LEARN (TABU)")
bnlearn_tabu_dimension_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:10], simulated_data_train[:,10], simulated_data_test[:,0:10], simulated_data_test[:,10], pipeline_type, "BN LEARN (TABU)")
#end of tabu workflows
#Run hyperparameter of bnlearn - pc
pipeline_type = 1
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train = simulation_bnlearn.bnlearn_setup_pc(train_data[0:100], pipeline_type)[0]
simulated_data_test = simulation_bnlearn.bnlearn_setup_pc(train_data[0:100], pipeline_type)[1]
bnlearn_pc_linear_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "BN LEARN (PC)")
bnlearn_pc_linear_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4], pipeline_type, "BN LEARN (PC)")
#pipeline_type = 2
#simulation_dagsim.setup_realworld(pipeline_type, 1000, 5000)
#import_real_world_csv(pipeline_type)
#simulation_bnlearn.bnlearn_setup_pc(train_data[0:100], pipeline_type) #rpy2.rinterface_lib.embedded.RRuntimeError: Error in bn.fit(my_bn, databn) : the graph is only partially directed.
#import_simulated_csv()
#bnlearn_pc_nonlinear_dict_scores = run_learned_workflows(bn_learn_sample_train.iloc[:,0:4], bn_learn_sample_train.iloc[:,4], pipeline_type, "BN LEARN (PC)")
#pipeline_type = 3
#simulation_dagsim.setup_realworld(pipeline_type, 10000, 5000)
#import_real_world_csv(pipeline_type)
#simulation_bnlearn.bnlearn_setup_pc(train_data[0:100], pipeline_type) #rpy2.rinterface_lib.embedded.RRuntimeError: Error in bn.fit(my_bn, databn) : the graph is only partially directed.
#import_simulated_csv()
#bnlearn_pc_sparse_dict_scores = run_learned_workflows(bn_learn_sample_train.iloc[:,0:4], bn_learn_sample_train.iloc[:,4], pipeline_type, "BN LEARN (PC)")
#pipeline_type = 4
#simulation_dagsim.setup_realworld(pipeline_type, 10000, 5000)
#import_real_world_csv(pipeline_type)
#simulation_bnlearn.bnlearn_setup_pc(train_data[0:100], pipeline_type) #rpy2.rinterface_lib.embedded.RRuntimeError: Error in bn.fit(my_bn, databn) : the graph is only partially directed
#import_simulated_csv()
#bnlearn_pc_dimension_dict_scores = run_learned_workflows(bn_learn_sample_train.iloc[:,0:2], bn_learn_sample_train.iloc[:,2], pipeline_type, "BN LEARN (PC)")
#end of pc workflows
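# The PC runs above (and the GS/IAMB runs below) are disabled because bn.fit
# rejects the partially directed graphs those constraint-based searches return.
# R's bnlearn package provides cextend() to extend such a CPDAG to a consistent
# DAG when one exists; a hedged rpy2 sketch (untested against this pipeline):
#
#     from rpy2.robjects.packages import importr
#     bnlearn_r = importr('bnlearn')
#     # pdag obtained from a structure search, e.g. bnlearn_r.pc_stable(databn)
#     # dag = bnlearn_r.cextend(pdag)      # orient the remaining undirected edges
#     # fitted = bnlearn_r.bn_fit(dag, databn)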
#Run hyperparameter of bnlearn - gs
#pipeline_type = 1
#simulation_dagsim.setup_realworld(pipeline_type, 1000, 5000)
#import_real_world_csv(pipeline_type)
#simulation_bnlearn.bnlearn_setup_gs(train_data[0:100], pipeline_type)
#import_simulated_csv()
#bnlearn_gs_linear_dict_scores = run_learned_workflows(bn_learn_sample_train.iloc[:,0:4], bn_learn_sample_train.iloc[:,4], pipeline_type, "BN LEARN (GS)")
#pipeline_type = 2
#simulation_dagsim.setup_realworld(pipeline_type, 10000, 5000)
#import_real_world_csv(pipeline_type)
#simulation_bnlearn.bnlearn_setup_gs(train_data[0:100], pipeline_type)
#import_simulated_csv()
#bnlearn_gs_nonlinear_dict_scores = run_learned_workflows(bn_learn_sample_train.iloc[:,0:4], bn_learn_sample_train.iloc[:,4], pipeline_type, "BN LEARN (GS)")
#pipeline_type = 3
#simulation_dagsim.setup_realworld(pipeline_type, 10000, 5000)
#import_real_world_csv(pipeline_type)
#simulation_bnlearn.bnlearn_setup_gs(train_data[0:100], pipeline_type) #rpy2.rinterface_lib.embedded.RRuntimeError: Error in bn.fit(my_bn, databn) : the graph is only partially directed
#import_simulated_csv()
#bnlearn_gs_sparse_dict_scores = run_learned_workflows(bn_learn_sample_train.iloc[:,0:4], bn_learn_sample_train.iloc[:,4], pipeline_type, "BN LEARN (GS)")
#pipeline_type = 4
#simulation_dagsim.setup_realworld(pipeline_type, 10000, 5000)
#import_real_world_csv(pipeline_type)
#simulation_bnlearn.bnlearn_setup_gs(train_data[0:100], pipeline_type) #rpy2.rinterface_lib.embedded.RRuntimeError: Error in bn.fit(my_bn, databn) : the graph is only partially directed
#import_simulated_csv()
#bnlearn_gs_dimension_dict_scores = run_learned_workflows(bn_learn_sample_train.iloc[:,0:2], bn_learn_sample_train.iloc[:,2], pipeline_type, "BN LEARN (GS)")
#end of gs workflows
#Run hyperparameter of bnlearn - iamb
#pipeline_type = 1
#simulation_dagsim.setup_realworld(pipeline_type, 1000, 5000)
#import_real_world_csv(pipeline_type)
#simulation_bnlearn.bnlearn_setup_iamb(train_data[0:100], pipeline_type) #rpy2.rinterface_lib.embedded.RRuntimeError: Error in bn.fit(my_bn, databn) : the graph is only partially directed.
#import_simulated_csv()
#bnlearn_iamb_linear_dict_scores = run_learned_workflows(bn_learn_sample_train.iloc[:,0:4], bn_learn_sample_train.iloc[:,4], pipeline_type, "BN LEARN (IAMB)")
#pipeline_type = 2
#simulation_dagsim.setup_realworld(pipeline_type, 10000, 5000)
#import_real_world_csv(pipeline_type)
#simulation_bnlearn.bnlearn_setup_iamb(train_data[0:100], pipeline_type) #rpy2.rinterface_lib.embedded.RRuntimeError: Error in bn.fit(my_bn, databn) : the graph is only partially directed
#import_simulated_csv()
#bnlearn_iamb_nonlinear_dict_scores = run_learned_workflows(bn_learn_sample_train.iloc[:,0:4], bn_learn_sample_train.iloc[:,4], pipeline_type, "BN LEARN (IAMB)")
#pipeline_type = 3
#simulation_dagsim.setup_realworld(pipeline_type, 10000, 5000)
#import_real_world_csv(pipeline_type)
#simulation_bnlearn.bnlearn_setup_iamb(train_data[0:100], pipeline_type) #rpy2.rinterface_lib.embedded.RRuntimeError: Error in bn.fit(my_bn, databn) : the graph is only partially directed
#import_simulated_csv()
#bnlearn_iamb_sparse_dict_scores = run_learned_workflows(bn_learn_sample_train.iloc[:,0:4], bn_learn_sample_train.iloc[:,4], pipeline_type, "BN LEARN (IAMB)")
#pipeline_type = 4
#simulation_dagsim.setup_realworld(pipeline_type, 10000, 5000)
#import_real_world_csv(pipeline_type)
#simulation_bnlearn.bnlearn_setup_iamb(train_data[0:100], pipeline_type) #rpy2.rinterface_lib.embedded.RRuntimeError: Error in bn.fit(my_bn, databn) : the graph is only partially directed
#import_simulated_csv()
#bnlearn_iamb_dimension_dict_scores = run_learned_workflows(bn_learn_sample_train.iloc[:,0:2], bn_learn_sample_train.iloc[:,2], pipeline_type, "BN LEARN (IAMB)")
#end of iamb workflows
#Run bnlearn hyperparameter variant - mmhc (Max-Min Hill-Climbing)
pipeline_type = 1
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_bnlearn.bnlearn_setup_mmhc(train_data[0:100], pipeline_type) #learn the network once and unpack the (train, test) samples instead of fitting it twice
bnlearn_mmhc_linear_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "BN LEARN (MMHC)")
bnlearn_mmhc_linear_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4], pipeline_type, "BN LEARN (MMHC)")
pipeline_type = 2
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_bnlearn.bnlearn_setup_mmhc(train_data[0:100], pipeline_type) #rpy2.rinterface_lib.embedded.RRuntimeError: Error in bn.fit(my_bn, databn) : the graph is only partially directed
bnlearn_mmhc_nonlinear_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "BN LEARN (MMHC)")
bnlearn_mmhc_nonlinear_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4], pipeline_type, "BN LEARN (MMHC)")
pipeline_type = 3
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_bnlearn.bnlearn_setup_mmhc(train_data[0:100], pipeline_type) #rpy2.rinterface_lib.embedded.RRuntimeError: Error in bn.fit(my_bn, databn) : the graph is only partially directed
bnlearn_mmhc_sparse_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "BN LEARN (MMHC)")
bnlearn_mmhc_sparse_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4], pipeline_type, "BN LEARN (MMHC)")
pipeline_type = 4
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_bnlearn.bnlearn_setup_mmhc(train_data[0:100], pipeline_type) #rpy2.rinterface_lib.embedded.RRuntimeError: Error in bn.fit(my_bn, databn) : the graph is only partially directed
bnlearn_mmhc_dimension_dict_scores = run_learned_workflows(simulated_data_train[:,0:10], simulated_data_train[:,10], x_test, y_test, pipeline_type, "BN LEARN (MMHC)")
bnlearn_mmhc_dimension_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:10], simulated_data_train[:,10], simulated_data_test[:,0:10], simulated_data_test[:,10], pipeline_type, "BN LEARN (MMHC)")
#end of mmhc workflows
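#Hedged sketch (illustrative only, never called): the four mmhc blocks above repeat
#the same steps with only the setup function and label changing, so the same pattern
#could be loop-driven. run_bnlearn_variant is a hypothetical helper; like the script
#itself it assumes import_real_world_csv() refreshes the train_data/x_test/y_test
#globals, and that pipeline 4 (dimensionality) uses 10 features where the others use 4.
def run_bnlearn_variant(setup_fn, label):
    scores = {}
    for pipeline in (1, 2, 3, 4):
        n = 10 if pipeline == 4 else 4
        simulation_dagsim.setup_realworld(pipeline, 1000, 1000)
        import_real_world_csv(pipeline)
        sim_train, sim_test = setup_fn(train_data[0:100], pipeline)
        real_test_scores = run_learned_workflows(sim_train[:,0:n], sim_train[:,n], x_test, y_test, pipeline, label)
        sim_test_scores = run_learned_workflows(sim_train[:,0:n], sim_train[:,n], sim_test[:,0:n], sim_test[:,n], pipeline, label)
        scores[pipeline] = (real_test_scores, sim_test_scores)
    return scores
#e.g. mmhc_scores = run_bnlearn_variant(simulation_bnlearn.bnlearn_setup_mmhc, "BN LEARN (MMHC)")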
#Run bnlearn hyperparameter variant - rsmax2 (two-phase Restricted Maximization)
pipeline_type = 1
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_bnlearn.bnlearn_setup_rsmax2(train_data[0:100], pipeline_type)
bnlearn_rsmax2_linear_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "BN LEARN (RSMAX2)")
bnlearn_rsmax2_linear_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4], pipeline_type, "BN LEARN (RSMAX2)")
pipeline_type = 2
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_bnlearn.bnlearn_setup_rsmax2(train_data[0:100], pipeline_type) #rpy2.rinterface_lib.embedded.RRuntimeError: Error in bn.fit(my_bn, databn) : the graph is only partially directed
bnlearn_rsmax2_nonlinear_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "BN LEARN (RSMAX2)")
bnlearn_rsmax2_nonlinear_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4], pipeline_type, "BN LEARN (RSMAX2)")
pipeline_type = 3
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_bnlearn.bnlearn_setup_rsmax2(train_data[0:100], pipeline_type) #rpy2.rinterface_lib.embedded.RRuntimeError: Error in bn.fit(my_bn, databn) : the graph is only partially directed
bnlearn_rsmax2_sparse_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "BN LEARN (RSMAX2)")
bnlearn_rsmax2_sparse_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4], pipeline_type, "BN LEARN (RSMAX2)")
pipeline_type = 4
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_bnlearn.bnlearn_setup_rsmax2(train_data[0:100], pipeline_type) #rpy2.rinterface_lib.embedded.RRuntimeError: Error in bn.fit(my_bn, databn) : the graph is only partially directed
bnlearn_rsmax2_dimension_dict_scores = run_learned_workflows(simulated_data_train[:,0:10], simulated_data_train[:,10], x_test, y_test, pipeline_type, "BN LEARN (RSMAX2)")
bnlearn_rsmax2_dimension_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:10], simulated_data_train[:,10], simulated_data_test[:,0:10], simulated_data_test[:,10], pipeline_type, "BN LEARN (RSMAX2)")
#end of rsmax2 workflows
#Run bnlearn hyperparameter variant - h2pc (Hybrid HPC)
pipeline_type = 1
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_bnlearn.bnlearn_setup_h2pc(train_data[0:100], pipeline_type)
bnlearn_h2pc_linear_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "BN LEARN (H2PC)")
bnlearn_h2pc_linear_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4], pipeline_type, "BN LEARN (H2PC)")
pipeline_type = 2
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_bnlearn.bnlearn_setup_h2pc(train_data[0:100], pipeline_type) #rpy2.rinterface_lib.embedded.RRuntimeError: Error in bn.fit(my_bn, databn) : the graph is only partially directed
bnlearn_h2pc_nonlinear_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "BN LEARN (H2PC)")
bnlearn_h2pc_nonlinear_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4], pipeline_type, "BN LEARN (H2PC)")
pipeline_type = 3
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_bnlearn.bnlearn_setup_h2pc(train_data[0:100], pipeline_type) #rpy2.rinterface_lib.embedded.RRuntimeError: Error in bn.fit(my_bn, databn) : the graph is only partially directed
bnlearn_h2pc_sparse_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "BN LEARN (H2PC)")
bnlearn_h2pc_sparse_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4], pipeline_type, "BN LEARN (H2PC)")
pipeline_type = 4
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_bnlearn.bnlearn_setup_h2pc(train_data[0:100], pipeline_type) #rpy2.rinterface_lib.embedded.RRuntimeError: Error in bn.fit(my_bn, databn) : the graph is only partially directed
bnlearn_h2pc_dimension_dict_scores = run_learned_workflows(simulated_data_train[:,0:10], simulated_data_train[:,10], x_test, y_test, pipeline_type, "BN LEARN (H2PC)")
bnlearn_h2pc_dimension_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:10], simulated_data_train[:,10], simulated_data_test[:,0:10], simulated_data_test[:,10], pipeline_type, "BN LEARN (H2PC)")
#end of h2pc workflows
#pomegranate simulation scoring - exact
pipeline_type = 1
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_pomegranate.pomegranate_setup(train_data[0:100], pipeline_type)
pomegranate_exact_linear_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "POMEGRANATE (EXACT)")
pomegranate_exact_linear_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4], pipeline_type, "POMEGRANATE (EXACT)")
pipeline_type = 2
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_pomegranate.pomegranate_setup(train_data[0:100], pipeline_type)
pomegranate_exact_nonlinear_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "POMEGRANATE (EXACT)")
pomegranate_exact_nonlinear_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4], pipeline_type, "POMEGRANATE (EXACT)")
pipeline_type = 3
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_pomegranate.pomegranate_setup(train_data[0:100], pipeline_type)
pomegranate_exact_sparse_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "POMEGRANATE (EXACT)")
pomegranate_exact_sparse_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4], pipeline_type, "POMEGRANATE (EXACT)")
pipeline_type = 4
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_pomegranate.pomegranate_setup(train_data[0:100], pipeline_type)
pomegranate_exact_dimension_dict_scores = run_learned_workflows(simulated_data_train[:,0:10], simulated_data_train[:,10], x_test, y_test, pipeline_type, "POMEGRANATE (EXACT)")
pomegranate_exact_dimension_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:10], simulated_data_train[:,10], simulated_data_test[:,0:10], simulated_data_test[:,10], pipeline_type, "POMEGRANATE (EXACT)")
#pomegranate hyperparameter simulation scoring - greedy
pipeline_type = 1
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_pomegranate.pomegranate_setup_b(train_data[0:100], pipeline_type)
pomegranate_greedy_linear_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "POMEGRANATE (GREEDY)")
pomegranate_greedy_linear_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4], pipeline_type, "POMEGRANATE (GREEDY)")
pipeline_type = 2
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_pomegranate.pomegranate_setup_b(train_data[0:100], pipeline_type)
pomegranate_greedy_nonlinear_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "POMEGRANATE (GREEDY)")
pomegranate_greedy_nonlinear_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4], pipeline_type, "POMEGRANATE (GREEDY)")
pipeline_type = 3
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_pomegranate.pomegranate_setup_b(train_data[0:100], pipeline_type)
pomegranate_greedy_sparse_dict_scores = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], x_test, y_test, pipeline_type, "POMEGRANATE (GREEDY)")
pomegranate_greedy_sparse_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:4], simulated_data_train[:,4], simulated_data_test[:,0:4], simulated_data_test[:,4], pipeline_type, "POMEGRANATE (GREEDY)")
pipeline_type = 4
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_pomegranate.pomegranate_setup_b(train_data[0:100], pipeline_type)
pomegranate_greedy_dimension_dict_scores = run_learned_workflows(simulated_data_train[:,0:10], simulated_data_train[:,10], x_test, y_test, pipeline_type, "POMEGRANATE (GREEDY)")
pomegranate_greedy_dimension_dict_scores_simtest = run_learned_workflows(simulated_data_train[:,0:10], simulated_data_train[:,10], simulated_data_test[:,0:10], simulated_data_test[:,10], pipeline_type, "POMEGRANATE (GREEDY)")
#pomegranate hyperparameter simulation scoring - Chow-Liu
#pipeline_type = 1
#simulation_dagsim.setup_realworld(pipeline_type, 1000, 5000)
#import_real_world_csv(pipeline_type)
#simulation_pomegranate.pomegranate_setup_c(train_data[0:100], pipeline_type)
#import_simulated_csv()
#pomegranate_chow_linear_dict_scores = run_learned_workflows(pomegranate_sample_train.iloc[:,0:4], pomegranate_sample_train.iloc[:,4], pipeline_type, "POMEGRANATE (CHOW-LIU)")
#pipeline_type = 2
#simulation_dagsim.setup_realworld(pipeline_type, 1000, 5000)
#import_real_world_csv(pipeline_type)
#simulation_pomegranate.pomegranate_setup_c(train_data[0:100], pipeline_type)
#import_simulated_csv()
#pomegranate_chow_nonlinear_dict_scores = run_learned_workflows(pomegranate_sample_train.iloc[:,0:4], pomegranate_sample_train.iloc[:,4], pipeline_type, "POMEGRANATE (CHOW-LIU)")
#pipeline_type = 3
#simulation_dagsim.setup_realworld(pipeline_type, 1000, 5000)
#import_real_world_csv(pipeline_type)
#simulation_pomegranate.pomegranate_setup_c(train_data[0:100], pipeline_type)
#import_simulated_csv()
#pomegranate_chow_sparse_dict_scores = run_learned_workflows(pomegranate_sample_train.iloc[:,0:4], pomegranate_sample_train.iloc[:,4], pipeline_type, "POMEGRANATE (CHOW-LIU)")
#pipeline_type = 4
#simulation_dagsim.setup_realworld(pipeline_type, 1000, 5000)
#import_real_world_csv(pipeline_type)
#simulation_pomegranate.pomegranate_setup_c(train_data[0:100], pipeline_type)
#import_simulated_csv()
#pomegranate_chow_dimension_dict_scores = run_learned_workflows(pomegranate_sample_train.iloc[:,0:10], pomegranate_sample_train.iloc[:,10], pipeline_type, "POMEGRANATE (CHOW-LIU)")
#pgmpy simulation scoring - Hill-Climbing (HC)
pipeline_type = 1
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_pgmpy.pgmpy_setup_hc(train_data[0:100], pipeline_type)
pgmpy_hc_linear_dict_scores = run_learned_workflows(simulated_data_train.iloc[:,0:4], simulated_data_train.iloc[:,4], x_test, y_test, pipeline_type, "PGMPY (HC)")
pgmpy_hc_linear_dict_scores_simtest = run_learned_workflows(simulated_data_train.iloc[:,0:4], simulated_data_train.iloc[:,4], simulated_data_test.iloc[:,0:4], simulated_data_test.iloc[:,4], pipeline_type, "PGMPY (HC)")
pipeline_type = 2
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_pgmpy.pgmpy_setup_hc(train_data[0:100], pipeline_type)
pgmpy_hc_nonlinear_dict_scores = run_learned_workflows(simulated_data_train.iloc[:,0:4], simulated_data_train.iloc[:,4], x_test, y_test, pipeline_type, "PGMPY (HC)")
pgmpy_hc_nonlinear_dict_scores_simtest = run_learned_workflows(simulated_data_train.iloc[:,0:4], simulated_data_train.iloc[:,4], simulated_data_test.iloc[:,0:4], simulated_data_test.iloc[:,4], pipeline_type, "PGMPY (HC)")
pipeline_type = 3
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_pgmpy.pgmpy_setup_hc(train_data[0:100], pipeline_type)
pgmpy_hc_sparse_dict_scores = run_learned_workflows(simulated_data_train.iloc[:,0:4], simulated_data_train.iloc[:,4], x_test, y_test, pipeline_type, "PGMPY (HC)")
pgmpy_hc_sparse_dict_scores_simtest = run_learned_workflows(simulated_data_train.iloc[:,0:4], simulated_data_train.iloc[:,4], simulated_data_test.iloc[:,0:4], simulated_data_test.iloc[:,4], pipeline_type, "PGMPY (HC)")
pipeline_type = 4
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_pgmpy.pgmpy_setup_hc(train_data[0:100], pipeline_type)
pgmpy_hc_dimension_dict_scores = run_learned_workflows(simulated_data_train.iloc[:,0:10], simulated_data_train.iloc[:,10], x_test, y_test, pipeline_type, "PGMPY (HC)")
pgmpy_hc_dimension_dict_scores_simtest = run_learned_workflows(simulated_data_train.iloc[:,0:10], simulated_data_train.iloc[:,10], simulated_data_test.iloc[:,0:10], simulated_data_test.iloc[:,10], pipeline_type, "PGMPY (HC)")
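#Hedged sketch (illustrative only): the pgmpy setups are sliced with .iloc below,
#which suggests they return pandas DataFrames, whereas the bnlearn/pomegranate
#setups above are sliced as NumPy arrays. A small adapter such as this hypothetical
#split_xy() would make the feature/label split uniform across both return types.
import numpy as np
import pandas as pd

def split_xy(data, n_features):
    values = data.values if isinstance(data, pd.DataFrame) else np.asarray(data)
    return values[:, 0:n_features], values[:, n_features]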
#pgmpy simulation scoring - Tree search
pipeline_type = 1
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_pgmpy.pgmpy_setup_tree(train_data[0:100], pipeline_type)
pgmpy_tree_linear_dict_scores = run_learned_workflows(simulated_data_train.iloc[:,0:4], simulated_data_train.iloc[:,4], x_test, y_test, pipeline_type, "PGMPY (TREE)")
pgmpy_tree_linear_dict_scores_simtest = run_learned_workflows(simulated_data_train.iloc[:,0:4], simulated_data_train.iloc[:,4], simulated_data_test.iloc[:,0:4], simulated_data_test.iloc[:,4], pipeline_type, "PGMPY (TREE)")
pipeline_type = 2
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_pgmpy.pgmpy_setup_tree(train_data[0:100], pipeline_type)
pgmpy_tree_nonlinear_dict_scores = run_learned_workflows(simulated_data_train.iloc[:,0:4], simulated_data_train.iloc[:,4], x_test, y_test, pipeline_type, "PGMPY (TREE)")
pgmpy_tree_nonlinear_dict_scores_simtest = run_learned_workflows(simulated_data_train.iloc[:,0:4], simulated_data_train.iloc[:,4], simulated_data_test.iloc[:,0:4], simulated_data_test.iloc[:,4], pipeline_type, "PGMPY (TREE)")
pipeline_type = 3
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_pgmpy.pgmpy_setup_tree(train_data[0:100], pipeline_type)
pgmpy_tree_sparse_dict_scores = run_learned_workflows(simulated_data_train.iloc[:,0:4], simulated_data_train.iloc[:,4], x_test, y_test, pipeline_type, "PGMPY (TREE)")
pgmpy_tree_sparse_dict_scores_simtest = run_learned_workflows(simulated_data_train.iloc[:,0:4], simulated_data_train.iloc[:,4], simulated_data_test.iloc[:,0:4], simulated_data_test.iloc[:,4], pipeline_type, "PGMPY (TREE)")
pipeline_type = 4
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_pgmpy.pgmpy_setup_tree(train_data[0:100], pipeline_type)
pgmpy_tree_dimension_dict_scores = run_learned_workflows(simulated_data_train.iloc[:,0:10], simulated_data_train.iloc[:,10], x_test, y_test, pipeline_type, "PGMPY (TREE)")
pgmpy_tree_dimension_dict_scores_simtest = run_learned_workflows(simulated_data_train.iloc[:,0:10], simulated_data_train.iloc[:,10], simulated_data_test.iloc[:,0:10], simulated_data_test.iloc[:,10], pipeline_type, "PGMPY (TREE)")
#pgmpy simulation scoring - MMHC
pipeline_type = 1
simulation_dagsim.setup_realworld(pipeline_type, 1000, 1000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_pgmpy.pgmpy_setup_mmhc(train_data[0:100], pipeline_type)
pgmpy_mmhc_linear_dict_scores = run_learned_workflows(simulated_data_train.iloc[:,0:4], simulated_data_train.iloc[:,4], x_test, y_test, pipeline_type, "PGMPY (MMHC)")
pgmpy_mmhc_linear_dict_scores_simtest = run_learned_workflows(simulated_data_train.iloc[:,0:4], simulated_data_train.iloc[:,4], simulated_data_test.iloc[:,0:4], simulated_data_test.iloc[:,4], pipeline_type, "PGMPY (MMHC)")
pipeline_type = 2
simulation_dagsim.setup_realworld(pipeline_type, 1000, 5000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_pgmpy.pgmpy_setup_mmhc(train_data[0:100], pipeline_type)
pgmpy_mmhc_nonlinear_dict_scores = run_learned_workflows(simulated_data_train.iloc[:,0:4], simulated_data_train.iloc[:,4], x_test, y_test, pipeline_type, "PGMPY (MMHC)")
pgmpy_mmhc_nonlinear_dict_scores_simtest = run_learned_workflows(simulated_data_train.iloc[:,0:4], simulated_data_train.iloc[:,4], simulated_data_test.iloc[:,0:4], simulated_data_test.iloc[:,4], pipeline_type, "PGMPY (MMHC)")
pipeline_type = 3
simulation_dagsim.setup_realworld(pipeline_type, 1000, 5000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_pgmpy.pgmpy_setup_mmhc(train_data[0:100], pipeline_type)
pgmpy_mmhc_sparse_dict_scores = run_learned_workflows(simulated_data_train.iloc[:,0:4], simulated_data_train.iloc[:,4], x_test, y_test, pipeline_type, "PGMPY (MMHC)")
pgmpy_mmhc_sparse_dict_scores_simtest = run_learned_workflows(simulated_data_train.iloc[:,0:4], simulated_data_train.iloc[:,4], simulated_data_test.iloc[:,0:4], simulated_data_test.iloc[:,4], pipeline_type, "PGMPY (MMHC)")
pipeline_type = 4
simulation_dagsim.setup_realworld(pipeline_type, 1000, 5000)
import_real_world_csv(pipeline_type)
simulated_data_train, simulated_data_test = simulation_pgmpy.pgmpy_setup_mmhc(train_data[0:100], pipeline_type)
pgmpy_mmhc_dimension_dict_scores = run_learned_workflows(simulated_data_train.iloc[:,0:10], simulated_data_train.iloc[:,10], x_test, y_test, pipeline_type, "PGMPY (MMHC)")
pgmpy_mmhc_dimension_dict_scores_simtest = run_learned_workflows(simulated_data_train.iloc[:,0:10], simulated_data_train.iloc[:,10], simulated_data_test.iloc[:,0:10], simulated_data_test.iloc[:,10], pipeline_type, "PGMPY (MMHC)")
#pgmpy simulation scoring - PC (disabled: raises "single positional indexer is out-of-bounds"; the simulated sample does not come back with the same shape as the input)
#pipeline_type = 1
#simulation_dagsim.setup_realworld(pipeline_type, 1000, 5000)
#import_real_world_csv(pipeline_type)
#simulation_pgmpy.pgmpy_setup_pc(train_data[0:100], pipeline_type)
#import_simulated_csv()
#pgmpy_pc_linear_dict_scores = run_learned_workflows(pgmpy_sample_train.iloc[:,0:4], pgmpy_sample_train.iloc[:,4], pipeline_type, "PGMPY (PC)")
#pipeline_type = 2
#simulation_dagsim.setup_realworld(pipeline_type, 1000, 5000)
#import_real_world_csv(pipeline_type)
#simulation_pgmpy.pgmpy_setup_pc(train_data[0:100], pipeline_type)
#import_simulated_csv()
#pgmpy_pc_nonlinear_dict_scores = run_learned_workflows(pgmpy_sample_train.iloc[:,0:4], pgmpy_sample_train.iloc[:,4], pipeline_type, "PGMPY (PC)")
#pipeline_type = 3
#simulation_dagsim.setup_realworld(pipeline_type, 1000, 5000)
#import_real_world_csv(pipeline_type)
#simulation_pgmpy.pgmpy_setup_pc(train_data[0:100], pipeline_type)
#import_simulated_csv()
#pgmpy_pc_sparse_dict_scores = run_learned_workflows(pgmpy_sample_train.iloc[:,0:4], pgmpy_sample_train.iloc[:,4], pipeline_type, "PGMPY (PC)")
#pipeline_type = 4
#simulation_dagsim.setup_realworld(pipeline_type, 1000, 5000)
#import_real_world_csv(pipeline_type)
#simulation_pgmpy.pgmpy_setup_pc(train_data[0:100], pipeline_type)
#import_simulated_csv()
#pgmpy_pc_dimension_dict_scores = run_learned_workflows(pgmpy_sample_train.iloc[:,0:10], pgmpy_sample_train.iloc[:,10], pipeline_type, "PGMPY (PC)")
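#Hedged sketch (illustrative only): the pgmpy PC runs above are disabled because the
#simulated sample came back with a different shape than the training data, so the
#positional column lookups raised "single positional indexer is out-of-bounds".
#A guard like this hypothetical check_simulated_shape() fails loudly before slicing.
def check_simulated_shape(simulated, reference):
    if simulated.shape[1] != reference.shape[1]:
        raise ValueError("simulated data has %d columns, expected %d; the learned "
                         "structure likely dropped variables"
                         % (simulated.shape[1], reference.shape[1]))
    return simulated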
def write_learned_to_csv():
experiments = ['Algorithm', 'Model', 'Linear', 'Non-linear', 'Sparsity', 'Dimensionality']
with open('simulation_experiments_summary.csv', 'w', newline='') as csvfile:
fieldnames = experiments #same column set as defined above
thewriter = csv.DictWriter(csvfile, fieldnames=fieldnames)
thewriter.writeheader()
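#Hedged sketch (kept as a comment; the explicit rows below remain authoritative):
#every writerow block that follows could be generated from one key -> display-name
#map shared by all algorithms, e.g. (abbreviated, hypothetical model_names):
#  model_names = {"dt": "Decision Tree (gini)", "dt_e": "Decision Tree (entropy)"}
#  for key, name in model_names.items():
#      thewriter.writerow({'Algorithm': 'NO TEARS (Loss-L2)', 'Model': name,
#          'Linear': str(notears_l2_linear_dict_scores[key]),
#          'Non-linear': str(notears_l2_nonlinear_dict_scores[key]),
#          'Sparsity': str(notears_l2_sparse_dict_scores[key]),
#          'Dimensionality': str(notears_l2_dimension_dict_scores[key])})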
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-L2)', 'Model': 'Decision Tree (gini)','Linear': str(notears_l2_linear_dict_scores["dt"]),'Non-linear': str(notears_l2_nonlinear_dict_scores["dt"]),'Sparsity': str(notears_l2_sparse_dict_scores["dt"]) ,'Dimensionality': str(notears_l2_dimension_dict_scores["dt"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-L2)', 'Model': 'Decision Tree (entropy)','Linear': str(notears_l2_linear_dict_scores["dt_e"]),'Non-linear': str(notears_l2_nonlinear_dict_scores["dt_e"]),'Sparsity': str(notears_l2_sparse_dict_scores["dt_e"]),'Dimensionality': str(notears_l2_dimension_dict_scores["dt_e"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-L2)', 'Model': 'Random Forest (gini)','Linear': str(notears_l2_linear_dict_scores["rf"]),'Non-linear': str(notears_l2_nonlinear_dict_scores["rf"]) ,'Sparsity': str(notears_l2_sparse_dict_scores["rf"]),'Dimensionality': str(notears_l2_dimension_dict_scores["rf"]) })
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-L2)', 'Model': 'Random Forest (entropy)','Linear': str(notears_l2_linear_dict_scores["rf_e"]),'Non-linear': str(notears_l2_nonlinear_dict_scores["rf_e"]),'Sparsity': str(notears_l2_sparse_dict_scores["rf_e"]) ,'Dimensionality': str(notears_l2_dimension_dict_scores["rf_e"]) })
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-L2)', 'Model': 'Logistic Regression (penalty-none)','Linear': str(notears_l2_linear_dict_scores["lr"]),'Non-linear': str(notears_l2_nonlinear_dict_scores["lr"]),'Sparsity': str(notears_l2_sparse_dict_scores["lr"]),'Dimensionality': str(notears_l2_dimension_dict_scores["lr"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-L2)', 'Model': 'Logistic Regression (l1)','Linear': str(notears_l2_linear_dict_scores["lr_l1"]),'Non-linear': str(notears_l2_nonlinear_dict_scores["lr_l1"]),'Sparsity': str(notears_l2_sparse_dict_scores["lr_l1"]),'Dimensionality': str(notears_l2_dimension_dict_scores["lr_l1"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-L2)', 'Model': 'Logistic Regression (l2)','Linear': str(notears_l2_linear_dict_scores["lr_l2"]),'Non-linear': str(notears_l2_nonlinear_dict_scores["lr_l2"]),'Sparsity': str(notears_l2_sparse_dict_scores["lr_l2"]),'Dimensionality': str(notears_l2_dimension_dict_scores["lr_l2"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-L2)', 'Model': 'Logistic Regression (elasticnet)','Linear': str(notears_l2_linear_dict_scores["lr_e"]),'Non-linear': str(notears_l2_nonlinear_dict_scores["lr_e"]),'Sparsity': str(notears_l2_sparse_dict_scores["lr_e"]),'Dimensionality': str(notears_l2_dimension_dict_scores["lr_e"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-L2)', 'Model': 'Naive Bayes (Bernoulli)','Linear': str(notears_l2_linear_dict_scores["nb"]),'Non-linear': str(notears_l2_nonlinear_dict_scores["nb"]) ,'Sparsity': str(notears_l2_sparse_dict_scores["nb"]),'Dimensionality': str(notears_l2_dimension_dict_scores["nb"]) })
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-L2)', 'Model': 'Naive Bayes (Multinomial)','Linear': str(notears_l2_linear_dict_scores["nb_m"]),'Non-linear': str(notears_l2_nonlinear_dict_scores["nb_m"]),'Sparsity': str(notears_l2_sparse_dict_scores["nb_m"]),'Dimensionality': str(notears_l2_dimension_dict_scores["nb_m"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-L2)', 'Model': 'Naive Bayes (Gaussian)','Linear': str(notears_l2_linear_dict_scores["nb_g"]),'Non-linear': str(notears_l2_nonlinear_dict_scores["nb_g"]),'Sparsity': str(notears_l2_sparse_dict_scores["nb_g"]),'Dimensionality': str(notears_l2_dimension_dict_scores["nb_g"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-L2)', 'Model': 'Naive Bayes (Complement)','Linear': str(notears_l2_linear_dict_scores["nb_c"]),'Non-linear': str(notears_l2_nonlinear_dict_scores["nb_c"]),'Sparsity': str(notears_l2_sparse_dict_scores["nb_c"]),'Dimensionality': str(notears_l2_dimension_dict_scores["nb_c"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-L2)', 'Model': 'Support Vector Machines (sigmoid)','Linear': str(notears_l2_linear_dict_scores["svm"]),'Non-linear': str(notears_l2_nonlinear_dict_scores["svm"]),'Sparsity': str(notears_l2_sparse_dict_scores["svm"]),'Dimensionality': str(notears_l2_dimension_dict_scores["svm"])})
#thewriter.writerow({'Algorithm': 'NO TEARS (Loss-L2)', 'Model': 'Support Vector Machines (linear)','Linear': str(round(mean(notears_l2_linear_dict_scores["svm_l"]), 2)) + " {" + str(min(notears_l2_linear_dict_scores["svm_l"])) + "," + str(max(notears_l2_linear_dict_scores["svm_l"])) + "}",'Non-linear': str(round(mean(notears_l2_nonlinear_dict_scores["svm_l"]), 2)) + " {" + str(min(notears_l2_nonlinear_dict_scores["svm_l"])) + "," + str(max(notears_l2_nonlinear_dict_scores["svm_l"])) + "}",'Sparsity': str(round(mean(notears_l2_sparse_dict_scores["svm_l"]), 2)) + " {" + str(min(notears_l2_sparse_dict_scores["svm_l"])) + "," + str(max(notears_l2_sparse_dict_scores["svm_l"])) + "}",'Dimensionality': str(round(mean(notears_l2_dimension_dict_scores["svm_l"]), 2)) + " {" + str(min(notears_l2_dimension_dict_scores["svm_l"])) + "," + str(max(notears_l2_dimension_dict_scores["svm_l"])) + "}"})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-L2)', 'Model': 'Support Vector Machines (poly)','Linear': str(notears_l2_linear_dict_scores["svm_po"]),'Non-linear': str(notears_l2_nonlinear_dict_scores["svm_po"]),'Sparsity': str(notears_l2_sparse_dict_scores["svm_po"]),'Dimensionality': str(notears_l2_dimension_dict_scores["svm_po"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-L2)', 'Model': 'Support Vector Machines (rbf)','Linear': str(notears_l2_linear_dict_scores["svm_r"]),'Non-linear': str(notears_l2_nonlinear_dict_scores["svm_r"]),'Sparsity': str(notears_l2_sparse_dict_scores["svm_r"]),'Dimensionality': str(notears_l2_dimension_dict_scores["svm_r"]) })
#thewriter.writerow({'Algorithm': 'NO TEARS (Loss-L2)', 'Model': 'Support Vector Machines (precomputed)','Linear': str(round(mean(notears_l2_linear_dict_scores["svm_pr"]), 2)) + " {" + str(min(notears_l2_linear_dict_scores["svm_pr"])) + "," + str(max(notears_l2_linear_dict_scores["svm_pr"])) + "}",'Non-linear': str(round(mean(notears_l2_nonlinear_dict_scores["svm_pr"]), 2)) + " {" + str(min(notears_l2_nonlinear_dict_scores["svm_pr"])) + "," + str(max(notears_l2_nonlinear_dict_scores["svm_pr"])) + "}",'Sparsity': str(round(mean(notears_l2_sparse_dict_scores["svm_pr"]), 2)) + " {" + str(min(notears_l2_sparse_dict_scores["svm_pr"])) + "," + str(max(notears_l2_sparse_dict_scores["svm_pr"])) + "}",'Dimensionality': str(round(mean(notears_l2_dimension_dict_scores["svm_pr"]), 2)) + " {" + str(min(notears_l2_dimension_dict_scores["svm_pr"])) + "," + str(max(notears_l2_dimension_dict_scores["svm_pr"])) + "}"})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-L2)', 'Model': 'K Nearest Neighbor (uniform)','Linear': str(notears_l2_linear_dict_scores["knn"]),'Non-linear': str(notears_l2_nonlinear_dict_scores["knn"]),'Sparsity': str(notears_l2_sparse_dict_scores["knn"]),'Dimensionality': str(notears_l2_dimension_dict_scores["knn"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-L2)', 'Model': 'K Nearest Neighbor (distance)','Linear': str(notears_l2_linear_dict_scores["knn_d"]),'Non-linear': str(notears_l2_nonlinear_dict_scores["knn_d"]),'Sparsity': str(notears_l2_sparse_dict_scores["knn_d"]),'Dimensionality': str(notears_l2_dimension_dict_scores["knn_d"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Logistic)', 'Model': 'Decision Tree (gini)','Linear': str(notears_linear_dict_scores["dt"]), 'Non-linear': str(notears_nonlinear_dict_scores["dt"]), 'Sparsity': str(notears_sparse_dict_scores["dt"]), 'Dimensionality': str(notears_dimension_dict_scores["dt"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Logistic)', 'Model': 'Decision Tree (entropy)','Linear': str(notears_linear_dict_scores["dt_e"]),'Non-linear': str(notears_nonlinear_dict_scores["dt_e"]),'Sparsity': str(notears_sparse_dict_scores["dt_e"]),'Dimensionality': str(notears_dimension_dict_scores["dt_e"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Logistic)', 'Model': 'Random Forest (gini)', 'Linear': str(notears_linear_dict_scores["rf"]), 'Non-linear': str(notears_nonlinear_dict_scores["rf"]), 'Sparsity': str(notears_sparse_dict_scores["rf"]), 'Dimensionality': str(notears_dimension_dict_scores["rf"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Logistic)', 'Model': 'Random Forest (entropy)','Linear': str(notears_linear_dict_scores["rf_e"]),'Non-linear': str(notears_nonlinear_dict_scores["rf_e"]),'Sparsity': str(notears_sparse_dict_scores["rf_e"]),'Dimensionality': str(notears_dimension_dict_scores["rf_e"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Logistic)', 'Model': 'Logistic Regression (penalty-none)', 'Linear': str(notears_linear_dict_scores["lr"]), 'Non-linear': str(notears_nonlinear_dict_scores["lr"]), 'Sparsity': str(notears_sparse_dict_scores["lr"]), 'Dimensionality': str(notears_dimension_dict_scores["lr"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Logistic)', 'Model': 'Logistic Regression (l1)','Linear': str(notears_linear_dict_scores["lr_l1"]),'Non-linear': str(notears_nonlinear_dict_scores["lr_l1"]) ,'Sparsity': str(notears_sparse_dict_scores["lr_l1"]),'Dimensionality': str(notears_dimension_dict_scores["lr_l1"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Logistic)', 'Model': 'Logistic Regression (l2)','Linear': str(notears_linear_dict_scores["lr_l2"]),'Non-linear': str(notears_nonlinear_dict_scores["lr_l2"]),'Sparsity': str(notears_sparse_dict_scores["lr_l2"]),'Dimensionality': str(notears_dimension_dict_scores["lr_l2"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Logistic)', 'Model': 'Logistic Regression (elasticnet)','Linear': str(notears_linear_dict_scores["lr_e"]),'Non-linear': str(notears_nonlinear_dict_scores["lr_e"]),'Sparsity': str(notears_sparse_dict_scores["lr_e"]),'Dimensionality': str(notears_dimension_dict_scores["lr_e"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Logistic)', 'Model': 'Naive Bayes (Bernoulli)', 'Linear': str(notears_linear_dict_scores["nb"]),'Non-linear': str(notears_nonlinear_dict_scores["nb"]), 'Sparsity': str(notears_sparse_dict_scores["nb"]), 'Dimensionality': str(notears_dimension_dict_scores["nb"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Logistic)', 'Model': 'Naive Bayes (Multinomial)','Linear': str(notears_linear_dict_scores["nb_m"]),'Non-linear': str(notears_nonlinear_dict_scores["nb_m"]),'Sparsity': str(notears_sparse_dict_scores["nb_m"]),'Dimensionality': str(notears_dimension_dict_scores["nb_m"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Logistic)', 'Model': 'Naive Bayes (Gaussian)','Linear': str(notears_linear_dict_scores["nb_g"]),'Non-linear': str(notears_nonlinear_dict_scores["nb_g"]),'Sparsity': str(notears_sparse_dict_scores["nb_g"]),'Dimensionality': str(notears_dimension_dict_scores["nb_g"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Logistic)', 'Model': 'Naive Bayes (Complement)','Linear': str(notears_linear_dict_scores["nb_c"]),'Non-linear': str(notears_nonlinear_dict_scores["nb_c"]),'Sparsity': str(notears_sparse_dict_scores["nb_c"]),'Dimensionality': str(notears_dimension_dict_scores["nb_c"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Logistic)', 'Model': 'Support Vector Machines (sigmoid)', 'Linear': str(notears_linear_dict_scores["svm"]),'Non-linear': str(notears_nonlinear_dict_scores["svm"]), 'Sparsity': str(notears_sparse_dict_scores["svm"]), 'Dimensionality': str(notears_dimension_dict_scores["svm"])})
#thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Logistic)', 'Model': 'Support Vector Machines (linear)','Linear': str(round(mean(notears_linear_dict_scores["svm_l"]), 2)) + " {" + str(min(notears_linear_dict_scores["svm_l"])) + "," + str(max(notears_linear_dict_scores["svm_l"])) + "}",'Non-linear': str(round(mean(notears_nonlinear_dict_scores["svm_l"]), 2)) + " {" + str(min(notears_nonlinear_dict_scores["svm_l"])) + "," + str(max(notears_nonlinear_dict_scores["svm_l"])) + "}",'Sparsity': str(round(mean(notears_sparse_dict_scores["svm_l"]), 2)) + " {" + str(min(notears_sparse_dict_scores["svm_l"])) + "," + str(max(notears_sparse_dict_scores["svm_l"])) + "}",'Dimensionality': str(round(mean(notears_dimension_dict_scores["svm_l"]), 2)) + " {" + str(min(notears_dimension_dict_scores["svm_l"])) + "," + str(max(notears_dimension_dict_scores["svm_l"])) + "}"})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Logistic)', 'Model': 'Support Vector Machines (poly)','Linear': str(notears_linear_dict_scores["svm_po"]),'Non-linear': str(notears_nonlinear_dict_scores["svm_po"]),'Sparsity': str(notears_sparse_dict_scores["svm_po"]) ,'Dimensionality': str(notears_dimension_dict_scores["svm_po"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Logistic)', 'Model': 'Support Vector Machines (rbf)','Linear': str(notears_linear_dict_scores["svm_r"]),'Non-linear': str(notears_nonlinear_dict_scores["svm_r"]),'Sparsity': str(notears_sparse_dict_scores["svm_r"]),'Dimensionality': str(notears_dimension_dict_scores["svm_r"])})
#thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Logistic)', 'Model': 'Support Vector Machines (precomputed)','Linear': str(round(mean(notears_linear_dict_scores["svm_pr"]), 2)) + " {" + str(min(notears_linear_dict_scores["svm_pr"])) + "," + str(max(notears_linear_dict_scores["svm_pr"])) + "}",'Non-linear': str(round(mean(notears_nonlinear_dict_scores["svm_pr"]), 2)) + " {" + str(min(notears_nonlinear_dict_scores["svm_pr"])) + "," + str(max(notears_nonlinear_dict_scores["svm_pr"])) + "}",'Sparsity': str(round(mean(notears_sparse_dict_scores["svm_pr"]), 2)) + " {" + str(min(notears_sparse_dict_scores["svm_pr"])) + "," + str(max(notears_sparse_dict_scores["svm_pr"])) + "}",'Dimensionality': str(round(mean(notears_dimension_dict_scores["svm_pr"]), 2)) + " {" + str(min(notears_dimension_dict_scores["svm_pr"])) + "," + str(max(notears_dimension_dict_scores["svm_pr"])) + "}"})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Logistic)', 'Model': 'K Nearest Neighbor (uniform)', 'Linear': str(notears_linear_dict_scores["knn"]),'Non-linear': str(notears_nonlinear_dict_scores["knn"]), 'Sparsity': str(notears_sparse_dict_scores["knn"]), 'Dimensionality': str(notears_dimension_dict_scores["knn"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Logistic)', 'Model': 'K Nearest Neighbor (distance)','Linear': str(notears_linear_dict_scores["knn_d"]),'Non-linear': str(notears_nonlinear_dict_scores["knn_d"]),'Sparsity': str(notears_sparse_dict_scores["knn_d"]),'Dimensionality': str(notears_dimension_dict_scores["knn_d"]) })
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Poisson)', 'Model': 'Decision Tree (gini)','Linear': str(notears_poisson_linear_dict_scores["dt"]),'Non-linear': str(notears_poisson_nonlinear_dict_scores["dt"]),'Sparsity': str(notears_poisson_sparse_dict_scores["dt"]),'Dimensionality': str(notears_poisson_dimension_dict_scores["dt"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Poisson)', 'Model': 'Decision Tree (entropy)','Linear': str(notears_poisson_linear_dict_scores["dt_e"]),'Non-linear': str(notears_poisson_nonlinear_dict_scores["dt_e"]),'Sparsity': str(notears_poisson_sparse_dict_scores["dt_e"]), 'Dimensionality': str(notears_poisson_dimension_dict_scores["dt_e"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Poisson)', 'Model': 'Random Forest (gini)','Linear': str(notears_poisson_linear_dict_scores["rf"]),'Non-linear': str(notears_poisson_nonlinear_dict_scores["rf"]),'Sparsity': str(notears_poisson_sparse_dict_scores["rf"]),'Dimensionality': str(notears_poisson_dimension_dict_scores["rf"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Poisson)', 'Model': 'Random Forest (entropy)','Linear': str(notears_poisson_linear_dict_scores["rf_e"]),'Non-linear': str(notears_poisson_nonlinear_dict_scores["rf_e"]),'Sparsity': str(notears_poisson_sparse_dict_scores["rf_e"]), 'Dimensionality': str(notears_poisson_dimension_dict_scores["rf_e"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Poisson)', 'Model': 'Logistic Regression (penalty-none)','Linear': str(notears_poisson_linear_dict_scores["lr"]),'Non-linear': str(notears_poisson_nonlinear_dict_scores["lr"]),'Sparsity': str(notears_poisson_sparse_dict_scores["lr"]),'Dimensionality': str(notears_poisson_dimension_dict_scores["lr"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Poisson)', 'Model': 'Logistic Regression (l1)','Linear': str(notears_poisson_linear_dict_scores["lr_l1"]),'Non-linear': str(notears_poisson_nonlinear_dict_scores["lr_l1"]),'Sparsity': str(notears_poisson_sparse_dict_scores["lr_l1"]), 'Dimensionality': str(notears_poisson_dimension_dict_scores["lr_l1"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Poisson)', 'Model': 'Logistic Regression (l2)','Linear': str(notears_poisson_linear_dict_scores["lr_l2"]),'Non-linear': str(notears_poisson_nonlinear_dict_scores["lr_l2"]),'Sparsity': str(notears_poisson_sparse_dict_scores["lr_l2"]), 'Dimensionality': str(notears_poisson_dimension_dict_scores["lr_l2"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Poisson)', 'Model': 'Logistic Regression (elasticnet)','Linear': str(notears_poisson_linear_dict_scores["lr_e"]),'Non-linear': str(notears_poisson_nonlinear_dict_scores["lr_e"]),'Sparsity': str(notears_poisson_sparse_dict_scores["lr_e"]), 'Dimensionality': str(notears_poisson_dimension_dict_scores["lr_e"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Poisson)', 'Model': 'Naive Bayes (Bernoulli)','Linear': str(notears_poisson_linear_dict_scores["nb"]),'Non-linear': str(notears_poisson_nonlinear_dict_scores["nb"]),'Sparsity': str(notears_poisson_sparse_dict_scores["nb"]) ,'Dimensionality': str(notears_poisson_dimension_dict_scores["nb"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Poisson)', 'Model': 'Naive Bayes (Multinomial)','Linear': str(notears_poisson_linear_dict_scores["nb_m"]),'Non-linear': str(notears_poisson_nonlinear_dict_scores["nb_m"]),'Sparsity': str(notears_poisson_sparse_dict_scores["nb_m"]), 'Dimensionality': str(notears_poisson_dimension_dict_scores["nb_m"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Poisson)', 'Model': 'Naive Bayes (Gaussian)','Linear': str(notears_poisson_linear_dict_scores["nb_g"]),'Non-linear': str(notears_poisson_nonlinear_dict_scores["nb_g"]),'Sparsity': str(notears_poisson_sparse_dict_scores["nb_g"]), 'Dimensionality': str(notears_poisson_dimension_dict_scores["nb_g"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Poisson)', 'Model': 'Naive Bayes (Complement)','Linear': str(notears_poisson_linear_dict_scores["nb_c"]),'Non-linear': str(notears_poisson_nonlinear_dict_scores["nb_c"]),'Sparsity': str(notears_poisson_sparse_dict_scores["nb_c"]), 'Dimensionality': str(notears_poisson_dimension_dict_scores["nb_c"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Poisson)', 'Model': 'Support Vector Machines (sigmoid)','Linear': str(notears_poisson_linear_dict_scores["svm"]),'Non-linear': str(notears_poisson_nonlinear_dict_scores["svm"]),'Sparsity': str(notears_poisson_sparse_dict_scores["svm"]),'Dimensionality': str(notears_poisson_dimension_dict_scores["svm"])})
#thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Poisson)', 'Model': 'Support Vector Machines (linear)','Linear': str(round(mean(notears_poisson_linear_dict_scores["svm_l"]), 2)) + " {" + str(min(notears_poisson_linear_dict_scores["svm_l"])) + "," + str(max(notears_poisson_linear_dict_scores["svm_l"])) + "}", 'Non-linear': str(round(mean(notears_poisson_nonlinear_dict_scores["svm_l"]), 2)) + " {" + str(min(notears_poisson_nonlinear_dict_scores["svm_l"])) + "," + str(max(notears_poisson_nonlinear_dict_scores["svm_l"])) + "}",'Sparsity': str(round(mean(notears_poisson_sparse_dict_scores["svm_l"]), 2)) + " {" + str(min(notears_poisson_sparse_dict_scores["svm_l"])) + "," + str(max(notears_poisson_sparse_dict_scores["svm_l"])) + "}", 'Dimensionality': str(round(mean(notears_poisson_dimension_dict_scores["svm_l"]), 2)) + " {" + str(min(notears_poisson_dimension_dict_scores["svm_l"])) + "," + str(max(notears_poisson_dimension_dict_scores["svm_l"])) + "}"})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Poisson)', 'Model': 'Support Vector Machines (poly)','Linear': str(notears_poisson_linear_dict_scores["svm_po"]), 'Non-linear': str(notears_poisson_nonlinear_dict_scores["svm_po"]),'Sparsity': str(notears_poisson_sparse_dict_scores["svm_po"]), 'Dimensionality': str(notears_poisson_dimension_dict_scores["svm_po"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Poisson)', 'Model': 'Support Vector Machines (rbf)','Linear': str(notears_poisson_linear_dict_scores["svm_r"]), 'Non-linear': str(notears_poisson_nonlinear_dict_scores["svm_r"]),'Sparsity': str(notears_poisson_sparse_dict_scores["svm_r"]), 'Dimensionality': str(notears_poisson_dimension_dict_scores["svm_r"])})
#thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Poisson)', 'Model': 'Support Vector Machines (precomputed)','Linear': str(round(mean(notears_poisson_linear_dict_scores["svm_pr"]), 2)) + " {" + str(min(notears_poisson_linear_dict_scores["svm_pr"])) + "," + str(max(notears_poisson_linear_dict_scores["svm_pr"])) + "}", 'Non-linear': str(round(mean(notears_poisson_nonlinear_dict_scores["svm_pr"]), 2)) + " {" + str(min(notears_poisson_nonlinear_dict_scores["svm_pr"])) + "," + str(max(notears_poisson_nonlinear_dict_scores["svm_pr"])) + "}",'Sparsity': str(round(mean(notears_poisson_sparse_dict_scores["svm_pr"]), 2)) + " {" + str(min(notears_poisson_sparse_dict_scores["svm_pr"])) + "," + str(max(notears_poisson_sparse_dict_scores["svm_pr"])) + "}", 'Dimensionality': str(round(mean(notears_poisson_dimension_dict_scores["svm_pr"]), 2)) + " {" + str(min(notears_poisson_dimension_dict_scores["svm_pr"])) + "," + str(max(notears_poisson_dimension_dict_scores["svm_pr"])) + "}"})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Poisson)', 'Model': 'K Nearest Neighbor (uniform)','Linear': str(notears_poisson_linear_dict_scores["knn"]),'Non-linear': str(notears_poisson_nonlinear_dict_scores["knn"]),'Sparsity': str(notears_poisson_sparse_dict_scores["knn"]),'Dimensionality': str(notears_poisson_dimension_dict_scores["knn"])})
thewriter.writerow({'Algorithm': 'NO TEARS (Loss-Poisson)', 'Model': 'K Nearest Neighbor (distance)','Linear': str(notears_poisson_linear_dict_scores["knn_d"]), 'Non-linear': str(notears_poisson_nonlinear_dict_scores["knn_d"]),'Sparsity': str(notears_poisson_sparse_dict_scores["knn_d"]), 'Dimensionality': str(notears_poisson_dimension_dict_scores["knn_d"]) })
thewriter.writerow({'Algorithm': 'BN LEARN (HC)', 'Model': 'Decision Tree (gini)', 'Linear': str(bnlearn_linear_dict_scores["dt"]),'Non-linear': str(bnlearn_nonlinear_dict_scores["dt"]), 'Sparsity': str(bnlearn_sparse_dict_scores["dt"]), 'Dimensionality': str(bnlearn_dimension_dict_scores["dt"])})
thewriter.writerow({'Algorithm': 'BN LEARN (HC)', 'Model': 'Decision Tree (entropy)','Linear': str(bnlearn_linear_dict_scores["dt_e"]),'Non-linear': str(bnlearn_nonlinear_dict_scores["dt_e"]),'Sparsity': str(bnlearn_sparse_dict_scores["dt_e"]) ,'Dimensionality': str(bnlearn_dimension_dict_scores["dt_e"]) })
thewriter.writerow({'Algorithm': 'BN LEARN (HC)', 'Model': 'Random Forest (gini)', 'Linear': str(bnlearn_linear_dict_scores["rf"]),'Non-linear': str(bnlearn_nonlinear_dict_scores["rf"]), 'Sparsity': str(bnlearn_sparse_dict_scores["rf"]), 'Dimensionality': str(bnlearn_dimension_dict_scores["rf"])})
thewriter.writerow({'Algorithm': 'BN LEARN (HC)', 'Model': 'Random Forest (entropy)','Linear': str(bnlearn_linear_dict_scores["rf_e"]),'Non-linear': str(bnlearn_nonlinear_dict_scores["rf_e"]),'Sparsity': str(bnlearn_sparse_dict_scores["rf_e"]),'Dimensionality': str(bnlearn_dimension_dict_scores["rf_e"])})
thewriter.writerow({'Algorithm': 'BN LEARN (HC)', 'Model': 'Logistic Regression (penalty-none)', 'Linear': str(bnlearn_linear_dict_scores["lr"]),'Non-linear': str(bnlearn_nonlinear_dict_scores["lr"]), 'Sparsity': str(bnlearn_sparse_dict_scores["lr"]), 'Dimensionality': str(bnlearn_dimension_dict_scores["lr"])})
thewriter.writerow({'Algorithm': 'BN LEARN (HC)', 'Model': 'Logistic Regression (l1)','Linear': str(bnlearn_linear_dict_scores["lr_l1"]),'Non-linear': str(bnlearn_nonlinear_dict_scores["lr_l1"]),'Sparsity': str(bnlearn_sparse_dict_scores["lr_l1"]),'Dimensionality': str(bnlearn_dimension_dict_scores["lr_l1"]) })
thewriter.writerow({'Algorithm': 'BN LEARN (HC)', 'Model': 'Logistic Regression (l2)','Linear': str(bnlearn_linear_dict_scores["lr_l2"]),'Non-linear': str(bnlearn_nonlinear_dict_scores["lr_l2"]),'Sparsity': str(bnlearn_sparse_dict_scores["lr_l2"]),'Dimensionality': str(bnlearn_dimension_dict_scores["lr_l2"])})
thewriter.writerow({'Algorithm': 'BN LEARN (HC)', 'Model': 'Logistic Regression (elasticnet)','Linear': str(bnlearn_linear_dict_scores["lr_e"]) ,'Non-linear': str(bnlearn_nonlinear_dict_scores["lr_e"]),'Sparsity': str(bnlearn_sparse_dict_scores["lr_e"]),'Dimensionality': str(bnlearn_dimension_dict_scores["lr_e"])})
thewriter.writerow({'Algorithm': 'BN LEARN (HC)', 'Model': 'Naive Bayes (Bernoulli)', 'Linear': str(bnlearn_linear_dict_scores["nb"]),'Non-linear': str(bnlearn_nonlinear_dict_scores["nb"]), 'Sparsity': str(bnlearn_sparse_dict_scores["nb"]), 'Dimensionality': str(bnlearn_dimension_dict_scores["nb"])})
thewriter.writerow({'Algorithm': 'BN LEARN (HC)', 'Model': 'Naive Bayes (Multinomial)','Linear': str(bnlearn_linear_dict_scores["nb_m"]),'Non-linear': str(bnlearn_nonlinear_dict_scores["nb_m"]),'Sparsity': str(bnlearn_sparse_dict_scores["nb_m"]),'Dimensionality': str(bnlearn_dimension_dict_scores["nb_m"])})
thewriter.writerow({'Algorithm': 'BN LEARN (HC)', 'Model': 'Naive Bayes (Gaussian)','Linear': str(bnlearn_linear_dict_scores["nb_g"]),'Non-linear': str(bnlearn_nonlinear_dict_scores["nb_g"]),'Sparsity': str(bnlearn_sparse_dict_scores["nb_g"]) ,'Dimensionality': str(bnlearn_dimension_dict_scores["nb_g"])})
thewriter.writerow({'Algorithm': 'BN LEARN (HC)', 'Model': 'Naive Bayes (Complement)','Linear': str(bnlearn_linear_dict_scores["nb_c"]),'Non-linear': str(bnlearn_nonlinear_dict_scores["nb_c"]) ,'Sparsity': str(bnlearn_sparse_dict_scores["nb_c"]) ,'Dimensionality': str(bnlearn_dimension_dict_scores["nb_c"]) })
thewriter.writerow({'Algorithm': 'BN LEARN (HC)', 'Model': 'Support Vector Machines (sigmoid)', 'Linear': str(bnlearn_linear_dict_scores["svm"]),'Non-linear': str(bnlearn_nonlinear_dict_scores["svm"]), 'Sparsity': str(bnlearn_sparse_dict_scores["svm"]), 'Dimensionality': str(bnlearn_dimension_dict_scores["svm"])})
#thewriter.writerow({'Algorithm': 'BN LEARN (HC)', 'Model': 'Support Vector Machines (linear)','Linear': str(round(mean(bnlearn_linear_dict_scores["svm_l"]), 2)) + " {" + str(min(bnlearn_linear_dict_scores["svm_l"])) + "," + str(max(bnlearn_linear_dict_scores["svm_l"])) + "}",'Non-linear': str(round(mean(bnlearn_nonlinear_dict_scores["svm_l"]), 2)) + " {" + str(min(bnlearn_nonlinear_dict_scores["svm_l"])) + "," + str(max(bnlearn_nonlinear_dict_scores["svm_l"])) + "}",'Sparsity': str(round(mean(bnlearn_sparse_dict_scores["svm_l"]), 2)) + " {" + str(min(bnlearn_sparse_dict_scores["svm_l"])) + "," + str(max(bnlearn_sparse_dict_scores["svm_l"])) + "}",'Dimensionality': str(round(mean(bnlearn_dimension_dict_scores["svm_l"]), 2)) + " {" + str(min(bnlearn_dimension_dict_scores["svm_l"])) + "," + str(max(bnlearn_dimension_dict_scores["svm_l"])) + "}"})
thewriter.writerow({'Algorithm': 'BN LEARN (HC)', 'Model': 'Support Vector Machines (poly)', 'Linear': str(bnlearn_linear_dict_scores["svm_po"]), 'Non-linear': str(bnlearn_nonlinear_dict_scores["svm_po"]), 'Sparsity': str(bnlearn_sparse_dict_scores["svm_po"]), 'Dimensionality': str(bnlearn_dimension_dict_scores["svm_po"])})
thewriter.writerow({'Algorithm': 'BN LEARN (HC)', 'Model': 'Support Vector Machines (rbf)', 'Linear': str(bnlearn_linear_dict_scores["svm_r"]), 'Non-linear': str(bnlearn_nonlinear_dict_scores["svm_r"]), 'Sparsity': str(bnlearn_sparse_dict_scores["svm_r"]), 'Dimensionality': str(bnlearn_dimension_dict_scores["svm_r"])})
#thewriter.writerow({'Algorithm': 'BN LEARN (HC)', 'Model': 'Support Vector Machines (precomputed)','Linear': str(round(mean(bnlearn_linear_dict_scores["svm_pr"]), 2)) + " {" + str(min(bnlearn_linear_dict_scores["svm_pr"])) + "," + str(max(bnlearn_linear_dict_scores["svm_pr"])) + "}",'Non-linear': str(round(mean(bnlearn_nonlinear_dict_scores["svm_pr"]), 2)) + " {" + str(min(bnlearn_nonlinear_dict_scores["svm_pr"])) + "," + str(max(bnlearn_nonlinear_dict_scores["svm_pr"])) + "}",'Sparsity': str(round(mean(bnlearn_sparse_dict_scores["svm_pr"]), 2)) + " {" + str(min(bnlearn_sparse_dict_scores["svm_pr"])) + "," + str(max(bnlearn_sparse_dict_scores["svm_pr"])) + "}",'Dimensionality': str(round(mean(bnlearn_dimension_dict_scores["svm_pr"]), 2)) + " {" + str(min(bnlearn_dimension_dict_scores["svm_pr"])) + "," + str(max(bnlearn_dimension_dict_scores["svm_pr"])) + "}"})
thewriter.writerow({'Algorithm': 'BN LEARN (HC)', 'Model': 'K Nearest Neighbor (uniform)', 'Linear': str(bnlearn_linear_dict_scores["knn"]), 'Non-linear': str(bnlearn_nonlinear_dict_scores["knn"]), 'Sparsity': str(bnlearn_sparse_dict_scores["knn"]), 'Dimensionality': str(bnlearn_dimension_dict_scores["knn"])})
thewriter.writerow({'Algorithm': 'BN LEARN (HC)', 'Model': 'K Nearest Neighbor (distance)', 'Linear': str(bnlearn_linear_dict_scores["knn_d"]), 'Non-linear': str(bnlearn_nonlinear_dict_scores["knn_d"]), 'Sparsity': str(bnlearn_sparse_dict_scores["knn_d"]), 'Dimensionality': str(bnlearn_dimension_dict_scores["knn_d"])})
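# Rows for networks learned with bnlearn's tabu search; same classifier grid as the HC rows above.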
thewriter.writerow({'Algorithm': 'BN LEARN (TABU)', 'Model': 'Decision Tree (gini)', 'Linear': str(bnlearn_tabu_linear_dict_scores["dt"]), 'Non-linear': str(bnlearn_tabu_nonlinear_dict_scores["dt"]), 'Sparsity': str(bnlearn_tabu_sparse_dict_scores["dt"]), 'Dimensionality': str(bnlearn_tabu_dimension_dict_scores["dt"])})
thewriter.writerow({'Algorithm': 'BN LEARN (TABU)', 'Model': 'Decision Tree (entropy)', 'Linear': str(bnlearn_tabu_linear_dict_scores["dt_e"]), 'Non-linear': str(bnlearn_tabu_nonlinear_dict_scores["dt_e"]), 'Sparsity': str(bnlearn_tabu_sparse_dict_scores["dt_e"]), 'Dimensionality': str(bnlearn_tabu_dimension_dict_scores["dt_e"])})
thewriter.writerow({'Algorithm': 'BN LEARN (TABU)', 'Model': 'Random Forest (gini)', 'Linear': str(bnlearn_tabu_linear_dict_scores["rf"]), 'Non-linear': str(bnlearn_tabu_nonlinear_dict_scores["rf"]), 'Sparsity': str(bnlearn_tabu_sparse_dict_scores["rf"]), 'Dimensionality': str(bnlearn_tabu_dimension_dict_scores["rf"])})
thewriter.writerow({'Algorithm': 'BN LEARN (TABU)', 'Model': 'Random Forest (entropy)', 'Linear': str(bnlearn_tabu_linear_dict_scores["rf_e"]), 'Non-linear': str(bnlearn_tabu_nonlinear_dict_scores["rf_e"]), 'Sparsity': str(bnlearn_tabu_sparse_dict_scores["rf_e"]), 'Dimensionality': str(bnlearn_tabu_dimension_dict_scores["rf_e"])})
thewriter.writerow({'Algorithm': 'BN LEARN (TABU)', 'Model': 'Logistic Regression (penalty-none)', 'Linear': str(bnlearn_tabu_linear_dict_scores["lr"]), 'Non-linear': str(bnlearn_tabu_nonlinear_dict_scores["lr"]), 'Sparsity': str(bnlearn_tabu_sparse_dict_scores["lr"]), 'Dimensionality': str(bnlearn_tabu_dimension_dict_scores["lr"])})
thewriter.writerow({'Algorithm': 'BN LEARN (TABU)', 'Model': 'Logistic Regression (l1)', 'Linear': str(bnlearn_tabu_linear_dict_scores["lr_l1"]), 'Non-linear': str(bnlearn_tabu_nonlinear_dict_scores["lr_l1"]), 'Sparsity': str(bnlearn_tabu_sparse_dict_scores["lr_l1"]), 'Dimensionality': str(bnlearn_tabu_dimension_dict_scores["lr_l1"])})
thewriter.writerow({'Algorithm': 'BN LEARN (TABU)', 'Model': 'Logistic Regression (l2)', 'Linear': str(bnlearn_tabu_linear_dict_scores["lr_l2"]), 'Non-linear': str(bnlearn_tabu_nonlinear_dict_scores["lr_l2"]), 'Sparsity': str(bnlearn_tabu_sparse_dict_scores["lr_l2"]), 'Dimensionality': str(bnlearn_tabu_dimension_dict_scores["lr_l2"])})
thewriter.writerow({'Algorithm': 'BN LEARN (TABU)', 'Model': 'Logistic Regression (elasticnet)', 'Linear': str(bnlearn_tabu_linear_dict_scores["lr_e"]), 'Non-linear': str(bnlearn_tabu_nonlinear_dict_scores["lr_e"]), 'Sparsity': str(bnlearn_tabu_sparse_dict_scores["lr_e"]), 'Dimensionality': str(bnlearn_tabu_dimension_dict_scores["lr_e"])})
thewriter.writerow({'Algorithm': 'BN LEARN (TABU)', 'Model': 'Naive Bayes (Bernoulli)', 'Linear': str(bnlearn_tabu_linear_dict_scores["nb"]), 'Non-linear': str(bnlearn_tabu_nonlinear_dict_scores["nb"]), 'Sparsity': str(bnlearn_tabu_sparse_dict_scores["nb"]), 'Dimensionality': str(bnlearn_tabu_dimension_dict_scores["nb"])})
thewriter.writerow({'Algorithm': 'BN LEARN (TABU)', 'Model': 'Naive Bayes (Multinomial)', 'Linear': str(bnlearn_tabu_linear_dict_scores["nb_m"]), 'Non-linear': str(bnlearn_tabu_nonlinear_dict_scores["nb_m"]), 'Sparsity': str(bnlearn_tabu_sparse_dict_scores["nb_m"]), 'Dimensionality': str(bnlearn_tabu_dimension_dict_scores["nb_m"])})
thewriter.writerow({'Algorithm': 'BN LEARN (TABU)', 'Model': 'Naive Bayes (Gaussian)', 'Linear': str(bnlearn_tabu_linear_dict_scores["nb_g"]), 'Non-linear': str(bnlearn_tabu_nonlinear_dict_scores["nb_g"]), 'Sparsity': str(bnlearn_tabu_sparse_dict_scores["nb_g"]), 'Dimensionality': str(bnlearn_tabu_dimension_dict_scores["nb_g"])})
thewriter.writerow({'Algorithm': 'BN LEARN (TABU)', 'Model': 'Naive Bayes (Complement)', 'Linear': str(bnlearn_tabu_linear_dict_scores["nb_c"]), 'Non-linear': str(bnlearn_tabu_nonlinear_dict_scores["nb_c"]), 'Sparsity': str(bnlearn_tabu_sparse_dict_scores["nb_c"]), 'Dimensionality': str(bnlearn_tabu_dimension_dict_scores["nb_c"])})
thewriter.writerow({'Algorithm': 'BN LEARN (TABU)', 'Model': 'Support Vector Machines (sigmoid)', 'Linear': str(bnlearn_tabu_linear_dict_scores["svm"]), 'Non-linear': str(bnlearn_tabu_nonlinear_dict_scores["svm"]), 'Sparsity': str(bnlearn_tabu_sparse_dict_scores["svm"]), 'Dimensionality': str(bnlearn_tabu_dimension_dict_scores["svm"])})
#thewriter.writerow({'Algorithm': 'BN LEARN (TABU)', 'Model': 'Support Vector Machines (linear)','Linear': str(round(mean(bnlearn_tabu_linear_dict_scores["svm_l"]), 2)) + " {" + str(min(bnlearn_tabu_linear_dict_scores["svm_l"])) + "," + str(max(bnlearn_tabu_linear_dict_scores["svm_l"])) + "}",'Non-linear': str(round(mean(bnlearn_tabu_nonlinear_dict_scores["svm_l"]), 2)) + " {" + str(min(bnlearn_tabu_nonlinear_dict_scores["svm_l"])) + "," + str(max(bnlearn_tabu_nonlinear_dict_scores["svm_l"])) + "}",'Sparsity': str(round(mean(bnlearn_tabu_sparse_dict_scores["svm_l"]), 2)) + " {" + str(min(bnlearn_tabu_sparse_dict_scores["svm_l"])) + "," + str(max(bnlearn_tabu_sparse_dict_scores["svm_l"])) + "}", 'Dimensionality': str(round(mean(bnlearn_tabu_dimension_dict_scores["svm_l"]), 2)) + " {" + str(min(bnlearn_tabu_dimension_dict_scores["svm_l"])) + "," + str(max(bnlearn_tabu_dimension_dict_scores["svm_l"])) + "}"})
thewriter.writerow({'Algorithm': 'BN LEARN (TABU)', 'Model': 'Support Vector Machines (poly)', 'Linear': str(bnlearn_tabu_linear_dict_scores["svm_po"]), 'Non-linear': str(bnlearn_tabu_nonlinear_dict_scores["svm_po"]), 'Sparsity': str(bnlearn_tabu_sparse_dict_scores["svm_po"]), 'Dimensionality': str(bnlearn_tabu_dimension_dict_scores["svm_po"])})
thewriter.writerow({'Algorithm': 'BN LEARN (TABU)', 'Model': 'Support Vector Machines (rbf)', 'Linear': str(bnlearn_tabu_linear_dict_scores["svm_r"]), 'Non-linear': str(bnlearn_tabu_nonlinear_dict_scores["svm_r"]), 'Sparsity': str(bnlearn_tabu_sparse_dict_scores["svm_r"]), 'Dimensionality': str(bnlearn_tabu_dimension_dict_scores["svm_r"])})
#thewriter.writerow({'Algorithm': 'BN LEARN (TABU)', 'Model': 'Support Vector Machines (precomputed)','Linear': str(round(mean(bnlearn_tabu_linear_dict_scores["svm_pr"]), 2)) + " {" + str(min(bnlearn_tabu_linear_dict_scores["svm_pr"])) + "," + str(max(bnlearn_tabu_linear_dict_scores["svm_pr"])) + "}",'Non-linear': str(round(mean(bnlearn_tabu_nonlinear_dict_scores["svm_pr"]), 2)) + " {" + str(min(bnlearn_tabu_nonlinear_dict_scores["svm_pr"])) + "," + str(max(bnlearn_tabu_nonlinear_dict_scores["svm_pr"])) + "}",'Sparsity': str(round(mean(bnlearn_tabu_sparse_dict_scores["svm_pr"]), 2)) + " {" + str(min(bnlearn_tabu_sparse_dict_scores["svm_pr"])) + "," + str(max(bnlearn_tabu_sparse_dict_scores["svm_pr"])) + "}", 'Dimensionality': str(round(mean(bnlearn_tabu_dimension_dict_scores["svm_pr"]), 2)) + " {" + str(min(bnlearn_tabu_dimension_dict_scores["svm_pr"])) + "," + str(max(bnlearn_tabu_dimension_dict_scores["svm_pr"])) + "}"})
thewriter.writerow({'Algorithm': 'BN LEARN (TABU)', 'Model': 'K Nearest Neighbor (uniform)', 'Linear': str(bnlearn_tabu_linear_dict_scores["knn"]), 'Non-linear': str(bnlearn_tabu_nonlinear_dict_scores["knn"]), 'Sparsity': str(bnlearn_tabu_sparse_dict_scores["knn"]), 'Dimensionality': str(bnlearn_tabu_dimension_dict_scores["knn"])})
thewriter.writerow({'Algorithm': 'BN LEARN (TABU)', 'Model': 'K Nearest Neighbor (distance)', 'Linear': str(bnlearn_tabu_linear_dict_scores["knn_d"]), 'Non-linear': str(bnlearn_tabu_nonlinear_dict_scores["knn_d"]), 'Sparsity': str(bnlearn_tabu_sparse_dict_scores["knn_d"]), 'Dimensionality': str(bnlearn_tabu_dimension_dict_scores["knn_d"])})
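# bnlearn PC rows: only the 'Linear' column carries a score here; 'Non-linear', 'Sparsity', and 'Dimensionality' are written as "NA".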
thewriter.writerow({'Algorithm': 'BN LEARN (PC)', 'Model': 'Decision Tree (gini)', 'Linear': str(bnlearn_pc_linear_dict_scores["dt"]), 'Non-linear': "NA", 'Sparsity': "NA", 'Dimensionality': "NA"})
thewriter.writerow({'Algorithm': 'BN LEARN (PC)', 'Model': 'Decision Tree (entropy)', 'Linear': str(bnlearn_pc_linear_dict_scores["dt_e"]), 'Non-linear': "NA", 'Sparsity': "NA", 'Dimensionality': "NA"})
thewriter.writerow({'Algorithm': 'BN LEARN (PC)', 'Model': 'Random Forest (gini)', 'Linear': str(bnlearn_pc_linear_dict_scores["rf"]), 'Non-linear': "NA", 'Sparsity': "NA", 'Dimensionality': "NA"})
thewriter.writerow({'Algorithm': 'BN LEARN (PC)', 'Model': 'Random Forest (entropy)', 'Linear': str(bnlearn_pc_linear_dict_scores["rf_e"]), 'Non-linear': "NA", 'Sparsity': "NA", 'Dimensionality': "NA"})
thewriter.writerow({'Algorithm': 'BN LEARN (PC)', 'Model': 'Logistic Regression (penalty-none)', 'Linear': str(bnlearn_pc_linear_dict_scores["lr"]), 'Non-linear': "NA", 'Sparsity': "NA", 'Dimensionality': "NA"})
thewriter.writerow({'Algorithm': 'BN LEARN (PC)', 'Model': 'Logistic Regression (l1)', 'Linear': str(bnlearn_pc_linear_dict_scores["lr_l1"]), 'Non-linear': "NA", 'Sparsity': "NA", 'Dimensionality': "NA"})
thewriter.writerow({'Algorithm': 'BN LEARN (PC)', 'Model': 'Logistic Regression (l2)', 'Linear': str(bnlearn_pc_linear_dict_scores["lr_l2"]), 'Non-linear': "NA", 'Sparsity': "NA", 'Dimensionality': "NA"})
thewriter.writerow({'Algorithm': 'BN LEARN (PC)', 'Model': 'Logistic Regression (elasticnet)', 'Linear': str(bnlearn_pc_linear_dict_scores["lr_e"]), 'Non-linear': "NA", 'Sparsity': "NA", 'Dimensionality': "NA"})
thewriter.writerow({'Algorithm': 'BN LEARN (PC)', 'Model': 'Naive Bayes (Bernoulli)', 'Linear': str(bnlearn_pc_linear_dict_scores["nb"]), 'Non-linear': "NA", 'Sparsity': "NA", 'Dimensionality': "NA"})
thewriter.writerow({'Algorithm': 'BN LEARN (PC)', 'Model': 'Naive Bayes (Multinomial)', 'Linear': str(bnlearn_pc_linear_dict_scores["nb_m"]), 'Non-linear': "NA", 'Sparsity': "NA", 'Dimensionality': "NA"})
thewriter.writerow({'Algorithm': 'BN LEARN (PC)', 'Model': 'Naive Bayes (Gaussian)', 'Linear': str(bnlearn_pc_linear_dict_scores["nb_g"]), 'Non-linear': "NA", 'Sparsity': "NA", 'Dimensionality': "NA"})
thewriter.writerow({'Algorithm': 'BN LEARN (PC)', 'Model': 'Naive Bayes (Complement)', 'Linear': str(bnlearn_pc_linear_dict_scores["nb_c"]), 'Non-linear': "NA", 'Sparsity': "NA", 'Dimensionality': "NA"})
thewriter.writerow({'Algorithm': 'BN LEARN (PC)', 'Model': 'Support Vector Machines (sigmoid)', 'Linear': str(bnlearn_pc_linear_dict_scores["svm"]), 'Non-linear': "NA", 'Sparsity': "NA", 'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (PC)', 'Model': 'Support Vector Machines (linear)','Linear': str(round(mean(bnlearn_pc_linear_dict_scores["svm_l"]), 2)) + " {" + str(min(bnlearn_pc_linear_dict_scores["svm_l"])) + "," + str(max(bnlearn_pc_linear_dict_scores["svm_l"])) + "}",'Non-linear': str(round(mean(bnlearn_pc_nonlinear_dict_scores["svm_l"]), 2)) + " {" + str(min(bnlearn_pc_nonlinear_dict_scores["svm_l"])) + "," + str(max(bnlearn_pc_nonlinear_dict_scores["svm_l"])) + "}", 'Sparsity': "NA",'Dimensionality': "NA"})
thewriter.writerow({'Algorithm': 'BN LEARN (PC)', 'Model': 'Support Vector Machines (poly)', 'Linear': str(bnlearn_pc_linear_dict_scores["svm_po"]), 'Non-linear': "NA", 'Sparsity': "NA", 'Dimensionality': "NA"})
thewriter.writerow({'Algorithm': 'BN LEARN (PC)', 'Model': 'Support Vector Machines (rbf)', 'Linear': str(bnlearn_pc_linear_dict_scores["svm_r"]), 'Non-linear': "NA", 'Sparsity': "NA", 'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (PC)', 'Model': 'Support Vector Machines (precomputed)','Linear': str(round(mean(bnlearn_pc_linear_dict_scores["svm_pr"]), 2)) + " {" + str(min(bnlearn_pc_linear_dict_scores["svm_pr"])) + "," + str(max(bnlearn_pc_linear_dict_scores["svm_pr"])) + "}",'Non-linear': str(round(mean(bnlearn_pc_nonlinear_dict_scores["svm_pr"]), 2)) + " {" + str(min(bnlearn_pc_nonlinear_dict_scores["svm_pr"])) + "," + str(max(bnlearn_pc_nonlinear_dict_scores["svm_pr"])) + "}", 'Sparsity': "NA",'Dimensionality': "NA"})
thewriter.writerow({'Algorithm': 'BN LEARN (PC)', 'Model': 'K Nearest Neighbor (uniform)', 'Linear': str(bnlearn_pc_linear_dict_scores["knn"]), 'Non-linear': "NA", 'Sparsity': "NA", 'Dimensionality': "NA"})
thewriter.writerow({'Algorithm': 'BN LEARN (PC)', 'Model': 'K Nearest Neighbor (distance)', 'Linear': str(bnlearn_pc_linear_dict_scores["knn_d"]), 'Non-linear': "NA", 'Sparsity': "NA", 'Dimensionality': "NA"})
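# The BN LEARN (GS) and BN LEARN (IAMB) rows below are left commented out and are not written to the output CSV.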
#thewriter.writerow({'Algorithm': 'BN LEARN (GS)', 'Model': 'Decision Tree (gini)','Linear': str(round(mean(bnlearn_gs_linear_dict_scores["dt"]), 2)) + " {" + str(min(bnlearn_gs_linear_dict_scores["dt"])) + "," + str(max(bnlearn_gs_linear_dict_scores["dt"])) + "}",'Non-linear': "NA",'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (GS)', 'Model': 'Decision Tree (entropy)','Linear': str(round(mean(bnlearn_gs_linear_dict_scores["dt_e"]), 2)) + " {" + str(min(bnlearn_gs_linear_dict_scores["dt_e"])) + "," + str(max(bnlearn_gs_linear_dict_scores["dt_e"])) + "}", 'Non-linear': "NA", 'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (GS)', 'Model': 'Random Forest (gini)','Linear': str(round(mean(bnlearn_gs_linear_dict_scores["rf"]), 2)) + " {" + str(min(bnlearn_gs_linear_dict_scores["rf"])) + "," + str(max(bnlearn_gs_linear_dict_scores["rf"])) + "}",'Non-linear': "NA",'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (GS)', 'Model': 'Random Forest (entropy)','Linear': str(round(mean(bnlearn_gs_linear_dict_scores["rf_e"]), 2)) + " {" + str(min(bnlearn_gs_linear_dict_scores["rf_e"])) + "," + str(max(bnlearn_gs_linear_dict_scores["rf_e"])) + "}", 'Non-linear': "NA", 'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (GS)', 'Model': 'Logistic Regression (penalty-none)','Linear': str(round(mean(bnlearn_gs_linear_dict_scores["lr"]), 2)) + " {" + str(min(bnlearn_gs_linear_dict_scores["lr"])) + "," + str(max(bnlearn_gs_linear_dict_scores["lr"])) + "}",'Non-linear': "NA",'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (GS)', 'Model': 'Logistic Regression (l1)','Linear': str(round(mean(bnlearn_gs_linear_dict_scores["lr_l1"]), 2)) + " {" + str(min(bnlearn_gs_linear_dict_scores["lr_l1"])) + "," + str(max(bnlearn_gs_linear_dict_scores["lr_l1"])) + "}", 'Non-linear': "NA", 'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (GS)', 'Model': 'Logistic Regression (l2)','Linear': str(round(mean(bnlearn_gs_linear_dict_scores["lr_l2"]), 2)) + " {" + str(min(bnlearn_gs_linear_dict_scores["lr_l2"])) + "," + str(max(bnlearn_gs_linear_dict_scores["lr_l2"])) + "}", 'Non-linear': "NA", 'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (GS)', 'Model': 'Logistic Regression (elasticnet)','Linear': str(round(mean(bnlearn_gs_linear_dict_scores["lr_e"]), 2)) + " {" + str(min(bnlearn_gs_linear_dict_scores["lr_e"])) + "," + str(max(bnlearn_gs_linear_dict_scores["lr_e"])) + "}", 'Non-linear': "NA", 'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (GS)', 'Model': 'Naive Bayes (Bernoulli)','Linear': str(round(mean(bnlearn_gs_linear_dict_scores["nb"]), 2)) + " {" + str(min(bnlearn_gs_linear_dict_scores["nb"])) + "," + str(max(bnlearn_gs_linear_dict_scores["nb"])) + "}",'Non-linear': "NA",'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (GS)', 'Model': 'Naive Bayes (Multinomial)','Linear': str(round(mean(bnlearn_gs_linear_dict_scores["nb_m"]), 2)) + " {" + str(min(bnlearn_gs_linear_dict_scores["nb_m"])) + "," + str(max(bnlearn_gs_linear_dict_scores["nb_m"])) + "}", 'Non-linear': "NA", 'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (GS)', 'Model': 'Naive Bayes (Gaussian)','Linear': str(round(mean(bnlearn_gs_linear_dict_scores["nb_g"]), 2)) + " {" + str(min(bnlearn_gs_linear_dict_scores["nb_g"])) + "," + str(max(bnlearn_gs_linear_dict_scores["nb_g"])) + "}", 'Non-linear': "NA", 'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (GS)', 'Model': 'Naive Bayes (Complement)','Linear': str(round(mean(bnlearn_gs_linear_dict_scores["nb_c"]), 2)) + " {" + str(min(bnlearn_gs_linear_dict_scores["nb_c"])) + "," + str(max(bnlearn_gs_linear_dict_scores["nb_c"])) + "}", 'Non-linear': "NA", 'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (GS)', 'Model': 'Support Vector Machines (sigmoid)','Linear': str(round(mean(bnlearn_gs_linear_dict_scores["svm"]), 2)) + " {" + str(min(bnlearn_gs_linear_dict_scores["svm"])) + "," + str(max(bnlearn_gs_linear_dict_scores["svm"])) + "}",'Non-linear': "NA",'Sparsity': "NA",'Dimensionality': "NA"})
##thewriter.writerow({'Algorithm': 'BN LEARN (GS)', 'Model': 'Support Vector Machines (linear)','Linear': str(round(mean(bnlearn_gs_linear_dict_scores["svm_l"]), 2)) + " {" + str(min(bnlearn_gs_linear_dict_scores["svm_l"])) + "," + str(max(bnlearn_gs_linear_dict_scores["svm_l"])) + "}", 'Non-linear': "NA", 'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (GS)', 'Model': 'Support Vector Machines (poly)','Linear': str(round(mean(bnlearn_gs_linear_dict_scores["svm_po"]), 2)) + " {" + str(min(bnlearn_gs_linear_dict_scores["svm_po"])) + "," + str(max(bnlearn_gs_linear_dict_scores["svm_po"])) + "}", 'Non-linear': "NA", 'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (GS)', 'Model': 'Support Vector Machines (rbf)','Linear': str(round(mean(bnlearn_gs_linear_dict_scores["svm_r"]), 2)) + " {" + str(min(bnlearn_gs_linear_dict_scores["svm_r"])) + "," + str(max(bnlearn_gs_linear_dict_scores["svm_r"])) + "}", 'Non-linear': "NA", 'Sparsity': "NA",'Dimensionality': "NA"})
##thewriter.writerow({'Algorithm': 'BN LEARN (GS)', 'Model': 'Support Vector Machines (precomputed)','Linear': str(round(mean(bnlearn_gs_linear_dict_scores["svm_pr"]), 2)) + " {" + str(min(bnlearn_gs_linear_dict_scores["svm_pr"])) + "," + str(max(bnlearn_gs_linear_dict_scores["svm_pr"])) + "}", 'Non-linear': "NA", 'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (GS)', 'Model': 'K Nearest Neighbor (uniform)','Linear': str(round(mean(bnlearn_gs_linear_dict_scores["knn"]), 2)) + " {" + str(min(bnlearn_gs_linear_dict_scores["knn"])) + "," + str(max(bnlearn_gs_linear_dict_scores["knn"])) + "}",'Non-linear': "NA",'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (GS)', 'Model': 'K Nearest Neighbor (distance)','Linear': str(round(mean(bnlearn_gs_linear_dict_scores["knn_d"]), 2)) + " {" + str(min(bnlearn_gs_linear_dict_scores["knn_d"])) + "," + str(max(bnlearn_gs_linear_dict_scores["knn_d"])) + "}", 'Non-linear': "NA", 'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (IAMB)', 'Model': 'Decision Tree (gini)','Linear': str(round(mean(bnlearn_iamb_linear_dict_scores["dt"]), 2)) + " {" + str(min(bnlearn_iamb_linear_dict_scores["dt"])) + "," + str(max(bnlearn_iamb_linear_dict_scores["dt"])) + "}",'Non-linear': "NA",'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (IAMB)', 'Model': 'Decision Tree (entropy)','Linear': str(round(mean(bnlearn_iamb_linear_dict_scores["dt_e"]), 2)) + " {" + str(min(bnlearn_iamb_linear_dict_scores["dt_e"])) + "," + str(max(bnlearn_iamb_linear_dict_scores["dt_e"])) + "}", 'Non-linear': "NA", 'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (IAMB)', 'Model': 'Random Forest (gini)','Linear': str(round(mean(bnlearn_iamb_linear_dict_scores["rf"]), 2)) + " {" + str(min(bnlearn_iamb_linear_dict_scores["rf"])) + "," + str(max(bnlearn_iamb_linear_dict_scores["rf"])) + "}",'Non-linear': "NA",'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (IAMB)', 'Model': 'Random Forest (entropy)','Linear': str(round(mean(bnlearn_iamb_linear_dict_scores["rf_e"]), 2)) + " {" + str(min(bnlearn_iamb_linear_dict_scores["rf_e"])) + "," + str(max(bnlearn_iamb_linear_dict_scores["rf_e"])) + "}", 'Non-linear': "NA", 'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (IAMB)', 'Model': 'Logistic Regression (penalty-none)','Linear': str(round(mean(bnlearn_iamb_linear_dict_scores["lr"]), 2)) + " {" + str(min(bnlearn_iamb_linear_dict_scores["lr"])) + "," + str(max(bnlearn_iamb_linear_dict_scores["lr"])) + "}",'Non-linear': "NA",'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (IAMB)', 'Model': 'Logistic Regression (l1)','Linear': str(round(mean(bnlearn_iamb_linear_dict_scores["lr_l1"]), 2)) + " {" + str(min(bnlearn_iamb_linear_dict_scores["lr_l1"])) + "," + str(max(bnlearn_iamb_linear_dict_scores["lr_l1"])) + "}", 'Non-linear': "NA", 'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (IAMB)', 'Model': 'Logistic Regression (l2)','Linear': str(round(mean(bnlearn_iamb_linear_dict_scores["lr_l2"]), 2)) + " {" + str(min(bnlearn_iamb_linear_dict_scores["lr_l2"])) + "," + str(max(bnlearn_iamb_linear_dict_scores["lr_l2"])) + "}", 'Non-linear': "NA", 'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (IAMB)', 'Model': 'Logistic Regression (elasticnet)','Linear': str(round(mean(bnlearn_iamb_linear_dict_scores["lr_e"]), 2)) + " {" + str(min(bnlearn_iamb_linear_dict_scores["lr_e"])) + "," + str(max(bnlearn_iamb_linear_dict_scores["lr_e"])) + "}", 'Non-linear': "NA", 'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (IAMB)', 'Model': 'Naive Bayes (Bernoulli)','Linear': str(round(mean(bnlearn_iamb_linear_dict_scores["nb"]), 2)) + " {" + str(min(bnlearn_iamb_linear_dict_scores["nb"])) + "," + str(max(bnlearn_iamb_linear_dict_scores["nb"])) + "}",'Non-linear': "NA",'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (IAMB)', 'Model': 'Naive Bayes (Multinomial)','Linear': str(round(mean(bnlearn_iamb_linear_dict_scores["nb_m"]), 2)) + " {" + str(min(bnlearn_iamb_linear_dict_scores["nb_m"])) + "," + str(max(bnlearn_iamb_linear_dict_scores["nb_m"])) + "}", 'Non-linear': "NA", 'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (IAMB)', 'Model': 'Naive Bayes (Gaussian)','Linear': str(round(mean(bnlearn_iamb_linear_dict_scores["nb_g"]), 2)) + " {" + str(min(bnlearn_iamb_linear_dict_scores["nb_g"])) + "," + str(max(bnlearn_iamb_linear_dict_scores["nb_g"])) + "}", 'Non-linear': "NA", 'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (IAMB)', 'Model': 'Naive Bayes (Complement)','Linear': str(round(mean(bnlearn_iamb_linear_dict_scores["nb_c"]), 2)) + " {" + str(min(bnlearn_iamb_linear_dict_scores["nb_c"])) + "," + str(max(bnlearn_iamb_linear_dict_scores["nb_c"])) + "}", 'Non-linear': "NA", 'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (IAMB)', 'Model': 'Support Vector Machines (sigmoid)','Linear': str(round(mean(bnlearn_iamb_linear_dict_scores["svm"]), 2)) + " {" + str(min(bnlearn_iamb_linear_dict_scores["svm"])) + "," + str(max(bnlearn_iamb_linear_dict_scores["svm"])) + "}",'Non-linear': "NA",'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (IAMB)', 'Model': 'Support Vector Machines (linear)','Linear': str(round(mean(bnlearn_iamb_linear_dict_scores["svm_l"]), 2)) + " {" + str(min(bnlearn_iamb_linear_dict_scores["svm_l"])) + "," + str(max(bnlearn_iamb_linear_dict_scores["svm_l"])) + "}", 'Non-linear': "NA",'Sparsity': "NA", 'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (IAMB)', 'Model': 'Support Vector Machines (poly)','Linear': str(round(mean(bnlearn_iamb_linear_dict_scores["svm_po"]), 2)) + " {" + str(min(bnlearn_iamb_linear_dict_scores["svm_po"])) + "," + str(max(bnlearn_iamb_linear_dict_scores["svm_po"])) + "}", 'Non-linear': "NA",'Sparsity': "NA", 'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (IAMB)', 'Model': 'Support Vector Machines (rbf)','Linear': str(round(mean(bnlearn_iamb_linear_dict_scores["svm_r"]), 2)) + " {" + str(min(bnlearn_iamb_linear_dict_scores["svm_r"])) + "," + str(max(bnlearn_iamb_linear_dict_scores["svm_r"])) + "}", 'Non-linear': "NA",'Sparsity': "NA", 'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (IAMB)', 'Model': 'Support Vector Machines (precomputed)','Linear': str(round(mean(bnlearn_iamb_linear_dict_scores["svm_pr"]), 2)) + " {" + str(min(bnlearn_iamb_linear_dict_scores["svm_pr"])) + "," + str(max(bnlearn_iamb_linear_dict_scores["svm_pr"])) + "}", 'Non-linear': "NA",'Sparsity': "NA", 'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (IAMB)', 'Model': 'K Nearest Neighbor (uniform)','Linear': str(round(mean(bnlearn_iamb_linear_dict_scores["knn"]), 2)) + " {" + str(min(bnlearn_iamb_linear_dict_scores["knn"])) + "," + str(max(bnlearn_iamb_linear_dict_scores["knn"])) + "}",'Non-linear': "NA",'Sparsity': "NA",'Dimensionality': "NA"})
#thewriter.writerow({'Algorithm': 'BN LEARN (IAMB)', 'Model': 'K Nearest Neighbor (distance)','Linear': str(round(mean(bnlearn_iamb_linear_dict_scores["knn_d"]), 2)) + " {" + str(min(bnlearn_iamb_linear_dict_scores["knn_d"])) + "," + str(max(bnlearn_iamb_linear_dict_scores["knn_d"])) + "}", 'Non-linear': "NA",'Sparsity': "NA", 'Dimensionality': "NA"})
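# Rows for bnlearn's MMHC hybrid algorithm, written for all four benchmark settings.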
thewriter.writerow({'Algorithm': 'BN LEARN (MMHC)', 'Model': 'Decision Tree (gini)', 'Linear': str(bnlearn_mmhc_linear_dict_scores["dt"]), 'Non-linear': str(bnlearn_mmhc_nonlinear_dict_scores["dt"]), 'Sparsity': str(bnlearn_mmhc_sparse_dict_scores["dt"]), 'Dimensionality': str(bnlearn_mmhc_dimension_dict_scores["dt"])})
thewriter.writerow({'Algorithm': 'BN LEARN (MMHC)', 'Model': 'Decision Tree (entropy)', 'Linear': str(bnlearn_mmhc_linear_dict_scores["dt_e"]), 'Non-linear': str(bnlearn_mmhc_nonlinear_dict_scores["dt_e"]), 'Sparsity': str(bnlearn_mmhc_sparse_dict_scores["dt_e"]), 'Dimensionality': str(bnlearn_mmhc_dimension_dict_scores["dt_e"])})
thewriter.writerow({'Algorithm': 'BN LEARN (MMHC)', 'Model': 'Random Forest (gini)', 'Linear': str(bnlearn_mmhc_linear_dict_scores["rf"]), 'Non-linear': str(bnlearn_mmhc_nonlinear_dict_scores["rf"]), 'Sparsity': str(bnlearn_mmhc_sparse_dict_scores["rf"]), 'Dimensionality': str(bnlearn_mmhc_dimension_dict_scores["rf"])})
thewriter.writerow({'Algorithm': 'BN LEARN (MMHC)', 'Model': 'Random Forest (entropy)', 'Linear': str(bnlearn_mmhc_linear_dict_scores["rf_e"]), 'Non-linear': str(bnlearn_mmhc_nonlinear_dict_scores["rf_e"]), 'Sparsity': str(bnlearn_mmhc_sparse_dict_scores["rf_e"]), 'Dimensionality': str(bnlearn_mmhc_dimension_dict_scores["rf_e"])})
thewriter.writerow({'Algorithm': 'BN LEARN (MMHC)', 'Model': 'Logistic Regression (penalty-none)', 'Linear': str(bnlearn_mmhc_linear_dict_scores["lr"]), 'Non-linear': str(bnlearn_mmhc_nonlinear_dict_scores["lr"]), 'Sparsity': str(bnlearn_mmhc_sparse_dict_scores["lr"]), 'Dimensionality': str(bnlearn_mmhc_dimension_dict_scores["lr"])})
thewriter.writerow({'Algorithm': 'BN LEARN (MMHC)', 'Model': 'Logistic Regression (l1)', 'Linear': str(bnlearn_mmhc_linear_dict_scores["lr_l1"]), 'Non-linear': str(bnlearn_mmhc_nonlinear_dict_scores["lr_l1"]), 'Sparsity': str(bnlearn_mmhc_sparse_dict_scores["lr_l1"]), 'Dimensionality': str(bnlearn_mmhc_dimension_dict_scores["lr_l1"])})
thewriter.writerow({'Algorithm': 'BN LEARN (MMHC)', 'Model': 'Logistic Regression (l2)', 'Linear': str(bnlearn_mmhc_linear_dict_scores["lr_l2"]), 'Non-linear': str(bnlearn_mmhc_nonlinear_dict_scores["lr_l2"]), 'Sparsity': str(bnlearn_mmhc_sparse_dict_scores["lr_l2"]), 'Dimensionality': str(bnlearn_mmhc_dimension_dict_scores["lr_l2"])})
thewriter.writerow({'Algorithm': 'BN LEARN (MMHC)', 'Model': 'Logistic Regression (elasticnet)', 'Linear': str(bnlearn_mmhc_linear_dict_scores["lr_e"]), 'Non-linear': str(bnlearn_mmhc_nonlinear_dict_scores["lr_e"]), 'Sparsity': str(bnlearn_mmhc_sparse_dict_scores["lr_e"]), 'Dimensionality': str(bnlearn_mmhc_dimension_dict_scores["lr_e"])})
thewriter.writerow({'Algorithm': 'BN LEARN (MMHC)', 'Model': 'Naive Bayes (Bernoulli)', 'Linear': str(bnlearn_mmhc_linear_dict_scores["nb"]), 'Non-linear': str(bnlearn_mmhc_nonlinear_dict_scores["nb"]), 'Sparsity': str(bnlearn_mmhc_sparse_dict_scores["nb"]), 'Dimensionality': str(bnlearn_mmhc_dimension_dict_scores["nb"])})
thewriter.writerow({'Algorithm': 'BN LEARN (MMHC)', 'Model': 'Naive Bayes (Multinomial)', 'Linear': str(bnlearn_mmhc_linear_dict_scores["nb_m"]), 'Non-linear': str(bnlearn_mmhc_nonlinear_dict_scores["nb_m"]), 'Sparsity': str(bnlearn_mmhc_sparse_dict_scores["nb_m"]), 'Dimensionality': str(bnlearn_mmhc_dimension_dict_scores["nb_m"])})
thewriter.writerow({'Algorithm': 'BN LEARN (MMHC)', 'Model': 'Naive Bayes (Gaussian)', 'Linear': str(bnlearn_mmhc_linear_dict_scores["nb_g"]), 'Non-linear': str(bnlearn_mmhc_nonlinear_dict_scores["nb_g"]), 'Sparsity': str(bnlearn_mmhc_sparse_dict_scores["nb_g"]), 'Dimensionality': str(bnlearn_mmhc_dimension_dict_scores["nb_g"])})
thewriter.writerow({'Algorithm': 'BN LEARN (MMHC)', 'Model': 'Naive Bayes (Complement)', 'Linear': str(bnlearn_mmhc_linear_dict_scores["nb_c"]), 'Non-linear': str(bnlearn_mmhc_nonlinear_dict_scores["nb_c"]), 'Sparsity': str(bnlearn_mmhc_sparse_dict_scores["nb_c"]), 'Dimensionality': str(bnlearn_mmhc_dimension_dict_scores["nb_c"])})
thewriter.writerow({'Algorithm': 'BN LEARN (MMHC)', 'Model': 'Support Vector Machines (sigmoid)', 'Linear': str(bnlearn_mmhc_linear_dict_scores["svm"]), 'Non-linear': str(bnlearn_mmhc_nonlinear_dict_scores["svm"]), 'Sparsity': str(bnlearn_mmhc_sparse_dict_scores["svm"]), 'Dimensionality': str(bnlearn_mmhc_dimension_dict_scores["svm"])})
#thewriter.writerow({'Algorithm': 'BN LEARN (MMHC)', 'Model': 'Support Vector Machines (linear)','Linear': str(round(mean(bnlearn_mmhc_linear_dict_scores["svm_l"]), 2)) + " {" + str(min(bnlearn_mmhc_linear_dict_scores["svm_l"])) + "," + str(max(bnlearn_mmhc_linear_dict_scores["svm_l"])) + "}",'Non-linear': str(round(mean(bnlearn_mmhc_nonlinear_dict_scores["svm_l"]), 2)) + " {" + str(min(bnlearn_mmhc_nonlinear_dict_scores["svm_l"])) + "," + str(max(bnlearn_mmhc_nonlinear_dict_scores["svm_l"])) + "}",'Sparsity': str(round(mean(bnlearn_mmhc_sparse_dict_scores["svm_l"]), 2)) + " {" + str(min(bnlearn_mmhc_sparse_dict_scores["svm_l"])) + "," + str(max(bnlearn_mmhc_sparse_dict_scores["svm_l"])) + "}", 'Dimensionality': str(round(mean(bnlearn_mmhc_dimension_dict_scores["svm_l"]), 2)) + " {" + str(min(bnlearn_mmhc_dimension_dict_scores["svm_l"])) + "," + str(max(bnlearn_mmhc_dimension_dict_scores["svm_l"])) + "}"})
thewriter.writerow({'Algorithm': 'BN LEARN (MMHC)', 'Model': 'Support Vector Machines (poly)', 'Linear': str(bnlearn_mmhc_linear_dict_scores["svm_po"]), 'Non-linear': str(bnlearn_mmhc_nonlinear_dict_scores["svm_po"]), 'Sparsity': str(bnlearn_mmhc_sparse_dict_scores["svm_po"]), 'Dimensionality': str(bnlearn_mmhc_dimension_dict_scores["svm_po"])})
thewriter.writerow({'Algorithm': 'BN LEARN (MMHC)', 'Model': 'Support Vector Machines (rbf)', 'Linear': str(bnlearn_mmhc_linear_dict_scores["svm_r"]), 'Non-linear': str(bnlearn_mmhc_nonlinear_dict_scores["svm_r"]), 'Sparsity': str(bnlearn_mmhc_sparse_dict_scores["svm_r"]), 'Dimensionality': str(bnlearn_mmhc_dimension_dict_scores["svm_r"])})
#thewriter.writerow({'Algorithm': 'BN LEARN (MMHC)', 'Model': 'Support Vector Machines (precomputed)','Linear': str(round(mean(bnlearn_mmhc_linear_dict_scores["svm_pr"]), 2)) + " {" + str(min(bnlearn_mmhc_linear_dict_scores["svm_pr"])) + "," + str(max(bnlearn_mmhc_linear_dict_scores["svm_pr"])) + "}",'Non-linear': str(round(mean(bnlearn_mmhc_nonlinear_dict_scores["svm_pr"]), 2)) + " {" + str(min(bnlearn_mmhc_nonlinear_dict_scores["svm_pr"])) + "," + str(max(bnlearn_mmhc_nonlinear_dict_scores["svm_pr"])) + "}",'Sparsity': str(round(mean(bnlearn_mmhc_sparse_dict_scores["svm_pr"]), 2)) + " {" + str(min(bnlearn_mmhc_sparse_dict_scores["svm_pr"])) + "," + str(max(bnlearn_mmhc_sparse_dict_scores["svm_pr"])) + "}", 'Dimensionality': str(round(mean(bnlearn_mmhc_dimension_dict_scores["svm_pr"]), 2)) + " {" + str(min(bnlearn_mmhc_dimension_dict_scores["svm_pr"])) + "," + str(max(bnlearn_mmhc_dimension_dict_scores["svm_pr"])) + "}"})
thewriter.writerow({'Algorithm': 'BN LEARN (MMHC)', 'Model': 'K Nearest Neighbor (uniform)', 'Linear': str(bnlearn_mmhc_linear_dict_scores["knn"]), 'Non-linear': str(bnlearn_mmhc_nonlinear_dict_scores["knn"]), 'Sparsity': str(bnlearn_mmhc_sparse_dict_scores["knn"]), 'Dimensionality': str(bnlearn_mmhc_dimension_dict_scores["knn"])})
thewriter.writerow({'Algorithm': 'BN LEARN (MMHC)', 'Model': 'K Nearest Neighbor (distance)', 'Linear': str(bnlearn_mmhc_linear_dict_scores["knn_d"]), 'Non-linear': str(bnlearn_mmhc_nonlinear_dict_scores["knn_d"]), 'Sparsity': str(bnlearn_mmhc_sparse_dict_scores["knn_d"]), 'Dimensionality': str(bnlearn_mmhc_dimension_dict_scores["knn_d"])})
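# Rows for bnlearn's RSMAX2 hybrid algorithm.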
thewriter.writerow({'Algorithm': 'BN LEARN (RSMAX2)', 'Model': 'Decision Tree (gini)', 'Linear': str(bnlearn_rsmax2_linear_dict_scores["dt"]), 'Non-linear': str(bnlearn_rsmax2_nonlinear_dict_scores["dt"]), 'Sparsity': str(bnlearn_rsmax2_sparse_dict_scores["dt"]), 'Dimensionality': str(bnlearn_rsmax2_dimension_dict_scores["dt"])})
thewriter.writerow({'Algorithm': 'BN LEARN (RSMAX2)', 'Model': 'Decision Tree (entropy)', 'Linear': str(bnlearn_rsmax2_linear_dict_scores["dt_e"]), 'Non-linear': str(bnlearn_rsmax2_nonlinear_dict_scores["dt_e"]), 'Sparsity': str(bnlearn_rsmax2_sparse_dict_scores["dt_e"]), 'Dimensionality': str(bnlearn_rsmax2_dimension_dict_scores["dt_e"])})
thewriter.writerow({'Algorithm': 'BN LEARN (RSMAX2)', 'Model': 'Random Forest (gini)', 'Linear': str(bnlearn_rsmax2_linear_dict_scores["rf"]), 'Non-linear': str(bnlearn_rsmax2_nonlinear_dict_scores["rf"]), 'Sparsity': str(bnlearn_rsmax2_sparse_dict_scores["rf"]), 'Dimensionality': str(bnlearn_rsmax2_dimension_dict_scores["rf"])})
thewriter.writerow({'Algorithm': 'BN LEARN (RSMAX2)', 'Model': 'Random Forest (entropy)', 'Linear': str(bnlearn_rsmax2_linear_dict_scores["rf_e"]), 'Non-linear': str(bnlearn_rsmax2_nonlinear_dict_scores["rf_e"]), 'Sparsity': str(bnlearn_rsmax2_sparse_dict_scores["rf_e"]), 'Dimensionality': str(bnlearn_rsmax2_dimension_dict_scores["rf_e"])})
thewriter.writerow({'Algorithm': 'BN LEARN (RSMAX2)', 'Model': 'Logistic Regression (penalty-none)', 'Linear': str(bnlearn_rsmax2_linear_dict_scores["lr"]), 'Non-linear': str(bnlearn_rsmax2_nonlinear_dict_scores["lr"]), 'Sparsity': str(bnlearn_rsmax2_sparse_dict_scores["lr"]), 'Dimensionality': str(bnlearn_rsmax2_dimension_dict_scores["lr"])})
thewriter.writerow({'Algorithm': 'BN LEARN (RSMAX2)', 'Model': 'Logistic Regression (l1)', 'Linear': str(bnlearn_rsmax2_linear_dict_scores["lr_l1"]), 'Non-linear': str(bnlearn_rsmax2_nonlinear_dict_scores["lr_l1"]), 'Sparsity': str(bnlearn_rsmax2_sparse_dict_scores["lr_l1"]), 'Dimensionality': str(bnlearn_rsmax2_dimension_dict_scores["lr_l1"])})
thewriter.writerow({'Algorithm': 'BN LEARN (RSMAX2)', 'Model': 'Logistic Regression (l2)', 'Linear': str(bnlearn_rsmax2_linear_dict_scores["lr_l2"]), 'Non-linear': str(bnlearn_rsmax2_nonlinear_dict_scores["lr_l2"]), 'Sparsity': str(bnlearn_rsmax2_sparse_dict_scores["lr_l2"]), 'Dimensionality': str(bnlearn_rsmax2_dimension_dict_scores["lr_l2"])})
thewriter.writerow({'Algorithm': 'BN LEARN (RSMAX2)', 'Model': 'Logistic Regression (elasticnet)', 'Linear': str(bnlearn_rsmax2_linear_dict_scores["lr_e"]), 'Non-linear': str(bnlearn_rsmax2_nonlinear_dict_scores["lr_e"]), 'Sparsity': str(bnlearn_rsmax2_sparse_dict_scores["lr_e"]), 'Dimensionality': str(bnlearn_rsmax2_dimension_dict_scores["lr_e"])})
thewriter.writerow({'Algorithm': 'BN LEARN (RSMAX2)', 'Model': 'Naive Bayes (Bernoulli)', 'Linear': str(bnlearn_rsmax2_linear_dict_scores["nb"]), 'Non-linear': str(bnlearn_rsmax2_nonlinear_dict_scores["nb"]), 'Sparsity': str(bnlearn_rsmax2_sparse_dict_scores["nb"]), 'Dimensionality': str(bnlearn_rsmax2_dimension_dict_scores["nb"])})
thewriter.writerow({'Algorithm': 'BN LEARN (RSMAX2)', 'Model': 'Naive Bayes (Multinomial)', 'Linear': str(bnlearn_rsmax2_linear_dict_scores["nb_m"]), 'Non-linear': str(bnlearn_rsmax2_nonlinear_dict_scores["nb_m"]), 'Sparsity': str(bnlearn_rsmax2_sparse_dict_scores["nb_m"]), 'Dimensionality': str(bnlearn_rsmax2_dimension_dict_scores["nb_m"])})
thewriter.writerow({'Algorithm': 'BN LEARN (RSMAX2)', 'Model': 'Naive Bayes (Gaussian)', 'Linear': str(bnlearn_rsmax2_linear_dict_scores["nb_g"]), 'Non-linear': str(bnlearn_rsmax2_nonlinear_dict_scores["nb_g"]), 'Sparsity': str(bnlearn_rsmax2_sparse_dict_scores["nb_g"]), 'Dimensionality': str(bnlearn_rsmax2_dimension_dict_scores["nb_g"])})
thewriter.writerow({'Algorithm': 'BN LEARN (RSMAX2)', 'Model': 'Naive Bayes (Complement)', 'Linear': str(bnlearn_rsmax2_linear_dict_scores["nb_c"]), 'Non-linear': str(bnlearn_rsmax2_nonlinear_dict_scores["nb_c"]), 'Sparsity': str(bnlearn_rsmax2_sparse_dict_scores["nb_c"]), 'Dimensionality': str(bnlearn_rsmax2_dimension_dict_scores["nb_c"])})
thewriter.writerow({'Algorithm': 'BN LEARN (RSMAX2)', 'Model': 'Support Vector Machines (sigmoid)', 'Linear': str(bnlearn_rsmax2_linear_dict_scores["svm"]), 'Non-linear': str(bnlearn_rsmax2_nonlinear_dict_scores["svm"]), 'Sparsity': str(bnlearn_rsmax2_sparse_dict_scores["svm"]), 'Dimensionality': str(bnlearn_rsmax2_dimension_dict_scores["svm"])})
#thewriter.writerow({'Algorithm': 'BN LEARN (RSMAX2)', 'Model': 'Support Vector Machines (linear)','Linear': str(round(mean(bnlearn_rsmax2_linear_dict_scores["svm_l"]), 2)) + " {" + str(min(bnlearn_rsmax2_linear_dict_scores["svm_l"])) + "," + str(max(bnlearn_rsmax2_linear_dict_scores["svm_l"])) + "}",'Non-linear': str(round(mean(bnlearn_rsmax2_nonlinear_dict_scores["svm_l"]), 2)) + " {" + str(min(bnlearn_rsmax2_nonlinear_dict_scores["svm_l"])) + "," + str(max(bnlearn_rsmax2_nonlinear_dict_scores["svm_l"])) + "}",'Sparsity': str(round(mean(bnlearn_rsmax2_sparse_dict_scores["svm_l"]), 2)) + " {" + str(min(bnlearn_rsmax2_sparse_dict_scores["svm_l"])) + "," + str(max(bnlearn_rsmax2_sparse_dict_scores["svm_l"])) + "}", 'Dimensionality': str(round(mean(bnlearn_rsmax2_dimension_dict_scores["svm_l"]), 2)) + " {" + str(min(bnlearn_rsmax2_dimension_dict_scores["svm_l"])) + "," + str(max(bnlearn_rsmax2_dimension_dict_scores["svm_l"])) + "}"})
thewriter.writerow({'Algorithm': 'BN LEARN (RSMAX2)', 'Model': 'Support Vector Machines (poly)', 'Linear': str(bnlearn_rsmax2_linear_dict_scores["svm_po"]), 'Non-linear': str(bnlearn_rsmax2_nonlinear_dict_scores["svm_po"]), 'Sparsity': str(bnlearn_rsmax2_sparse_dict_scores["svm_po"]), 'Dimensionality': str(bnlearn_rsmax2_dimension_dict_scores["svm_po"])})
thewriter.writerow({'Algorithm': 'BN LEARN (RSMAX2)', 'Model': 'Support Vector Machines (rbf)', 'Linear': str(bnlearn_rsmax2_linear_dict_scores["svm_r"]), 'Non-linear': str(bnlearn_rsmax2_nonlinear_dict_scores["svm_r"]), 'Sparsity': str(bnlearn_rsmax2_sparse_dict_scores["svm_r"]), 'Dimensionality': str(bnlearn_rsmax2_dimension_dict_scores["svm_r"])})
#thewriter.writerow({'Algorithm': 'BN LEARN (RSMAX2)', 'Model': 'Support Vector Machines (precomputed)','Linear': str(round(mean(bnlearn_rsmax2_linear_dict_scores["svm_pr"]), 2)) + " {" + str(min(bnlearn_rsmax2_linear_dict_scores["svm_pr"])) + "," + str(max(bnlearn_rsmax2_linear_dict_scores["svm_pr"])) + "}",'Non-linear': str(round(mean(bnlearn_rsmax2_nonlinear_dict_scores["svm_pr"]), 2)) + " {" + str(min(bnlearn_rsmax2_nonlinear_dict_scores["svm_pr"])) + "," + str(max(bnlearn_rsmax2_nonlinear_dict_scores["svm_pr"])) + "}",'Sparsity': str(round(mean(bnlearn_rsmax2_sparse_dict_scores["svm_pr"]), 2)) + " {" + str(min(bnlearn_rsmax2_sparse_dict_scores["svm_pr"])) + "," + str(max(bnlearn_rsmax2_sparse_dict_scores["svm_pr"])) + "}", 'Dimensionality': str(round(mean(bnlearn_rsmax2_dimension_dict_scores["svm_pr"]), 2)) + " {" + str(min(bnlearn_rsmax2_dimension_dict_scores["svm_pr"])) + "," + str(max(bnlearn_rsmax2_dimension_dict_scores["svm_pr"])) + "}"})
thewriter.writerow({'Algorithm': 'BN LEARN (RSMAX2)', 'Model': 'K Nearest Neighbor (uniform)', 'Linear': str(bnlearn_rsmax2_linear_dict_scores["knn"]), 'Non-linear': str(bnlearn_rsmax2_nonlinear_dict_scores["knn"]), 'Sparsity': str(bnlearn_rsmax2_sparse_dict_scores["knn"]), 'Dimensionality': str(bnlearn_rsmax2_dimension_dict_scores["knn"])})
thewriter.writerow({'Algorithm': 'BN LEARN (RSMAX2)', 'Model': 'K Nearest Neighbor (distance)', 'Linear': str(bnlearn_rsmax2_linear_dict_scores["knn_d"]), 'Non-linear': str(bnlearn_rsmax2_nonlinear_dict_scores["knn_d"]), 'Sparsity': str(bnlearn_rsmax2_sparse_dict_scores["knn_d"]), 'Dimensionality': str(bnlearn_rsmax2_dimension_dict_scores["knn_d"])})
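# Rows for bnlearn's H2PC hybrid algorithm.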
thewriter.writerow({'Algorithm': 'BN LEARN (H2PC)', 'Model': 'Decision Tree (gini)', 'Linear': str(bnlearn_h2pc_linear_dict_scores["dt"]), 'Non-linear': str(bnlearn_h2pc_nonlinear_dict_scores["dt"]), 'Sparsity': str(bnlearn_h2pc_sparse_dict_scores["dt"]), 'Dimensionality': str(bnlearn_h2pc_dimension_dict_scores["dt"])})
thewriter.writerow({'Algorithm': 'BN LEARN (H2PC)', 'Model': 'Decision Tree (entropy)', 'Linear': str(bnlearn_h2pc_linear_dict_scores["dt_e"]), 'Non-linear': str(bnlearn_h2pc_nonlinear_dict_scores["dt_e"]), 'Sparsity': str(bnlearn_h2pc_sparse_dict_scores["dt_e"]), 'Dimensionality': str(bnlearn_h2pc_dimension_dict_scores["dt_e"])})
thewriter.writerow({'Algorithm': 'BN LEARN (H2PC)', 'Model': 'Random Forest (gini)', 'Linear': str(bnlearn_h2pc_linear_dict_scores["rf"]), 'Non-linear': str(bnlearn_h2pc_nonlinear_dict_scores["rf"]), 'Sparsity': str(bnlearn_h2pc_sparse_dict_scores["rf"]), 'Dimensionality': str(bnlearn_h2pc_dimension_dict_scores["rf"])})
thewriter.writerow({'Algorithm': 'BN LEARN (H2PC)', 'Model': 'Random Forest (entropy)', 'Linear': str(bnlearn_h2pc_linear_dict_scores["rf_e"]), 'Non-linear': str(bnlearn_h2pc_nonlinear_dict_scores["rf_e"]), 'Sparsity': str(bnlearn_h2pc_sparse_dict_scores["rf_e"]), 'Dimensionality': str(bnlearn_h2pc_dimension_dict_scores["rf_e"])})
thewriter.writerow({'Algorithm': 'BN LEARN (H2PC)', 'Model': 'Logistic Regression (penalty-none)', 'Linear': str(bnlearn_h2pc_linear_dict_scores["lr"]), 'Non-linear': str(bnlearn_h2pc_nonlinear_dict_scores["lr"]), 'Sparsity': str(bnlearn_h2pc_sparse_dict_scores["lr"]), 'Dimensionality': str(bnlearn_h2pc_dimension_dict_scores["lr"])})
thewriter.writerow({'Algorithm': 'BN LEARN (H2PC)', 'Model': 'Logistic Regression (l1)', 'Linear': str(bnlearn_h2pc_linear_dict_scores["lr_l1"]), 'Non-linear': str(bnlearn_h2pc_nonlinear_dict_scores["lr_l1"]), 'Sparsity': str(bnlearn_h2pc_sparse_dict_scores["lr_l1"]), 'Dimensionality': str(bnlearn_h2pc_dimension_dict_scores["lr_l1"])})
thewriter.writerow({'Algorithm': 'BN LEARN (H2PC)', 'Model': 'Logistic Regression (l2)', 'Linear': str(bnlearn_h2pc_linear_dict_scores["lr_l2"]), 'Non-linear': str(bnlearn_h2pc_nonlinear_dict_scores["lr_l2"]), 'Sparsity': str(bnlearn_h2pc_sparse_dict_scores["lr_l2"]), 'Dimensionality': str(bnlearn_h2pc_dimension_dict_scores["lr_l2"])})
thewriter.writerow({'Algorithm': 'BN LEARN (H2PC)', 'Model': 'Logistic Regression (elasticnet)', 'Linear': str(bnlearn_h2pc_linear_dict_scores["lr_e"]), 'Non-linear': str(bnlearn_h2pc_nonlinear_dict_scores["lr_e"]), 'Sparsity': str(bnlearn_h2pc_sparse_dict_scores["lr_e"]), 'Dimensionality': str(bnlearn_h2pc_dimension_dict_scores["lr_e"])})
thewriter.writerow({'Algorithm': 'BN LEARN (H2PC)', 'Model': 'Naive Bayes (Bernoulli)', 'Linear': str(bnlearn_h2pc_linear_dict_scores["nb"]), 'Non-linear': str(bnlearn_h2pc_nonlinear_dict_scores["nb"]), 'Sparsity': str(bnlearn_h2pc_sparse_dict_scores["nb"]), 'Dimensionality': str(bnlearn_h2pc_dimension_dict_scores["nb"])})
thewriter.writerow({'Algorithm': 'BN LEARN (H2PC)', 'Model': 'Naive Bayes (Multinomial)', 'Linear': str(bnlearn_h2pc_linear_dict_scores["nb_m"]), 'Non-linear': str(bnlearn_h2pc_nonlinear_dict_scores["nb_m"]), 'Sparsity': str(bnlearn_h2pc_sparse_dict_scores["nb_m"]), 'Dimensionality': str(bnlearn_h2pc_dimension_dict_scores["nb_m"])})
thewriter.writerow({'Algorithm': 'BN LEARN (H2PC)', 'Model': 'Naive Bayes (Gaussian)', 'Linear': str(bnlearn_h2pc_linear_dict_scores["nb_g"]), 'Non-linear': str(bnlearn_h2pc_nonlinear_dict_scores["nb_g"]), 'Sparsity': str(bnlearn_h2pc_sparse_dict_scores["nb_g"]), 'Dimensionality': str(bnlearn_h2pc_dimension_dict_scores["nb_g"])})
thewriter.writerow({'Algorithm': 'BN LEARN (H2PC)', 'Model': 'Naive Bayes (Complement)', 'Linear': str(bnlearn_h2pc_linear_dict_scores["nb_c"]), 'Non-linear': str(bnlearn_h2pc_nonlinear_dict_scores["nb_c"]), 'Sparsity': str(bnlearn_h2pc_sparse_dict_scores["nb_c"]), 'Dimensionality': str(bnlearn_h2pc_dimension_dict_scores["nb_c"])})
thewriter.writerow({'Algorithm': 'BN LEARN (H2PC)', 'Model': 'Support Vector Machines (sigmoid)', 'Linear': str(bnlearn_h2pc_linear_dict_scores["svm"]), 'Non-linear': str(bnlearn_h2pc_nonlinear_dict_scores["svm"]), 'Sparsity': str(bnlearn_h2pc_sparse_dict_scores["svm"]), 'Dimensionality': str(bnlearn_h2pc_dimension_dict_scores["svm"])})
#thewriter.writerow({'Algorithm': 'BN LEARN (H2PC)', 'Model': 'Support Vector Machines (linear)','Linear': str(round(mean(bnlearn_h2pc_linear_dict_scores["svm_l"]), 2)) + " {" + str(min(bnlearn_h2pc_linear_dict_scores["svm_l"])) + "," + str(max(bnlearn_h2pc_linear_dict_scores["svm_l"])) + "}",'Non-linear': str(round(mean(bnlearn_h2pc_nonlinear_dict_scores["svm_l"]), 2)) + " {" + str(min(bnlearn_h2pc_nonlinear_dict_scores["svm_l"])) + "," + str(max(bnlearn_h2pc_nonlinear_dict_scores["svm_l"])) + "}",'Sparsity': str(round(mean(bnlearn_h2pc_sparse_dict_scores["svm_l"]), 2)) + " {" + str(min(bnlearn_h2pc_sparse_dict_scores["svm_l"])) + "," + str(max(bnlearn_h2pc_sparse_dict_scores["svm_l"])) + "}", 'Dimensionality': str(round(mean(bnlearn_h2pc_dimension_dict_scores["svm_l"]), 2)) + " {" + str(min(bnlearn_h2pc_dimension_dict_scores["svm_l"])) + "," + str(max(bnlearn_h2pc_dimension_dict_scores["svm_l"])) + "}"})
thewriter.writerow({'Algorithm': 'BN LEARN (H2PC)', 'Model': 'Support Vector Machines (poly)', 'Linear': str(bnlearn_h2pc_linear_dict_scores["svm_po"]), 'Non-linear': str(bnlearn_h2pc_nonlinear_dict_scores["svm_po"]), 'Sparsity': str(bnlearn_h2pc_sparse_dict_scores["svm_po"]), 'Dimensionality': str(bnlearn_h2pc_dimension_dict_scores["svm_po"])})
thewriter.writerow({'Algorithm': 'BN LEARN (H2PC)', 'Model': 'Support Vector Machines (rbf)', 'Linear': str(bnlearn_h2pc_linear_dict_scores["svm_r"]), 'Non-linear': str(bnlearn_h2pc_nonlinear_dict_scores["svm_r"]), 'Sparsity': str(bnlearn_h2pc_sparse_dict_scores["svm_r"]), 'Dimensionality': str(bnlearn_h2pc_dimension_dict_scores["svm_r"])})
#thewriter.writerow({'Algorithm': 'BN LEARN (H2PC)', 'Model': 'Support Vector Machines (precomputed)','Linear': str(round(mean(bnlearn_h2pc_linear_dict_scores["svm_pr"]), 2)) + " {" + str(min(bnlearn_h2pc_linear_dict_scores["svm_pr"])) + "," + str(max(bnlearn_h2pc_linear_dict_scores["svm_pr"])) + "}",'Non-linear': str(round(mean(bnlearn_h2pc_nonlinear_dict_scores["svm_pr"]), 2)) + " {" + str(min(bnlearn_h2pc_nonlinear_dict_scores["svm_pr"])) + "," + str(max(bnlearn_h2pc_nonlinear_dict_scores["svm_pr"])) + "}",'Sparsity': str(round(mean(bnlearn_h2pc_sparse_dict_scores["svm_pr"]), 2)) + " {" + str(min(bnlearn_h2pc_sparse_dict_scores["svm_pr"])) + "," + str(max(bnlearn_h2pc_sparse_dict_scores["svm_pr"])) + "}", 'Dimensionality': str(round(mean(bnlearn_h2pc_dimension_dict_scores["svm_pr"]), 2)) + " {" + str(min(bnlearn_h2pc_dimension_dict_scores["svm_pr"])) + "," + str(max(bnlearn_h2pc_dimension_dict_scores["svm_pr"])) + "}"})
thewriter.writerow({'Algorithm': 'BN LEARN (H2PC)', 'Model': 'K Nearest Neighbor (uniform)', 'Linear': str(bnlearn_h2pc_linear_dict_scores["knn"]), 'Non-linear': str(bnlearn_h2pc_nonlinear_dict_scores["knn"]), 'Sparsity': str(bnlearn_h2pc_sparse_dict_scores["knn"]), 'Dimensionality': str(bnlearn_h2pc_dimension_dict_scores["knn"])})
thewriter.writerow({'Algorithm': 'BN LEARN (H2PC)', 'Model': 'K Nearest Neighbor (distance)', 'Linear': str(bnlearn_h2pc_linear_dict_scores["knn_d"]), 'Non-linear': str(bnlearn_h2pc_nonlinear_dict_scores["knn_d"]), 'Sparsity': str(bnlearn_h2pc_sparse_dict_scores["knn_d"]), 'Dimensionality': str(bnlearn_h2pc_dimension_dict_scores["knn_d"])})
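# Rows for structures learned with pomegranate's exact search; these and the
# greedy rows below spread each writerow call over several lines but carry the
# same four benchmark columns as the bnlearn rows above.
#
# NOTE: every enabled row in this table follows one pattern, so the block could
# be generated in a loop. A minimal sketch, assuming all of the *_dict_scores
# dictionaries share the same classifier keys (MODEL_LABELS and write_rows are
# hypothetical names, not part of this script):
#
#   MODEL_LABELS = {
#       'dt': 'Decision Tree (gini)', 'dt_e': 'Decision Tree (entropy)',
#       'rf': 'Random Forest (gini)', 'rf_e': 'Random Forest (entropy)',
#       'lr': 'Logistic Regression (penalty-none)', 'lr_l1': 'Logistic Regression (l1)',
#       'lr_l2': 'Logistic Regression (l2)', 'lr_e': 'Logistic Regression (elasticnet)',
#       'nb': 'Naive Bayes (Bernoulli)', 'nb_m': 'Naive Bayes (Multinomial)',
#       'nb_g': 'Naive Bayes (Gaussian)', 'nb_c': 'Naive Bayes (Complement)',
#       'svm': 'Support Vector Machines (sigmoid)', 'svm_po': 'Support Vector Machines (poly)',
#       'svm_r': 'Support Vector Machines (rbf)', 'knn': 'K Nearest Neighbor (uniform)',
#       'knn_d': 'K Nearest Neighbor (distance)',
#   }
#
#   def write_rows(algorithm, linear, nonlinear, sparse, dimension):
#       # One CSV row per classifier, in the same order as the manual rows.
#       for key, label in MODEL_LABELS.items():
#           thewriter.writerow({'Algorithm': algorithm, 'Model': label,
#                               'Linear': str(linear[key]),
#                               'Non-linear': str(nonlinear[key]),
#                               'Sparsity': str(sparse[key]),
#                               'Dimensionality': str(dimension[key])})
#
#   write_rows('Pomegranate (Exact)',
#              pomegranate_exact_linear_dict_scores,
#              pomegranate_exact_nonlinear_dict_scores,
#              pomegranate_exact_sparse_dict_scores,
#              pomegranate_exact_dimension_dict_scores)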
thewriter.writerow({'Algorithm': 'Pomegranate (Exact)', 'Model': 'Decision Tree (gini)',
'Linear': str(pomegranate_exact_linear_dict_scores["dt"]),
'Non-linear': str(pomegranate_exact_nonlinear_dict_scores["dt"]),
'Sparsity': str(pomegranate_exact_sparse_dict_scores["dt"]),
'Dimensionality': str(pomegranate_exact_dimension_dict_scores["dt"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Exact)', 'Model': 'Decision Tree (entropy)',
'Linear': str(pomegranate_exact_linear_dict_scores["dt_e"]),
'Non-linear': str(pomegranate_exact_nonlinear_dict_scores["dt_e"]),
'Sparsity': str(pomegranate_exact_sparse_dict_scores["dt_e"]),
'Dimensionality': str(pomegranate_exact_dimension_dict_scores["dt_e"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Exact)', 'Model': 'Random Forest (gini)',
'Linear': str(pomegranate_exact_linear_dict_scores["rf"]),
'Non-linear': str(pomegranate_exact_nonlinear_dict_scores["rf"]),
'Sparsity': str(pomegranate_exact_sparse_dict_scores["rf"]),
'Dimensionality': str(pomegranate_exact_dimension_dict_scores["rf"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Exact)', 'Model': 'Random Forest (entropy)',
'Linear': str(pomegranate_exact_linear_dict_scores["rf_e"]),
'Non-linear': str(pomegranate_exact_nonlinear_dict_scores["rf_e"]),
'Sparsity': str(pomegranate_exact_sparse_dict_scores["rf_e"]),
'Dimensionality': str(pomegranate_exact_dimension_dict_scores["rf_e"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Exact)', 'Model': 'Logistic Regression (penalty-none)',
'Linear': str(pomegranate_exact_linear_dict_scores["lr"]),
'Non-linear': str(pomegranate_exact_nonlinear_dict_scores["lr"]),
'Sparsity': str(pomegranate_exact_sparse_dict_scores["lr"]),
'Dimensionality': str(pomegranate_exact_dimension_dict_scores["lr"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Exact)', 'Model': 'Logistic Regression (l1)',
'Linear': str(pomegranate_exact_linear_dict_scores["lr_l1"]),
'Non-linear': str(pomegranate_exact_nonlinear_dict_scores["lr_l1"]),
'Sparsity': str(pomegranate_exact_sparse_dict_scores["lr_l1"]),
'Dimensionality': str(pomegranate_exact_dimension_dict_scores["lr_l1"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Exact)', 'Model': 'Logistic Regression (l2)',
'Linear': str(pomegranate_exact_linear_dict_scores["lr_l2"]),
'Non-linear': str(pomegranate_exact_nonlinear_dict_scores["lr_l2"]),
'Sparsity': str(pomegranate_exact_sparse_dict_scores["lr_l2"]),
'Dimensionality': str(pomegranate_exact_dimension_dict_scores["lr_l2"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Exact)', 'Model': 'Logistic Regression (elasticnet)',
'Linear': str(pomegranate_exact_linear_dict_scores["lr_e"]),
'Non-linear': str(pomegranate_exact_nonlinear_dict_scores["lr_e"]),
'Sparsity': str(pomegranate_exact_sparse_dict_scores["lr_e"]),
'Dimensionality': str(pomegranate_exact_dimension_dict_scores["lr_e"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Exact)', 'Model': 'Naive Bayes (Bernoulli)',
'Linear': str(pomegranate_exact_linear_dict_scores["nb"]),
'Non-linear': str(pomegranate_exact_nonlinear_dict_scores["nb"]),
'Sparsity': str(pomegranate_exact_sparse_dict_scores["nb"]),
'Dimensionality': str(pomegranate_exact_dimension_dict_scores["nb"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Exact)', 'Model': 'Naive Bayes (Multinomial)',
'Linear': str(pomegranate_exact_linear_dict_scores["nb_m"]),
'Non-linear': str(pomegranate_exact_nonlinear_dict_scores["nb_m"]),
'Sparsity': str(pomegranate_exact_sparse_dict_scores["nb_m"]),
'Dimensionality': str(pomegranate_exact_dimension_dict_scores["nb_m"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Exact)', 'Model': 'Naive Bayes (Gaussian)',
'Linear': str(pomegranate_exact_linear_dict_scores["nb_g"]),
'Non-linear': str(pomegranate_exact_nonlinear_dict_scores["nb_g"]),
'Sparsity': str(pomegranate_exact_sparse_dict_scores["nb_g"]),
'Dimensionality': str(pomegranate_exact_dimension_dict_scores["nb_g"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Exact)', 'Model': 'Naive Bayes (Complement)',
'Linear': str(pomegranate_exact_linear_dict_scores["nb_c"]),
'Non-linear': str(pomegranate_exact_nonlinear_dict_scores["nb_c"]),
'Sparsity': str(pomegranate_exact_sparse_dict_scores["nb_c"]),
'Dimensionality': str(pomegranate_exact_dimension_dict_scores["nb_c"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Exact)', 'Model': 'Support Vector Machines (sigmoid)',
'Linear': str(pomegranate_exact_linear_dict_scores["svm"]),
'Non-linear': str(pomegranate_exact_nonlinear_dict_scores["svm"]),
'Sparsity': str(pomegranate_exact_sparse_dict_scores["svm"]),
'Dimensionality': str(pomegranate_exact_dimension_dict_scores["svm"])})
# thewriter.writerow({'Algorithm': 'Pomegranate (Exact)', 'Model': 'Support Vector Machines (linear)', 'Linear': str(pomegranate_exact_linear_dict_scores["svm_l"]), 'Non-linear': str(pomegranate_exact_nonlinear_dict_scores["svm_l"]), 'Sparsity': str(pomegranate_exact_sparse_dict_scores["svm_l"]), 'Dimensionality': str(pomegranate_exact_dimension_dict_scores["svm_l"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Exact)', 'Model': 'Support Vector Machines (poly)',
'Linear': str(pomegranate_exact_linear_dict_scores["svm_po"]),
'Non-linear': str(pomegranate_exact_nonlinear_dict_scores["svm_po"]),
'Sparsity': str(pomegranate_exact_sparse_dict_scores["svm_po"]),
'Dimensionality': str(pomegranate_exact_dimension_dict_scores["svm_po"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Exact)', 'Model': 'Support Vector Machines (rbf)',
'Linear': str(pomegranate_exact_linear_dict_scores["svm_r"]),
'Non-linear': str(pomegranate_exact_nonlinear_dict_scores["svm_r"]),
'Sparsity': str(pomegranate_exact_sparse_dict_scores["svm_r"]),
'Dimensionality': str(pomegranate_exact_dimension_dict_scores["svm_r"])})
# thewriter.writerow({'Algorithm': 'Pomegranate (Exact)', 'Model': 'Support Vector Machines (precomputed)', 'Linear': str(pomegranate_exact_linear_dict_scores["svm_pr"]), 'Non-linear': str(pomegranate_exact_nonlinear_dict_scores["svm_pr"]), 'Sparsity': str(pomegranate_exact_sparse_dict_scores["svm_pr"]), 'Dimensionality': str(pomegranate_exact_dimension_dict_scores["svm_pr"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Exact)', 'Model': 'K Nearest Neighbor (uniform)',
'Linear': str(pomegranate_exact_linear_dict_scores["knn"]),
'Non-linear': str(pomegranate_exact_nonlinear_dict_scores["knn"]),
'Sparsity': str(pomegranate_exact_sparse_dict_scores["knn"]),
'Dimensionality': str(pomegranate_exact_dimension_dict_scores["knn"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Exact)', 'Model': 'K Nearest Neighbor (distance)',
'Linear': str(pomegranate_exact_linear_dict_scores["knn_d"]),
'Non-linear': str(pomegranate_exact_nonlinear_dict_scores["knn_d"]),
'Sparsity': str(pomegranate_exact_sparse_dict_scores["knn_d"]),
'Dimensionality': str(pomegranate_exact_dimension_dict_scores["knn_d"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Greedy)', 'Model': 'Decision Tree (gini)',
'Linear': str(pomegranate_greedy_linear_dict_scores["dt"]),
'Non-linear': str(pomegranate_greedy_nonlinear_dict_scores["dt"]),
'Sparsity': str(pomegranate_greedy_sparse_dict_scores["dt"]),
'Dimensionality': str(pomegranate_greedy_dimension_dict_scores["dt"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Greedy)', 'Model': 'Decision Tree (entropy)',
'Linear': str(pomegranate_greedy_linear_dict_scores["dt_e"]),
'Non-linear': str(pomegranate_greedy_nonlinear_dict_scores["dt_e"]),
'Sparsity': str(pomegranate_greedy_sparse_dict_scores["dt_e"]),
'Dimensionality': str(pomegranate_greedy_dimension_dict_scores["dt_e"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Greedy)', 'Model': 'Random Forest (gini)',
'Linear': str(pomegranate_greedy_linear_dict_scores["rf"]),
'Non-linear': str(pomegranate_greedy_nonlinear_dict_scores["rf"]),
'Sparsity': str(pomegranate_greedy_sparse_dict_scores["rf"]),
'Dimensionality': str(pomegranate_greedy_dimension_dict_scores["rf"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Greedy)', 'Model': 'Random Forest (entropy)',
'Linear': str(pomegranate_greedy_linear_dict_scores["rf_e"]),
'Non-linear': str(pomegranate_greedy_nonlinear_dict_scores["rf_e"]),
'Sparsity': str(pomegranate_greedy_sparse_dict_scores["rf_e"]),
'Dimensionality': str(pomegranate_greedy_dimension_dict_scores["rf_e"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Greedy)', 'Model': 'Logistic Regression (penalty-none)',
'Linear': str(pomegranate_greedy_linear_dict_scores["lr"]),
'Non-linear': str(pomegranate_greedy_nonlinear_dict_scores["lr"]),
'Sparsity': str(pomegranate_greedy_sparse_dict_scores["lr"]),
'Dimensionality': str(pomegranate_greedy_dimension_dict_scores["lr"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Greedy)', 'Model': 'Logistic Regression (l1)',
'Linear': str(pomegranate_greedy_linear_dict_scores["lr_l1"]),
'Non-linear': str(pomegranate_greedy_nonlinear_dict_scores["lr_l1"]),
'Sparsity': str(pomegranate_greedy_sparse_dict_scores["lr_l1"]),
'Dimensionality': str(pomegranate_greedy_dimension_dict_scores["lr_l1"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Greedy)', 'Model': 'Logistic Regression (l2)',
'Linear': str(pomegranate_greedy_linear_dict_scores["lr_l2"]),
'Non-linear': str(pomegranate_greedy_nonlinear_dict_scores["lr_l2"]),
'Sparsity': str(pomegranate_greedy_sparse_dict_scores["lr_l2"]),
'Dimensionality': str(pomegranate_greedy_dimension_dict_scores["lr_l2"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Greedy)', 'Model': 'Logistic Regression (elasticnet)',
'Linear': str(pomegranate_greedy_linear_dict_scores["lr_e"]),
'Non-linear': str(pomegranate_greedy_nonlinear_dict_scores["lr_e"]),
'Sparsity': str(pomegranate_greedy_sparse_dict_scores["lr_e"]),
'Dimensionality': str(pomegranate_greedy_dimension_dict_scores["lr_e"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Greedy)', 'Model': 'Naive Bayes (Bernoulli)',
'Linear': str(pomegranate_greedy_linear_dict_scores["nb"]),
'Non-linear': str(pomegranate_greedy_nonlinear_dict_scores["nb"]),
'Sparsity': str(pomegranate_greedy_sparse_dict_scores["nb"]),
'Dimensionality': str(pomegranate_greedy_dimension_dict_scores["nb"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Greedy)', 'Model': 'Naive Bayes (Multinomial)',
'Linear': str(pomegranate_greedy_linear_dict_scores["nb_m"]),
'Non-linear': str(pomegranate_greedy_nonlinear_dict_scores["nb_m"]),
'Sparsity': str(pomegranate_greedy_sparse_dict_scores["nb_m"]),
'Dimensionality': str(pomegranate_greedy_dimension_dict_scores["nb_m"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Greedy)', 'Model': 'Naive Bayes (Gaussian)',
'Linear': str(pomegranate_greedy_linear_dict_scores["nb_g"]),
'Non-linear': str(pomegranate_greedy_nonlinear_dict_scores["nb_g"]),
'Sparsity': str(pomegranate_greedy_sparse_dict_scores["nb_g"]),
'Dimensionality': str(pomegranate_greedy_dimension_dict_scores["nb_g"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Greedy)', 'Model': 'Naive Bayes (Complement)',
'Linear': str(pomegranate_greedy_linear_dict_scores["nb_c"]),
'Non-linear': str(pomegranate_greedy_nonlinear_dict_scores["nb_c"]),
'Sparsity': str(pomegranate_greedy_sparse_dict_scores["nb_c"]),
'Dimensionality': str(pomegranate_greedy_dimension_dict_scores["nb_c"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Greedy)', 'Model': 'Support Vector Machines (sigmoid)',
'Linear': str(pomegranate_greedy_linear_dict_scores["svm"]),
'Non-linear': str(pomegranate_greedy_nonlinear_dict_scores["svm"]),
'Sparsity': str(pomegranate_greedy_sparse_dict_scores["svm"]),
'Dimensionality': str(pomegranate_greedy_dimension_dict_scores["svm"])})
# Support Vector Machines (linear) row left disabled.
thewriter.writerow({'Algorithm': 'Pomegranate (Greedy)', 'Model': 'Support Vector Machines (poly)',
'Linear': str(pomegranate_greedy_linear_dict_scores["svm_po"]),
'Non-linear': str(pomegranate_greedy_nonlinear_dict_scores["svm_po"]),
'Sparsity': str(pomegranate_greedy_sparse_dict_scores["svm_po"]),
'Dimensionality': str(pomegranate_greedy_dimension_dict_scores["svm_po"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Greedy)', 'Model': 'Support Vector Machines (rbf)',
'Linear': str(pomegranate_greedy_linear_dict_scores["svm_r"]),
'Non-linear': str(pomegranate_greedy_nonlinear_dict_scores["svm_r"]),
'Sparsity': str(pomegranate_greedy_sparse_dict_scores["svm_r"]),
'Dimensionality': str(pomegranate_greedy_dimension_dict_scores["svm_r"])})
# Support Vector Machines (precomputed) row left disabled.
thewriter.writerow({'Algorithm': 'Pomegranate (Greedy)', 'Model': 'K Nearest Neighbor (uniform)',
'Linear': str(pomegranate_greedy_linear_dict_scores["knn"]),
'Non-linear': str(pomegranate_greedy_nonlinear_dict_scores["knn"]),
'Sparsity': str(pomegranate_greedy_sparse_dict_scores["knn"]),
'Dimensionality': str(pomegranate_greedy_dimension_dict_scores["knn"])})
thewriter.writerow({'Algorithm': 'Pomegranate (Greedy)', 'Model': 'K Nearest Neighbor (distance)',
'Linear': str(pomegranate_greedy_linear_dict_scores["knn_d"]),
'Non-linear': str(pomegranate_greedy_nonlinear_dict_scores["knn_d"]),
'Sparsity': str(pomegranate_greedy_sparse_dict_scores["knn_d"]),
'Dimensionality': str(pomegranate_greedy_dimension_dict_scores["knn_d"])})
thewriter.writerow({'Algorithm': 'PGMPY (HC)', 'Model': 'Decision Tree (gini)',
'Linear': str(pgmpy_hc_linear_dict_scores["dt"]),
'Non-linear': str(pgmpy_hc_nonlinear_dict_scores["dt"]),
'Sparsity': str(pgmpy_hc_sparse_dict_scores["dt"]),
'Dimensionality': str(pgmpy_hc_dimension_dict_scores["dt"])})
thewriter.writerow({'Algorithm': 'PGMPY (HC)', 'Model': 'Decision Tree (entropy)',
'Linear': str(pgmpy_hc_linear_dict_scores["dt_e"]),
'Non-linear': str(pgmpy_hc_nonlinear_dict_scores["dt_e"]),
'Sparsity': str(pgmpy_hc_sparse_dict_scores["dt_e"]),
'Dimensionality': str(pgmpy_hc_dimension_dict_scores["dt_e"])})
thewriter.writerow({'Algorithm': 'PGMPY (HC)', 'Model': 'Random Forest (gini)',
'Linear': str(pgmpy_hc_linear_dict_scores["rf"]),
'Non-linear': str(pgmpy_hc_nonlinear_dict_scores["rf"]),
'Sparsity': str(pgmpy_hc_sparse_dict_scores["rf"]),
'Dimensionality': str(pgmpy_hc_dimension_dict_scores["rf"])})
thewriter.writerow({'Algorithm': 'PGMPY (HC)', 'Model': 'Random Forest (entropy)',
'Linear': str(pgmpy_hc_linear_dict_scores["rf_e"]),
'Non-linear': str(pgmpy_hc_nonlinear_dict_scores["rf_e"]),
'Sparsity': str(pgmpy_hc_sparse_dict_scores["rf_e"]),
'Dimensionality': str(pgmpy_hc_dimension_dict_scores["rf_e"])})
thewriter.writerow({'Algorithm': 'PGMPY (HC)', 'Model': 'Logistic Regression (penalty-none)',
'Linear': str(pgmpy_hc_linear_dict_scores["lr"]),
'Non-linear': str(pgmpy_hc_nonlinear_dict_scores["lr"]),
'Sparsity': str(pgmpy_hc_sparse_dict_scores["lr"]),
'Dimensionality': str(pgmpy_hc_dimension_dict_scores["lr"])})
thewriter.writerow({'Algorithm': 'PGMPY (HC)', 'Model': 'Logistic Regression (l1)',
'Linear': str(pgmpy_hc_linear_dict_scores["lr_l1"]),
'Non-linear': str(pgmpy_hc_nonlinear_dict_scores["lr_l1"]),
'Sparsity': str(pgmpy_hc_sparse_dict_scores["lr_l1"]),
'Dimensionality': str(pgmpy_hc_dimension_dict_scores["lr_l1"])})
thewriter.writerow({'Algorithm': 'PGMPY (HC)', 'Model': 'Logistic Regression (l2)',
'Linear': str(pgmpy_hc_linear_dict_scores["lr_l2"]),
'Non-linear': str(pgmpy_hc_nonlinear_dict_scores["lr_l2"]),
'Sparsity': str(pgmpy_hc_sparse_dict_scores["lr_l2"]),
'Dimensionality': str(pgmpy_hc_dimension_dict_scores["lr_l2"])})
thewriter.writerow({'Algorithm': 'PGMPY (HC)', 'Model': 'Logistic Regression (elasticnet)',
'Linear': str(pgmpy_hc_linear_dict_scores["lr_e"]),
'Non-linear': str(pgmpy_hc_nonlinear_dict_scores["lr_e"]),
'Sparsity': str(pgmpy_hc_sparse_dict_scores["lr_e"]),
'Dimensionality': str(pgmpy_hc_dimension_dict_scores["lr_e"])})
thewriter.writerow({'Algorithm': 'PGMPY (HC)', 'Model': 'Naive Bayes (Bernoulli)',
'Linear': str(pgmpy_hc_linear_dict_scores["nb"]),
'Non-linear': str(pgmpy_hc_nonlinear_dict_scores["nb"]),
'Sparsity': str(pgmpy_hc_sparse_dict_scores["nb"]),
'Dimensionality': str(pgmpy_hc_dimension_dict_scores["nb"])})
thewriter.writerow({'Algorithm': 'PGMPY (HC)', 'Model': 'Naive Bayes (Multinomial)',
'Linear': str(pgmpy_hc_linear_dict_scores["nb_m"]),
'Non-linear': str(pgmpy_hc_nonlinear_dict_scores["nb_m"]),
'Sparsity': str(pgmpy_hc_sparse_dict_scores["nb_m"]),
'Dimensionality': str(pgmpy_hc_dimension_dict_scores["nb_m"])})
thewriter.writerow({'Algorithm': 'PGMPY (HC)', 'Model': 'Naive Bayes (Gaussian)',
'Linear': str(pgmpy_hc_linear_dict_scores["nb_g"]),
'Non-linear': str(pgmpy_hc_nonlinear_dict_scores["nb_g"]),
'Sparsity': str(pgmpy_hc_sparse_dict_scores["nb_g"]),
'Dimensionality': str(pgmpy_hc_dimension_dict_scores["nb_g"])})
thewriter.writerow({'Algorithm': 'PGMPY (HC)', 'Model': 'Naive Bayes (Complement)',
'Linear': str(pgmpy_hc_linear_dict_scores["nb_c"]),
'Non-linear': str(pgmpy_hc_nonlinear_dict_scores["nb_c"]),
'Sparsity': str(pgmpy_hc_sparse_dict_scores["nb_c"]),
'Dimensionality': str(pgmpy_hc_dimension_dict_scores["nb_c"])})
thewriter.writerow({'Algorithm': 'PGMPY (HC)', 'Model': 'Support Vector Machines (sigmoid)',
'Linear': str(pgmpy_hc_linear_dict_scores["svm"]),
'Non-linear': str(pgmpy_hc_nonlinear_dict_scores["svm"]),
'Sparsity': str(pgmpy_hc_sparse_dict_scores["svm"]),
'Dimensionality': str(pgmpy_hc_dimension_dict_scores["svm"])})
# Support Vector Machines (linear) row left disabled.
thewriter.writerow({'Algorithm': 'PGMPY (HC)', 'Model': 'Support Vector Machines (poly)',
'Linear': str(pgmpy_hc_linear_dict_scores["svm_po"]),
'Non-linear': str(pgmpy_hc_nonlinear_dict_scores["svm_po"]),
'Sparsity': str(pgmpy_hc_sparse_dict_scores["svm_po"]),
'Dimensionality': str(pgmpy_hc_dimension_dict_scores["svm_po"])})
thewriter.writerow({'Algorithm': 'PGMPY (HC)', 'Model': 'Support Vector Machines (rbf)',
'Linear': str(pgmpy_hc_linear_dict_scores["svm_r"]),
'Non-linear': str(pgmpy_hc_nonlinear_dict_scores["svm_r"]),
'Sparsity': str(pgmpy_hc_sparse_dict_scores["svm_r"]),
'Dimensionality': str(pgmpy_hc_dimension_dict_scores["svm_r"])})
# Support Vector Machines (precomputed) row left disabled.
thewriter.writerow({'Algorithm': 'PGMPY (HC)', 'Model': 'K Nearest Neighbor (uniform)',
'Linear': str(pgmpy_hc_linear_dict_scores["knn"]),
'Non-linear': str(pgmpy_hc_nonlinear_dict_scores["knn"]),
'Sparsity': str(pgmpy_hc_sparse_dict_scores["knn"]),
'Dimensionality': str(pgmpy_hc_dimension_dict_scores["knn"])})
thewriter.writerow({'Algorithm': 'PGMPY (HC)', 'Model': 'K Nearest Neighbor (distance)',
'Linear': str(pgmpy_hc_linear_dict_scores["knn_d"]),
'Non-linear': str(pgmpy_hc_nonlinear_dict_scores["knn_d"]),
'Sparsity': str(pgmpy_hc_sparse_dict_scores["knn_d"]),
'Dimensionality': str(pgmpy_hc_dimension_dict_scores["knn_d"])})
thewriter.writerow({'Algorithm': 'PGMPY (MMHC)', 'Model': 'Decision Tree (gini)',
'Linear': str(pgmpy_mmhc_linear_dict_scores["dt"]),
'Non-linear': str(pgmpy_mmhc_nonlinear_dict_scores["dt"]),
'Sparsity': str(pgmpy_mmhc_sparse_dict_scores["dt"]),
'Dimensionality': str(pgmpy_mmhc_dimension_dict_scores["dt"])})
thewriter.writerow({'Algorithm': 'PGMPY (MMHC)', 'Model': 'Decision Tree (entropy)',
'Linear': str(pgmpy_mmhc_linear_dict_scores["dt_e"]),
'Non-linear': str(pgmpy_mmhc_nonlinear_dict_scores["dt_e"]),
'Sparsity': str(pgmpy_mmhc_sparse_dict_scores["dt_e"]),
'Dimensionality': str(pgmpy_mmhc_dimension_dict_scores["dt_e"])})
thewriter.writerow({'Algorithm': 'PGMPY (MMHC)', 'Model': 'Random Forest (gini)',
'Linear': str(pgmpy_mmhc_linear_dict_scores["rf"]),
'Non-linear': str(pgmpy_mmhc_nonlinear_dict_scores["rf"]),
'Sparsity': str(pgmpy_mmhc_sparse_dict_scores["rf"]),
'Dimensionality': str(pgmpy_mmhc_dimension_dict_scores["rf"])})
thewriter.writerow({'Algorithm': 'PGMPY (MMHC)', 'Model': 'Random Forest (entropy)',
'Linear': str(pgmpy_mmhc_linear_dict_scores["rf_e"]),
'Non-linear': str(pgmpy_mmhc_nonlinear_dict_scores["rf_e"]),
'Sparsity': str(pgmpy_mmhc_sparse_dict_scores["rf_e"]),
'Dimensionality': str(pgmpy_mmhc_dimension_dict_scores["rf_e"])})
thewriter.writerow({'Algorithm': 'PGMPY (MMHC)', 'Model': 'Logistic Regression (penalty-none)',
'Linear': str(pgmpy_mmhc_linear_dict_scores["lr"]),
'Non-linear': str(pgmpy_mmhc_nonlinear_dict_scores["lr"]),
'Sparsity': str(pgmpy_mmhc_sparse_dict_scores["lr"]),
'Dimensionality': str(pgmpy_mmhc_dimension_dict_scores["lr"])})
thewriter.writerow({'Algorithm': 'PGMPY (MMHC)', 'Model': 'Logistic Regression (l1)',
'Linear': str(pgmpy_mmhc_linear_dict_scores["lr_l1"]),
'Non-linear': str(pgmpy_mmhc_nonlinear_dict_scores["lr_l1"]),
'Sparsity': str(pgmpy_mmhc_sparse_dict_scores["lr_l1"]),
'Dimensionality': str(pgmpy_mmhc_dimension_dict_scores["lr_l1"])})
thewriter.writerow({'Algorithm': 'PGMPY (MMHC)', 'Model': 'Logistic Regression (l2)',
'Linear': str(pgmpy_mmhc_linear_dict_scores["lr_l2"]),
'Non-linear': str(pgmpy_mmhc_nonlinear_dict_scores["lr_l2"]),
'Sparsity': str(pgmpy_mmhc_sparse_dict_scores["lr_l2"]),
'Dimensionality': str(pgmpy_mmhc_dimension_dict_scores["lr_l2"])})
thewriter.writerow({'Algorithm': 'PGMPY (MMHC)', 'Model': 'Logistic Regression (elasticnet)',
'Linear': str(pgmpy_mmhc_linear_dict_scores["lr_e"]),
'Non-linear': str(pgmpy_mmhc_nonlinear_dict_scores["lr_e"]),
'Sparsity': str(pgmpy_mmhc_sparse_dict_scores["lr_e"]),
'Dimensionality': str(pgmpy_mmhc_dimension_dict_scores["lr_e"])})
thewriter.writerow({'Algorithm': 'PGMPY (MMHC)', 'Model': 'Naive Bayes (Bernoulli)',
'Linear': str(pgmpy_mmhc_linear_dict_scores["nb"]),
'Non-linear': str(pgmpy_mmhc_nonlinear_dict_scores["nb"]),
'Sparsity': str(pgmpy_mmhc_sparse_dict_scores["nb"]),
'Dimensionality': str(pgmpy_mmhc_dimension_dict_scores["nb"])})
thewriter.writerow({'Algorithm': 'PGMPY (MMHC)', 'Model': 'Naive Bayes (Multinomial)',
'Linear': str(pgmpy_mmhc_linear_dict_scores["nb_m"]),
'Non-linear': str(pgmpy_mmhc_nonlinear_dict_scores["nb_m"]),
'Sparsity': str(pgmpy_mmhc_sparse_dict_scores["nb_m"]),
'Dimensionality': str(pgmpy_mmhc_dimension_dict_scores["nb_m"])})
thewriter.writerow({'Algorithm': 'PGMPY (MMHC)', 'Model': 'Naive Bayes (Gaussian)',
'Linear': str(pgmpy_mmhc_linear_dict_scores["nb_g"]),
'Non-linear': str(pgmpy_mmhc_nonlinear_dict_scores["nb_g"]),
'Sparsity': str(pgmpy_mmhc_sparse_dict_scores["nb_g"]),
'Dimensionality': str(pgmpy_mmhc_dimension_dict_scores["nb_g"])})
thewriter.writerow({'Algorithm': 'PGMPY (MMHC)', 'Model': 'Naive Bayes (Complement)',
'Linear': str(pgmpy_mmhc_linear_dict_scores["nb_c"]),
'Non-linear': str(pgmpy_mmhc_nonlinear_dict_scores["nb_c"]),
'Sparsity': str(pgmpy_mmhc_sparse_dict_scores["nb_c"]),
'Dimensionality': str(pgmpy_mmhc_dimension_dict_scores["nb_c"])})
thewriter.writerow({'Algorithm': 'PGMPY (MMHC)', 'Model': 'Support Vector Machines (sigmoid)',
'Linear': str(pgmpy_mmhc_linear_dict_scores["svm"]),
'Non-linear': str(pgmpy_mmhc_nonlinear_dict_scores["svm"]),
'Sparsity': str(pgmpy_mmhc_sparse_dict_scores["svm"]),
'Dimensionality': str(pgmpy_mmhc_dimension_dict_scores["svm"])})
# Support Vector Machines (linear) row left disabled.
thewriter.writerow({'Algorithm': 'PGMPY (MMHC)', 'Model': 'Support Vector Machines (poly)',
'Linear': str(pgmpy_mmhc_linear_dict_scores["svm_po"]),
'Non-linear': str(pgmpy_mmhc_nonlinear_dict_scores["svm_po"]),
'Sparsity': str(pgmpy_mmhc_sparse_dict_scores["svm_po"]),
'Dimensionality': str(pgmpy_mmhc_dimension_dict_scores["svm_po"])})
thewriter.writerow({'Algorithm': 'PGMPY (MMHC)', 'Model': 'Support Vector Machines (rbf)',
'Linear': str(pgmpy_mmhc_linear_dict_scores["svm_r"]),
'Non-linear': str(pgmpy_mmhc_nonlinear_dict_scores["svm_r"]),
'Sparsity': str(pgmpy_mmhc_sparse_dict_scores["svm_r"]),
'Dimensionality': str(pgmpy_mmhc_dimension_dict_scores["svm_r"])})
# Support Vector Machines (precomputed) row left disabled.
thewriter.writerow({'Algorithm': 'PGMPY (MMHC)', 'Model': 'K Nearest Neighbor (uniform)',
'Linear': str(pgmpy_mmhc_linear_dict_scores["knn"]),
'Non-linear': str(pgmpy_mmhc_nonlinear_dict_scores["knn"]),
'Sparsity': str(pgmpy_mmhc_sparse_dict_scores["knn"]),
'Dimensionality': str(pgmpy_mmhc_dimension_dict_scores["knn"])})
thewriter.writerow({'Algorithm': 'PGMPY (MMHC)', 'Model': 'K Nearest Neighbor (distance)',
'Linear': str(pgmpy_mmhc_linear_dict_scores["knn_d"]),
'Non-linear': str(pgmpy_mmhc_nonlinear_dict_scores["knn_d"]),
'Sparsity': str(pgmpy_mmhc_sparse_dict_scores["knn_d"]),
'Dimensionality': str(pgmpy_mmhc_dimension_dict_scores["knn_d"])})
thewriter.writerow({'Algorithm': 'PGMPY (TREE)', 'Model': 'Decision Tree (gini)',
'Linear': str(pgmpy_tree_linear_dict_scores["dt"]),
'Non-linear': str(pgmpy_tree_nonlinear_dict_scores["dt"]),
'Sparsity': str(pgmpy_tree_sparse_dict_scores["dt"]),
'Dimensionality': str(pgmpy_tree_dimension_dict_scores["dt"])})
thewriter.writerow({'Algorithm': 'PGMPY (TREE)', 'Model': 'Decision Tree (entropy)',
'Linear': str(pgmpy_tree_linear_dict_scores["dt_e"]),
'Non-linear': str(pgmpy_tree_nonlinear_dict_scores["dt_e"]),
'Sparsity': str(pgmpy_tree_sparse_dict_scores["dt_e"]),
'Dimensionality': str(pgmpy_tree_dimension_dict_scores["dt_e"])})
thewriter.writerow({'Algorithm': 'PGMPY (TREE)', 'Model': 'Random Forest (gini)',
'Linear': str(pgmpy_tree_linear_dict_scores["rf"]),
'Non-linear': str(pgmpy_tree_nonlinear_dict_scores["rf"]),
'Sparsity': str(pgmpy_tree_sparse_dict_scores["rf"]),
'Dimensionality': str(pgmpy_tree_dimension_dict_scores["rf"])})
thewriter.writerow({'Algorithm': 'PGMPY (TREE)', 'Model': 'Random Forest (entropy)',
'Linear': str(pgmpy_tree_linear_dict_scores["rf_e"]),
'Non-linear': str(pgmpy_tree_nonlinear_dict_scores["rf_e"]),
'Sparsity': str(pgmpy_tree_sparse_dict_scores["rf_e"]),
'Dimensionality': str(pgmpy_tree_dimension_dict_scores["rf_e"])})
thewriter.writerow({'Algorithm': 'PGMPY (TREE)', 'Model': 'Logistic Regression (penalty-none)',
'Linear': str(pgmpy_tree_linear_dict_scores["lr"]),
'Non-linear': str(pgmpy_tree_nonlinear_dict_scores["lr"]),
'Sparsity': str(pgmpy_tree_sparse_dict_scores["lr"]),
'Dimensionality': str(pgmpy_tree_dimension_dict_scores["lr"])})
thewriter.writerow({'Algorithm': 'PGMPY (TREE)', 'Model': 'Logistic Regression (l1)',
'Linear': str(pgmpy_tree_linear_dict_scores["lr_l1"]),
'Non-linear': str(pgmpy_tree_nonlinear_dict_scores["lr_l1"]),
'Sparsity': str(pgmpy_tree_sparse_dict_scores["lr_l1"]),
'Dimensionality': str(pgmpy_tree_dimension_dict_scores["lr_l1"])})
thewriter.writerow({'Algorithm': 'PGMPY (TREE)', 'Model': 'Logistic Regression (l2)',
'Linear': str(pgmpy_tree_linear_dict_scores["lr_l2"]),
'Non-linear': str(pgmpy_tree_nonlinear_dict_scores["lr_l2"]),
'Sparsity': str(pgmpy_tree_sparse_dict_scores["lr_l2"]),
'Dimensionality': str(pgmpy_tree_dimension_dict_scores["lr_l2"])})
thewriter.writerow({'Algorithm': 'PGMPY (TREE)', 'Model': 'Logistic Regression (elasticnet)',
'Linear': str(pgmpy_tree_linear_dict_scores["lr_e"]),
'Non-linear': str(pgmpy_tree_nonlinear_dict_scores["lr_e"]),
'Sparsity': str(pgmpy_tree_sparse_dict_scores["lr_e"]),
'Dimensionality': str(pgmpy_tree_dimension_dict_scores["lr_e"])})
thewriter.writerow({'Algorithm': 'PGMPY (TREE)', 'Model': 'Naive Bayes (Bernoulli)',
'Linear': str(pgmpy_tree_linear_dict_scores["nb"]),
'Non-linear': str(pgmpy_tree_nonlinear_dict_scores["nb"]),
'Sparsity': str(pgmpy_tree_sparse_dict_scores["nb"]),
'Dimensionality': str(pgmpy_tree_dimension_dict_scores["nb"])})
thewriter.writerow({'Algorithm': 'PGMPY (TREE)', 'Model': 'Naive Bayes (Multinomial)',
'Linear': str(pgmpy_tree_linear_dict_scores["nb_m"]),
'Non-linear': str(pgmpy_tree_nonlinear_dict_scores["nb_m"]),
'Sparsity': str(pgmpy_tree_sparse_dict_scores["nb_m"]),
'Dimensionality': str(pgmpy_tree_dimension_dict_scores["nb_m"])})
thewriter.writerow({'Algorithm': 'PGMPY (TREE)', 'Model': 'Naive Bayes (Gaussian)',
'Linear': str(pgmpy_tree_linear_dict_scores["nb_g"]),
'Non-linear': str(pgmpy_tree_nonlinear_dict_scores["nb_g"]),
'Sparsity': str(pgmpy_tree_sparse_dict_scores["nb_g"]),
'Dimensionality': str(pgmpy_tree_dimension_dict_scores["nb_g"])})
thewriter.writerow({'Algorithm': 'PGMPY (TREE)', 'Model': 'Naive Bayes (Complement)',
'Linear': str(pgmpy_tree_linear_dict_scores["nb_c"]),
'Non-linear': str(pgmpy_tree_nonlinear_dict_scores["nb_c"]),
'Sparsity': str(pgmpy_tree_sparse_dict_scores["nb_c"]),
'Dimensionality': str(pgmpy_tree_dimension_dict_scores["nb_c"])})
thewriter.writerow({'Algorithm': 'PGMPY (TREE)', 'Model': 'Support Vector Machines (sigmoid)',
'Linear': str(pgmpy_tree_linear_dict_scores["svm"]),
'Non-linear': str(pgmpy_tree_nonlinear_dict_scores["svm"]),
'Sparsity': str(pgmpy_tree_sparse_dict_scores["svm"]),
'Dimensionality': str(pgmpy_tree_dimension_dict_scores["svm"])})
# Support Vector Machines (linear) row left disabled.
thewriter.writerow({'Algorithm': 'PGMPY (TREE)', 'Model': 'Support Vector Machines (poly)',
'Linear': str(pgmpy_tree_linear_dict_scores["svm_po"]),
'Non-linear': str(pgmpy_tree_nonlinear_dict_scores["svm_po"]),
'Sparsity': str(pgmpy_tree_sparse_dict_scores["svm_po"]),
'Dimensionality': str(pgmpy_tree_dimension_dict_scores["svm_po"])})
thewriter.writerow({'Algorithm': 'PGMPY (TREE)', 'Model': 'Support Vector Machines (rbf)',
'Linear': str(pgmpy_tree_linear_dict_scores["svm_r"]),
'Non-linear': str(pgmpy_tree_nonlinear_dict_scores["svm_r"]),
'Sparsity': str(pgmpy_tree_sparse_dict_scores["svm_r"]),
'Dimensionality': str(pgmpy_tree_dimension_dict_scores["svm_r"])})
# Support Vector Machines (precomputed) row left disabled.
thewriter.writerow({'Algorithm': 'PGMPY (TREE)', 'Model': 'K Nearest Neighbor (uniform)',
'Linear': str(pgmpy_tree_linear_dict_scores["knn"]),
'Non-linear': str(pgmpy_tree_nonlinear_dict_scores["knn"]),
'Sparsity': str(pgmpy_tree_sparse_dict_scores["knn"]),
'Dimensionality': str(pgmpy_tree_dimension_dict_scores["knn"])})
thewriter.writerow({'Algorithm': 'PGMPY (TREE)', 'Model': 'K Nearest Neighbor (distance)',
'Linear': str(pgmpy_tree_linear_dict_scores["knn_d"]),
'Non-linear': str(pgmpy_tree_nonlinear_dict_scores["knn_d"]),
'Sparsity': str(pgmpy_tree_sparse_dict_scores["knn_d"]),
'Dimensionality': str(pgmpy_tree_dimension_dict_scores["knn_d"])})
write_learned_to_csv()
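# Illustrative refactor sketch (defined here but not called anywhere in this
# script): every block above repeats one writerow pattern per (algorithm,
# scores) pair, so a single loop could emit all rows for one algorithm.
# `model_names` is an assumed mapping from score keys (e.g. 'dt') to display
# names such as 'Decision Tree (gini)'.
def write_algorithm_rows(writer, algorithm, linear, nonlinear, sparse, dimension, model_names):
    # Emit one CSV row per model, pulling the same key out of each scores dict.
    for key, name in model_names.items():
        writer.writerow({'Algorithm': algorithm, 'Model': name,
                         'Linear': str(linear[key]),
                         'Non-linear': str(nonlinear[key]),
                         'Sparsity': str(sparse[key]),
                         'Dimensionality': str(dimension[key])})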
def write_real_to_csv():
with open('real_experiments_summary.csv', 'w', newline='') as csvfile:
fieldnames = ['Model', 'Linear', 'Non-linear', 'Sparsity', 'Dimensionality']
thewriter = csv.DictWriter(csvfile, fieldnames=fieldnames)
thewriter.writeheader()
thewriter.writerow({'Model': 'Decision Tree (gini)','Linear': str(real_linear_dt_scores), 'Non-linear': str(real_nonlinear_dt_scores), 'Sparsity': str(real_sparse_dt_scores), 'Dimensionality': str(real_dimension_dt_scores)})
thewriter.writerow({'Model': 'Decision Tree (entropy)', 'Linear': str(real_linear_dt_entropy_scores),'Non-linear': str(real_nonlinear_dt_entropy_scores),'Sparsity': str(real_sparse_dt_entropy_scores),'Dimensionality': str(real_dimension_dt_entropy_scores)})
thewriter.writerow({'Model': 'Random Forest (gini)', 'Linear': str(real_linear_rf_scores), 'Non-linear': str(real_nonlinear_rf_scores), 'Sparsity': str(real_sparse_rf_scores), 'Dimensionality': str(real_dimension_rf_scores)})
thewriter.writerow({'Model': 'Random Forest (entropy)', 'Linear': str(real_linear_rf_entropy_scores),'Non-linear': str(real_nonlinear_rf_entropy_scores),'Sparsity': str(real_sparse_rf_entropy_scores),'Dimensionality': str(real_dimension_rf_entropy_scores)})
thewriter.writerow({'Model': 'Logistic Regression (penalty-none)', 'Linear': str(real_linear_lr_scores), 'Non-linear': str(real_nonlinear_lr_scores), 'Sparsity': str(real_sparse_lr_scores), 'Dimensionality': str(real_dimension_lr_scores)})
thewriter.writerow({'Model': 'Logistic Regression (l1)', 'Linear': str(real_linear_lr_l1_scores),'Non-linear': str(real_nonlinear_lr_l1_scores),'Sparsity': str(real_sparse_lr_l1_scores),'Dimensionality': str(real_dimension_lr_l1_scores)})
thewriter.writerow({'Model': 'Logistic Regression (l2)', 'Linear': str(real_linear_lr_l2_scores),'Non-linear': str(real_nonlinear_lr_l2_scores),'Sparsity': str(real_sparse_lr_l2_scores),'Dimensionality': str(real_dimension_lr_l2_scores)})
thewriter.writerow({'Model': 'Logistic Regression (elasticnet)', 'Linear': str(real_linear_lr_elastic_scores),'Non-linear': str(real_nonlinear_lr_elastic_scores),'Sparsity': str(real_sparse_lr_elastic_scores),'Dimensionality': str(real_dimension_lr_elastic_scores)})
thewriter.writerow({'Model': 'Naive Bayes (Bernoulli)', 'Linear': str(real_linear_gb_scores),'Non-linear': str(real_nonlinear_gb_scores), 'Sparsity': str(real_sparse_gb_scores), 'Dimensionality': str(real_dimension_gb_scores)})
thewriter.writerow({'Model': 'Naive Bayes (Multinomial)', 'Linear': str(real_linear_gb_multi_scores),'Non-linear': str(real_nonlinear_gb_multi_scores),'Sparsity': str(real_sparse_gb_multi_scores),'Dimensionality': str(real_dimension_gb_multi_scores)})
thewriter.writerow({'Model': 'Naive Bayes (Gaussian)','Linear': str(real_linear_gb_gaussian_scores),'Non-linear': str(real_nonlinear_gb_gaussian_scores),'Sparsity': str(real_sparse_gb_gaussian_scores),'Dimensionality': str(real_dimension_gb_gaussian_scores)})
thewriter.writerow({'Model': 'Naive Bayes (Complement)','Linear': str(real_linear_gb_complement_scores),'Non-linear': str(real_nonlinear_gb_complement_scores),'Sparsity': str(real_sparse_gb_complement_scores),'Dimensionality': str(real_dimension_gb_complement_scores)})
thewriter.writerow({'Model': 'Support Vector Machines (sigmoid)', 'Linear': str(real_linear_svm_scores),'Non-linear': str(real_nonlinear_svm_scores), 'Sparsity': str(real_sparse_svm_scores), 'Dimensionality': str(real_dimension_svm_scores)})
# Support Vector Machines (linear) row left disabled.
thewriter.writerow({'Model': 'Support Vector Machines (poly)','Linear': str(real_linear_svm_poly_scores),'Non-linear': str(real_nonlinear_svm_poly_scores),'Sparsity': str(real_sparse_svm_poly_scores),'Dimensionality': str(real_dimension_svm_poly_scores)})
thewriter.writerow({'Model': 'Support Vector Machines (rbf)','Linear': str(real_linear_svm_rbf_scores),'Non-linear': str(real_nonlinear_svm_rbf_scores),'Sparsity': str(real_sparse_svm_rbf_scores),'Dimensionality': str(real_dimension_svm_rbf_scores)})
# Support Vector Machines (precomputed) row left disabled.
thewriter.writerow({'Model': 'K Nearest Neighbor (uniform)', 'Linear': str(real_linear_knn_scores),'Non-linear': str(real_nonlinear_knn_scores), 'Sparsity': str(real_sparse_knn_scores), 'Dimensionality': str(real_dimension_knn_scores)})
thewriter.writerow({'Model': 'K Nearest Neighbor (distance)', 'Linear': str(real_linear_knn_distance_scores),'Non-linear': str(real_nonlinear_knn_distance_scores), 'Sparsity': str(real_sparse_knn_distance_scores), 'Dimensionality': str(real_dimension_knn_distance_scores)})
write_real_to_csv()
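# Optional sanity check (illustrative sketch; not invoked by the pipeline):
# read the summary back with the stdlib csv module and confirm every row
# filled all five columns.
def check_real_csv(path='real_experiments_summary.csv'):
    with open(path, newline='') as csvfile:
        for row in csv.DictReader(csvfile):
            # Every field should hold a non-empty string representation.
            assert all(row[field] for field in
                       ['Model', 'Linear', 'Non-linear', 'Sparsity', 'Dimensionality'])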
def write_real_to_figures():
# Linear problem: performance by library for each ML technique (real-world test set)
# One grouped-bar figure, one bar group per technique
labels = ['DT_G', 'DT_E', 'RF_G', 'RF_E', 'LR', 'LR_L1', 'LR_L2', 'LR_E', 'NB_B', 'NB_G', 'NB_M', 'NB_C', 'SVM_S',
'SVM_P', 'SVM_R', 'KNN_W', 'KNN_D']
bn_means = [bnlearn_linear_dict_scores["dt"], bnlearn_linear_dict_scores["dt_e"], bnlearn_linear_dict_scores["rf"], bnlearn_linear_dict_scores["rf_e"], bnlearn_linear_dict_scores["lr"], bnlearn_linear_dict_scores["lr_l1"], bnlearn_linear_dict_scores["lr_l2"], bnlearn_linear_dict_scores["lr_e"], bnlearn_linear_dict_scores["nb"], bnlearn_linear_dict_scores["nb_g"], bnlearn_linear_dict_scores["nb_m"], bnlearn_linear_dict_scores["nb_c"], bnlearn_linear_dict_scores["svm"], bnlearn_linear_dict_scores["svm_po"], bnlearn_linear_dict_scores["svm_r"], bnlearn_linear_dict_scores["knn"], bnlearn_linear_dict_scores["knn_d"]]
bn_tabu_means = [bnlearn_tabu_linear_dict_scores["dt"], bnlearn_tabu_linear_dict_scores["dt_e"],
bnlearn_tabu_linear_dict_scores["rf"], bnlearn_tabu_linear_dict_scores["rf_e"],
bnlearn_tabu_linear_dict_scores["lr"], bnlearn_tabu_linear_dict_scores["lr_l1"],
bnlearn_tabu_linear_dict_scores["lr_l2"], bnlearn_tabu_linear_dict_scores["lr_e"],
bnlearn_tabu_linear_dict_scores["nb"], bnlearn_tabu_linear_dict_scores["nb_g"],
bnlearn_tabu_linear_dict_scores["nb_m"], bnlearn_tabu_linear_dict_scores["nb_c"],
bnlearn_tabu_linear_dict_scores["svm"], bnlearn_tabu_linear_dict_scores["svm_po"],
bnlearn_tabu_linear_dict_scores["svm_r"], bnlearn_tabu_linear_dict_scores["knn"],
bnlearn_tabu_linear_dict_scores["knn_d"]]
bn_pc_means = [bnlearn_pc_linear_dict_scores["dt"], bnlearn_pc_linear_dict_scores["dt_e"],
bnlearn_pc_linear_dict_scores["rf"], bnlearn_pc_linear_dict_scores["rf_e"],
bnlearn_pc_linear_dict_scores["lr"], bnlearn_pc_linear_dict_scores["lr_l1"],
bnlearn_pc_linear_dict_scores["lr_l2"], bnlearn_pc_linear_dict_scores["lr_e"],
bnlearn_pc_linear_dict_scores["nb"], bnlearn_pc_linear_dict_scores["nb_g"],
bnlearn_pc_linear_dict_scores["nb_m"], bnlearn_pc_linear_dict_scores["nb_c"],
bnlearn_pc_linear_dict_scores["svm"], bnlearn_pc_linear_dict_scores["svm_po"],
bnlearn_pc_linear_dict_scores["svm_r"], bnlearn_pc_linear_dict_scores["knn"],
bnlearn_pc_linear_dict_scores["knn_d"]]
bn_mmhc_means = [bnlearn_mmhc_linear_dict_scores["dt"], bnlearn_mmhc_linear_dict_scores["dt_e"],
bnlearn_mmhc_linear_dict_scores["rf"], bnlearn_mmhc_linear_dict_scores["rf_e"],
bnlearn_mmhc_linear_dict_scores["lr"], bnlearn_mmhc_linear_dict_scores["lr_l1"],
bnlearn_mmhc_linear_dict_scores["lr_l2"], bnlearn_mmhc_linear_dict_scores["lr_e"],
bnlearn_mmhc_linear_dict_scores["nb"], bnlearn_mmhc_linear_dict_scores["nb_g"],
bnlearn_mmhc_linear_dict_scores["nb_m"], bnlearn_mmhc_linear_dict_scores["nb_c"],
bnlearn_mmhc_linear_dict_scores["svm"], bnlearn_mmhc_linear_dict_scores["svm_po"],
bnlearn_mmhc_linear_dict_scores["svm_r"], bnlearn_mmhc_linear_dict_scores["knn"],
bnlearn_mmhc_linear_dict_scores["knn_d"]]
bn_rsmax2_means = [bnlearn_rsmax2_linear_dict_scores["dt"], bnlearn_rsmax2_linear_dict_scores["dt_e"],
bnlearn_rsmax2_linear_dict_scores["rf"], bnlearn_rsmax2_linear_dict_scores["rf_e"],
bnlearn_rsmax2_linear_dict_scores["lr"], bnlearn_rsmax2_linear_dict_scores["lr_l1"],
bnlearn_rsmax2_linear_dict_scores["lr_l2"], bnlearn_rsmax2_linear_dict_scores["lr_e"],
bnlearn_rsmax2_linear_dict_scores["nb"], bnlearn_rsmax2_linear_dict_scores["nb_g"],
bnlearn_rsmax2_linear_dict_scores["nb_m"], bnlearn_rsmax2_linear_dict_scores["nb_c"],
bnlearn_rsmax2_linear_dict_scores["svm"], bnlearn_rsmax2_linear_dict_scores["svm_po"],
bnlearn_rsmax2_linear_dict_scores["svm_r"], bnlearn_rsmax2_linear_dict_scores["knn"],
bnlearn_rsmax2_linear_dict_scores["knn_d"]]
bn_h2pc_means = [bnlearn_h2pc_linear_dict_scores["dt"], bnlearn_h2pc_linear_dict_scores["dt_e"],
bnlearn_h2pc_linear_dict_scores["rf"], bnlearn_h2pc_linear_dict_scores["rf_e"],
bnlearn_h2pc_linear_dict_scores["lr"], bnlearn_h2pc_linear_dict_scores["lr_l1"],
bnlearn_h2pc_linear_dict_scores["lr_l2"], bnlearn_h2pc_linear_dict_scores["lr_e"],
bnlearn_h2pc_linear_dict_scores["nb"], bnlearn_h2pc_linear_dict_scores["nb_g"],
bnlearn_h2pc_linear_dict_scores["nb_m"], bnlearn_h2pc_linear_dict_scores["nb_c"],
bnlearn_h2pc_linear_dict_scores["svm"], bnlearn_h2pc_linear_dict_scores["svm_po"],
bnlearn_h2pc_linear_dict_scores["svm_r"], bnlearn_h2pc_linear_dict_scores["knn"],
bnlearn_h2pc_linear_dict_scores["knn_d"]]
nt_means = [notears_linear_dict_scores["dt"], notears_linear_dict_scores["dt_e"], notears_linear_dict_scores["rf"], notears_linear_dict_scores["rf_e"], notears_linear_dict_scores["lr"], notears_linear_dict_scores["lr_l1"], notears_linear_dict_scores["lr_l2"], notears_linear_dict_scores["lr_e"], notears_linear_dict_scores["nb"], notears_linear_dict_scores["nb_g"], notears_linear_dict_scores["nb_m"], notears_linear_dict_scores["nb_c"], notears_linear_dict_scores["svm"], notears_linear_dict_scores["svm_po"], notears_linear_dict_scores["svm_r"], notears_linear_dict_scores["knn"], notears_linear_dict_scores["knn_d"]]
nt_l2_means = [notears_l2_linear_dict_scores["dt"], notears_l2_linear_dict_scores["dt_e"],
notears_l2_linear_dict_scores["rf"], notears_l2_linear_dict_scores["rf_e"],
notears_l2_linear_dict_scores["lr"], notears_l2_linear_dict_scores["lr_l1"],
notears_l2_linear_dict_scores["lr_l2"], notears_l2_linear_dict_scores["lr_e"],
notears_l2_linear_dict_scores["nb"], notears_l2_linear_dict_scores["nb_g"],
notears_l2_linear_dict_scores["nb_m"], notears_l2_linear_dict_scores["nb_c"],
notears_l2_linear_dict_scores["svm"], notears_l2_linear_dict_scores["svm_po"],
notears_l2_linear_dict_scores["svm_r"], notears_l2_linear_dict_scores["knn"],
notears_l2_linear_dict_scores["knn_d"]]
nt_p_means = [notears_poisson_linear_dict_scores["dt"], notears_poisson_linear_dict_scores["dt_e"],
notears_poisson_linear_dict_scores["rf"], notears_poisson_linear_dict_scores["rf_e"],
notears_poisson_linear_dict_scores["lr"], notears_poisson_linear_dict_scores["lr_l1"],
notears_poisson_linear_dict_scores["lr_l2"], notears_poisson_linear_dict_scores["lr_e"],
notears_poisson_linear_dict_scores["nb"], notears_poisson_linear_dict_scores["nb_g"],
notears_poisson_linear_dict_scores["nb_m"], notears_poisson_linear_dict_scores["nb_c"],
notears_poisson_linear_dict_scores["svm"], notears_poisson_linear_dict_scores["svm_po"],
notears_poisson_linear_dict_scores["svm_r"], notears_poisson_linear_dict_scores["knn"],
notears_poisson_linear_dict_scores["knn_d"]]
p_means = [pomegranate_exact_linear_dict_scores["dt"], pomegranate_exact_linear_dict_scores["dt_e"], pomegranate_exact_linear_dict_scores["rf"], pomegranate_exact_linear_dict_scores["rf_e"], pomegranate_exact_linear_dict_scores["lr"], pomegranate_exact_linear_dict_scores["lr_l1"], pomegranate_exact_linear_dict_scores["lr_l2"], pomegranate_exact_linear_dict_scores["lr_e"], pomegranate_exact_linear_dict_scores["nb"], pomegranate_exact_linear_dict_scores["nb_g"], pomegranate_exact_linear_dict_scores["nb_m"], pomegranate_exact_linear_dict_scores["nb_c"], pomegranate_exact_linear_dict_scores["svm"], pomegranate_exact_linear_dict_scores["svm_po"], pomegranate_exact_linear_dict_scores["svm_r"], pomegranate_exact_linear_dict_scores["knn"], pomegranate_exact_linear_dict_scores["knn_d"]]
p_g_means = [pomegranate_greedy_linear_dict_scores["dt"],
pomegranate_greedy_linear_dict_scores["dt_e"],
pomegranate_greedy_linear_dict_scores["rf"],
pomegranate_greedy_linear_dict_scores["rf_e"],
pomegranate_greedy_linear_dict_scores["lr"],
pomegranate_greedy_linear_dict_scores["lr_l1"],
pomegranate_greedy_linear_dict_scores["lr_l2"],
pomegranate_greedy_linear_dict_scores["lr_e"],
pomegranate_greedy_linear_dict_scores["nb"],
pomegranate_greedy_linear_dict_scores["nb_g"],
pomegranate_greedy_linear_dict_scores["nb_m"],
pomegranate_greedy_linear_dict_scores["nb_c"],
pomegranate_greedy_linear_dict_scores["svm"],
pomegranate_greedy_linear_dict_scores["svm_po"],
pomegranate_greedy_linear_dict_scores["svm_r"],
pomegranate_greedy_linear_dict_scores["knn"],
pomegranate_greedy_linear_dict_scores["knn_d"]]
pgmpy_tree_means = [pgmpy_tree_linear_dict_scores["dt"],
pgmpy_tree_linear_dict_scores["dt_e"],
pgmpy_tree_linear_dict_scores["rf"],
pgmpy_tree_linear_dict_scores["rf_e"],
pgmpy_tree_linear_dict_scores["lr"],
pgmpy_tree_linear_dict_scores["lr_l1"],
pgmpy_tree_linear_dict_scores["lr_l2"],
pgmpy_tree_linear_dict_scores["lr_e"],
pgmpy_tree_linear_dict_scores["nb"],
pgmpy_tree_linear_dict_scores["nb_g"],
pgmpy_tree_linear_dict_scores["nb_m"],
pgmpy_tree_linear_dict_scores["nb_c"],
pgmpy_tree_linear_dict_scores["svm"],
pgmpy_tree_linear_dict_scores["svm_po"],
pgmpy_tree_linear_dict_scores["svm_r"],
pgmpy_tree_linear_dict_scores["knn"],
pgmpy_tree_linear_dict_scores["knn_d"]]
pgmpy_hc_means = [pgmpy_hc_linear_dict_scores["dt"],
pgmpy_hc_linear_dict_scores["dt_e"],
pgmpy_hc_linear_dict_scores["rf"],
pgmpy_hc_linear_dict_scores["rf_e"],
pgmpy_hc_linear_dict_scores["lr"],
pgmpy_hc_linear_dict_scores["lr_l1"],
pgmpy_hc_linear_dict_scores["lr_l2"],
pgmpy_hc_linear_dict_scores["lr_e"],
pgmpy_hc_linear_dict_scores["nb"],
pgmpy_hc_linear_dict_scores["nb_g"],
pgmpy_hc_linear_dict_scores["nb_m"],
pgmpy_hc_linear_dict_scores["nb_c"],
pgmpy_hc_linear_dict_scores["svm"],
pgmpy_hc_linear_dict_scores["svm_po"],
pgmpy_hc_linear_dict_scores["svm_r"],
pgmpy_hc_linear_dict_scores["knn"],
pgmpy_hc_linear_dict_scores["knn_d"]]
pgmpy_mmhc_means = [pgmpy_mmhc_linear_dict_scores["dt"],
pgmpy_mmhc_linear_dict_scores["dt_e"],
pgmpy_mmhc_linear_dict_scores["rf"],
pgmpy_mmhc_linear_dict_scores["rf_e"],
pgmpy_mmhc_linear_dict_scores["lr"],
pgmpy_mmhc_linear_dict_scores["lr_l1"],
pgmpy_mmhc_linear_dict_scores["lr_l2"],
pgmpy_mmhc_linear_dict_scores["lr_e"],
pgmpy_mmhc_linear_dict_scores["nb"],
pgmpy_mmhc_linear_dict_scores["nb_g"],
pgmpy_mmhc_linear_dict_scores["nb_m"],
pgmpy_mmhc_linear_dict_scores["nb_c"],
pgmpy_mmhc_linear_dict_scores["svm"],
pgmpy_mmhc_linear_dict_scores["svm_po"],
pgmpy_mmhc_linear_dict_scores["svm_r"],
pgmpy_mmhc_linear_dict_scores["knn"],
pgmpy_mmhc_linear_dict_scores["knn_d"]]
plt.rcParams["figure.figsize"] = [18, 18]
plt.rcParams["figure.autolayout"] = True
x_axis = np.arange(len(labels))
w = 0.05 # the width of the bars
plt.bar(x_axis + w, bn_means, width=0.05, label="BN_LEARN (HC)", color="lightsteelblue")
plt.bar(x_axis + w * 2, bn_tabu_means, width=0.05, label="BN_LEARN (TABU)", color="cornflowerblue")
plt.bar(x_axis + w * 3, bn_pc_means, width=0.05, label="BN_LEARN (PC)", color="royalblue")
plt.bar(x_axis + w * 4, bn_mmhc_means, width=0.05, label="BN_LEARN (MMHC)", color="blue")
plt.bar(x_axis + w * 5, bn_rsmax2_means, width=0.05, label="BN_LEARN (RSMAX2)", color="mediumblue")
plt.bar(x_axis + w * 6, bn_h2pc_means, width=0.05, label="BN_LEARN (H2PC)", color="navy")
plt.bar(x_axis + w * 7, nt_means, width=0.05, label="NO_TEARS (logistic)", color="limegreen")
plt.bar(x_axis + w * 8, nt_l2_means, width=0.05, label="NO_TEARS (l2)", color="forestgreen")
plt.bar(x_axis + w * 9, nt_p_means, width=0.05, label="NO_TEARS (poisson)", color="darkgreen")
plt.bar(x_axis + w * 10, p_means, width=0.05, label="POMEGRANATE (exact)", color="darkviolet")
plt.bar(x_axis + w * 11, p_g_means, width=0.05, label="POMEGRANATE (greedy)", color="rebeccapurple")
plt.bar(x_axis + w * 12, pgmpy_mmhc_means, width=0.05, label="PGMPY (MMHC)", color="#FA8072")
plt.bar(x_axis + w * 13, pgmpy_hc_means, width=0.05, label="PGMPY (HC)", color="#FF2400")
plt.bar(x_axis + w * 14, pgmpy_tree_means, width=0.05, label="PGMPY (TREE)", color="#7C0A02")
plt.xticks(x_axis, labels)
plt.legend()
plt.style.use("fivethirtyeight")
plt.ylabel('Accuracy')
plt.xlabel('ML Technique', labelpad=15)
plt.title('Linear Problem - Performance by library on ML technique')
#plt.ylim(0.6, 1)
#plt.tick_params(rotation=45)
plt.savefig('pipeline_summary_benchmark_for_linear_by_library_groupbar.png', bbox_inches='tight')
plt.show()
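# The fourteen plt.bar calls above could be driven by one loop over
# (label, means, color) triples (illustrative helper; `series` is an assumed
# name and nothing in this script calls this):
def plot_grouped_bars(x_axis, w, series):
    # Offset each library's bars by one bar width so groups sit side by side.
    for i, (label, means, color) in enumerate(series, start=1):
        plt.bar(x_axis + w * i, means, width=w, label=label, color=color)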
# Non-linear problem: performance by library for each ML technique
# One grouped-bar figure, one bar group per technique
labels = ['DT_G', 'DT_E', 'RF_G', 'RF_E', 'LR', 'LR_L1', 'LR_L2', 'LR_E', 'NB_B', 'NB_G', 'NB_M', 'NB_C', 'SVM_S',
'SVM_P', 'SVM_R', 'KNN_W', 'KNN_D']
bn_non_means = [bnlearn_nonlinear_dict_scores["dt"], bnlearn_nonlinear_dict_scores["dt_e"],
bnlearn_nonlinear_dict_scores["rf"], bnlearn_nonlinear_dict_scores["rf_e"],
bnlearn_nonlinear_dict_scores["lr"], bnlearn_nonlinear_dict_scores["lr_l1"],
bnlearn_nonlinear_dict_scores["lr_l2"], bnlearn_nonlinear_dict_scores["lr_e"],
bnlearn_nonlinear_dict_scores["nb"], bnlearn_nonlinear_dict_scores["nb_g"],
bnlearn_nonlinear_dict_scores["nb_m"], bnlearn_nonlinear_dict_scores["nb_c"],
bnlearn_nonlinear_dict_scores["svm"], bnlearn_nonlinear_dict_scores["svm_po"],
bnlearn_nonlinear_dict_scores["svm_r"], bnlearn_nonlinear_dict_scores["knn"],
bnlearn_nonlinear_dict_scores["knn_d"]]
bn_tabu_non_means = [bnlearn_tabu_nonlinear_dict_scores["dt"],
bnlearn_tabu_nonlinear_dict_scores["dt_e"],
bnlearn_tabu_nonlinear_dict_scores["rf"],
bnlearn_tabu_nonlinear_dict_scores["rf_e"],
bnlearn_tabu_nonlinear_dict_scores["lr"],
bnlearn_tabu_nonlinear_dict_scores["lr_l1"],
bnlearn_tabu_nonlinear_dict_scores["lr_l2"],
bnlearn_tabu_nonlinear_dict_scores["lr_e"],
bnlearn_tabu_nonlinear_dict_scores["nb"],
bnlearn_tabu_nonlinear_dict_scores["nb_g"],
bnlearn_tabu_nonlinear_dict_scores["nb_m"],
bnlearn_tabu_nonlinear_dict_scores["nb_c"],
bnlearn_tabu_nonlinear_dict_scores["svm"],
bnlearn_tabu_nonlinear_dict_scores["svm_po"],
bnlearn_tabu_nonlinear_dict_scores["svm_r"],
bnlearn_tabu_nonlinear_dict_scores["knn"],
bnlearn_tabu_nonlinear_dict_scores["knn_d"]]
bn_mmhc_non_means = [bnlearn_mmhc_nonlinear_dict_scores["dt"],
bnlearn_mmhc_nonlinear_dict_scores["dt_e"],
bnlearn_mmhc_nonlinear_dict_scores["rf"],
bnlearn_mmhc_nonlinear_dict_scores["rf_e"],
bnlearn_mmhc_nonlinear_dict_scores["lr"],
bnlearn_mmhc_nonlinear_dict_scores["lr_l1"],
bnlearn_mmhc_nonlinear_dict_scores["lr_l2"],
bnlearn_mmhc_nonlinear_dict_scores["lr_e"],
bnlearn_mmhc_nonlinear_dict_scores["nb"],
bnlearn_mmhc_nonlinear_dict_scores["nb_g"],
bnlearn_mmhc_nonlinear_dict_scores["nb_m"],
bnlearn_mmhc_nonlinear_dict_scores["nb_c"],
bnlearn_mmhc_nonlinear_dict_scores["svm"],
bnlearn_mmhc_nonlinear_dict_scores["svm_po"],
bnlearn_mmhc_nonlinear_dict_scores["svm_r"],
bnlearn_mmhc_nonlinear_dict_scores["knn"],
bnlearn_mmhc_nonlinear_dict_scores["knn_d"]]
bn_rsmax2_non_means = [bnlearn_rsmax2_nonlinear_dict_scores["dt"],
bnlearn_rsmax2_nonlinear_dict_scores["dt_e"],
bnlearn_rsmax2_nonlinear_dict_scores["rf"],
bnlearn_rsmax2_nonlinear_dict_scores["rf_e"],
bnlearn_rsmax2_nonlinear_dict_scores["lr"],
bnlearn_rsmax2_nonlinear_dict_scores["lr_l1"],
bnlearn_rsmax2_nonlinear_dict_scores["lr_l2"],
bnlearn_rsmax2_nonlinear_dict_scores["lr_e"],
bnlearn_rsmax2_nonlinear_dict_scores["nb"],
bnlearn_rsmax2_nonlinear_dict_scores["nb_g"],
bnlearn_rsmax2_nonlinear_dict_scores["nb_m"],
bnlearn_rsmax2_nonlinear_dict_scores["nb_c"],
bnlearn_rsmax2_nonlinear_dict_scores["svm"],
bnlearn_rsmax2_nonlinear_dict_scores["svm_po"],
bnlearn_rsmax2_nonlinear_dict_scores["svm_r"],
bnlearn_rsmax2_nonlinear_dict_scores["knn"],
bnlearn_rsmax2_nonlinear_dict_scores["knn_d"]]
bn_h2pc_non_means = [bnlearn_h2pc_nonlinear_dict_scores["dt"],
bnlearn_h2pc_nonlinear_dict_scores["dt_e"],
bnlearn_h2pc_nonlinear_dict_scores["rf"],
bnlearn_h2pc_nonlinear_dict_scores["rf_e"],
bnlearn_h2pc_nonlinear_dict_scores["lr"],
bnlearn_h2pc_nonlinear_dict_scores["lr_l1"],
bnlearn_h2pc_nonlinear_dict_scores["lr_l2"],
bnlearn_h2pc_nonlinear_dict_scores["lr_e"],
bnlearn_h2pc_nonlinear_dict_scores["nb"],
bnlearn_h2pc_nonlinear_dict_scores["nb_g"],
bnlearn_h2pc_nonlinear_dict_scores["nb_m"],
bnlearn_h2pc_nonlinear_dict_scores["nb_c"],
bnlearn_h2pc_nonlinear_dict_scores["svm"],
bnlearn_h2pc_nonlinear_dict_scores["svm_po"],
bnlearn_h2pc_nonlinear_dict_scores["svm_r"],
bnlearn_h2pc_nonlinear_dict_scores["knn"],
bnlearn_h2pc_nonlinear_dict_scores["knn_d"]]
nt_non_means = [notears_nonlinear_dict_scores["dt"], notears_nonlinear_dict_scores["dt_e"],
notears_nonlinear_dict_scores["rf"], notears_nonlinear_dict_scores["rf_e"],
notears_nonlinear_dict_scores["lr"], notears_nonlinear_dict_scores["lr_l1"],
notears_nonlinear_dict_scores["lr_l2"], notears_nonlinear_dict_scores["lr_e"],
notears_nonlinear_dict_scores["nb"], notears_nonlinear_dict_scores["nb_g"],
notears_nonlinear_dict_scores["nb_m"], notears_nonlinear_dict_scores["nb_c"],
notears_nonlinear_dict_scores["svm"], notears_nonlinear_dict_scores["svm_po"],
notears_nonlinear_dict_scores["svm_r"], notears_nonlinear_dict_scores["knn"],
notears_nonlinear_dict_scores["knn_d"]]
nt_l2_non_means = [notears_l2_nonlinear_dict_scores["dt"],
notears_l2_nonlinear_dict_scores["dt_e"],
notears_l2_nonlinear_dict_scores["rf"],
notears_l2_nonlinear_dict_scores["rf_e"],
notears_l2_nonlinear_dict_scores["lr"],
notears_l2_nonlinear_dict_scores["lr_l1"],
notears_l2_nonlinear_dict_scores["lr_l2"],
notears_l2_nonlinear_dict_scores["lr_e"],
notears_l2_nonlinear_dict_scores["nb"],
notears_l2_nonlinear_dict_scores["nb_g"],
notears_l2_nonlinear_dict_scores["nb_m"],
notears_l2_nonlinear_dict_scores["nb_c"],
notears_l2_nonlinear_dict_scores["svm"],
notears_l2_nonlinear_dict_scores["svm_po"],
notears_l2_nonlinear_dict_scores["svm_r"],
notears_l2_nonlinear_dict_scores["knn"],
notears_l2_nonlinear_dict_scores["knn_d"]]
nt_p_non_means = [notears_poisson_nonlinear_dict_scores["dt"],
notears_poisson_nonlinear_dict_scores["dt_e"],
notears_poisson_nonlinear_dict_scores["rf"],
notears_poisson_nonlinear_dict_scores["rf_e"],
notears_poisson_nonlinear_dict_scores["lr"],
notears_poisson_nonlinear_dict_scores["lr_l1"],
notears_poisson_nonlinear_dict_scores["lr_l2"],
notears_poisson_nonlinear_dict_scores["lr_e"],
notears_poisson_nonlinear_dict_scores["nb"],
notears_poisson_nonlinear_dict_scores["nb_g"],
notears_poisson_nonlinear_dict_scores["nb_m"],
notears_poisson_nonlinear_dict_scores["nb_c"],
notears_poisson_nonlinear_dict_scores["svm"],
notears_poisson_nonlinear_dict_scores["svm_po"],
notears_poisson_nonlinear_dict_scores["svm_r"],
notears_poisson_nonlinear_dict_scores["knn"],
notears_poisson_nonlinear_dict_scores["knn_d"]]
p_non_means = [pomegranate_exact_nonlinear_dict_scores["dt"],
pomegranate_exact_nonlinear_dict_scores["dt_e"],
pomegranate_exact_nonlinear_dict_scores["rf"],
pomegranate_exact_nonlinear_dict_scores["rf_e"],
pomegranate_exact_nonlinear_dict_scores["lr"],
pomegranate_exact_nonlinear_dict_scores["lr_l1"],
pomegranate_exact_nonlinear_dict_scores["lr_l2"],
pomegranate_exact_nonlinear_dict_scores["lr_e"],
pomegranate_exact_nonlinear_dict_scores["nb"],
pomegranate_exact_nonlinear_dict_scores["nb_g"],
pomegranate_exact_nonlinear_dict_scores["nb_m"],
pomegranate_exact_nonlinear_dict_scores["nb_c"],
pomegranate_exact_nonlinear_dict_scores["svm"],
pomegranate_exact_nonlinear_dict_scores["svm_po"],
pomegranate_exact_nonlinear_dict_scores["svm_r"],
pomegranate_exact_nonlinear_dict_scores["knn"],
pomegranate_exact_nonlinear_dict_scores["knn_d"]]
p_g_non_means = [pomegranate_greedy_nonlinear_dict_scores["dt"],
pomegranate_greedy_nonlinear_dict_scores["dt_e"],
pomegranate_greedy_nonlinear_dict_scores["rf"],
pomegranate_greedy_nonlinear_dict_scores["rf_e"],
pomegranate_greedy_nonlinear_dict_scores["lr"],
pomegranate_greedy_nonlinear_dict_scores["lr_l1"],
pomegranate_greedy_nonlinear_dict_scores["lr_l2"],
pomegranate_greedy_nonlinear_dict_scores["lr_e"],
pomegranate_greedy_nonlinear_dict_scores["nb"],
pomegranate_greedy_nonlinear_dict_scores["nb_g"],
pomegranate_greedy_nonlinear_dict_scores["nb_m"],
pomegranate_greedy_nonlinear_dict_scores["nb_c"],
pomegranate_greedy_nonlinear_dict_scores["svm"],
pomegranate_greedy_nonlinear_dict_scores["svm_po"],
pomegranate_greedy_nonlinear_dict_scores["svm_r"],
pomegranate_greedy_nonlinear_dict_scores["knn"],
pomegranate_greedy_nonlinear_dict_scores["knn_d"]]
pgmpy_tree_non_means = [pgmpy_tree_nonlinear_dict_scores["dt"],
pgmpy_tree_nonlinear_dict_scores["dt_e"],
pgmpy_tree_nonlinear_dict_scores["rf"],
pgmpy_tree_nonlinear_dict_scores["rf_e"],
pgmpy_tree_nonlinear_dict_scores["lr"],
pgmpy_tree_nonlinear_dict_scores["lr_l1"],
pgmpy_tree_nonlinear_dict_scores["lr_l2"],
pgmpy_tree_nonlinear_dict_scores["lr_e"],
pgmpy_tree_nonlinear_dict_scores["nb"],
pgmpy_tree_nonlinear_dict_scores["nb_g"],
pgmpy_tree_nonlinear_dict_scores["nb_m"],
pgmpy_tree_nonlinear_dict_scores["nb_c"],
pgmpy_tree_nonlinear_dict_scores["svm"],
pgmpy_tree_nonlinear_dict_scores["svm_po"],
pgmpy_tree_nonlinear_dict_scores["svm_r"],
pgmpy_tree_nonlinear_dict_scores["knn"],
pgmpy_tree_nonlinear_dict_scores["knn_d"]]
pgmpy_hc_non_means = [pgmpy_hc_nonlinear_dict_scores["dt"],
pgmpy_hc_nonlinear_dict_scores["dt_e"],
pgmpy_hc_nonlinear_dict_scores["rf"],
pgmpy_hc_nonlinear_dict_scores["rf_e"],
pgmpy_hc_nonlinear_dict_scores["lr"],
pgmpy_hc_nonlinear_dict_scores["lr_l1"],
pgmpy_hc_nonlinear_dict_scores["lr_l2"],
pgmpy_hc_nonlinear_dict_scores["lr_e"],
pgmpy_hc_nonlinear_dict_scores["nb"],
pgmpy_hc_nonlinear_dict_scores["nb_g"],
pgmpy_hc_nonlinear_dict_scores["nb_m"],
pgmpy_hc_nonlinear_dict_scores["nb_c"],
pgmpy_hc_nonlinear_dict_scores["svm"],
pgmpy_hc_nonlinear_dict_scores["svm_po"],
pgmpy_hc_nonlinear_dict_scores["svm_r"],
pgmpy_hc_nonlinear_dict_scores["knn"],
pgmpy_hc_nonlinear_dict_scores["knn_d"]]
pgmpy_mmhc_non_means = [pgmpy_mmhc_nonlinear_dict_scores["dt"],
pgmpy_mmhc_nonlinear_dict_scores["dt_e"],
pgmpy_mmhc_nonlinear_dict_scores["rf"],
pgmpy_mmhc_nonlinear_dict_scores["rf_e"],
pgmpy_mmhc_nonlinear_dict_scores["lr"],
pgmpy_mmhc_nonlinear_dict_scores["lr_l1"],
pgmpy_mmhc_nonlinear_dict_scores["lr_l2"],
pgmpy_mmhc_nonlinear_dict_scores["lr_e"],
pgmpy_mmhc_nonlinear_dict_scores["nb"],
pgmpy_mmhc_nonlinear_dict_scores["nb_g"],
pgmpy_mmhc_nonlinear_dict_scores["nb_m"],
pgmpy_mmhc_nonlinear_dict_scores["nb_c"],
pgmpy_mmhc_nonlinear_dict_scores["svm"],
pgmpy_mmhc_nonlinear_dict_scores["svm_po"],
pgmpy_mmhc_nonlinear_dict_scores["svm_r"],
pgmpy_mmhc_nonlinear_dict_scores["knn"],
pgmpy_mmhc_nonlinear_dict_scores["knn_d"]]
plt.rcParams["figure.figsize"] = [18, 18]
plt.rcParams["figure.autolayout"] = True
x_axis = np.arange(len(labels))
w = 0.05 # the width of the bars
plt.bar(x_axis + w, bn_non_means, width=0.05, label="BN_LEARN (HC)", color="lightsteelblue")
plt.bar(x_axis + w * 2, bn_tabu_non_means, width=0.05, label="BN_LEARN (TABU)", color="cornflowerblue")
plt.bar(x_axis + w * 3, bn_mmhc_non_means, width=0.05, label="BN_LEARN (MMHC)", color="blue")
plt.bar(x_axis + w * 4, bn_rsmax2_non_means, width=0.05, label="BN_LEARN (RSMAX2)", color="mediumblue")
plt.bar(x_axis + w * 5, bn_h2pc_non_means, width=0.05, label="BN_LEARN (H2PC)", color="navy")
plt.bar(x_axis + w * 6, nt_non_means, width=0.05, label="NO_TEARS (logistic)", color="limegreen")
plt.bar(x_axis + w * 7, nt_l2_non_means, width=0.05, label="NO_TEARS (l2)", color="forestgreen")
plt.bar(x_axis + w * 8, nt_p_non_means, width=0.05, label="NO_TEARS (poisson)", color="darkgreen")
plt.bar(x_axis + w * 9, p_non_means, width=0.05, label="POMEGRANATE (exact)", color="darkviolet")
plt.bar(x_axis + w * 10, p_g_non_means, width=0.05, label="POMEGRANATE (greed)", color="rebeccapurple")
plt.bar(x_axis + w * 11, pgmpy_mmhc_non_means, width=0.05, label="PGMPY (MMHC)", color="#FA8072")
plt.bar(x_axis + w * 12, pgmpy_hc_non_means, width=0.05, label="PGMPY (HC)", color="#FF2400")
plt.bar(x_axis + w * 13, pgmpy_tree_non_means, width=0.05, label="PGMPY (TREE)", color="#7C0A02")
plt.xticks(x_axis, labels)
plt.legend()
plt.style.use("fivethirtyeight")
plt.ylabel('Accuracy')
plt.xlabel('ML Technique', labelpad=15)
plt.title('Non-Linear Problem - Performance by library on ML technique')
# plt.ylim(0.6, 1)
# plt.tick_params(rotation=45)
plt.savefig('pipeline_summary_benchmark_for_nonlinear_by_library_groupbar.png', bbox_inches='tight')
plt.show()
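# The grouped-bar blocks in this script repeat the same matplotlib calls with
# different data. A minimal reusable sketch (an addition, assuming the
# matplotlib.pyplot/numpy imports at the top of this script; `series` maps a
# legend label to a (means, color) pair, and dict order fixes the bar order):
def grouped_bar(series, labels, title, outfile, bar_w=0.05):
    # Apply the style before drawing: plt.style.use() only affects artists
    # created afterwards, so calling it after plt.bar() (as the blocks in this
    # script do) leaves the current figure unstyled.
    plt.style.use("fivethirtyeight")
    plt.rcParams["figure.figsize"] = [18, 18]
    plt.rcParams["figure.autolayout"] = True
    x_axis = np.arange(len(labels))
    for i, (label, (means, color)) in enumerate(series.items(), start=1):
        plt.bar(x_axis + bar_w * i, means, width=bar_w, label=label, color=color)
    plt.xticks(x_axis, labels)
    plt.legend()
    plt.ylabel('Accuracy')
    plt.xlabel('ML Technique', labelpad=15)
    plt.title(title)
    plt.savefig(outfile, bbox_inches='tight')
    plt.show()


# Example usage (hypothetical call; the blocks below keep the original inline
# style):
#   grouped_bar({"BN_LEARN (HC)": (bn_non_means, "lightsteelblue"),
#                "BN_LEARN (TABU)": (bn_tabu_non_means, "cornflowerblue")},
#               labels, 'Non-Linear Problem - Performance by library on ML technique',
#               'pipeline_summary_benchmark_for_nonlinear_by_library_groupbar.png')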
# Produce Sparse Problem results by library and ML technique
# Grouped-bar figure
labels = ['DT_G', 'DT_E', 'RF_G', 'RF_E', 'LR', 'LR_L1', 'LR_L2', 'LR_E', 'NB_B', 'NB_G', 'NB_M', 'NB_C', 'SVM_S',
'SVM_P', 'SVM_R', 'KNN_W', 'KNN_D']
bn_sparse_means = [bnlearn_sparse_dict_scores["dt"], bnlearn_sparse_dict_scores["dt_e"],
                   bnlearn_sparse_dict_scores["rf"], bnlearn_sparse_dict_scores["rf_e"],
                   bnlearn_sparse_dict_scores["lr"], bnlearn_sparse_dict_scores["lr_l1"],
                   bnlearn_sparse_dict_scores["lr_l2"], bnlearn_sparse_dict_scores["lr_e"],
                   bnlearn_sparse_dict_scores["nb"], bnlearn_sparse_dict_scores["nb_g"],
                   bnlearn_sparse_dict_scores["nb_m"], bnlearn_sparse_dict_scores["nb_c"],
                   bnlearn_sparse_dict_scores["svm"], bnlearn_sparse_dict_scores["svm_po"],
                   bnlearn_sparse_dict_scores["svm_r"], bnlearn_sparse_dict_scores["knn"],
                   bnlearn_sparse_dict_scores["knn_d"]]
bn_tabu_sparse_means = [bnlearn_tabu_sparse_dict_scores["dt"], bnlearn_tabu_sparse_dict_scores["dt_e"],
bnlearn_tabu_sparse_dict_scores["rf"], bnlearn_tabu_sparse_dict_scores["rf_e"],
bnlearn_tabu_sparse_dict_scores["lr"], bnlearn_tabu_sparse_dict_scores["lr_l1"],
bnlearn_tabu_sparse_dict_scores["lr_l2"], bnlearn_tabu_sparse_dict_scores["lr_e"],
bnlearn_tabu_sparse_dict_scores["nb"], bnlearn_tabu_sparse_dict_scores["nb_g"],
bnlearn_tabu_sparse_dict_scores["nb_m"], bnlearn_tabu_sparse_dict_scores["nb_c"],
bnlearn_tabu_sparse_dict_scores["svm"], bnlearn_tabu_sparse_dict_scores["svm_po"],
bnlearn_tabu_sparse_dict_scores["svm_r"], bnlearn_tabu_sparse_dict_scores["knn"],
bnlearn_tabu_sparse_dict_scores["knn_d"]]
bn_mmhc_sparse_means = [bnlearn_mmhc_sparse_dict_scores["dt"], bnlearn_mmhc_sparse_dict_scores["dt_e"],
bnlearn_mmhc_sparse_dict_scores["rf"], bnlearn_mmhc_sparse_dict_scores["rf_e"],
bnlearn_mmhc_sparse_dict_scores["lr"], bnlearn_mmhc_sparse_dict_scores["lr_l1"],
bnlearn_mmhc_sparse_dict_scores["lr_l2"], bnlearn_mmhc_sparse_dict_scores["lr_e"],
bnlearn_mmhc_sparse_dict_scores["nb"], bnlearn_mmhc_sparse_dict_scores["nb_g"],
bnlearn_mmhc_sparse_dict_scores["nb_m"], bnlearn_mmhc_sparse_dict_scores["nb_c"],
bnlearn_mmhc_sparse_dict_scores["svm"], bnlearn_mmhc_sparse_dict_scores["svm_po"],
bnlearn_mmhc_sparse_dict_scores["svm_r"], bnlearn_mmhc_sparse_dict_scores["knn"],
bnlearn_mmhc_sparse_dict_scores["knn_d"]]
bn_rsmax2_sparse_means = [bnlearn_rsmax2_sparse_dict_scores["dt"], bnlearn_rsmax2_sparse_dict_scores["dt_e"],
bnlearn_rsmax2_sparse_dict_scores["rf"], bnlearn_rsmax2_sparse_dict_scores["rf_e"],
bnlearn_rsmax2_sparse_dict_scores["lr"], bnlearn_rsmax2_sparse_dict_scores["lr_l1"],
bnlearn_rsmax2_sparse_dict_scores["lr_l2"], bnlearn_rsmax2_sparse_dict_scores["lr_e"],
bnlearn_rsmax2_sparse_dict_scores["nb"], bnlearn_rsmax2_sparse_dict_scores["nb_g"],
bnlearn_rsmax2_sparse_dict_scores["nb_m"], bnlearn_rsmax2_sparse_dict_scores["nb_c"],
bnlearn_rsmax2_sparse_dict_scores["svm"], bnlearn_rsmax2_sparse_dict_scores["svm_po"],
bnlearn_rsmax2_sparse_dict_scores["svm_r"], bnlearn_rsmax2_sparse_dict_scores["knn"],
bnlearn_rsmax2_sparse_dict_scores["knn_d"]]
bn_h2pc_sparse_means = [bnlearn_h2pc_sparse_dict_scores["dt"], bnlearn_h2pc_sparse_dict_scores["dt_e"],
bnlearn_h2pc_sparse_dict_scores["rf"], bnlearn_h2pc_sparse_dict_scores["rf_e"],
bnlearn_h2pc_sparse_dict_scores["lr"], bnlearn_h2pc_sparse_dict_scores["lr_l1"],
bnlearn_h2pc_sparse_dict_scores["lr_l2"], bnlearn_h2pc_sparse_dict_scores["lr_e"],
bnlearn_h2pc_sparse_dict_scores["nb"], bnlearn_h2pc_sparse_dict_scores["nb_g"],
bnlearn_h2pc_sparse_dict_scores["nb_m"], bnlearn_h2pc_sparse_dict_scores["nb_c"],
bnlearn_h2pc_sparse_dict_scores["svm"], bnlearn_h2pc_sparse_dict_scores["svm_po"],
bnlearn_h2pc_sparse_dict_scores["svm_r"], bnlearn_h2pc_sparse_dict_scores["knn"],
bnlearn_h2pc_sparse_dict_scores["knn_d"]]
nt_sparse_means = [notears_sparse_dict_scores["dt"], notears_sparse_dict_scores["dt_e"],
                   notears_sparse_dict_scores["rf"], notears_sparse_dict_scores["rf_e"],
                   notears_sparse_dict_scores["lr"], notears_sparse_dict_scores["lr_l1"],
                   notears_sparse_dict_scores["lr_l2"], notears_sparse_dict_scores["lr_e"],
                   notears_sparse_dict_scores["nb"], notears_sparse_dict_scores["nb_g"],
                   notears_sparse_dict_scores["nb_m"], notears_sparse_dict_scores["nb_c"],
                   notears_sparse_dict_scores["svm"], notears_sparse_dict_scores["svm_po"],
                   notears_sparse_dict_scores["svm_r"], notears_sparse_dict_scores["knn"],
                   notears_sparse_dict_scores["knn_d"]]
nt_l2_sparse_means = [notears_l2_sparse_dict_scores["dt"], notears_l2_sparse_dict_scores["dt_e"],
notears_l2_sparse_dict_scores["rf"], notears_l2_sparse_dict_scores["rf_e"],
notears_l2_sparse_dict_scores["lr"], notears_l2_sparse_dict_scores["lr_l1"],
notears_l2_sparse_dict_scores["lr_l2"], notears_l2_sparse_dict_scores["lr_e"],
notears_l2_sparse_dict_scores["nb"], notears_l2_sparse_dict_scores["nb_g"],
notears_l2_sparse_dict_scores["nb_m"], notears_l2_sparse_dict_scores["nb_c"],
notears_l2_sparse_dict_scores["svm"], notears_l2_sparse_dict_scores["svm_po"],
notears_l2_sparse_dict_scores["svm_r"], notears_l2_sparse_dict_scores["knn"],
notears_l2_sparse_dict_scores["knn_d"]]
nt_p_sparse_means = [notears_poisson_sparse_dict_scores["dt"], notears_poisson_sparse_dict_scores["dt_e"],
notears_poisson_sparse_dict_scores["rf"], notears_poisson_sparse_dict_scores["rf_e"],
notears_poisson_sparse_dict_scores["lr"], notears_poisson_sparse_dict_scores["lr_l1"],
notears_poisson_sparse_dict_scores["lr_l2"], notears_poisson_sparse_dict_scores["lr_e"],
notears_poisson_sparse_dict_scores["nb"], notears_poisson_sparse_dict_scores["nb_g"],
notears_poisson_sparse_dict_scores["nb_m"], notears_poisson_sparse_dict_scores["nb_c"],
notears_poisson_sparse_dict_scores["svm"], notears_poisson_sparse_dict_scores["svm_po"],
notears_poisson_sparse_dict_scores["svm_r"], notears_poisson_sparse_dict_scores["knn"],
notears_poisson_sparse_dict_scores["knn_d"]]
p_sparse_means = [pomegranate_exact_sparse_dict_scores["dt"], pomegranate_exact_sparse_dict_scores["dt_e"],
                  pomegranate_exact_sparse_dict_scores["rf"], pomegranate_exact_sparse_dict_scores["rf_e"],
                  pomegranate_exact_sparse_dict_scores["lr"], pomegranate_exact_sparse_dict_scores["lr_l1"],
                  pomegranate_exact_sparse_dict_scores["lr_l2"], pomegranate_exact_sparse_dict_scores["lr_e"],
                  pomegranate_exact_sparse_dict_scores["nb"], pomegranate_exact_sparse_dict_scores["nb_g"],
                  pomegranate_exact_sparse_dict_scores["nb_m"], pomegranate_exact_sparse_dict_scores["nb_c"],
                  pomegranate_exact_sparse_dict_scores["svm"], pomegranate_exact_sparse_dict_scores["svm_po"],
                  pomegranate_exact_sparse_dict_scores["svm_r"], pomegranate_exact_sparse_dict_scores["knn"],
                  pomegranate_exact_sparse_dict_scores["knn_d"]]
p_g_sparse_means = [pomegranate_greedy_sparse_dict_scores["dt"],
pomegranate_greedy_sparse_dict_scores["dt_e"],
pomegranate_greedy_sparse_dict_scores["rf"],
pomegranate_greedy_sparse_dict_scores["rf_e"],
pomegranate_greedy_sparse_dict_scores["lr"],
pomegranate_greedy_sparse_dict_scores["lr_l1"],
pomegranate_greedy_sparse_dict_scores["lr_l2"],
pomegranate_greedy_sparse_dict_scores["lr_e"],
pomegranate_greedy_sparse_dict_scores["nb"],
pomegranate_greedy_sparse_dict_scores["nb_g"],
pomegranate_greedy_sparse_dict_scores["nb_m"],
pomegranate_greedy_sparse_dict_scores["nb_c"],
pomegranate_greedy_sparse_dict_scores["svm"],
pomegranate_greedy_sparse_dict_scores["svm_po"],
pomegranate_greedy_sparse_dict_scores["svm_r"],
pomegranate_greedy_sparse_dict_scores["knn"],
pomegranate_greedy_sparse_dict_scores["knn_d"]]
pgmpy_tree_sparse_means = [pgmpy_tree_sparse_dict_scores["dt"],
pgmpy_tree_sparse_dict_scores["dt_e"],
pgmpy_tree_sparse_dict_scores["rf"],
pgmpy_tree_sparse_dict_scores["rf_e"],
pgmpy_tree_sparse_dict_scores["lr"],
pgmpy_tree_sparse_dict_scores["lr_l1"],
pgmpy_tree_sparse_dict_scores["lr_l2"],
pgmpy_tree_sparse_dict_scores["lr_e"],
pgmpy_tree_sparse_dict_scores["nb"],
pgmpy_tree_sparse_dict_scores["nb_g"],
pgmpy_tree_sparse_dict_scores["nb_m"],
pgmpy_tree_sparse_dict_scores["nb_c"],
pgmpy_tree_sparse_dict_scores["svm"],
pgmpy_tree_sparse_dict_scores["svm_po"],
pgmpy_tree_sparse_dict_scores["svm_r"],
pgmpy_tree_sparse_dict_scores["knn"],
pgmpy_tree_sparse_dict_scores["knn_d"]]
pgmpy_hc_sparse_means = [pgmpy_hc_sparse_dict_scores["dt"],
pgmpy_hc_sparse_dict_scores["dt_e"],
pgmpy_hc_sparse_dict_scores["rf"],
pgmpy_hc_sparse_dict_scores["rf_e"],
pgmpy_hc_sparse_dict_scores["lr"],
pgmpy_hc_sparse_dict_scores["lr_l1"],
pgmpy_hc_sparse_dict_scores["lr_l2"],
pgmpy_hc_sparse_dict_scores["lr_e"],
pgmpy_hc_sparse_dict_scores["nb"],
pgmpy_hc_sparse_dict_scores["nb_g"],
pgmpy_hc_sparse_dict_scores["nb_m"],
pgmpy_hc_sparse_dict_scores["nb_c"],
pgmpy_hc_sparse_dict_scores["svm"],
pgmpy_hc_sparse_dict_scores["svm_po"],
pgmpy_hc_sparse_dict_scores["svm_r"],
pgmpy_hc_sparse_dict_scores["knn"],
pgmpy_hc_sparse_dict_scores["knn_d"]]
pgmpy_mmhc_sparse_means = [pgmpy_mmhc_sparse_dict_scores["dt"],
pgmpy_mmhc_sparse_dict_scores["dt_e"],
pgmpy_mmhc_sparse_dict_scores["rf"],
pgmpy_mmhc_sparse_dict_scores["rf_e"],
pgmpy_mmhc_sparse_dict_scores["lr"],
pgmpy_mmhc_sparse_dict_scores["lr_l1"],
pgmpy_mmhc_sparse_dict_scores["lr_l2"],
pgmpy_mmhc_sparse_dict_scores["lr_e"],
pgmpy_mmhc_sparse_dict_scores["nb"],
pgmpy_mmhc_sparse_dict_scores["nb_g"],
pgmpy_mmhc_sparse_dict_scores["nb_m"],
pgmpy_mmhc_sparse_dict_scores["nb_c"],
pgmpy_mmhc_sparse_dict_scores["svm"],
pgmpy_mmhc_sparse_dict_scores["svm_po"],
pgmpy_mmhc_sparse_dict_scores["svm_r"],
pgmpy_mmhc_sparse_dict_scores["knn"],
pgmpy_mmhc_sparse_dict_scores["knn_d"]]
plt.rcParams["figure.figsize"] = [18, 18]
plt.rcParams["figure.autolayout"] = True
x_axis = np.arange(len(labels))
w = 0.05 # the width of the bars
plt.bar(x_axis + w, bn_sparse_means, width=0.05, label="BN_LEARN (HC)", color="lightsteelblue")
plt.bar(x_axis + w * 2, bn_tabu_sparse_means, width=0.05, label="BN_LEARN (TABU)", color="cornflowerblue")
plt.bar(x_axis + w * 3, bn_mmhc_sparse_means, width=0.05, label="BN_LEARN (MMHC)", color="blue")
plt.bar(x_axis + w * 4, bn_rsmax2_sparse_means, width=0.05, label="BN_LEARN (RSMAX2)", color="mediumblue")
plt.bar(x_axis + w * 5, bn_h2pc_sparse_means, width=0.05, label="BN_LEARN (H2PC)", color="navy")
plt.bar(x_axis + w * 6, nt_sparse_means, width=0.05, label="NO_TEARS (logistic)", color="limegreen")
plt.bar(x_axis + w * 7, nt_l2_sparse_means, width=0.05, label="NO_TEARS (l2)", color="forestgreen")
plt.bar(x_axis + w * 8, nt_p_sparse_means, width=0.05, label="NO_TEARS (poisson)", color="darkgreen")
plt.bar(x_axis + w * 9, p_sparse_means, width=0.05, label="POMEGRANATE (exact)", color="darkviolet")
plt.bar(x_axis + w * 10, p_g_sparse_means, width=0.05, label="POMEGRANATE (greed)", color="rebeccapurple")
plt.bar(x_axis + w * 11, pgmpy_mmhc_sparse_means, width=0.05, label="PGMPY (MMHC)", color="#FA8072")
plt.bar(x_axis + w * 12, pgmpy_hc_sparse_means, width=0.05, label="PGMPY (HC)", color="#FF2400")
plt.bar(x_axis + w * 13, pgmpy_tree_sparse_means, width=0.05, label="PGMPY (TREE)", color="#7C0A02")
plt.xticks(x_axis, labels)
plt.legend()
plt.style.use("fivethirtyeight")
plt.ylabel('Accuracy')
plt.xlabel('ML Technique', labelpad=15)
plt.title('Sparse Problem - Performance by library on ML technique')
# plt.ylim(0.6, 1)
# plt.tick_params(rotation=45)
plt.savefig('pipeline_summary_benchmark_for_sparse_by_library_groupbar.png', bbox_inches='tight')
plt.show()
# Produce Dimension Problem results by library and ML technique
# Grouped-bar figure
labels = ['DT_G', 'DT_E', 'RF_G', 'RF_E', 'LR', 'LR_L1', 'LR_L2', 'LR_E', 'NB_B', 'NB_G', 'NB_M', 'NB_C', 'SVM_S',
'SVM_P', 'SVM_R', 'KNN_W', 'KNN_D']
bn_dimension_means = [bnlearn_dimension_dict_scores["dt"], bnlearn_dimension_dict_scores["dt_e"],
                      bnlearn_dimension_dict_scores["rf"], bnlearn_dimension_dict_scores["rf_e"],
                      bnlearn_dimension_dict_scores["lr"], bnlearn_dimension_dict_scores["lr_l1"],
                      bnlearn_dimension_dict_scores["lr_l2"], bnlearn_dimension_dict_scores["lr_e"],
                      bnlearn_dimension_dict_scores["nb"], bnlearn_dimension_dict_scores["nb_g"],
                      bnlearn_dimension_dict_scores["nb_m"], bnlearn_dimension_dict_scores["nb_c"],
                      bnlearn_dimension_dict_scores["svm"], bnlearn_dimension_dict_scores["svm_po"],
                      bnlearn_dimension_dict_scores["svm_r"], bnlearn_dimension_dict_scores["knn"],
                      bnlearn_dimension_dict_scores["knn_d"]]
bn_tabu_dimension_means = [bnlearn_tabu_dimension_dict_scores["dt"], bnlearn_tabu_dimension_dict_scores["dt_e"],
bnlearn_tabu_dimension_dict_scores["rf"], bnlearn_tabu_dimension_dict_scores["rf_e"],
bnlearn_tabu_dimension_dict_scores["lr"], bnlearn_tabu_dimension_dict_scores["lr_l1"],
bnlearn_tabu_dimension_dict_scores["lr_l2"], bnlearn_tabu_dimension_dict_scores["lr_e"],
bnlearn_tabu_dimension_dict_scores["nb"], bnlearn_tabu_dimension_dict_scores["nb_g"],
bnlearn_tabu_dimension_dict_scores["nb_m"], bnlearn_tabu_dimension_dict_scores["nb_c"],
bnlearn_tabu_dimension_dict_scores["svm"], bnlearn_tabu_dimension_dict_scores["svm_po"],
bnlearn_tabu_dimension_dict_scores["svm_r"], bnlearn_tabu_dimension_dict_scores["knn"],
bnlearn_tabu_dimension_dict_scores["knn_d"]]
bn_mmhc_dimension_means = [bnlearn_mmhc_dimension_dict_scores["dt"], bnlearn_mmhc_dimension_dict_scores["dt_e"],
bnlearn_mmhc_dimension_dict_scores["rf"], bnlearn_mmhc_dimension_dict_scores["rf_e"],
bnlearn_mmhc_dimension_dict_scores["lr"], bnlearn_mmhc_dimension_dict_scores["lr_l1"],
bnlearn_mmhc_dimension_dict_scores["lr_l2"], bnlearn_mmhc_dimension_dict_scores["lr_e"],
bnlearn_mmhc_dimension_dict_scores["nb"], bnlearn_mmhc_dimension_dict_scores["nb_g"],
bnlearn_mmhc_dimension_dict_scores["nb_m"], bnlearn_mmhc_dimension_dict_scores["nb_c"],
bnlearn_mmhc_dimension_dict_scores["svm"], bnlearn_mmhc_dimension_dict_scores["svm_po"],
bnlearn_mmhc_dimension_dict_scores["svm_r"], bnlearn_mmhc_dimension_dict_scores["knn"],
bnlearn_mmhc_dimension_dict_scores["knn_d"]]
bn_rsmax2_dimension_means = [bnlearn_rsmax2_dimension_dict_scores["dt"], bnlearn_rsmax2_dimension_dict_scores["dt_e"],
bnlearn_rsmax2_dimension_dict_scores["rf"], bnlearn_rsmax2_dimension_dict_scores["rf_e"],
bnlearn_rsmax2_dimension_dict_scores["lr"], bnlearn_rsmax2_dimension_dict_scores["lr_l1"],
bnlearn_rsmax2_dimension_dict_scores["lr_l2"], bnlearn_rsmax2_dimension_dict_scores["lr_e"],
bnlearn_rsmax2_dimension_dict_scores["nb"], bnlearn_rsmax2_dimension_dict_scores["nb_g"],
bnlearn_rsmax2_dimension_dict_scores["nb_m"], bnlearn_rsmax2_dimension_dict_scores["nb_c"],
bnlearn_rsmax2_dimension_dict_scores["svm"], bnlearn_rsmax2_dimension_dict_scores["svm_po"],
bnlearn_rsmax2_dimension_dict_scores["svm_r"], bnlearn_rsmax2_dimension_dict_scores["knn"],
bnlearn_rsmax2_dimension_dict_scores["knn_d"]]
bn_h2pc_dimension_means = [bnlearn_h2pc_dimension_dict_scores["dt"], bnlearn_h2pc_dimension_dict_scores["dt_e"],
bnlearn_h2pc_dimension_dict_scores["rf"], bnlearn_h2pc_dimension_dict_scores["rf_e"],
bnlearn_h2pc_dimension_dict_scores["lr"], bnlearn_h2pc_dimension_dict_scores["lr_l1"],
bnlearn_h2pc_dimension_dict_scores["lr_l2"], bnlearn_h2pc_dimension_dict_scores["lr_e"],
bnlearn_h2pc_dimension_dict_scores["nb"], bnlearn_h2pc_dimension_dict_scores["nb_g"],
bnlearn_h2pc_dimension_dict_scores["nb_m"], bnlearn_h2pc_dimension_dict_scores["nb_c"],
bnlearn_h2pc_dimension_dict_scores["svm"], bnlearn_h2pc_dimension_dict_scores["svm_po"],
bnlearn_h2pc_dimension_dict_scores["svm_r"], bnlearn_h2pc_dimension_dict_scores["knn"],
bnlearn_h2pc_dimension_dict_scores["knn_d"]]
nt_dimension_means = [notears_dimension_dict_scores["dt"], notears_dimension_dict_scores["dt_e"],
                      notears_dimension_dict_scores["rf"], notears_dimension_dict_scores["rf_e"],
                      notears_dimension_dict_scores["lr"], notears_dimension_dict_scores["lr_l1"],
                      notears_dimension_dict_scores["lr_l2"], notears_dimension_dict_scores["lr_e"],
                      notears_dimension_dict_scores["nb"], notears_dimension_dict_scores["nb_g"],
                      notears_dimension_dict_scores["nb_m"], notears_dimension_dict_scores["nb_c"],
                      notears_dimension_dict_scores["svm"], notears_dimension_dict_scores["svm_po"],
                      notears_dimension_dict_scores["svm_r"], notears_dimension_dict_scores["knn"],
                      notears_dimension_dict_scores["knn_d"]]
nt_l2_dimension_means = [notears_l2_dimension_dict_scores["dt"], notears_l2_dimension_dict_scores["dt_e"],
notears_l2_dimension_dict_scores["rf"], notears_l2_dimension_dict_scores["rf_e"],
notears_l2_dimension_dict_scores["lr"], notears_l2_dimension_dict_scores["lr_l1"],
notears_l2_dimension_dict_scores["lr_l2"], notears_l2_dimension_dict_scores["lr_e"],
notears_l2_dimension_dict_scores["nb"], notears_l2_dimension_dict_scores["nb_g"],
notears_l2_dimension_dict_scores["nb_m"], notears_l2_dimension_dict_scores["nb_c"],
notears_l2_dimension_dict_scores["svm"], notears_l2_dimension_dict_scores["svm_po"],
notears_l2_dimension_dict_scores["svm_r"], notears_l2_dimension_dict_scores["knn"],
notears_l2_dimension_dict_scores["knn_d"]]
nt_p_dimension_means = [notears_poisson_dimension_dict_scores["dt"], notears_poisson_dimension_dict_scores["dt_e"],
notears_poisson_dimension_dict_scores["rf"], notears_poisson_dimension_dict_scores["rf_e"],
notears_poisson_dimension_dict_scores["lr"], notears_poisson_dimension_dict_scores["lr_l1"],
notears_poisson_dimension_dict_scores["lr_l2"], notears_poisson_dimension_dict_scores["lr_e"],
notears_poisson_dimension_dict_scores["nb"], notears_poisson_dimension_dict_scores["nb_g"],
notears_poisson_dimension_dict_scores["nb_m"], notears_poisson_dimension_dict_scores["nb_c"],
notears_poisson_dimension_dict_scores["svm"], notears_poisson_dimension_dict_scores["svm_po"],
notears_poisson_dimension_dict_scores["svm_r"], notears_poisson_dimension_dict_scores["knn"],
notears_poisson_dimension_dict_scores["knn_d"]]
p_dimension_means = [pomegranate_exact_dimension_dict_scores["dt"], pomegranate_exact_dimension_dict_scores["dt_e"],
                     pomegranate_exact_dimension_dict_scores["rf"], pomegranate_exact_dimension_dict_scores["rf_e"],
                     pomegranate_exact_dimension_dict_scores["lr"], pomegranate_exact_dimension_dict_scores["lr_l1"],
                     pomegranate_exact_dimension_dict_scores["lr_l2"], pomegranate_exact_dimension_dict_scores["lr_e"],
                     pomegranate_exact_dimension_dict_scores["nb"], pomegranate_exact_dimension_dict_scores["nb_g"],
                     pomegranate_exact_dimension_dict_scores["nb_m"], pomegranate_exact_dimension_dict_scores["nb_c"],
                     pomegranate_exact_dimension_dict_scores["svm"], pomegranate_exact_dimension_dict_scores["svm_po"],
                     pomegranate_exact_dimension_dict_scores["svm_r"], pomegranate_exact_dimension_dict_scores["knn"],
                     pomegranate_exact_dimension_dict_scores["knn_d"]]
p_g_dimension_means = [pomegranate_greedy_dimension_dict_scores["dt"],
pomegranate_greedy_dimension_dict_scores["dt_e"],
pomegranate_greedy_dimension_dict_scores["rf"],
pomegranate_greedy_dimension_dict_scores["rf_e"],
pomegranate_greedy_dimension_dict_scores["lr"],
pomegranate_greedy_dimension_dict_scores["lr_l1"],
pomegranate_greedy_dimension_dict_scores["lr_l2"],
pomegranate_greedy_dimension_dict_scores["lr_e"],
pomegranate_greedy_dimension_dict_scores["nb"],
pomegranate_greedy_dimension_dict_scores["nb_g"],
pomegranate_greedy_dimension_dict_scores["nb_m"],
pomegranate_greedy_dimension_dict_scores["nb_c"],
pomegranate_greedy_dimension_dict_scores["svm"],
pomegranate_greedy_dimension_dict_scores["svm_po"],
pomegranate_greedy_dimension_dict_scores["svm_r"],
pomegranate_greedy_dimension_dict_scores["knn"],
pomegranate_greedy_dimension_dict_scores["knn_d"]]
pgmpy_tree_dimension_means = [pgmpy_tree_dimension_dict_scores["dt"],
pgmpy_tree_dimension_dict_scores["dt_e"],
pgmpy_tree_dimension_dict_scores["rf"],
pgmpy_tree_dimension_dict_scores["rf_e"],
pgmpy_tree_dimension_dict_scores["lr"],
pgmpy_tree_dimension_dict_scores["lr_l1"],
pgmpy_tree_dimension_dict_scores["lr_l2"],
pgmpy_tree_dimension_dict_scores["lr_e"],
pgmpy_tree_dimension_dict_scores["nb"],
pgmpy_tree_dimension_dict_scores["nb_g"],
pgmpy_tree_dimension_dict_scores["nb_m"],
pgmpy_tree_dimension_dict_scores["nb_c"],
pgmpy_tree_dimension_dict_scores["svm"],
pgmpy_tree_dimension_dict_scores["svm_po"],
pgmpy_tree_dimension_dict_scores["svm_r"],
pgmpy_tree_dimension_dict_scores["knn"],
pgmpy_tree_dimension_dict_scores["knn_d"]]
pgmpy_hc_dimension_means = [pgmpy_hc_dimension_dict_scores["dt"],
pgmpy_hc_dimension_dict_scores["dt_e"],
pgmpy_hc_dimension_dict_scores["rf"],
pgmpy_hc_dimension_dict_scores["rf_e"],
pgmpy_hc_dimension_dict_scores["lr"],
pgmpy_hc_dimension_dict_scores["lr_l1"],
pgmpy_hc_dimension_dict_scores["lr_l2"],
pgmpy_hc_dimension_dict_scores["lr_e"],
pgmpy_hc_dimension_dict_scores["nb"],
pgmpy_hc_dimension_dict_scores["nb_g"],
pgmpy_hc_dimension_dict_scores["nb_m"],
pgmpy_hc_dimension_dict_scores["nb_c"],
pgmpy_hc_dimension_dict_scores["svm"],
pgmpy_hc_dimension_dict_scores["svm_po"],
pgmpy_hc_dimension_dict_scores["svm_r"],
pgmpy_hc_dimension_dict_scores["knn"],
pgmpy_hc_dimension_dict_scores["knn_d"]]
pgmpy_mmhc_dimension_means = [pgmpy_mmhc_dimension_dict_scores["dt"],
pgmpy_mmhc_dimension_dict_scores["dt_e"],
pgmpy_mmhc_dimension_dict_scores["rf"],
pgmpy_mmhc_dimension_dict_scores["rf_e"],
pgmpy_mmhc_dimension_dict_scores["lr"],
pgmpy_mmhc_dimension_dict_scores["lr_l1"],
pgmpy_mmhc_dimension_dict_scores["lr_l2"],
pgmpy_mmhc_dimension_dict_scores["lr_e"],
pgmpy_mmhc_dimension_dict_scores["nb"],
pgmpy_mmhc_dimension_dict_scores["nb_g"],
pgmpy_mmhc_dimension_dict_scores["nb_m"],
pgmpy_mmhc_dimension_dict_scores["nb_c"],
pgmpy_mmhc_dimension_dict_scores["svm"],
pgmpy_mmhc_dimension_dict_scores["svm_po"],
pgmpy_mmhc_dimension_dict_scores["svm_r"],
pgmpy_mmhc_dimension_dict_scores["knn"],
pgmpy_mmhc_dimension_dict_scores["knn_d"]]
plt.rcParams["figure.figsize"] = [18, 18]
plt.rcParams["figure.autolayout"] = True
x_axis = np.arange(len(labels))
w = 0.05 # the width of the bars
plt.bar(x_axis + w, bn_dimension_means, width=0.05, label="BN_LEARN (HC)", color="lightsteelblue")
plt.bar(x_axis + w * 2, bn_tabu_dimension_means, width=0.05, label="BN_LEARN (TABU)", color="cornflowerblue")
plt.bar(x_axis + w * 3, bn_mmhc_dimension_means, width=0.05, label="BN_LEARN (MMHC)", color="blue")
plt.bar(x_axis + w * 4, bn_rsmax2_dimension_means, width=0.05, label="BN_LEARN (RSMAX2)", color="mediumblue")
plt.bar(x_axis + w * 5, bn_h2pc_dimension_means, width=0.05, label="BN_LEARN (H2PC)", color="navy")
plt.bar(x_axis + w * 6, nt_dimension_means, width=0.05, label="NO_TEARS (logistic)", color="limegreen")
plt.bar(x_axis + w * 7, nt_l2_dimension_means, width=0.05, label="NO_TEARS (l2)", color="forestgreen")
plt.bar(x_axis + w * 8, nt_p_dimension_means, width=0.05, label="NO_TEARS (poisson)", color="darkgreen")
plt.bar(x_axis + w * 9, p_dimension_means, width=0.05, label="POMEGRANATE (exact)", color="darkviolet")
plt.bar(x_axis + w * 10, p_g_dimension_means, width=0.05, label="POMEGRANATE (greed)", color="rebeccapurple")
plt.bar(x_axis + w * 11, pgmpy_mmhc_dimension_means, width=0.05, label="PGMPY (MMHC)", color="#FA8072")
plt.bar(x_axis + w * 12, pgmpy_hc_dimension_means, width=0.05, label="PGMPY (HC)", color="#FF2400")
plt.bar(x_axis + w * 13, pgmpy_tree_dimension_means, width=0.05, label="PGMPY (TREE)", color="#7C0A02")
plt.xticks(x_axis, labels)
plt.legend()
plt.style.use("fivethirtyeight")
plt.ylabel('Accuracy')
plt.xlabel('ML Technique', labelpad=15)
plt.title('Dimension Problem - Performance by library on ML technique')
# plt.ylim(0.6, 1)
# plt.tick_params(rotation=45)
plt.savefig('pipeline_summary_benchmark_for_dimension_by_library_groupbar.png', bbox_inches='tight')
plt.show()
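# The linear / non-linear / sparse / dimension blocks above differ only in the
# score dictionaries they read and the output filename. A hedged sketch (left
# commented out so nothing is re-plotted; `problems` is a hypothetical mapping
# over the score-dict variables defined above) of driving them from one loop
# with the helpers introduced earlier:
#
#   problems = {
#       "sparse": {"BN_LEARN (HC)": (bnlearn_sparse_dict_scores, "lightsteelblue"),
#                  "BN_LEARN (TABU)": (bnlearn_tabu_sparse_dict_scores, "cornflowerblue")},
#       "dimension": {"BN_LEARN (HC)": (bnlearn_dimension_dict_scores, "lightsteelblue"),
#                     "BN_LEARN (TABU)": (bnlearn_tabu_dimension_dict_scores, "cornflowerblue")},
#   }
#   for name, libs in problems.items():
#       series = {lbl: (scores_in_label_order(d), c) for lbl, (d, c) in libs.items()}
#       grouped_bar(series, labels,
#                   f'{name.capitalize()} Problem - Performance by library on ML technique',
#                   f'pipeline_summary_benchmark_for_{name}_by_library_groupbar.png')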
#--------------
# Produce Linear Problem results by library and ML technique (test set from learned world)
# Grouped-bar figure
labels = ['DT_G', 'DT_E', 'RF_G', 'RF_E', 'LR', 'LR_L1', 'LR_L2', 'LR_E', 'NB_B', 'NB_G', 'NB_M', 'NB_C', 'SVM_S',
'SVM_P', 'SVM_R', 'KNN_W', 'KNN_D']
bn_means = [bnlearn_linear_dict_scores_simtest["dt"], bnlearn_linear_dict_scores_simtest["dt_e"], bnlearn_linear_dict_scores_simtest["rf"],
bnlearn_linear_dict_scores_simtest["rf_e"], bnlearn_linear_dict_scores_simtest["lr"],
bnlearn_linear_dict_scores_simtest["lr_l1"], bnlearn_linear_dict_scores_simtest["lr_l2"],
bnlearn_linear_dict_scores_simtest["lr_e"], bnlearn_linear_dict_scores_simtest["nb"],
bnlearn_linear_dict_scores_simtest["nb_g"], bnlearn_linear_dict_scores_simtest["nb_m"],
bnlearn_linear_dict_scores_simtest["nb_c"], bnlearn_linear_dict_scores_simtest["svm"],
bnlearn_linear_dict_scores_simtest["svm_po"], bnlearn_linear_dict_scores_simtest["svm_r"],
bnlearn_linear_dict_scores_simtest["knn"], bnlearn_linear_dict_scores_simtest["knn_d"]]
bn_tabu_means = [bnlearn_tabu_linear_dict_scores_simtest["dt"], bnlearn_tabu_linear_dict_scores_simtest["dt_e"],
bnlearn_tabu_linear_dict_scores_simtest["rf"], bnlearn_tabu_linear_dict_scores_simtest["rf_e"],
bnlearn_tabu_linear_dict_scores_simtest["lr"], bnlearn_tabu_linear_dict_scores_simtest["lr_l1"],
bnlearn_tabu_linear_dict_scores_simtest["lr_l2"], bnlearn_tabu_linear_dict_scores_simtest["lr_e"],
bnlearn_tabu_linear_dict_scores_simtest["nb"], bnlearn_tabu_linear_dict_scores_simtest["nb_g"],
bnlearn_tabu_linear_dict_scores_simtest["nb_m"], bnlearn_tabu_linear_dict_scores_simtest["nb_c"],
bnlearn_tabu_linear_dict_scores_simtest["svm"], bnlearn_tabu_linear_dict_scores_simtest["svm_po"],
bnlearn_tabu_linear_dict_scores_simtest["svm_r"], bnlearn_tabu_linear_dict_scores_simtest["knn"],
bnlearn_tabu_linear_dict_scores_simtest["knn_d"]]
bn_pc_means = [bnlearn_pc_linear_dict_scores_simtest["dt"], bnlearn_pc_linear_dict_scores_simtest["dt_e"],
bnlearn_pc_linear_dict_scores_simtest["rf"], bnlearn_pc_linear_dict_scores_simtest["rf_e"],
bnlearn_pc_linear_dict_scores_simtest["lr"], bnlearn_pc_linear_dict_scores_simtest["lr_l1"],
bnlearn_pc_linear_dict_scores_simtest["lr_l2"], bnlearn_pc_linear_dict_scores_simtest["lr_e"],
bnlearn_pc_linear_dict_scores_simtest["nb"], bnlearn_pc_linear_dict_scores_simtest["nb_g"],
bnlearn_pc_linear_dict_scores_simtest["nb_m"], bnlearn_pc_linear_dict_scores_simtest["nb_c"],
bnlearn_pc_linear_dict_scores_simtest["svm"], bnlearn_pc_linear_dict_scores_simtest["svm_po"],
bnlearn_pc_linear_dict_scores_simtest["svm_r"], bnlearn_pc_linear_dict_scores_simtest["knn"],
bnlearn_pc_linear_dict_scores_simtest["knn_d"]]
bn_mmhc_means = [bnlearn_mmhc_linear_dict_scores_simtest["dt"], bnlearn_mmhc_linear_dict_scores_simtest["dt_e"],
bnlearn_mmhc_linear_dict_scores_simtest["rf"], bnlearn_mmhc_linear_dict_scores_simtest["rf_e"],
bnlearn_mmhc_linear_dict_scores_simtest["lr"], bnlearn_mmhc_linear_dict_scores_simtest["lr_l1"],
bnlearn_mmhc_linear_dict_scores_simtest["lr_l2"], bnlearn_mmhc_linear_dict_scores_simtest["lr_e"],
bnlearn_mmhc_linear_dict_scores_simtest["nb"], bnlearn_mmhc_linear_dict_scores_simtest["nb_g"],
bnlearn_mmhc_linear_dict_scores_simtest["nb_m"], bnlearn_mmhc_linear_dict_scores_simtest["nb_c"],
bnlearn_mmhc_linear_dict_scores_simtest["svm"], bnlearn_mmhc_linear_dict_scores_simtest["svm_po"],
bnlearn_mmhc_linear_dict_scores_simtest["svm_r"], bnlearn_mmhc_linear_dict_scores_simtest["knn"],
bnlearn_mmhc_linear_dict_scores_simtest["knn_d"]]
bn_rsmax2_means = [bnlearn_rsmax2_linear_dict_scores_simtest["dt"], bnlearn_rsmax2_linear_dict_scores_simtest["dt_e"],
bnlearn_rsmax2_linear_dict_scores_simtest["rf"], bnlearn_rsmax2_linear_dict_scores_simtest["rf_e"],
bnlearn_rsmax2_linear_dict_scores_simtest["lr"], bnlearn_rsmax2_linear_dict_scores_simtest["lr_l1"],
bnlearn_rsmax2_linear_dict_scores_simtest["lr_l2"], bnlearn_rsmax2_linear_dict_scores_simtest["lr_e"],
bnlearn_rsmax2_linear_dict_scores_simtest["nb"], bnlearn_rsmax2_linear_dict_scores_simtest["nb_g"],
bnlearn_rsmax2_linear_dict_scores_simtest["nb_m"], bnlearn_rsmax2_linear_dict_scores_simtest["nb_c"],
bnlearn_rsmax2_linear_dict_scores_simtest["svm"], bnlearn_rsmax2_linear_dict_scores_simtest["svm_po"],
bnlearn_rsmax2_linear_dict_scores_simtest["svm_r"], bnlearn_rsmax2_linear_dict_scores_simtest["knn"],
bnlearn_rsmax2_linear_dict_scores_simtest["knn_d"]]
bn_h2pc_means = [bnlearn_h2pc_linear_dict_scores_simtest["dt"], bnlearn_h2pc_linear_dict_scores_simtest["dt_e"],
bnlearn_h2pc_linear_dict_scores_simtest["rf"], bnlearn_h2pc_linear_dict_scores_simtest["rf_e"],
bnlearn_h2pc_linear_dict_scores_simtest["lr"], bnlearn_h2pc_linear_dict_scores_simtest["lr_l1"],
bnlearn_h2pc_linear_dict_scores_simtest["lr_l2"], bnlearn_h2pc_linear_dict_scores_simtest["lr_e"],
bnlearn_h2pc_linear_dict_scores_simtest["nb"], bnlearn_h2pc_linear_dict_scores_simtest["nb_g"],
bnlearn_h2pc_linear_dict_scores_simtest["nb_m"], bnlearn_h2pc_linear_dict_scores_simtest["nb_c"],
bnlearn_h2pc_linear_dict_scores_simtest["svm"], bnlearn_h2pc_linear_dict_scores_simtest["svm_po"],
bnlearn_h2pc_linear_dict_scores_simtest["svm_r"], bnlearn_h2pc_linear_dict_scores_simtest["knn"],
bnlearn_h2pc_linear_dict_scores_simtest["knn_d"]]
nt_means = [notears_linear_dict_scores_simtest["dt"], notears_linear_dict_scores_simtest["dt_e"], notears_linear_dict_scores_simtest["rf"],
notears_linear_dict_scores_simtest["rf_e"], notears_linear_dict_scores_simtest["lr"],
notears_linear_dict_scores_simtest["lr_l1"], notears_linear_dict_scores_simtest["lr_l2"],
notears_linear_dict_scores_simtest["lr_e"], notears_linear_dict_scores_simtest["nb"],
notears_linear_dict_scores_simtest["nb_g"], notears_linear_dict_scores_simtest["nb_m"],
notears_linear_dict_scores_simtest["nb_c"], notears_linear_dict_scores_simtest["svm"],
notears_linear_dict_scores_simtest["svm_po"], notears_linear_dict_scores_simtest["svm_r"],
notears_linear_dict_scores_simtest["knn"], notears_linear_dict_scores_simtest["knn_d"]]
nt_l2_means = [notears_l2_linear_dict_scores_simtest["dt"], notears_l2_linear_dict_scores_simtest["dt_e"],
notears_l2_linear_dict_scores_simtest["rf"], notears_l2_linear_dict_scores_simtest["rf_e"],
notears_l2_linear_dict_scores_simtest["lr"], notears_l2_linear_dict_scores_simtest["lr_l1"],
notears_l2_linear_dict_scores_simtest["lr_l2"], notears_l2_linear_dict_scores_simtest["lr_e"],
notears_l2_linear_dict_scores_simtest["nb"], notears_l2_linear_dict_scores_simtest["nb_g"],
notears_l2_linear_dict_scores_simtest["nb_m"], notears_l2_linear_dict_scores_simtest["nb_c"],
notears_l2_linear_dict_scores_simtest["svm"], notears_l2_linear_dict_scores_simtest["svm_po"],
notears_l2_linear_dict_scores_simtest["svm_r"], notears_l2_linear_dict_scores_simtest["knn"],
notears_l2_linear_dict_scores_simtest["knn_d"]]
nt_p_means = [notears_poisson_linear_dict_scores_simtest["dt"], notears_poisson_linear_dict_scores_simtest["dt_e"],
notears_poisson_linear_dict_scores_simtest["rf"], notears_poisson_linear_dict_scores_simtest["rf_e"],
notears_poisson_linear_dict_scores_simtest["lr"], notears_poisson_linear_dict_scores_simtest["lr_l1"],
notears_poisson_linear_dict_scores_simtest["lr_l2"], notears_poisson_linear_dict_scores_simtest["lr_e"],
notears_poisson_linear_dict_scores_simtest["nb"], notears_poisson_linear_dict_scores_simtest["nb_g"],
notears_poisson_linear_dict_scores_simtest["nb_m"], notears_poisson_linear_dict_scores_simtest["nb_c"],
notears_poisson_linear_dict_scores_simtest["svm"], notears_poisson_linear_dict_scores_simtest["svm_po"],
notears_poisson_linear_dict_scores_simtest["svm_r"], notears_poisson_linear_dict_scores_simtest["knn"],
notears_poisson_linear_dict_scores_simtest["knn_d"]]
p_means = [pomegranate_exact_linear_dict_scores_simtest["dt"], pomegranate_exact_linear_dict_scores_simtest["dt_e"],
pomegranate_exact_linear_dict_scores_simtest["rf"], pomegranate_exact_linear_dict_scores_simtest["rf_e"],
pomegranate_exact_linear_dict_scores_simtest["lr"], pomegranate_exact_linear_dict_scores_simtest["lr_l1"],
pomegranate_exact_linear_dict_scores_simtest["lr_l2"], pomegranate_exact_linear_dict_scores_simtest["lr_e"],
pomegranate_exact_linear_dict_scores_simtest["nb"], pomegranate_exact_linear_dict_scores_simtest["nb_g"],
pomegranate_exact_linear_dict_scores_simtest["nb_m"], pomegranate_exact_linear_dict_scores_simtest["nb_c"],
pomegranate_exact_linear_dict_scores_simtest["svm"], pomegranate_exact_linear_dict_scores_simtest["svm_po"],
pomegranate_exact_linear_dict_scores_simtest["svm_r"], pomegranate_exact_linear_dict_scores_simtest["knn"],
pomegranate_exact_linear_dict_scores_simtest["knn_d"]]
p_g_means = [pomegranate_greedy_linear_dict_scores_simtest["dt"],
pomegranate_greedy_linear_dict_scores_simtest["dt_e"],
pomegranate_greedy_linear_dict_scores_simtest["rf"],
pomegranate_greedy_linear_dict_scores_simtest["rf_e"],
pomegranate_greedy_linear_dict_scores_simtest["lr"],
pomegranate_greedy_linear_dict_scores_simtest["lr_l1"],
pomegranate_greedy_linear_dict_scores_simtest["lr_l2"],
pomegranate_greedy_linear_dict_scores_simtest["lr_e"],
pomegranate_greedy_linear_dict_scores_simtest["nb"],
pomegranate_greedy_linear_dict_scores_simtest["nb_g"],
pomegranate_greedy_linear_dict_scores_simtest["nb_m"],
pomegranate_greedy_linear_dict_scores_simtest["nb_c"],
pomegranate_greedy_linear_dict_scores_simtest["svm"],
pomegranate_greedy_linear_dict_scores_simtest["svm_po"],
pomegranate_greedy_linear_dict_scores_simtest["svm_r"],
pomegranate_greedy_linear_dict_scores_simtest["knn"],
pomegranate_greedy_linear_dict_scores_simtest["knn_d"]]
pgmpy_tree_means = [pgmpy_tree_linear_dict_scores_simtest["dt"],
pgmpy_tree_linear_dict_scores_simtest["dt_e"],
pgmpy_tree_linear_dict_scores_simtest["rf"],
pgmpy_tree_linear_dict_scores_simtest["rf_e"],
pgmpy_tree_linear_dict_scores_simtest["lr"],
pgmpy_tree_linear_dict_scores_simtest["lr_l1"],
pgmpy_tree_linear_dict_scores_simtest["lr_l2"],
pgmpy_tree_linear_dict_scores_simtest["lr_e"],
pgmpy_tree_linear_dict_scores_simtest["nb"],
pgmpy_tree_linear_dict_scores_simtest["nb_g"],
pgmpy_tree_linear_dict_scores_simtest["nb_m"],
pgmpy_tree_linear_dict_scores_simtest["nb_c"],
pgmpy_tree_linear_dict_scores_simtest["svm"],
pgmpy_tree_linear_dict_scores_simtest["svm_po"],
pgmpy_tree_linear_dict_scores_simtest["svm_r"],
pgmpy_tree_linear_dict_scores_simtest["knn"],
pgmpy_tree_linear_dict_scores_simtest["knn_d"]]
pgmpy_hc_means = [pgmpy_hc_linear_dict_scores_simtest["dt"],
pgmpy_hc_linear_dict_scores_simtest["dt_e"],
pgmpy_hc_linear_dict_scores_simtest["rf"],
pgmpy_hc_linear_dict_scores_simtest["rf_e"],
pgmpy_hc_linear_dict_scores_simtest["lr"],
pgmpy_hc_linear_dict_scores_simtest["lr_l1"],
pgmpy_hc_linear_dict_scores_simtest["lr_l2"],
pgmpy_hc_linear_dict_scores_simtest["lr_e"],
pgmpy_hc_linear_dict_scores_simtest["nb"],
pgmpy_hc_linear_dict_scores_simtest["nb_g"],
pgmpy_hc_linear_dict_scores_simtest["nb_m"],
pgmpy_hc_linear_dict_scores_simtest["nb_c"],
pgmpy_hc_linear_dict_scores_simtest["svm"],
pgmpy_hc_linear_dict_scores_simtest["svm_po"],
pgmpy_hc_linear_dict_scores_simtest["svm_r"],
pgmpy_hc_linear_dict_scores_simtest["knn"],
pgmpy_hc_linear_dict_scores_simtest["knn_d"]]
pgmpy_mmhc_means = [pgmpy_mmhc_linear_dict_scores_simtest["dt"],
pgmpy_mmhc_linear_dict_scores_simtest["dt_e"],
pgmpy_mmhc_linear_dict_scores_simtest["rf"],
pgmpy_mmhc_linear_dict_scores_simtest["rf_e"],
pgmpy_mmhc_linear_dict_scores_simtest["lr"],
pgmpy_mmhc_linear_dict_scores_simtest["lr_l1"],
pgmpy_mmhc_linear_dict_scores_simtest["lr_l2"],
pgmpy_mmhc_linear_dict_scores_simtest["lr_e"],
pgmpy_mmhc_linear_dict_scores_simtest["nb"],
pgmpy_mmhc_linear_dict_scores_simtest["nb_g"],
pgmpy_mmhc_linear_dict_scores_simtest["nb_m"],
pgmpy_mmhc_linear_dict_scores_simtest["nb_c"],
pgmpy_mmhc_linear_dict_scores_simtest["svm"],
pgmpy_mmhc_linear_dict_scores_simtest["svm_po"],
pgmpy_mmhc_linear_dict_scores_simtest["svm_r"],
pgmpy_mmhc_linear_dict_scores_simtest["knn"],
pgmpy_mmhc_linear_dict_scores_simtest["knn_d"]]
plt.rcParams["figure.figsize"] = [18, 18]
plt.rcParams["figure.autolayout"] = True
x_axis = np.arange(len(labels))
w = 0.05 # the width of the bars
plt.bar(x_axis + w, bn_means, width=0.05, label="BN_LEARN (HC)", color="lightsteelblue")
plt.bar(x_axis + w * 2, bn_tabu_means, width=0.05, label="BN_LEARN (TABU)", color="cornflowerblue")
plt.bar(x_axis + w * 3, bn_pc_means, width=0.05, label="BN_LEARN (PC)", color="royalblue")
plt.bar(x_axis + w * 4, bn_mmhc_means, width=0.05, label="BN_LEARN (MMHC)", color="blue")
plt.bar(x_axis + w * 5, bn_rsmax2_means, width=0.05, label="BN_LEARN (RSMAX2)", color="mediumblue")
plt.bar(x_axis + w * 6, bn_h2pc_means, width=0.05, label="BN_LEARN (H2PC)", color="navy")
plt.bar(x_axis + w * 7, nt_means, width=0.05, label="NO_TEARS (logistic)", color="limegreen")
plt.bar(x_axis + w * 8, nt_l2_means, width=0.05, label="NO_TEARS (l2)", color="forestgreen")
plt.bar(x_axis + w * 9, nt_p_means, width=0.05, label="NO_TEARS (poisson)", color="darkgreen")
plt.bar(x_axis + w * 10, p_means, width=0.05, label="POMEGRANATE (exact)", color="darkviolet")
plt.bar(x_axis + w * 11, p_g_means, width=0.05, label="POMEGRANATE (greed)", color="rebeccapurple")
plt.bar(x_axis + w * 12, pgmpy_mmhc_means, width=0.05, label="PGMPY (MMHC)", color="#FA8072")
plt.bar(x_axis + w * 13, pgmpy_hc_means, width=0.05, label="PGMPY (HC)", color="#FF2400")
plt.bar(x_axis + w * 14, pgmpy_tree_means, width=0.05, label="PGMPY (TREE)", color="#7C0A02")
plt.xticks(x_axis, labels)
plt.legend()
plt.style.use("fivethirtyeight")
plt.ylabel('Accuracy')
plt.xlabel('ML Technique', labelpad=15)
plt.title('Linear Problem - Performance by library on ML technique')
# plt.ylim(0.6, 1)
# plt.tick_params(rotation=45)
plt.savefig('pipeline_summary_benchmark_for_linear_by_library_groupbar_simtest.png', bbox_inches='tight')
plt.show()
# Produce Non-Linear Problem results by library and ML technique (test set from learned world)
# Grouped-bar figure
labels = ['DT_G', 'DT_E', 'RF_G', 'RF_E', 'LR', 'LR_L1', 'LR_L2', 'LR_E', 'NB_B', 'NB_G', 'NB_M', 'NB_C', 'SVM_S',
'SVM_P', 'SVM_R', 'KNN_W', 'KNN_D']
bn_non_means = [bnlearn_nonlinear_dict_scores_simtest["dt"], bnlearn_nonlinear_dict_scores_simtest["dt_e"],
bnlearn_nonlinear_dict_scores_simtest["rf"], bnlearn_nonlinear_dict_scores_simtest["rf_e"],
bnlearn_nonlinear_dict_scores_simtest["lr"], bnlearn_nonlinear_dict_scores_simtest["lr_l1"],
bnlearn_nonlinear_dict_scores_simtest["lr_l2"], bnlearn_nonlinear_dict_scores_simtest["lr_e"],
bnlearn_nonlinear_dict_scores_simtest["nb"], bnlearn_nonlinear_dict_scores_simtest["nb_g"],
bnlearn_nonlinear_dict_scores_simtest["nb_m"], bnlearn_nonlinear_dict_scores_simtest["nb_c"],
bnlearn_nonlinear_dict_scores_simtest["svm"], bnlearn_nonlinear_dict_scores_simtest["svm_po"],
bnlearn_nonlinear_dict_scores_simtest["svm_r"], bnlearn_nonlinear_dict_scores_simtest["knn"],
bnlearn_nonlinear_dict_scores_simtest["knn_d"]]
bn_tabu_non_means = [bnlearn_tabu_nonlinear_dict_scores_simtest["dt"],
bnlearn_tabu_nonlinear_dict_scores_simtest["dt_e"],
bnlearn_tabu_nonlinear_dict_scores_simtest["rf"],
bnlearn_tabu_nonlinear_dict_scores_simtest["rf_e"],
bnlearn_tabu_nonlinear_dict_scores_simtest["lr"],
bnlearn_tabu_nonlinear_dict_scores_simtest["lr_l1"],
bnlearn_tabu_nonlinear_dict_scores_simtest["lr_l2"],
bnlearn_tabu_nonlinear_dict_scores_simtest["lr_e"],
bnlearn_tabu_nonlinear_dict_scores_simtest["nb"],
bnlearn_tabu_nonlinear_dict_scores_simtest["nb_g"],
bnlearn_tabu_nonlinear_dict_scores_simtest["nb_m"],
bnlearn_tabu_nonlinear_dict_scores_simtest["nb_c"],
bnlearn_tabu_nonlinear_dict_scores_simtest["svm"],
bnlearn_tabu_nonlinear_dict_scores_simtest["svm_po"],
bnlearn_tabu_nonlinear_dict_scores_simtest["svm_r"],
bnlearn_tabu_nonlinear_dict_scores_simtest["knn"],
bnlearn_tabu_nonlinear_dict_scores_simtest["knn_d"]]
bn_mmhc_non_means = [bnlearn_mmhc_nonlinear_dict_scores_simtest["dt"],
bnlearn_mmhc_nonlinear_dict_scores_simtest["dt_e"],
bnlearn_mmhc_nonlinear_dict_scores_simtest["rf"],
bnlearn_mmhc_nonlinear_dict_scores_simtest["rf_e"],
bnlearn_mmhc_nonlinear_dict_scores_simtest["lr"],
bnlearn_mmhc_nonlinear_dict_scores_simtest["lr_l1"],
bnlearn_mmhc_nonlinear_dict_scores_simtest["lr_l2"],
bnlearn_mmhc_nonlinear_dict_scores_simtest["lr_e"],
bnlearn_mmhc_nonlinear_dict_scores_simtest["nb"],
bnlearn_mmhc_nonlinear_dict_scores_simtest["nb_g"],
bnlearn_mmhc_nonlinear_dict_scores_simtest["nb_m"],
bnlearn_mmhc_nonlinear_dict_scores_simtest["nb_c"],
bnlearn_mmhc_nonlinear_dict_scores_simtest["svm"],
bnlearn_mmhc_nonlinear_dict_scores_simtest["svm_po"],
bnlearn_mmhc_nonlinear_dict_scores_simtest["svm_r"],
bnlearn_mmhc_nonlinear_dict_scores_simtest["knn"],
bnlearn_mmhc_nonlinear_dict_scores_simtest["knn_d"]]
bn_rsmax2_non_means = [bnlearn_rsmax2_nonlinear_dict_scores_simtest["dt"],
bnlearn_rsmax2_nonlinear_dict_scores_simtest["dt_e"],
bnlearn_rsmax2_nonlinear_dict_scores_simtest["rf"],
bnlearn_rsmax2_nonlinear_dict_scores_simtest["rf_e"],
bnlearn_rsmax2_nonlinear_dict_scores_simtest["lr"],
bnlearn_rsmax2_nonlinear_dict_scores_simtest["lr_l1"],
bnlearn_rsmax2_nonlinear_dict_scores_simtest["lr_l2"],
bnlearn_rsmax2_nonlinear_dict_scores_simtest["lr_e"],
bnlearn_rsmax2_nonlinear_dict_scores_simtest["nb"],
bnlearn_rsmax2_nonlinear_dict_scores_simtest["nb_g"],
bnlearn_rsmax2_nonlinear_dict_scores_simtest["nb_m"],
bnlearn_rsmax2_nonlinear_dict_scores_simtest["nb_c"],
bnlearn_rsmax2_nonlinear_dict_scores_simtest["svm"],
bnlearn_rsmax2_nonlinear_dict_scores_simtest["svm_po"],
bnlearn_rsmax2_nonlinear_dict_scores_simtest["svm_r"],
bnlearn_rsmax2_nonlinear_dict_scores_simtest["knn"],
bnlearn_rsmax2_nonlinear_dict_scores_simtest["knn_d"]]
bn_h2pc_non_means = [bnlearn_h2pc_nonlinear_dict_scores_simtest["dt"],
bnlearn_h2pc_nonlinear_dict_scores_simtest["dt_e"],
bnlearn_h2pc_nonlinear_dict_scores_simtest["rf"],
bnlearn_h2pc_nonlinear_dict_scores_simtest["rf_e"],
bnlearn_h2pc_nonlinear_dict_scores_simtest["lr"],
bnlearn_h2pc_nonlinear_dict_scores_simtest["lr_l1"],
bnlearn_h2pc_nonlinear_dict_scores_simtest["lr_l2"],
bnlearn_h2pc_nonlinear_dict_scores_simtest["lr_e"],
bnlearn_h2pc_nonlinear_dict_scores_simtest["nb"],
bnlearn_h2pc_nonlinear_dict_scores_simtest["nb_g"],
bnlearn_h2pc_nonlinear_dict_scores_simtest["nb_m"],
bnlearn_h2pc_nonlinear_dict_scores_simtest["nb_c"],
bnlearn_h2pc_nonlinear_dict_scores_simtest["svm"],
bnlearn_h2pc_nonlinear_dict_scores_simtest["svm_po"],
bnlearn_h2pc_nonlinear_dict_scores_simtest["svm_r"],
bnlearn_h2pc_nonlinear_dict_scores_simtest["knn"],
bnlearn_h2pc_nonlinear_dict_scores_simtest["knn_d"]]
nt_non_means = [notears_nonlinear_dict_scores_simtest["dt"], notears_nonlinear_dict_scores_simtest["dt_e"],
notears_nonlinear_dict_scores_simtest["rf"], notears_nonlinear_dict_scores_simtest["rf_e"],
notears_nonlinear_dict_scores_simtest["lr"], notears_nonlinear_dict_scores_simtest["lr_l1"],
notears_nonlinear_dict_scores_simtest["lr_l2"], notears_nonlinear_dict_scores_simtest["lr_e"],
notears_nonlinear_dict_scores_simtest["nb"], notears_nonlinear_dict_scores_simtest["nb_g"],
notears_nonlinear_dict_scores_simtest["nb_m"], notears_nonlinear_dict_scores_simtest["nb_c"],
notears_nonlinear_dict_scores_simtest["svm"], notears_nonlinear_dict_scores_simtest["svm_po"],
notears_nonlinear_dict_scores_simtest["svm_r"], notears_nonlinear_dict_scores_simtest["knn"],
notears_nonlinear_dict_scores_simtest["knn_d"]]
nt_l2_non_means = [notears_l2_nonlinear_dict_scores_simtest["dt"],
notears_l2_nonlinear_dict_scores_simtest["dt_e"],
notears_l2_nonlinear_dict_scores_simtest["rf"],
notears_l2_nonlinear_dict_scores_simtest["rf_e"],
notears_l2_nonlinear_dict_scores_simtest["lr"],
notears_l2_nonlinear_dict_scores_simtest["lr_l1"],
notears_l2_nonlinear_dict_scores_simtest["lr_l2"],
notears_l2_nonlinear_dict_scores_simtest["lr_e"],
notears_l2_nonlinear_dict_scores_simtest["nb"],
notears_l2_nonlinear_dict_scores_simtest["nb_g"],
notears_l2_nonlinear_dict_scores_simtest["nb_m"],
notears_l2_nonlinear_dict_scores_simtest["nb_c"],
notears_l2_nonlinear_dict_scores_simtest["svm"],
notears_l2_nonlinear_dict_scores_simtest["svm_po"],
notears_l2_nonlinear_dict_scores_simtest["svm_r"],
notears_l2_nonlinear_dict_scores_simtest["knn"],
notears_l2_nonlinear_dict_scores_simtest["knn_d"]]
nt_p_non_means = [notears_poisson_nonlinear_dict_scores_simtest["dt"],
notears_poisson_nonlinear_dict_scores_simtest["dt_e"],
notears_poisson_nonlinear_dict_scores_simtest["rf"],
notears_poisson_nonlinear_dict_scores_simtest["rf_e"],
notears_poisson_nonlinear_dict_scores_simtest["lr"],
notears_poisson_nonlinear_dict_scores_simtest["lr_l1"],
notears_poisson_nonlinear_dict_scores_simtest["lr_l2"],
notears_poisson_nonlinear_dict_scores_simtest["lr_e"],
notears_poisson_nonlinear_dict_scores_simtest["nb"],
notears_poisson_nonlinear_dict_scores_simtest["nb_g"],
notears_poisson_nonlinear_dict_scores_simtest["nb_m"],
notears_poisson_nonlinear_dict_scores_simtest["nb_c"],
notears_poisson_nonlinear_dict_scores_simtest["svm"],
notears_poisson_nonlinear_dict_scores_simtest["svm_po"],
notears_poisson_nonlinear_dict_scores_simtest["svm_r"],
notears_poisson_nonlinear_dict_scores_simtest["knn"],
notears_poisson_nonlinear_dict_scores_simtest["knn_d"]]
p_non_means = [pomegranate_exact_nonlinear_dict_scores_simtest["dt"],
pomegranate_exact_nonlinear_dict_scores_simtest["dt_e"],
pomegranate_exact_nonlinear_dict_scores_simtest["rf"],
pomegranate_exact_nonlinear_dict_scores_simtest["rf_e"],
pomegranate_exact_nonlinear_dict_scores_simtest["lr"],
pomegranate_exact_nonlinear_dict_scores_simtest["lr_l1"],
pomegranate_exact_nonlinear_dict_scores_simtest["lr_l2"],
pomegranate_exact_nonlinear_dict_scores_simtest["lr_e"],
pomegranate_exact_nonlinear_dict_scores_simtest["nb"],
pomegranate_exact_nonlinear_dict_scores_simtest["nb_g"],
pomegranate_exact_nonlinear_dict_scores_simtest["nb_m"],
pomegranate_exact_nonlinear_dict_scores_simtest["nb_c"],
pomegranate_exact_nonlinear_dict_scores_simtest["svm"],
pomegranate_exact_nonlinear_dict_scores_simtest["svm_po"],
pomegranate_exact_nonlinear_dict_scores_simtest["svm_r"],
pomegranate_exact_nonlinear_dict_scores_simtest["knn"],
pomegranate_exact_nonlinear_dict_scores_simtest["knn_d"]]
p_g_non_means = [pomegranate_greedy_nonlinear_dict_scores_simtest["dt"],
pomegranate_greedy_nonlinear_dict_scores_simtest["dt_e"],
pomegranate_greedy_nonlinear_dict_scores_simtest["rf"],
pomegranate_greedy_nonlinear_dict_scores_simtest["rf_e"],
pomegranate_greedy_nonlinear_dict_scores_simtest["lr"],
pomegranate_greedy_nonlinear_dict_scores_simtest["lr_l1"],
pomegranate_greedy_nonlinear_dict_scores_simtest["lr_l2"],
pomegranate_greedy_nonlinear_dict_scores_simtest["lr_e"],
pomegranate_greedy_nonlinear_dict_scores_simtest["nb"],
pomegranate_greedy_nonlinear_dict_scores_simtest["nb_g"],
pomegranate_greedy_nonlinear_dict_scores_simtest["nb_m"],
pomegranate_greedy_nonlinear_dict_scores_simtest["nb_c"],
pomegranate_greedy_nonlinear_dict_scores_simtest["svm"],
pomegranate_greedy_nonlinear_dict_scores_simtest["svm_po"],
pomegranate_greedy_nonlinear_dict_scores_simtest["svm_r"],
pomegranate_greedy_nonlinear_dict_scores_simtest["knn"],
pomegranate_greedy_nonlinear_dict_scores_simtest["knn_d"]]
pgmpy_tree_non_means = [pgmpy_tree_nonlinear_dict_scores_simtest["dt"],
pgmpy_tree_nonlinear_dict_scores_simtest["dt_e"],
pgmpy_tree_nonlinear_dict_scores_simtest["rf"],
pgmpy_tree_nonlinear_dict_scores_simtest["rf_e"],
pgmpy_tree_nonlinear_dict_scores_simtest["lr"],
pgmpy_tree_nonlinear_dict_scores_simtest["lr_l1"],
pgmpy_tree_nonlinear_dict_scores_simtest["lr_l2"],
pgmpy_tree_nonlinear_dict_scores_simtest["lr_e"],
pgmpy_tree_nonlinear_dict_scores_simtest["nb"],
pgmpy_tree_nonlinear_dict_scores_simtest["nb_g"],
pgmpy_tree_nonlinear_dict_scores_simtest["nb_m"],
pgmpy_tree_nonlinear_dict_scores_simtest["nb_c"],
pgmpy_tree_nonlinear_dict_scores_simtest["svm"],
pgmpy_tree_nonlinear_dict_scores_simtest["svm_po"],
pgmpy_tree_nonlinear_dict_scores_simtest["svm_r"],
pgmpy_tree_nonlinear_dict_scores_simtest["knn"],
pgmpy_tree_nonlinear_dict_scores_simtest["knn_d"]]
pgmpy_hc_non_means = [pgmpy_hc_nonlinear_dict_scores_simtest["dt"],
pgmpy_hc_nonlinear_dict_scores_simtest["dt_e"],
pgmpy_hc_nonlinear_dict_scores_simtest["rf"],
pgmpy_hc_nonlinear_dict_scores_simtest["rf_e"],
pgmpy_hc_nonlinear_dict_scores_simtest["lr"],
pgmpy_hc_nonlinear_dict_scores_simtest["lr_l1"],
pgmpy_hc_nonlinear_dict_scores_simtest["lr_l2"],
pgmpy_hc_nonlinear_dict_scores_simtest["lr_e"],
pgmpy_hc_nonlinear_dict_scores_simtest["nb"],
pgmpy_hc_nonlinear_dict_scores_simtest["nb_g"],
pgmpy_hc_nonlinear_dict_scores_simtest["nb_m"],
pgmpy_hc_nonlinear_dict_scores_simtest["nb_c"],
pgmpy_hc_nonlinear_dict_scores_simtest["svm"],
pgmpy_hc_nonlinear_dict_scores_simtest["svm_po"],
pgmpy_hc_nonlinear_dict_scores_simtest["svm_r"],
pgmpy_hc_nonlinear_dict_scores_simtest["knn"],
pgmpy_hc_nonlinear_dict_scores_simtest["knn_d"]]
pgmpy_mmhc_non_means = [pgmpy_mmhc_nonlinear_dict_scores_simtest["dt"],
pgmpy_mmhc_nonlinear_dict_scores_simtest["dt_e"],
pgmpy_mmhc_nonlinear_dict_scores_simtest["rf"],
pgmpy_mmhc_nonlinear_dict_scores_simtest["rf_e"],
pgmpy_mmhc_nonlinear_dict_scores_simtest["lr"],
pgmpy_mmhc_nonlinear_dict_scores_simtest["lr_l1"],
pgmpy_mmhc_nonlinear_dict_scores_simtest["lr_l2"],
pgmpy_mmhc_nonlinear_dict_scores_simtest["lr_e"],
pgmpy_mmhc_nonlinear_dict_scores_simtest["nb"],
pgmpy_mmhc_nonlinear_dict_scores_simtest["nb_g"],
pgmpy_mmhc_nonlinear_dict_scores_simtest["nb_m"],
pgmpy_mmhc_nonlinear_dict_scores_simtest["nb_c"],
pgmpy_mmhc_nonlinear_dict_scores_simtest["svm"],
pgmpy_mmhc_nonlinear_dict_scores_simtest["svm_po"],
pgmpy_mmhc_nonlinear_dict_scores_simtest["svm_r"],
pgmpy_mmhc_nonlinear_dict_scores_simtest["knn"],
pgmpy_mmhc_nonlinear_dict_scores_simtest["knn_d"]]
plt.rcParams["figure.figsize"] = [18, 18]
plt.rcParams["figure.autolayout"] = True
x_axis = np.arange(len(labels))
w = 0.05 # the width of the bars
plt.bar(x_axis + w, bn_non_means, width=0.05, label="BN_LEARN (HC)", color="lightsteelblue")
plt.bar(x_axis + w * 2, bn_tabu_non_means, width=0.05, label="BN_LEARN (TABU)", color="cornflowerblue")
plt.bar(x_axis + w * 3, bn_mmhc_non_means, width=0.05, label="BN_LEARN (MMHC)", color="blue")
plt.bar(x_axis + w * 4, bn_rsmax2_non_means, width=0.05, label="BN_LEARN (RSMAX2)", color="mediumblue")
plt.bar(x_axis + w * 5, bn_h2pc_non_means, width=0.05, label="BN_LEARN (H2PC)", color="navy")
plt.bar(x_axis + w * 6, nt_non_means, width=0.05, label="NO_TEARS (logistic)", color="limegreen")
plt.bar(x_axis + w * 7, nt_l2_non_means, width=0.05, label="NO_TEARS (l2)", color="forestgreen")
plt.bar(x_axis + w * 8, nt_p_non_means, width=0.05, label="NO_TEARS (poisson)", color="darkgreen")
plt.bar(x_axis + w * 9, p_non_means, width=0.05, label="POMEGRANATE (exact)", color="darkviolet")
plt.bar(x_axis + w * 10, p_g_non_means, width=0.05, label="POMEGRANATE (greed)", color="rebeccapurple")
plt.bar(x_axis + w * 11, pgmpy_mmhc_non_means, width=0.05, label="PGMPY (MMHC)", color="#FA8072")
plt.bar(x_axis + w * 12, pgmpy_hc_non_means, width=0.05, label="PGMPY (HC)", color="#FF2400")
plt.bar(x_axis + w * 13, pgmpy_tree_non_means, width=0.05, label="PGMPY (TREE)", color="#7C0A02")
plt.xticks(x_axis, labels)
plt.legend()
plt.style.use("fivethirtyeight")
plt.ylabel('Accuracy')
plt.xlabel('ML Technique', labelpad=15)
plt.title('Non-Linear Problem - Performance by library on ML technique')
# plt.ylim(0.6, 1)
# plt.tick_params(rotation=45)
plt.savefig('pipeline_summary_benchmark_for_nonlinear_by_library_groupbar_simtest.png', bbox_inches='tight')
plt.show()
# Produce Sparse Problem results by library and ML technique (test set from learned world)
# Grouped-bar figure
labels = ['DT_G', 'DT_E', 'RF_G', 'RF_E', 'LR', 'LR_L1', 'LR_L2', 'LR_E', 'NB_B', 'NB_G', 'NB_M', 'NB_C', 'SVM_S',
'SVM_P', 'SVM_R', 'KNN_W', 'KNN_D']
bn_sparse_means = [bnlearn_sparse_dict_scores_simtest["dt"], bnlearn_sparse_dict_scores_simtest["dt_e"],
bnlearn_sparse_dict_scores_simtest["rf"], bnlearn_sparse_dict_scores_simtest["rf_e"],
bnlearn_sparse_dict_scores_simtest["lr"], bnlearn_sparse_dict_scores_simtest["lr_l1"],
bnlearn_sparse_dict_scores_simtest["lr_l2"], bnlearn_sparse_dict_scores_simtest["lr_e"],
bnlearn_sparse_dict_scores_simtest["nb"], bnlearn_sparse_dict_scores_simtest["nb_g"],
bnlearn_sparse_dict_scores_simtest["nb_m"], bnlearn_sparse_dict_scores_simtest["nb_c"],
bnlearn_sparse_dict_scores_simtest["svm"], bnlearn_sparse_dict_scores_simtest["svm_po"],
bnlearn_sparse_dict_scores_simtest["svm_r"], bnlearn_sparse_dict_scores_simtest["knn"],
bnlearn_sparse_dict_scores_simtest["knn_d"]]
bn_tabu_sparse_means = [bnlearn_tabu_sparse_dict_scores_simtest["dt"], bnlearn_tabu_sparse_dict_scores_simtest["dt_e"],
bnlearn_tabu_sparse_dict_scores_simtest["rf"], bnlearn_tabu_sparse_dict_scores_simtest["rf_e"],
bnlearn_tabu_sparse_dict_scores_simtest["lr"], bnlearn_tabu_sparse_dict_scores_simtest["lr_l1"],
bnlearn_tabu_sparse_dict_scores_simtest["lr_l2"], bnlearn_tabu_sparse_dict_scores_simtest["lr_e"],
bnlearn_tabu_sparse_dict_scores_simtest["nb"], bnlearn_tabu_sparse_dict_scores_simtest["nb_g"],
bnlearn_tabu_sparse_dict_scores_simtest["nb_m"], bnlearn_tabu_sparse_dict_scores_simtest["nb_c"],
bnlearn_tabu_sparse_dict_scores_simtest["svm"], bnlearn_tabu_sparse_dict_scores_simtest["svm_po"],
bnlearn_tabu_sparse_dict_scores_simtest["svm_r"], bnlearn_tabu_sparse_dict_scores_simtest["knn"],
bnlearn_tabu_sparse_dict_scores_simtest["knn_d"]]
bn_mmhc_sparse_means = [bnlearn_mmhc_sparse_dict_scores_simtest["dt"], bnlearn_mmhc_sparse_dict_scores_simtest["dt_e"],
bnlearn_mmhc_sparse_dict_scores_simtest["rf"], bnlearn_mmhc_sparse_dict_scores_simtest["rf_e"],
bnlearn_mmhc_sparse_dict_scores_simtest["lr"], bnlearn_mmhc_sparse_dict_scores_simtest["lr_l1"],
bnlearn_mmhc_sparse_dict_scores_simtest["lr_l2"], bnlearn_mmhc_sparse_dict_scores_simtest["lr_e"],
bnlearn_mmhc_sparse_dict_scores_simtest["nb"], bnlearn_mmhc_sparse_dict_scores_simtest["nb_g"],
bnlearn_mmhc_sparse_dict_scores_simtest["nb_m"], bnlearn_mmhc_sparse_dict_scores_simtest["nb_c"],
bnlearn_mmhc_sparse_dict_scores_simtest["svm"], bnlearn_mmhc_sparse_dict_scores_simtest["svm_po"],
bnlearn_mmhc_sparse_dict_scores_simtest["svm_r"], bnlearn_mmhc_sparse_dict_scores_simtest["knn"],
bnlearn_mmhc_sparse_dict_scores_simtest["knn_d"]]
bn_rsmax2_sparse_means = [bnlearn_rsmax2_sparse_dict_scores_simtest["dt"], bnlearn_rsmax2_sparse_dict_scores_simtest["dt_e"],
bnlearn_rsmax2_sparse_dict_scores_simtest["rf"], bnlearn_rsmax2_sparse_dict_scores_simtest["rf_e"],
bnlearn_rsmax2_sparse_dict_scores_simtest["lr"], bnlearn_rsmax2_sparse_dict_scores_simtest["lr_l1"],
bnlearn_rsmax2_sparse_dict_scores_simtest["lr_l2"], bnlearn_rsmax2_sparse_dict_scores_simtest["lr_e"],
bnlearn_rsmax2_sparse_dict_scores_simtest["nb"], bnlearn_rsmax2_sparse_dict_scores_simtest["nb_g"],
bnlearn_rsmax2_sparse_dict_scores_simtest["nb_m"], bnlearn_rsmax2_sparse_dict_scores_simtest["nb_c"],
bnlearn_rsmax2_sparse_dict_scores_simtest["svm"], bnlearn_rsmax2_sparse_dict_scores_simtest["svm_po"],
bnlearn_rsmax2_sparse_dict_scores_simtest["svm_r"], bnlearn_rsmax2_sparse_dict_scores_simtest["knn"],
bnlearn_rsmax2_sparse_dict_scores_simtest["knn_d"]]
bn_h2pc_sparse_means = [bnlearn_h2pc_sparse_dict_scores_simtest["dt"], bnlearn_h2pc_sparse_dict_scores_simtest["dt_e"],
bnlearn_h2pc_sparse_dict_scores_simtest["rf"], bnlearn_h2pc_sparse_dict_scores_simtest["rf_e"],
bnlearn_h2pc_sparse_dict_scores_simtest["lr"], bnlearn_h2pc_sparse_dict_scores_simtest["lr_l1"],
bnlearn_h2pc_sparse_dict_scores_simtest["lr_l2"], bnlearn_h2pc_sparse_dict_scores_simtest["lr_e"],
bnlearn_h2pc_sparse_dict_scores_simtest["nb"], bnlearn_h2pc_sparse_dict_scores_simtest["nb_g"],
bnlearn_h2pc_sparse_dict_scores_simtest["nb_m"], bnlearn_h2pc_sparse_dict_scores_simtest["nb_c"],
bnlearn_h2pc_sparse_dict_scores_simtest["svm"], bnlearn_h2pc_sparse_dict_scores_simtest["svm_po"],
bnlearn_h2pc_sparse_dict_scores_simtest["svm_r"], bnlearn_h2pc_sparse_dict_scores_simtest["knn"],
bnlearn_h2pc_sparse_dict_scores_simtest["knn_d"]]
nt_sparse_means = [notears_sparse_dict_scores_simtest["dt"], notears_sparse_dict_scores_simtest["dt_e"],
notears_sparse_dict_scores_simtest["rf"], notears_sparse_dict_scores_simtest["rf_e"],
notears_sparse_dict_scores_simtest["lr"], notears_sparse_dict_scores_simtest["lr_l1"],
notears_sparse_dict_scores_simtest["lr_l2"], notears_sparse_dict_scores_simtest["lr_e"],
notears_sparse_dict_scores_simtest["nb"], notears_sparse_dict_scores_simtest["nb_g"],
notears_sparse_dict_scores_simtest["nb_m"], notears_sparse_dict_scores_simtest["nb_c"],
notears_sparse_dict_scores_simtest["svm"], notears_sparse_dict_scores_simtest["svm_po"],
notears_sparse_dict_scores_simtest["svm_r"], notears_sparse_dict_scores_simtest["knn"],
notears_sparse_dict_scores_simtest["knn_d"]]
nt_l2_sparse_means = [notears_l2_sparse_dict_scores_simtest["dt"], notears_l2_sparse_dict_scores_simtest["dt_e"],
notears_l2_sparse_dict_scores_simtest["rf"], notears_l2_sparse_dict_scores_simtest["rf_e"],
notears_l2_sparse_dict_scores_simtest["lr"], notears_l2_sparse_dict_scores_simtest["lr_l1"],
notears_l2_sparse_dict_scores_simtest["lr_l2"], notears_l2_sparse_dict_scores_simtest["lr_e"],
notears_l2_sparse_dict_scores_simtest["nb"], notears_l2_sparse_dict_scores_simtest["nb_g"],
notears_l2_sparse_dict_scores_simtest["nb_m"], notears_l2_sparse_dict_scores_simtest["nb_c"],
notears_l2_sparse_dict_scores_simtest["svm"], notears_l2_sparse_dict_scores_simtest["svm_po"],
notears_l2_sparse_dict_scores_simtest["svm_r"], notears_l2_sparse_dict_scores_simtest["knn"],
notears_l2_sparse_dict_scores_simtest["knn_d"]]
nt_p_sparse_means = [notears_poisson_sparse_dict_scores_simtest["dt"], notears_poisson_sparse_dict_scores_simtest["dt_e"],
notears_poisson_sparse_dict_scores_simtest["rf"], notears_poisson_sparse_dict_scores_simtest["rf_e"],
notears_poisson_sparse_dict_scores_simtest["lr"], notears_poisson_sparse_dict_scores_simtest["lr_l1"],
notears_poisson_sparse_dict_scores_simtest["lr_l2"], notears_poisson_sparse_dict_scores_simtest["lr_e"],
notears_poisson_sparse_dict_scores_simtest["nb"], notears_poisson_sparse_dict_scores_simtest["nb_g"],
notears_poisson_sparse_dict_scores_simtest["nb_m"], notears_poisson_sparse_dict_scores_simtest["nb_c"],
notears_poisson_sparse_dict_scores_simtest["svm"], notears_poisson_sparse_dict_scores_simtest["svm_po"],
notears_poisson_sparse_dict_scores_simtest["svm_r"], notears_poisson_sparse_dict_scores_simtest["knn"],
notears_poisson_sparse_dict_scores_simtest["knn_d"]]
p_sparse_means = [pomegranate_exact_sparse_dict_scores_simtest["dt"], pomegranate_exact_sparse_dict_scores_simtest["dt_e"],
pomegranate_exact_sparse_dict_scores_simtest["rf"], pomegranate_exact_sparse_dict_scores_simtest["rf_e"],
pomegranate_exact_sparse_dict_scores_simtest["lr"], pomegranate_exact_sparse_dict_scores_simtest["lr_l1"],
pomegranate_exact_sparse_dict_scores_simtest["lr_l2"], pomegranate_exact_sparse_dict_scores_simtest["lr_e"],
pomegranate_exact_sparse_dict_scores_simtest["nb"], pomegranate_exact_sparse_dict_scores_simtest["nb_g"],
pomegranate_exact_sparse_dict_scores_simtest["nb_m"], pomegranate_exact_sparse_dict_scores_simtest["nb_c"],
pomegranate_exact_sparse_dict_scores_simtest["svm"], pomegranate_exact_sparse_dict_scores_simtest["svm_po"],
pomegranate_exact_sparse_dict_scores_simtest["svm_r"], pomegranate_exact_sparse_dict_scores_simtest["knn"],
pomegranate_exact_sparse_dict_scores_simtest["knn_d"]]
p_g_sparse_means = [pomegranate_greedy_sparse_dict_scores_simtest["dt"],
pomegranate_greedy_sparse_dict_scores_simtest["dt_e"],
pomegranate_greedy_sparse_dict_scores_simtest["rf"],
pomegranate_greedy_sparse_dict_scores_simtest["rf_e"],
pomegranate_greedy_sparse_dict_scores_simtest["lr"],
pomegranate_greedy_sparse_dict_scores_simtest["lr_l1"],
pomegranate_greedy_sparse_dict_scores_simtest["lr_l2"],
pomegranate_greedy_sparse_dict_scores_simtest["lr_e"],
pomegranate_greedy_sparse_dict_scores_simtest["nb"],
pomegranate_greedy_sparse_dict_scores_simtest["nb_g"],
pomegranate_greedy_sparse_dict_scores_simtest["nb_m"],
pomegranate_greedy_sparse_dict_scores_simtest["nb_c"],
pomegranate_greedy_sparse_dict_scores_simtest["svm"],
pomegranate_greedy_sparse_dict_scores_simtest["svm_po"],
pomegranate_greedy_sparse_dict_scores_simtest["svm_r"],
pomegranate_greedy_sparse_dict_scores_simtest["knn"],
pomegranate_greedy_sparse_dict_scores_simtest["knn_d"]]
pgmpy_tree_sparse_means = [pgmpy_tree_sparse_dict_scores_simtest["dt"],
pgmpy_tree_sparse_dict_scores_simtest["dt_e"],
pgmpy_tree_sparse_dict_scores_simtest["rf"],
pgmpy_tree_sparse_dict_scores_simtest["rf_e"],
pgmpy_tree_sparse_dict_scores_simtest["lr"],
pgmpy_tree_sparse_dict_scores_simtest["lr_l1"],
pgmpy_tree_sparse_dict_scores_simtest["lr_l2"],
pgmpy_tree_sparse_dict_scores_simtest["lr_e"],
pgmpy_tree_sparse_dict_scores_simtest["nb"],
pgmpy_tree_sparse_dict_scores_simtest["nb_g"],
pgmpy_tree_sparse_dict_scores_simtest["nb_m"],
pgmpy_tree_sparse_dict_scores_simtest["nb_c"],
pgmpy_tree_sparse_dict_scores_simtest["svm"],
pgmpy_tree_sparse_dict_scores_simtest["svm_po"],
pgmpy_tree_sparse_dict_scores_simtest["svm_r"],
pgmpy_tree_sparse_dict_scores_simtest["knn"],
pgmpy_tree_sparse_dict_scores_simtest["knn_d"]]
pgmpy_hc_sparse_means = [pgmpy_hc_sparse_dict_scores_simtest["dt"],
pgmpy_hc_sparse_dict_scores_simtest["dt_e"],
pgmpy_hc_sparse_dict_scores_simtest["rf"],
pgmpy_hc_sparse_dict_scores_simtest["rf_e"],
pgmpy_hc_sparse_dict_scores_simtest["lr"],
pgmpy_hc_sparse_dict_scores_simtest["lr_l1"],
pgmpy_hc_sparse_dict_scores_simtest["lr_l2"],
pgmpy_hc_sparse_dict_scores_simtest["lr_e"],
pgmpy_hc_sparse_dict_scores_simtest["nb"],
pgmpy_hc_sparse_dict_scores_simtest["nb_g"],
pgmpy_hc_sparse_dict_scores_simtest["nb_m"],
pgmpy_hc_sparse_dict_scores_simtest["nb_c"],
pgmpy_hc_sparse_dict_scores_simtest["svm"],
pgmpy_hc_sparse_dict_scores_simtest["svm_po"],
pgmpy_hc_sparse_dict_scores_simtest["svm_r"],
pgmpy_hc_sparse_dict_scores_simtest["knn"],
pgmpy_hc_sparse_dict_scores_simtest["knn_d"]]
pgmpy_mmhc_sparse_means = [pgmpy_mmhc_sparse_dict_scores_simtest["dt"],
pgmpy_mmhc_sparse_dict_scores_simtest["dt_e"],
pgmpy_mmhc_sparse_dict_scores_simtest["rf"],
pgmpy_mmhc_sparse_dict_scores_simtest["rf_e"],
pgmpy_mmhc_sparse_dict_scores_simtest["lr"],
pgmpy_mmhc_sparse_dict_scores_simtest["lr_l1"],
pgmpy_mmhc_sparse_dict_scores_simtest["lr_l2"],
pgmpy_mmhc_sparse_dict_scores_simtest["lr_e"],
pgmpy_mmhc_sparse_dict_scores_simtest["nb"],
pgmpy_mmhc_sparse_dict_scores_simtest["nb_g"],
pgmpy_mmhc_sparse_dict_scores_simtest["nb_m"],
pgmpy_mmhc_sparse_dict_scores_simtest["nb_c"],
pgmpy_mmhc_sparse_dict_scores_simtest["svm"],
pgmpy_mmhc_sparse_dict_scores_simtest["svm_po"],
pgmpy_mmhc_sparse_dict_scores_simtest["svm_r"],
pgmpy_mmhc_sparse_dict_scores_simtest["knn"],
pgmpy_mmhc_sparse_dict_scores_simtest["knn_d"]]
plt.rcParams["figure.figsize"] = [18, 18]
plt.rcParams["figure.autolayout"] = True
x_axis = np.arange(len(labels))
w = 0.05 # the width of the bars
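# Thirteen structure-learning variants are drawn per ML-technique group, each offset
# by a further multiple of w, so the bars within a group follow the legend order.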
plt.bar(x_axis + w, bn_sparse_means, width=0.05, label="BN_LEARN (HC)", color="lightsteelblue")
plt.bar(x_axis + w * 2, bn_tabu_sparse_means, width=0.05, label="BN_LEARN (TABU)", color="cornflowerblue")
plt.bar(x_axis + w * 3, bn_mmhc_sparse_means, width=0.05, label="BN_LEARN (MMHC)", color="blue")
plt.bar(x_axis + w * 4, bn_rsmax2_sparse_means, width=0.05, label="BN_LEARN (RSMAX2)", color="mediumblue")
plt.bar(x_axis + w * 5, bn_h2pc_sparse_means, width=0.05, label="BN_LEARN (H2PC)", color="navy")
plt.bar(x_axis + w * 6, nt_sparse_means, width=0.05, label="NO_TEARS (logistic)", color="limegreen")
plt.bar(x_axis + w * 7, nt_l2_sparse_means, width=0.05, label="NO_TEARS (l2)", color="forestgreen")
plt.bar(x_axis + w * 8, nt_p_sparse_means, width=0.05, label="NO_TEARS (poisson)", color="darkgreen")
plt.bar(x_axis + w * 9, p_sparse_means, width=0.05, label="POMEGRANATE (exact)", color="darkviolet")
plt.bar(x_axis + w * 10, p_g_sparse_means, width=0.05, label="POMEGRANATE (greed)", color="rebeccapurple")
plt.bar(x_axis + w * 11, pgmpy_mmhc_sparse_means, width=0.05, label="PGMPY (MMHC)", color="#FA8072")
plt.bar(x_axis + w * 12, pgmpy_hc_sparse_means, width=0.05, label="PGMPY (HC)", color="#FF2400")
plt.bar(x_axis + w * 13, pgmpy_tree_sparse_means, width=0.05, label="PGMPY (TREE)", color="#7C0A02")
plt.xticks(x_axis, labels)
plt.legend()
plt.style.use("fivethirtyeight")
plt.ylabel('Accuracy')
plt.xlabel('ML Technique', labelpad=15)
plt.title('Sparse Problem - Performance by library per ML technique')
# plt.ylim(0.6, 1)
# plt.tick_params(rotation=45)
plt.savefig('pipeline_summary_benchmark_for_sparse_by_library_groupbar_simtest.png', bbox_inches='tight')
plt.show()
# Produce the dimension-problem figure: performance by library for each ML technique
# Grouped bar chart
labels = ['DT_G', 'DT_E', 'RF_G', 'RF_E', 'LR', 'LR_L1', 'LR_L2', 'LR_E', 'NB_B', 'NB_G', 'NB_M', 'NB_C', 'SVM_S',
'SVM_P', 'SVM_R', 'KNN_W', 'KNN_D']
bn_dimension_means = [bnlearn_dimension_dict_scores_simtest["dt"], bnlearn_dimension_dict_scores_simtest["dt_e"],
bnlearn_dimension_dict_scores_simtest["rf"], bnlearn_dimension_dict_scores_simtest["rf_e"],
bnlearn_dimension_dict_scores_simtest["lr"], bnlearn_dimension_dict_scores_simtest["lr_l1"],
bnlearn_dimension_dict_scores_simtest["lr_l2"], bnlearn_dimension_dict_scores_simtest["lr_e"],
bnlearn_dimension_dict_scores_simtest["nb"], bnlearn_dimension_dict_scores_simtest["nb_g"],
bnlearn_dimension_dict_scores_simtest["nb_m"], bnlearn_dimension_dict_scores_simtest["nb_c"],
bnlearn_dimension_dict_scores_simtest["svm"], bnlearn_dimension_dict_scores_simtest["svm_po"],
bnlearn_dimension_dict_scores_simtest["svm_r"], bnlearn_dimension_dict_scores_simtest["knn"],
bnlearn_dimension_dict_scores_simtest["knn_d"]]
bn_tabu_dimension_means = [bnlearn_tabu_dimension_dict_scores_simtest["dt"], bnlearn_tabu_dimension_dict_scores_simtest["dt_e"],
bnlearn_tabu_dimension_dict_scores_simtest["rf"], bnlearn_tabu_dimension_dict_scores_simtest["rf_e"],
bnlearn_tabu_dimension_dict_scores_simtest["lr"], bnlearn_tabu_dimension_dict_scores_simtest["lr_l1"],
bnlearn_tabu_dimension_dict_scores_simtest["lr_l2"], bnlearn_tabu_dimension_dict_scores_simtest["lr_e"],
bnlearn_tabu_dimension_dict_scores_simtest["nb"], bnlearn_tabu_dimension_dict_scores_simtest["nb_g"],
bnlearn_tabu_dimension_dict_scores_simtest["nb_m"], bnlearn_tabu_dimension_dict_scores_simtest["nb_c"],
bnlearn_tabu_dimension_dict_scores_simtest["svm"], bnlearn_tabu_dimension_dict_scores_simtest["svm_po"],
bnlearn_tabu_dimension_dict_scores_simtest["svm_r"], bnlearn_tabu_dimension_dict_scores_simtest["knn"],
bnlearn_tabu_dimension_dict_scores_simtest["knn_d"]]
bn_mmhc_dimension_means = [bnlearn_mmhc_dimension_dict_scores_simtest["dt"], bnlearn_mmhc_dimension_dict_scores_simtest["dt_e"],
bnlearn_mmhc_dimension_dict_scores_simtest["rf"], bnlearn_mmhc_dimension_dict_scores_simtest["rf_e"],
bnlearn_mmhc_dimension_dict_scores_simtest["lr"], bnlearn_mmhc_dimension_dict_scores_simtest["lr_l1"],
bnlearn_mmhc_dimension_dict_scores_simtest["lr_l2"], bnlearn_mmhc_dimension_dict_scores_simtest["lr_e"],
bnlearn_mmhc_dimension_dict_scores_simtest["nb"], bnlearn_mmhc_dimension_dict_scores_simtest["nb_g"],
bnlearn_mmhc_dimension_dict_scores_simtest["nb_m"], bnlearn_mmhc_dimension_dict_scores_simtest["nb_c"],
bnlearn_mmhc_dimension_dict_scores_simtest["svm"], bnlearn_mmhc_dimension_dict_scores_simtest["svm_po"],
bnlearn_mmhc_dimension_dict_scores_simtest["svm_r"], bnlearn_mmhc_dimension_dict_scores_simtest["knn"],
bnlearn_mmhc_dimension_dict_scores_simtest["knn_d"]]
bn_rsmax2_dimension_means = [bnlearn_rsmax2_dimension_dict_scores_simtest["dt"],
bnlearn_rsmax2_dimension_dict_scores_simtest["dt_e"],
bnlearn_rsmax2_dimension_dict_scores_simtest["rf"],
bnlearn_rsmax2_dimension_dict_scores_simtest["rf_e"],
bnlearn_rsmax2_dimension_dict_scores_simtest["lr"],
bnlearn_rsmax2_dimension_dict_scores_simtest["lr_l1"],
bnlearn_rsmax2_dimension_dict_scores_simtest["lr_l2"],
bnlearn_rsmax2_dimension_dict_scores_simtest["lr_e"],
bnlearn_rsmax2_dimension_dict_scores_simtest["nb"],
bnlearn_rsmax2_dimension_dict_scores_simtest["nb_g"],
bnlearn_rsmax2_dimension_dict_scores_simtest["nb_m"],
bnlearn_rsmax2_dimension_dict_scores_simtest["nb_c"],
bnlearn_rsmax2_dimension_dict_scores_simtest["svm"],
bnlearn_rsmax2_dimension_dict_scores_simtest["svm_po"],
bnlearn_rsmax2_dimension_dict_scores_simtest["svm_r"],
bnlearn_rsmax2_dimension_dict_scores_simtest["knn"],
bnlearn_rsmax2_dimension_dict_scores_simtest["knn_d"]]
bn_h2pc_dimension_means = [bnlearn_h2pc_dimension_dict_scores_simtest["dt"], bnlearn_h2pc_dimension_dict_scores_simtest["dt_e"],
bnlearn_h2pc_dimension_dict_scores_simtest["rf"], bnlearn_h2pc_dimension_dict_scores_simtest["rf_e"],
bnlearn_h2pc_dimension_dict_scores_simtest["lr"], bnlearn_h2pc_dimension_dict_scores_simtest["lr_l1"],
bnlearn_h2pc_dimension_dict_scores_simtest["lr_l2"], bnlearn_h2pc_dimension_dict_scores_simtest["lr_e"],
bnlearn_h2pc_dimension_dict_scores_simtest["nb"], bnlearn_h2pc_dimension_dict_scores_simtest["nb_g"],
bnlearn_h2pc_dimension_dict_scores_simtest["nb_m"], bnlearn_h2pc_dimension_dict_scores_simtest["nb_c"],
bnlearn_h2pc_dimension_dict_scores_simtest["svm"], bnlearn_h2pc_dimension_dict_scores_simtest["svm_po"],
bnlearn_h2pc_dimension_dict_scores_simtest["svm_r"], bnlearn_h2pc_dimension_dict_scores_simtest["knn"],
bnlearn_h2pc_dimension_dict_scores_simtest["knn_d"]]
nt_dimension_means = [notears_dimension_dict_scores_simtest["dt"], notears_dimension_dict_scores_simtest["dt_e"],
notears_dimension_dict_scores_simtest["rf"], notears_dimension_dict_scores_simtest["rf_e"],
notears_dimension_dict_scores_simtest["lr"], notears_dimension_dict_scores_simtest["lr_l1"],
notears_dimension_dict_scores_simtest["lr_l2"], notears_dimension_dict_scores_simtest["lr_e"],
notears_dimension_dict_scores_simtest["nb"], notears_dimension_dict_scores_simtest["nb_g"],
notears_dimension_dict_scores_simtest["nb_m"], notears_dimension_dict_scores_simtest["nb_c"],
notears_dimension_dict_scores_simtest["svm"], notears_dimension_dict_scores_simtest["svm_po"],
notears_dimension_dict_scores_simtest["svm_r"], notears_dimension_dict_scores_simtest["knn"],
notears_dimension_dict_scores_simtest["knn_d"]]
nt_l2_dimension_means = [notears_l2_dimension_dict_scores_simtest["dt"], notears_l2_dimension_dict_scores_simtest["dt_e"],
notears_l2_dimension_dict_scores_simtest["rf"], notears_l2_dimension_dict_scores_simtest["rf_e"],
notears_l2_dimension_dict_scores_simtest["lr"], notears_l2_dimension_dict_scores_simtest["lr_l1"],
notears_l2_dimension_dict_scores_simtest["lr_l2"], notears_l2_dimension_dict_scores_simtest["lr_e"],
notears_l2_dimension_dict_scores_simtest["nb"], notears_l2_dimension_dict_scores_simtest["nb_g"],
notears_l2_dimension_dict_scores_simtest["nb_m"], notears_l2_dimension_dict_scores_simtest["nb_c"],
notears_l2_dimension_dict_scores_simtest["svm"], notears_l2_dimension_dict_scores_simtest["svm_po"],
notears_l2_dimension_dict_scores_simtest["svm_r"], notears_l2_dimension_dict_scores_simtest["knn"],
notears_l2_dimension_dict_scores_simtest["knn_d"]]
nt_p_dimension_means = [notears_poisson_dimension_dict_scores_simtest["dt"], notears_poisson_dimension_dict_scores_simtest["dt_e"],
notears_poisson_dimension_dict_scores_simtest["rf"], notears_poisson_dimension_dict_scores_simtest["rf_e"],
notears_poisson_dimension_dict_scores_simtest["lr"], notears_poisson_dimension_dict_scores_simtest["lr_l1"],
notears_poisson_dimension_dict_scores_simtest["lr_l2"],
notears_poisson_dimension_dict_scores_simtest["lr_e"],
notears_poisson_dimension_dict_scores_simtest["nb"], notears_poisson_dimension_dict_scores_simtest["nb_g"],
notears_poisson_dimension_dict_scores_simtest["nb_m"],
notears_poisson_dimension_dict_scores_simtest["nb_c"],
notears_poisson_dimension_dict_scores_simtest["svm"],
notears_poisson_dimension_dict_scores_simtest["svm_po"],
notears_poisson_dimension_dict_scores_simtest["svm_r"],
notears_poisson_dimension_dict_scores_simtest["knn"],
notears_poisson_dimension_dict_scores_simtest["knn_d"]]
p_dimension_means = [pomegranate_exact_dimension_dict_scores_simtest["dt"], pomegranate_exact_dimension_dict_scores_simtest["dt_e"],
pomegranate_exact_dimension_dict_scores_simtest["rf"], pomegranate_exact_dimension_dict_scores_simtest["rf_e"],
pomegranate_exact_dimension_dict_scores_simtest["lr"],
pomegranate_exact_dimension_dict_scores_simtest["lr_l1"],
pomegranate_exact_dimension_dict_scores_simtest["lr_l2"],
pomegranate_exact_dimension_dict_scores_simtest["lr_e"], pomegranate_exact_dimension_dict_scores_simtest["nb"],
pomegranate_exact_dimension_dict_scores_simtest["nb_g"],
pomegranate_exact_dimension_dict_scores_simtest["nb_m"],
pomegranate_exact_dimension_dict_scores_simtest["nb_c"],
pomegranate_exact_dimension_dict_scores_simtest["svm"],
pomegranate_exact_dimension_dict_scores_simtest["svm_po"],
pomegranate_exact_dimension_dict_scores_simtest["svm_r"],
pomegranate_exact_dimension_dict_scores_simtest["knn"],
pomegranate_exact_dimension_dict_scores_simtest["knn_d"]]
p_g_dimension_means = [pomegranate_greedy_dimension_dict_scores_simtest["dt"],
pomegranate_greedy_dimension_dict_scores_simtest["dt_e"],
pomegranate_greedy_dimension_dict_scores_simtest["rf"],
pomegranate_greedy_dimension_dict_scores_simtest["rf_e"],
pomegranate_greedy_dimension_dict_scores_simtest["lr"],
pomegranate_greedy_dimension_dict_scores_simtest["lr_l1"],
pomegranate_greedy_dimension_dict_scores_simtest["lr_l2"],
pomegranate_greedy_dimension_dict_scores_simtest["lr_e"],
pomegranate_greedy_dimension_dict_scores_simtest["nb"],
pomegranate_greedy_dimension_dict_scores_simtest["nb_g"],
pomegranate_greedy_dimension_dict_scores_simtest["nb_m"],
pomegranate_greedy_dimension_dict_scores_simtest["nb_c"],
pomegranate_greedy_dimension_dict_scores_simtest["svm"],
pomegranate_greedy_dimension_dict_scores_simtest["svm_po"],
pomegranate_greedy_dimension_dict_scores_simtest["svm_r"],
pomegranate_greedy_dimension_dict_scores_simtest["knn"],
pomegranate_greedy_dimension_dict_scores_simtest["knn_d"]]
pgmpy_tree_dimension_means = [pgmpy_tree_dimension_dict_scores_simtest["dt"],
pgmpy_tree_dimension_dict_scores_simtest["dt_e"],
pgmpy_tree_dimension_dict_scores_simtest["rf"],
pgmpy_tree_dimension_dict_scores_simtest["rf_e"],
pgmpy_tree_dimension_dict_scores_simtest["lr"],
pgmpy_tree_dimension_dict_scores_simtest["lr_l1"],
pgmpy_tree_dimension_dict_scores_simtest["lr_l2"],
pgmpy_tree_dimension_dict_scores_simtest["lr_e"],
pgmpy_tree_dimension_dict_scores_simtest["nb"],
pgmpy_tree_dimension_dict_scores_simtest["nb_g"],
pgmpy_tree_dimension_dict_scores_simtest["nb_m"],
pgmpy_tree_dimension_dict_scores_simtest["nb_c"],
pgmpy_tree_dimension_dict_scores_simtest["svm"],
pgmpy_tree_dimension_dict_scores_simtest["svm_po"],
pgmpy_tree_dimension_dict_scores_simtest["svm_r"],
pgmpy_tree_dimension_dict_scores_simtest["knn"],
pgmpy_tree_dimension_dict_scores_simtest["knn_d"]]
pgmpy_hc_dimension_means = [pgmpy_hc_dimension_dict_scores_simtest["dt"],
pgmpy_hc_dimension_dict_scores_simtest["dt_e"],
pgmpy_hc_dimension_dict_scores_simtest["rf"],
pgmpy_hc_dimension_dict_scores_simtest["rf_e"],
pgmpy_hc_dimension_dict_scores_simtest["lr"],
pgmpy_hc_dimension_dict_scores_simtest["lr_l1"],
pgmpy_hc_dimension_dict_scores_simtest["lr_l2"],
pgmpy_hc_dimension_dict_scores_simtest["lr_e"],
pgmpy_hc_dimension_dict_scores_simtest["nb"],
pgmpy_hc_dimension_dict_scores_simtest["nb_g"],
pgmpy_hc_dimension_dict_scores_simtest["nb_m"],
pgmpy_hc_dimension_dict_scores_simtest["nb_c"],
pgmpy_hc_dimension_dict_scores_simtest["svm"],
pgmpy_hc_dimension_dict_scores_simtest["svm_po"],
pgmpy_hc_dimension_dict_scores_simtest["svm_r"],
pgmpy_hc_dimension_dict_scores_simtest["knn"],
pgmpy_hc_dimension_dict_scores_simtest["knn_d"]]
pgmpy_mmhc_dimension_means = [pgmpy_mmhc_dimension_dict_scores_simtest["dt"],
pgmpy_mmhc_dimension_dict_scores_simtest["dt_e"],
pgmpy_mmhc_dimension_dict_scores_simtest["rf"],
pgmpy_mmhc_dimension_dict_scores_simtest["rf_e"],
pgmpy_mmhc_dimension_dict_scores_simtest["lr"],
pgmpy_mmhc_dimension_dict_scores_simtest["lr_l1"],
pgmpy_mmhc_dimension_dict_scores_simtest["lr_l2"],
pgmpy_mmhc_dimension_dict_scores_simtest["lr_e"],
pgmpy_mmhc_dimension_dict_scores_simtest["nb"],
pgmpy_mmhc_dimension_dict_scores_simtest["nb_g"],
pgmpy_mmhc_dimension_dict_scores_simtest["nb_m"],
pgmpy_mmhc_dimension_dict_scores_simtest["nb_c"],
pgmpy_mmhc_dimension_dict_scores_simtest["svm"],
pgmpy_mmhc_dimension_dict_scores_simtest["svm_po"],
pgmpy_mmhc_dimension_dict_scores_simtest["svm_r"],
pgmpy_mmhc_dimension_dict_scores_simtest["knn"],
pgmpy_mmhc_dimension_dict_scores_simtest["knn_d"]]
plt.rcParams["figure.figsize"] = [18, 18]
plt.rcParams["figure.autolayout"] = True
x_axis = np.arange(len(labels))
w = 0.05 # the width of the bars
plt.bar(x_axis + w, bn_dimension_means, width=0.05, label="BN_LEARN (HC)", color="lightsteelblue")
plt.bar(x_axis + w * 2, bn_tabu_dimension_means, width=0.05, label="BN_LEARN (TABU)", color="cornflowerblue")
plt.bar(x_axis + w * 3, bn_mmhc_dimension_means, width=0.05, label="BN_LEARN (MMHC)", color="blue")
plt.bar(x_axis + w * 4, bn_rsmax2_dimension_means, width=0.05, label="BN_LEARN (RSMAX2)", color="mediumblue")
plt.bar(x_axis + w * 5, bn_h2pc_dimension_means, width=0.05, label="BN_LEARN (H2PC)", color="navy")
plt.bar(x_axis + w * 6, nt_dimension_means, width=0.05, label="NO_TEARS (logistic)", color="limegreen")
plt.bar(x_axis + w * 7, nt_l2_dimension_means, width=0.05, label="NO_TEARS (l2)", color="forestgreen")
plt.bar(x_axis + w * 8, nt_p_dimension_means, width=0.05, label="NO_TEARS (poisson)", color="darkgreen")
plt.bar(x_axis + w * 9, p_dimension_means, width=0.05, label="POMEGRANATE (exact)", color="darkviolet")
plt.bar(x_axis + w * 10, p_g_dimension_means, width=0.05, label="POMEGRANATE (greed)", color="rebeccapurple")
plt.bar(x_axis + w * 11, pgmpy_mmhc_dimension_means, width=0.05, label="PGMPY (MMHC)", color="#FA8072")
plt.bar(x_axis + w * 12, pgmpy_hc_dimension_means, width=0.05, label="PGMPY (HC)", color="#FF2400")
plt.bar(x_axis + w * 13, pgmpy_tree_dimension_means, width=0.05, label="PGMPY (TREE)", color="#7C0A02")
plt.xticks(x_axis, labels)
plt.legend()
plt.style.use("fivethirtyeight")
plt.ylabel('Accuracy')
plt.xlabel('ML Technique', labelpad=15)
plt.title('Dimension Problem - Performance by library per ML technique')
# plt.ylim(0.6, 1)
# plt.tick_params(rotation=45)
plt.savefig('pipeline_summary_benchmark_for_dimension_by_library_groupbar_simtest.png', bbox_inches='tight')
plt.show()
write_real_to_figures()
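# A possible refactor (hypothetical helper, not used in this script): each 17-entry
# *_means list above could be built from its score dict in one call, assuming every
# dict exposes the same ML-technique keys used throughout this file.
#
#   ML_KEYS = ["dt", "dt_e", "rf", "rf_e", "lr", "lr_l1", "lr_l2", "lr_e",
#              "nb", "nb_g", "nb_m", "nb_c", "svm", "svm_po", "svm_r", "knn", "knn_d"]
#
#   def scores_to_means(score_dict, keys=ML_KEYS):
#       return [score_dict[k] for k in keys]
#
#   bn_sparse_means = scores_to_means(bnlearn_sparse_dict_scores_simtest)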
def prediction_real_learned():
print("#### SimCal Real/Learned-world Predictions ####")
print("-- Exact (1-1) max(rank) output")
real_linear_workflows = {'Decision Tree (gini)': real_linear_dt_scores, 'Decision Tree (entropy)': real_linear_dt_entropy_scores, 'Random Forest (gini)': real_linear_rf_scores, 'Random Forest (entropy)': real_linear_rf_entropy_scores,'Logistic Regression (none)': real_linear_lr_scores, 'Logistic Regression (l1)': real_linear_lr_l1_scores, 'Logistic Regression (l2)': real_linear_lr_l2_scores, 'Logistic Regression (elasticnet)': real_linear_lr_elastic_scores, 'Naive Bayes (bernoulli)': real_linear_gb_scores, 'Naive Bayes (multinomial)': real_linear_gb_multi_scores, 'Naive Bayes (gaussian)': real_linear_gb_gaussian_scores, 'Naive Bayes (complement)': real_linear_gb_complement_scores, 'Support Vector Machine (sigmoid)': real_linear_svm_scores, 'Support Vector Machine (polynomial)': real_linear_svm_poly_scores, 'Support Vector Machine (rbf)': real_linear_svm_rbf_scores, 'K Nearest Neighbor (uniform)': real_linear_knn_scores, 'K Nearest Neighbor (distance)': real_linear_knn_distance_scores}
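# max(d, key=d.get) returns the dictionary key (workflow name) holding the highest score.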
top_real_linear = max(real_linear_workflows, key=real_linear_workflows.get)
print("Real world - Linear problem, Prediction: "+ top_real_linear + " (" + str(real_linear_workflows[top_real_linear]) + ")")
sim_linear_workflows = {'BN Decision Tree (HC-gini)': bnlearn_linear_dict_scores["dt"], 'BN Decision Tree (HC-entropy)': bnlearn_linear_dict_scores["dt_e"],'BN Decision Tree (TABU-gini)': bnlearn_tabu_linear_dict_scores["dt"], 'BN Decision Tree (TABU-entropy)': bnlearn_tabu_linear_dict_scores["dt_e"],'BN Decision Tree (PC-gini)': bnlearn_pc_linear_dict_scores["dt"], 'BN Decision Tree (PC-entropy)': bnlearn_pc_linear_dict_scores["dt_e"],'BN Decision Tree (MMHC-gini)': bnlearn_mmhc_linear_dict_scores["dt"], 'BN Decision Tree (MMHC-entropy)': bnlearn_mmhc_linear_dict_scores["dt_e"],'BN Decision Tree (RSMAX2-gini)': bnlearn_rsmax2_linear_dict_scores["dt"], 'BN Decision Tree (RSMAX2-entropy)': bnlearn_rsmax2_linear_dict_scores["dt_e"],'BN Decision Tree (H2PC-gini)': bnlearn_h2pc_linear_dict_scores["dt"], 'BN Decision Tree (H2PC-entropy)': bnlearn_h2pc_linear_dict_scores["dt_e"],'NT Decision Tree (Logistic-gini)': notears_linear_dict_scores["dt"],'NT Decision Tree (Logistic-entropy)': notears_linear_dict_scores["dt_e"], 'NT Decision Tree (L2-gini)': notears_l2_linear_dict_scores["dt"],'NT Decision Tree (L2-entropy)': notears_l2_linear_dict_scores["dt_e"],'NT Decision Tree (Poisson-gini)': notears_poisson_linear_dict_scores["dt"],'NT Decision Tree (Poisson-entropy)': notears_poisson_linear_dict_scores["dt_e"],'POMEGRANATE Decision Tree (Exact-gini)': pomegranate_exact_linear_dict_scores["dt"],'POMEGRANATE Decision Tree (Exact-entropy)': pomegranate_exact_linear_dict_scores["dt_e"],'POMEGRANATE Decision Tree (Greedy-gini)': pomegranate_greedy_linear_dict_scores["dt"],'POMEGRANATE Decision Tree (Greedy-entropy)': pomegranate_greedy_linear_dict_scores["dt_e"],'PGMPY Decision Tree (HC-gini)': pgmpy_hc_linear_dict_scores["dt"],'PGMPY Decision Tree (HC-entropy)': pgmpy_hc_linear_dict_scores["dt_e"],'PGMPY Decision Tree (MMHC-gini)': pgmpy_mmhc_linear_dict_scores["dt"],'PGMPY Decision Tree (HC-entropy)': pgmpy_mmhc_linear_dict_scores["dt_e"],'PGMPY Decision Tree (TREE-gini)': pgmpy_tree_linear_dict_scores["dt"],'PGMPY Decision Tree (TREE-entropy)': pgmpy_tree_linear_dict_scores["dt_e"],'BN Random Forest (HC-gini)': bnlearn_linear_dict_scores["rf"], 'BN Random Forest (HC-entropy)': bnlearn_linear_dict_scores["rf_e"],'BN Random Forest (TABU-gini)': bnlearn_tabu_linear_dict_scores["rf"], 'BN Random Forest (TABU-entropy)': bnlearn_tabu_linear_dict_scores["rf_e"],'BN Random Forest (PC-gini)': bnlearn_pc_linear_dict_scores["rf"], 'BN Random Forest (PC-entropy)': bnlearn_pc_linear_dict_scores["rf_e"],'BN Random Forest (MMHC-gini)': bnlearn_mmhc_linear_dict_scores["rf"], 'BN Random Forest (MMHC-entropy)': bnlearn_mmhc_linear_dict_scores["rf_e"],'BN Random Forest (RSMAX2-gini)': bnlearn_rsmax2_linear_dict_scores["rf"], 'BN Random Forest (RSMAX2-entropy)': bnlearn_rsmax2_linear_dict_scores["rf_e"],'BN Random Forest (H2PC-gini)': bnlearn_h2pc_linear_dict_scores["rf"], 'BN Random Forest (H2PC-entropy)': bnlearn_h2pc_linear_dict_scores["rf_e"],'NT Random Forest (Logistic-gini)': notears_linear_dict_scores["rf"],'NT Random Forest (Logistic-entropy)': notears_linear_dict_scores["rf_e"],'NT Random Forest (L2-gini)': notears_l2_linear_dict_scores["rf"],'NT Random Forest (l2-entropy)': notears_l2_linear_dict_scores["rf_e"],'NT Random Forest (Poisson-gini)': notears_poisson_linear_dict_scores["rf"],'NT Random Forest (Poisson-entropy)': notears_poisson_linear_dict_scores["rf_e"],'POMEGRANATE Random Forest (Exact-gini)': pomegranate_exact_linear_dict_scores["rf"],'POMEGRANATE Random Forest (Exact-entropy)': 
pomegranate_exact_linear_dict_scores["rf_e"],'POMEGRANATE Random Forest (Greedy-gini)': pomegranate_greedy_linear_dict_scores["rf"],'POMEGRANATE Random Forest (Greedy-entropy)': pomegranate_greedy_linear_dict_scores["rf_e"],'PGMPY Random Forest (HC-gini)': pgmpy_hc_linear_dict_scores["rf"],'PGMPY Random Forest (HC-entropy)': pgmpy_hc_linear_dict_scores["rf_e"],'PGMPY Random Forest (MMHC-gini)': pgmpy_mmhc_linear_dict_scores["rf"],'PGMPY Random Forest (HC-entropy)': pgmpy_mmhc_linear_dict_scores["rf_e"],'PGMPY Random Forest (TREE-gini)': pgmpy_tree_linear_dict_scores["rf"],'PGMPY Random Forest (TREE-entropy)': pgmpy_tree_linear_dict_scores["rf_e"], 'BN Logistic Regression (HC-none)': bnlearn_linear_dict_scores["lr"],'BN Logistic Regression (HC-l1)': bnlearn_linear_dict_scores["lr_l1"],'BN Logistic Regression (HC-l2)': bnlearn_linear_dict_scores["lr_l2"],'BN Logistic Regression (HC-elastic)': bnlearn_linear_dict_scores["lr_e"], 'BN Logistic Regression (TABU-none)': bnlearn_tabu_linear_dict_scores["lr"],'BN Logistic Regression (TABU-l1)': bnlearn_tabu_linear_dict_scores["lr_l1"],'BN Logistic Regression (TABU-l2)': bnlearn_tabu_linear_dict_scores["lr_l2"],'BN Logistic Regression (TABU-elastic)': bnlearn_tabu_linear_dict_scores["lr_e"], 'BN Logistic Regression (PC-none)': bnlearn_pc_linear_dict_scores["lr"],'BN Logistic Regression (PC-l1)': bnlearn_pc_linear_dict_scores["lr_l1"],'BN Logistic Regression (PC-l2)': bnlearn_pc_linear_dict_scores["lr_l2"],'BN Logistic Regression (PC-elastic)': bnlearn_pc_linear_dict_scores["lr_e"], 'BN Logistic Regression (MMHC-none)': bnlearn_mmhc_linear_dict_scores["lr"],'BN Logistic Regression (MMHC-l1)': bnlearn_mmhc_linear_dict_scores["lr_l1"],'BN Logistic Regression (MMHC-l2)': bnlearn_mmhc_linear_dict_scores["lr_l2"],'BN Logistic Regression (MMHC-elastic)': bnlearn_mmhc_linear_dict_scores["lr_e"], 'BN Logistic Regression (RSMAX2-none)': bnlearn_rsmax2_linear_dict_scores["lr"],'BN Logistic Regression (RSMAX2-l1)': bnlearn_rsmax2_linear_dict_scores["lr_l1"],'BN Logistic Regression (RSMAX2-l2)': bnlearn_rsmax2_linear_dict_scores["lr_l2"],'BN Logistic Regression (RSMAX2-elastic)': bnlearn_rsmax2_linear_dict_scores["lr_e"], 'BN Logistic Regression (H2PC-none)': bnlearn_h2pc_linear_dict_scores["lr"],'BN Logistic Regression (H2PC-l1)': bnlearn_h2pc_linear_dict_scores["lr_l1"],'BN Logistic Regression (H2PC-l2)': bnlearn_h2pc_linear_dict_scores["lr_l2"],'BN Logistic Regression (H2PC-elastic)': bnlearn_h2pc_linear_dict_scores["lr_e"], 'POMEGRANATE Logistic Regression (Exact-none)': pomegranate_exact_linear_dict_scores["lr"],'POMEGRANATE Logistic Regression (Exact-l1)': pomegranate_exact_linear_dict_scores["lr_l1"],'POMEGRANATE Logistic Regression (Exact-l2)': pomegranate_exact_linear_dict_scores["lr_l2"],'POMEGRANATE Logistic Regression (Exact-elastic)': pomegranate_exact_linear_dict_scores["lr_e"],'POMEGRANATE Logistic Regression (Greedy-none)': pomegranate_greedy_linear_dict_scores["lr"],'POMEGRANATE Logistic Regression (Greedy-l1)': pomegranate_greedy_linear_dict_scores["lr_l1"],'POMEGRANATE Logistic Regression (Greedy-l2)': pomegranate_greedy_linear_dict_scores["lr_l2"],'POMEGRANATE Logistic Regression (Greedy-elastic)': pomegranate_greedy_linear_dict_scores["lr_e"],'PGMPY Logistic Regression (HC-none)': pgmpy_hc_linear_dict_scores["lr"],'PGMPY Logistic Regression (HC-l1)': pgmpy_hc_linear_dict_scores["lr_l1"],'PGMPY Logistic Regression (MMHC-l2)': pgmpy_mmhc_linear_dict_scores["lr_l2"],'PGMPY Logistic Regression (HC-elastic)': 
pgmpy_mmhc_linear_dict_scores["lr_e"],'PGMPY Logistic Regression (TREE-none)': pgmpy_tree_linear_dict_scores["lr"],'PGMPY Logistic Regression (TREE-l1)': pgmpy_tree_linear_dict_scores["lr_l1"],'PGMPY Logistic Regression (TREE-l2)': pgmpy_tree_linear_dict_scores["lr_l2"],'PGMPY Logistic Regression (TREE-elastic)': pgmpy_tree_linear_dict_scores["lr_e"], 'PGMPY Logistic Regression (MMHC-none)': pgmpy_mmhc_linear_dict_scores["lr"],'PGMPY Logistic Regression (MMHC-l1)': pgmpy_mmhc_linear_dict_scores["lr_l1"],'PGMPY Logistic Regression (MMHC-l2)': pgmpy_mmhc_linear_dict_scores["lr_l2"],'PGMPY Logistic Regression (MMHC-elastic)': pgmpy_mmhc_linear_dict_scores["lr_e"],'NT Logistic Regression (Logistic-none)': notears_linear_dict_scores["lr"], 'NT Logistic Regression (Logistic-l1)': notears_linear_dict_scores["lr_l1"], 'NT Logistic Regression (Logistic-l2)': notears_linear_dict_scores["lr_l2"], 'NT Logistic Regression (Logistic-elastic)': notears_linear_dict_scores["lr_e"],'NT Logistic Regression (L2-none)': notears_l2_linear_dict_scores["lr"], 'NT Logistic Regression (L2-l1)': notears_l2_linear_dict_scores["lr_l1"], 'NT Logistic Regression (L2-l2)': notears_l2_linear_dict_scores["lr_l2"], 'NT Logistic Regression (L2-elastic)': notears_l2_linear_dict_scores["lr_e"],'NT Logistic Regression (Poisson-none)': notears_poisson_linear_dict_scores["lr"], 'NT Logistic Regression (Poisson-l1)': notears_poisson_linear_dict_scores["lr_l1"], 'NT Logistic Regression (Poisson-l2)': notears_poisson_linear_dict_scores["lr_l2"], 'NT Logistic Regression (Poisson-elastic)': notears_poisson_linear_dict_scores["lr_e"], 'BN Naive Bayes (HC-bernoulli)': bnlearn_linear_dict_scores["nb"],'BN Naive Bayes (HC-gaussian)': bnlearn_linear_dict_scores["nb_g"],'BN Naive Bayes (HC-multinomial)': bnlearn_linear_dict_scores["nb_m"],'BN Naive Bayes (HC-complement)': bnlearn_linear_dict_scores["nb_c"],'BN Naive Bayes (TABU-bernoulli)': bnlearn_tabu_linear_dict_scores["nb"],'BN Naive Bayes (TABU-gaussian)': bnlearn_tabu_linear_dict_scores["nb_g"],'BN Naive Bayes (TABU-multinomial)': bnlearn_tabu_linear_dict_scores["nb_m"],'BN Naive Bayes (TABU-complement)': bnlearn_tabu_linear_dict_scores["nb_c"],'BN Naive Bayes (PC-bernoulli)': bnlearn_pc_linear_dict_scores["nb"],'BN Naive Bayes (PC-gaussian)': bnlearn_pc_linear_dict_scores["nb_g"],'BN Naive Bayes (PC-multinomial)': bnlearn_pc_linear_dict_scores["nb_m"],'BN Naive Bayes (PC-complement)': bnlearn_pc_linear_dict_scores["nb_c"], 'BN Naive Bayes (MMHC-bernoulli)': bnlearn_mmhc_linear_dict_scores["nb"],'BN Naive Bayes (MMHC-gaussian)': bnlearn_mmhc_linear_dict_scores["nb_g"],'BN Naive Bayes (MMHC-multinomial)': bnlearn_mmhc_linear_dict_scores["nb_m"],'BN Naive Bayes (MMHC-complement)': bnlearn_mmhc_linear_dict_scores["nb_c"],'BN Naive Bayes (RSMAX2-bernoulli)': bnlearn_rsmax2_linear_dict_scores["nb"],'BN Naive Bayes (RSMAX2-gaussian)': bnlearn_rsmax2_linear_dict_scores["nb_g"],'BN Naive Bayes (RSMAX2-multinomial)': bnlearn_rsmax2_linear_dict_scores["nb_m"],'BN Naive Bayes (RSMAX2-complement)': bnlearn_rsmax2_linear_dict_scores["nb_c"],'BN Naive Bayes (H2PC-bernoulli)': bnlearn_h2pc_linear_dict_scores["nb"],'BN Naive Bayes (H2PC-gaussian)': bnlearn_h2pc_linear_dict_scores["nb_g"],'BN Naive Bayes (H2PC-multinomial)': bnlearn_h2pc_linear_dict_scores["nb_m"],'BN Naive Bayes (H2PC-complement)': bnlearn_h2pc_linear_dict_scores["nb_c"],'NT Naive Bayes (Logistic-bernoulli)': notears_linear_dict_scores["nb"],'NT Naive Bayes (Logistic-gaussian)': notears_linear_dict_scores["nb_g"],'NT Naive Bayes 
(Logistic-multinomial)': notears_linear_dict_scores["nb_m"],'NT Naive Bayes (Logistic-complement)': notears_linear_dict_scores["nb_c"], 'NT Naive Bayes (L2-bernoulli)': notears_l2_linear_dict_scores["nb"],'NT Naive Bayes (L2-gaussian)': notears_l2_linear_dict_scores["nb_g"],'NT Naive Bayes (L2-multinomial)': notears_l2_linear_dict_scores["nb_m"],'NT Naive Bayes (L2-complement)': notears_l2_linear_dict_scores["nb_c"],'NT Naive Bayes (Poisson-bernoulli)': notears_poisson_linear_dict_scores["nb"],'NT Naive Bayes (Poisson-gaussian)': notears_poisson_linear_dict_scores["nb_g"],'NT Naive Bayes (Poisson-multinomial)': notears_poisson_linear_dict_scores["nb_m"],'NT Naive Bayes (Poisson-complement)': notears_poisson_linear_dict_scores["nb_c"],'POMEGRANATE Naive Bayes (Greedy-bernoulli)': pomegranate_greedy_linear_dict_scores["nb"],'POMEGRANATE Naive Bayes (Greedy-gaussian)': pomegranate_greedy_linear_dict_scores["nb_g"],'POMEGRANATE Naive Bayes (Greedy-multinomial)': pomegranate_greedy_linear_dict_scores["nb_m"],'POMEGRANATE Naive Bayes (Greedy-complement)': pomegranate_greedy_linear_dict_scores["nb_c"],'POMEGRANATE Naive Bayes (Exact-bernoulli)': pomegranate_exact_linear_dict_scores["nb"],'POMEGRANATE Naive Bayes (Exact-gaussian)': pomegranate_exact_linear_dict_scores["nb_g"],'POMEGRANATE Naive Bayes (Exact-multinomial)': pomegranate_exact_linear_dict_scores["nb_m"],'POMEGRANATE Naive Bayes (Exact-complement)': pomegranate_exact_linear_dict_scores["nb_c"], 'PGMPY Naive Bayes (HC-bernoulli)': pgmpy_hc_linear_dict_scores["nb"],'PGMPY Naive Bayes (HC-gaussian)': pgmpy_hc_linear_dict_scores["nb_g"],'PGMPY Naive Bayes (HC-multinomial)': pgmpy_hc_linear_dict_scores["nb_m"],'PGMPY Naive Bayes (HC-complement)': pgmpy_hc_linear_dict_scores["nb_c"], 'PGMPY Naive Bayes (MMHC-bernoulli)': pgmpy_mmhc_linear_dict_scores["nb"],'PGMPY Naive Bayes (MMHC-gaussian)': pgmpy_mmhc_linear_dict_scores["nb_g"],'PGMPY Naive Bayes (MMHC-multinomial)': pgmpy_mmhc_linear_dict_scores["nb_m"],'PGMPY Naive Bayes (MMHC-complement)': pgmpy_mmhc_linear_dict_scores["nb_c"], 'PGMPY Naive Bayes (TREE-bernoulli)': pgmpy_tree_linear_dict_scores["nb"],'PGMPY Naive Bayes (TREE-gaussian)': pgmpy_tree_linear_dict_scores["nb_g"],'PGMPY Naive Bayes (TREE-multinomial)': pgmpy_tree_linear_dict_scores["nb_m"],'PGMPY Naive Bayes (TREE-complement)': pgmpy_tree_linear_dict_scores["nb_c"], 'BN Support Vector Machine (HC-sigmoid)': bnlearn_linear_dict_scores["svm"], 'BN Support Vector Machine (HC-polynomial)': bnlearn_linear_dict_scores["svm_po"], 'BN Support Vector Machine (HC-rbf)': bnlearn_linear_dict_scores["svm_r"], 'BN Support Vector Machine (TABU-sigmoid)': bnlearn_tabu_linear_dict_scores["svm"], 'BN Support Vector Machine (TABU-polynomial)': bnlearn_tabu_linear_dict_scores["svm_po"], 'BN Support Vector Machine (TABU-rbf)': bnlearn_tabu_linear_dict_scores["svm_r"],'BN Support Vector Machine (PC-sigmoid)': bnlearn_pc_linear_dict_scores["svm"], 'BN Support Vector Machine (PC-polynomial)': bnlearn_pc_linear_dict_scores["svm_po"], 'BN Support Vector Machine (PC-rbf)': bnlearn_pc_linear_dict_scores["svm_r"],'BN Support Vector Machine (MMHC-sigmoid)': bnlearn_mmhc_linear_dict_scores["svm"], 'BN Support Vector Machine (MMHC-polynomial)': bnlearn_mmhc_linear_dict_scores["svm_po"], 'BN Support Vector Machine (MMHC-rbf)': bnlearn_mmhc_linear_dict_scores["svm_r"],'BN Support Vector Machine (RSMAX2-sigmoid)': bnlearn_rsmax2_linear_dict_scores["svm"], 'BN Support Vector Machine (RSMAX2-polynomial)': bnlearn_rsmax2_linear_dict_scores["svm_po"], 'BN Support 
Vector Machine (RSMAX2-rbf)': bnlearn_rsmax2_linear_dict_scores["svm_r"],'BN Support Vector Machine (H2PC-sigmoid)': bnlearn_h2pc_linear_dict_scores["svm"], 'BN Support Vector Machine (H2PC-polynomial)': bnlearn_h2pc_linear_dict_scores["svm_po"], 'BN Support Vector Machine (H2PC-rbf)': bnlearn_h2pc_linear_dict_scores["svm_r"],'NT Support Vector Machine (logistic-sigmoid)': notears_linear_dict_scores["svm"],'NT Support Vector Machine (logistic-polynomial)': notears_linear_dict_scores["svm_po"],'NT Support Vector Machine (logistic-rbf)': notears_linear_dict_scores["svm_r"],'NT Support Vector Machine (L2-sigmoid)': notears_l2_linear_dict_scores["svm"],'NT Support Vector Machine (L2-polynomial)': notears_l2_linear_dict_scores["svm_po"],'NT Support Vector Machine (L2-rbf)': notears_l2_linear_dict_scores["svm_r"],'NT Support Vector Machine (Poisson-sigmoid)': notears_poisson_linear_dict_scores["svm"],'NT Support Vector Machine (Poisson-polynomial)': notears_poisson_linear_dict_scores["svm_po"],'NT Support Vector Machine (Poisson-rbf)': notears_poisson_linear_dict_scores["svm_r"], 'Pomegranate Support Vector Machine (Exact-sigmoid)': pomegranate_exact_linear_dict_scores["svm"],'Pomegranate Support Vector Machine (Exact-polynomial)': pomegranate_exact_linear_dict_scores["svm_po"],'Pomegranate Support Vector Machine (Exact-rbf)': pomegranate_exact_linear_dict_scores["svm_r"], 'Pomegranate Support Vector Machine (Greedy-sigmoid)': pomegranate_greedy_linear_dict_scores["svm"],'Pomegranate Support Vector Machine (Greedy-polynomial)': pomegranate_greedy_linear_dict_scores["svm_po"],'Pomegranate Support Vector Machine (Greedy-rbf)': pomegranate_greedy_linear_dict_scores["svm_r"], 'PGMPY Support Vector Machine (HC-sigmoid)': pgmpy_hc_linear_dict_scores["svm"],'PGMPY Support Vector Machine (HC-polynomial)': pgmpy_hc_linear_dict_scores["svm_po"],'PGMPY Support Vector Machine (HC-rbf)': pgmpy_hc_linear_dict_scores["svm_r"], 'PGMPY Support Vector Machine (MMHC-sigmoid)': pgmpy_mmhc_linear_dict_scores["svm"],'PGMPY Support Vector Machine (MMHC-polynomial)': pgmpy_mmhc_linear_dict_scores["svm_po"],'PGMPY Support Vector Machine (MMHC-rbf)': pgmpy_mmhc_linear_dict_scores["svm_r"], 'PGMPY Support Vector Machine (TREE-sigmoid)': pgmpy_tree_linear_dict_scores["svm"],'PGMPY Support Vector Machine (TREE-polynomial)': pgmpy_tree_linear_dict_scores["svm_po"],'PGMPY Support Vector Machine (TREE-rbf)': pgmpy_tree_linear_dict_scores["svm_r"],'BN K Nearest Neighbor (HC-weight)': bnlearn_linear_dict_scores["knn"],'BN K Nearest Neighbor (HC-distance)': bnlearn_linear_dict_scores["knn_d"],'BN K Nearest Neighbor (TABU-weight)': bnlearn_tabu_linear_dict_scores["knn"],'BN K Nearest Neighbor (TABU-distance)': bnlearn_tabu_linear_dict_scores["knn_d"],'BN K Nearest Neighbor (PC-weight)': bnlearn_pc_linear_dict_scores["knn"],'BN K Nearest Neighbor (PC-distance)': bnlearn_pc_linear_dict_scores["knn_d"],'BN K Nearest Neighbor (MMHC-weight)': bnlearn_mmhc_linear_dict_scores["knn"],'BN K Nearest Neighbor (MMHC-distance)': bnlearn_mmhc_linear_dict_scores["knn_d"],'BN K Nearest Neighbor (RSMAX2-weight)': bnlearn_rsmax2_linear_dict_scores["knn"],'BN K Nearest Neighbor (RSMAX2-distance)': bnlearn_rsmax2_linear_dict_scores["knn_d"],'BN K Nearest Neighbor (H2PC-weight)': bnlearn_h2pc_linear_dict_scores["knn"],'BN K Nearest Neighbor (H2PC-distance)': bnlearn_h2pc_linear_dict_scores["knn_d"],'NT K Nearest Neighbor (Logistic-weight)': notears_linear_dict_scores["knn"], 'NT K Nearest Neighbor (Logistic-distance)': 
notears_linear_dict_scores["knn_d"],'NT K Nearest Neighbor (L2-weight)': notears_l2_linear_dict_scores["knn"], 'NT K Nearest Neighbor (L2-distance)': notears_l2_linear_dict_scores["knn_d"], 'NT K Nearest Neighbor (Poisson-weight)': notears_poisson_linear_dict_scores["knn"], 'NT K Nearest Neighbor (Poisson-distance)': notears_poisson_linear_dict_scores["knn_d"], 'POMEGRANATE K Nearest Neighbor (Exact-weight)': pomegranate_exact_linear_dict_scores["knn"], 'POMEGRANATE K Nearest Neighbor (Exact-distance)': pomegranate_exact_linear_dict_scores["knn_d"], 'POMEGRANATE K Nearest Neighbor (Greedy-weight)': pomegranate_greedy_linear_dict_scores["knn"], 'POMEGRANATE K Nearest Neighbor (Greedy-distance)': pomegranate_greedy_linear_dict_scores["knn_d"], 'PGMPY K Nearest Neighbor (HC-weight)': pgmpy_hc_linear_dict_scores["knn"], 'PGMPY K Nearest Neighbor (HC-distance)': pgmpy_hc_linear_dict_scores["knn_d"], 'PGMPY K Nearest Neighbor (MMHC-weight)': pgmpy_mmhc_linear_dict_scores["knn"], 'PGMPY K Nearest Neighbor (MMHC-distance)': pgmpy_mmhc_linear_dict_scores["knn_d"], 'PGMPY K Nearest Neighbor (TREE-weight)': pgmpy_tree_linear_dict_scores["knn"], 'PGMPY K Nearest Neighbor (TREE-distance)': pgmpy_tree_linear_dict_scores["knn_d"]}
top_learned_linear = max(sim_linear_workflows, key=sim_linear_workflows.get)
print("Learned world - Linear problem, Prediction: "+ top_learned_linear + " (" + str(sim_linear_workflows[top_learned_linear]) + ")")
real_nonlinear_workflows = {'Decision Tree (gini)': real_nonlinear_dt_scores,
'Decision Tree (entropy)': real_nonlinear_dt_entropy_scores,
'Random Forest (gini)': real_nonlinear_rf_scores,
'Random Forest (entropy)': real_nonlinear_rf_entropy_scores,
'Logistic Regression (none)': real_nonlinear_lr_scores,
'Logistic Regression (l1)': real_nonlinear_lr_l1_scores,
'Logistic Regression (l2)': real_nonlinear_lr_l2_scores,
'Logistic Regression (elasticnet)': real_nonlinear_lr_elastic_scores,
'Naive Bayes (bernoulli)': real_nonlinear_gb_scores,
'Naive Bayes (multinomial)': real_nonlinear_gb_multi_scores,
'Naive Bayes (gaussian)': real_nonlinear_gb_gaussian_scores,
'Naive Bayes (complement)': real_nonlinear_gb_complement_scores,
'Support Vector Machine (sigmoid)': real_nonlinear_svm_scores,
'Support Vector Machine (polynomial)': real_nonlinear_svm_poly_scores,
'Support Vector Machine (rbf)': real_nonlinear_svm_rbf_scores,
'K Nearest Neighbor (uniform)': real_nonlinear_knn_scores,
'K Nearest Neighbor (distance)': real_nonlinear_knn_distance_scores}
top_real_nonlinear = max(real_nonlinear_workflows, key=real_nonlinear_workflows.get)
print("Real world - Nonlinear problem, Prediction: "+ top_real_nonlinear + " (" + str(real_nonlinear_workflows[top_real_nonlinear]) + ")")
sim_nonlinear_workflows = {'BN Decision Tree (HC-gini)': bnlearn_nonlinear_dict_scores["dt"],
'BN Decision Tree (HC-entropy)': bnlearn_nonlinear_dict_scores["dt_e"],
'BN Decision Tree (TABU-gini)': bnlearn_tabu_nonlinear_dict_scores["dt"],
'BN Decision Tree (TABU-entropy)': bnlearn_tabu_nonlinear_dict_scores["dt_e"],
#'BN Decision Tree (PC-gini)': bnlearn_pc_nonlinear_dict_scores["dt"],
#'BN Decision Tree (PC-entropy)': bnlearn_pc_nonlinear_dict_scores["dt_e"],
'BN Decision Tree (MMHC-gini)': bnlearn_mmhc_nonlinear_dict_scores["dt"],
'BN Decision Tree (MMHC-entropy)': bnlearn_mmhc_nonlinear_dict_scores["dt_e"],
'BN Decision Tree (RSMAX2-gini)': bnlearn_rsmax2_nonlinear_dict_scores["dt"],
'BN Decision Tree (RSMAX2-entropy)': bnlearn_rsmax2_nonlinear_dict_scores["dt_e"],
'BN Decision Tree (H2PC-gini)': bnlearn_h2pc_nonlinear_dict_scores["dt"],
'BN Decision Tree (H2PC-entropy)': bnlearn_h2pc_nonlinear_dict_scores["dt_e"],
'NT Decision Tree (Logistic-gini)': notears_nonlinear_dict_scores["dt"],
'NT Decision Tree (Logistic-entropy)': notears_nonlinear_dict_scores["dt_e"],
'NT Decision Tree (L2-gini)': notears_l2_nonlinear_dict_scores["dt"],
'NT Decision Tree (L2-entropy)': notears_l2_nonlinear_dict_scores["dt_e"],
'NT Decision Tree (Poisson-gini)': notears_poisson_nonlinear_dict_scores["dt"],
'NT Decision Tree (Poisson-entropy)': notears_poisson_nonlinear_dict_scores["dt_e"],
'POMEGRANATE Decision Tree (Exact-gini)': pomegranate_exact_nonlinear_dict_scores["dt"],
'POMEGRANATE Decision Tree (Exact-entropy)': pomegranate_exact_nonlinear_dict_scores["dt_e"],
'POMEGRANATE Decision Tree (Greedy-gini)': pomegranate_greedy_nonlinear_dict_scores["dt"],
'POMEGRANATE Decision Tree (Greedy-entropy)': pomegranate_greedy_nonlinear_dict_scores["dt_e"],
'PGMPY Decision Tree (HC-gini)': pgmpy_hc_nonlinear_dict_scores["dt"],
'PGMPY Decision Tree (HC-entropy)': pgmpy_hc_nonlinear_dict_scores["dt_e"],
'PGMPY Decision Tree (MMHC-gini)': pgmpy_mmhc_nonlinear_dict_scores["dt"],
'PGMPY Decision Tree (MMHC-entropy)': pgmpy_mmhc_nonlinear_dict_scores["dt_e"],
'PGMPY Decision Tree (TREE-gini)': pgmpy_tree_nonlinear_dict_scores["dt"],
'PGMPY Decision Tree (TREE-entropy)': pgmpy_tree_nonlinear_dict_scores["dt_e"],
'BN Random Forest (HC-gini)': bnlearn_nonlinear_dict_scores["rf"],
'BN Random Forest (HC-entropy)': bnlearn_nonlinear_dict_scores["rf_e"],
'BN Random Forest (TABU-gini)': bnlearn_tabu_nonlinear_dict_scores["rf"],
'BN Random Forest (TABU-entropy)': bnlearn_tabu_nonlinear_dict_scores["rf_e"],
#'BN Random Forest (PC-gini)': bnlearn_pc_nonlinear_dict_scores["rf"],
#'BN Random Forest (PC-entropy)': bnlearn_pc_nonlinear_dict_scores["rf_e"],
'BN Random Forest (MMHC-gini)': bnlearn_mmhc_nonlinear_dict_scores["rf"],
'BN Random Forest (MMHC-entropy)': bnlearn_mmhc_nonlinear_dict_scores["rf_e"],
'BN Random Forest (RSMAX2-gini)': bnlearn_rsmax2_nonlinear_dict_scores["rf"],
'BN Random Forest (RSMAX2-entropy)': bnlearn_rsmax2_nonlinear_dict_scores["rf_e"],
'BN Random Forest (H2PC-gini)': bnlearn_h2pc_nonlinear_dict_scores["rf"],
'BN Random Forest (H2PC-entropy)': bnlearn_h2pc_nonlinear_dict_scores["rf_e"],
'NT Random Forest (Logistic-gini)': notears_nonlinear_dict_scores["rf"],
'NT Random Forest (Logistic-entropy)': notears_nonlinear_dict_scores["rf_e"],
'NT Random Forest (L2-gini)': notears_l2_nonlinear_dict_scores["rf"],
'NT Random Forest (L2-entropy)': notears_l2_nonlinear_dict_scores["rf_e"],
'NT Random Forest (Poisson-gini)': notears_poisson_nonlinear_dict_scores["rf"],
'NT Random Forest (Poisson-entropy)': notears_poisson_nonlinear_dict_scores["rf_e"],
'POMEGRANATE Random Forest (Exact-gini)': pomegranate_exact_nonlinear_dict_scores["rf"],
'POMEGRANATE Random Forest (Exact-entropy)': pomegranate_exact_nonlinear_dict_scores["rf_e"],
'POMEGRANATE Random Forest (Greedy-gini)': pomegranate_greedy_nonlinear_dict_scores["rf"],
'POMEGRANATE Random Forest (Greedy-entropy)': pomegranate_greedy_nonlinear_dict_scores["rf_e"],
'PGMPY Random Forest (HC-gini)': pgmpy_hc_nonlinear_dict_scores["rf"],
'PGMPY Random Forest (HC-entropy)': pgmpy_hc_nonlinear_dict_scores["rf_e"],
'PGMPY Random Forest (MMHC-gini)': pgmpy_mmhc_nonlinear_dict_scores["rf"],
'PGMPY Random Forest (MMHC-entropy)': pgmpy_mmhc_nonlinear_dict_scores["rf_e"],
'PGMPY Random Forest (TREE-gini)': pgmpy_tree_nonlinear_dict_scores["rf"],
'PGMPY Random Forest (TREE-entropy)': pgmpy_tree_nonlinear_dict_scores["rf_e"],
'BN Logistic Regression (HC-none)': bnlearn_nonlinear_dict_scores["lr"],
'BN Logistic Regression (HC-l1)': bnlearn_nonlinear_dict_scores["lr_l1"],
'BN Logistic Regression (HC-l2)': bnlearn_nonlinear_dict_scores["lr_l2"],
'BN Logistic Regression (HC-elastic)': bnlearn_nonlinear_dict_scores["lr_e"],
'BN Logistic Regression (TABU-none)': bnlearn_tabu_nonlinear_dict_scores["lr"],
'BN Logistic Regression (TABU-l1)': bnlearn_tabu_nonlinear_dict_scores["lr_l1"],
'BN Logistic Regression (TABU-l2)': bnlearn_tabu_nonlinear_dict_scores["lr_l2"],
'BN Logistic Regression (TABU-elastic)': bnlearn_tabu_nonlinear_dict_scores["lr_e"],
#'BN Logistic Regression (PC-none)': bnlearn_pc_nonlinear_dict_scores["lr"],
#'BN Logistic Regression (PC-l1)': bnlearn_pc_nonlinear_dict_scores["lr_l1"],
#'BN Logistic Regression (PC-l2)': bnlearn_pc_nonlinear_dict_scores["lr_l2"],
#'BN Logistic Regression (PC-elastic)': bnlearn_pc_nonlinear_dict_scores["lr_e"],
'BN Logistic Regression (MMHC-none)': bnlearn_mmhc_nonlinear_dict_scores["lr"],
'BN Logistic Regression (MMHC-l1)': bnlearn_mmhc_nonlinear_dict_scores["lr_l1"],
'BN Logistic Regression (MMHC-l2)': bnlearn_mmhc_nonlinear_dict_scores["lr_l2"],
'BN Logistic Regression (MMHC-elastic)': bnlearn_mmhc_nonlinear_dict_scores["lr_e"],
'BN Logistic Regression (RSMAX2-none)': bnlearn_rsmax2_nonlinear_dict_scores["lr"],
'BN Logistic Regression (RSMAX2-l1)': bnlearn_rsmax2_nonlinear_dict_scores["lr_l1"],
'BN Logistic Regression (RSMAX2-l2)': bnlearn_rsmax2_nonlinear_dict_scores["lr_l2"],
'BN Logistic Regression (RSMAX2-elastic)': bnlearn_rsmax2_nonlinear_dict_scores["lr_e"],
'BN Logistic Regression (H2PC-none)': bnlearn_h2pc_nonlinear_dict_scores["lr"],
'BN Logistic Regression (H2PC-l1)': bnlearn_h2pc_nonlinear_dict_scores["lr_l1"],
'BN Logistic Regression (H2PC-l2)': bnlearn_h2pc_nonlinear_dict_scores["lr_l2"],
'BN Logistic Regression (H2PC-elastic)': bnlearn_h2pc_nonlinear_dict_scores["lr_e"],
'POMEGRANATE Logistic Regression (Exact-none)': pomegranate_exact_nonlinear_dict_scores["lr"],
'POMEGRANATE Logistic Regression (Exact-l1)': pomegranate_exact_nonlinear_dict_scores["lr_l1"],
'POMEGRANATE Logistic Regression (Exact-l2)': pomegranate_exact_nonlinear_dict_scores["lr_l2"],
'POMEGRANATE Logistic Regression (Exact-elastic)': pomegranate_exact_nonlinear_dict_scores[
"lr_e"],
'POMEGRANATE Logistic Regression (Greedy-none)': pomegranate_greedy_nonlinear_dict_scores[
"lr"],
'POMEGRANATE Logistic Regression (Greedy-l1)': pomegranate_greedy_nonlinear_dict_scores[
"lr_l1"],
'POMEGRANATE Logistic Regression (Greedy-l2)': pomegranate_greedy_nonlinear_dict_scores[
"lr_l2"],
'POMEGRANATE Logistic Regression (Greedy-elastic)': pomegranate_greedy_nonlinear_dict_scores[
"lr_e"], 'PGMPY Logistic Regression (HC-none)': pgmpy_hc_nonlinear_dict_scores["lr"],
'PGMPY Logistic Regression (HC-l1)': pgmpy_hc_nonlinear_dict_scores["lr_l1"],
'PGMPY Logistic Regression (HC-l2)': pgmpy_hc_nonlinear_dict_scores["lr_l2"],
'PGMPY Logistic Regression (HC-elastic)': pgmpy_hc_nonlinear_dict_scores["lr_e"],
'PGMPY Logistic Regression (TREE-none)': pgmpy_tree_nonlinear_dict_scores["lr"],
'PGMPY Logistic Regression (TREE-l1)': pgmpy_tree_nonlinear_dict_scores["lr_l1"],
'PGMPY Logistic Regression (TREE-l2)': pgmpy_tree_nonlinear_dict_scores["lr_l2"],
'PGMPY Logistic Regression (TREE-elastic)': pgmpy_tree_nonlinear_dict_scores["lr_e"],
'PGMPY Logistic Regression (MMHC-none)': pgmpy_mmhc_nonlinear_dict_scores["lr"],
'PGMPY Logistic Regression (MMHC-l1)': pgmpy_mmhc_nonlinear_dict_scores["lr_l1"],
'PGMPY Logistic Regression (MMHC-l2)': pgmpy_mmhc_nonlinear_dict_scores["lr_l2"],
'PGMPY Logistic Regression (MMHC-elastic)': pgmpy_mmhc_nonlinear_dict_scores["lr_e"],
'NT Logistic Regression (Logistic-none)': notears_nonlinear_dict_scores["lr"],
'NT Logistic Regression (Logistic-l1)': notears_nonlinear_dict_scores["lr_l1"],
'NT Logistic Regression (Logistic-l2)': notears_nonlinear_dict_scores["lr_l2"],
'NT Logistic Regression (Logistic-elastic)': notears_nonlinear_dict_scores["lr_e"],
'NT Logistic Regression (L2-none)': notears_l2_nonlinear_dict_scores["lr"],
'NT Logistic Regression (L2-l1)': notears_l2_nonlinear_dict_scores["lr_l1"],
'NT Logistic Regression (L2-l2)': notears_l2_nonlinear_dict_scores["lr_l2"],
'NT Logistic Regression (L2-elastic)': notears_l2_nonlinear_dict_scores["lr_e"],
'NT Logistic Regression (Poisson-none)': notears_poisson_nonlinear_dict_scores["lr"],
'NT Logistic Regression (Poisson-l1)': notears_poisson_nonlinear_dict_scores["lr_l1"],
'NT Logistic Regression (Poisson-l2)': notears_poisson_nonlinear_dict_scores["lr_l2"],
'NT Logistic Regression (Poisson-elastic)': notears_poisson_nonlinear_dict_scores["lr_e"],
'BN Naive Bayes (HC-bernoulli)': bnlearn_nonlinear_dict_scores["nb"],
'BN Naive Bayes (HC-gaussian)': bnlearn_nonlinear_dict_scores["nb_g"],
'BN Naive Bayes (HC-multinomial)': bnlearn_nonlinear_dict_scores["nb_m"],
'BN Naive Bayes (HC-complement)': bnlearn_nonlinear_dict_scores["nb_c"],
'BN Naive Bayes (TABU-bernoulli)': bnlearn_tabu_nonlinear_dict_scores["nb"],
'BN Naive Bayes (TABU-gaussian)': bnlearn_tabu_nonlinear_dict_scores["nb_g"],
'BN Naive Bayes (TABU-multinomial)': bnlearn_tabu_nonlinear_dict_scores["nb_m"],
'BN Naive Bayes (TABU-complement)': bnlearn_tabu_nonlinear_dict_scores["nb_c"],
#'BN Naive Bayes (PC-bernoulli)': bnlearn_pc_nonlinear_dict_scores["nb"],
#'BN Naive Bayes (PC-gaussian)': bnlearn_pc_nonlinear_dict_scores["nb_g"],
#'BN Naive Bayes (PC-multinomial)': bnlearn_pc_nonlinear_dict_scores["nb_m"],
#'BN Naive Bayes (PC-complement)': bnlearn_pc_nonlinear_dict_scores["nb_c"],
'BN Naive Bayes (MMHC-bernoulli)': bnlearn_mmhc_nonlinear_dict_scores["nb"],
'BN Naive Bayes (MMHC-gaussian)': bnlearn_mmhc_nonlinear_dict_scores["nb_g"],
'BN Naive Bayes (MMHC-multinomial)': bnlearn_mmhc_nonlinear_dict_scores["nb_m"],
'BN Naive Bayes (MMHC-complement)': bnlearn_mmhc_nonlinear_dict_scores["nb_c"],
'BN Naive Bayes (RSMAX2-bernoulli)': bnlearn_rsmax2_nonlinear_dict_scores["nb"],
'BN Naive Bayes (RSMAX2-gaussian)': bnlearn_rsmax2_nonlinear_dict_scores["nb_g"],
'BN Naive Bayes (RSMAX2-multinomial)': bnlearn_rsmax2_nonlinear_dict_scores["nb_m"],
'BN Naive Bayes (RSMAX2-complement)': bnlearn_rsmax2_nonlinear_dict_scores["nb_c"],
'BN Naive Bayes (H2PC-bernoulli)': bnlearn_h2pc_nonlinear_dict_scores["nb"],
'BN Naive Bayes (H2PC-gaussian)': bnlearn_h2pc_nonlinear_dict_scores["nb_g"],
'BN Naive Bayes (H2PC-multinomial)': bnlearn_h2pc_nonlinear_dict_scores["nb_m"],
'BN Naive Bayes (H2PC-complement)': bnlearn_h2pc_nonlinear_dict_scores["nb_c"],
'NT Naive Bayes (Logistic-bernoulli)': notears_nonlinear_dict_scores["nb"],
'NT Naive Bayes (Logistic-gaussian)': notears_nonlinear_dict_scores["nb_g"],
'NT Naive Bayes (Logistic-multinomial)': notears_nonlinear_dict_scores["nb_m"],
'NT Naive Bayes (Logistic-complement)': notears_nonlinear_dict_scores["nb_c"],
'NT Naive Bayes (L2-bernoulli)': notears_l2_nonlinear_dict_scores["nb"],
'NT Naive Bayes (L2-gaussian)': notears_l2_nonlinear_dict_scores["nb_g"],
'NT Naive Bayes (L2-multinomial)': notears_l2_nonlinear_dict_scores["nb_m"],
'NT Naive Bayes (L2-complement)': notears_l2_nonlinear_dict_scores["nb_c"],
'NT Naive Bayes (Poisson-bernoulli)': notears_poisson_nonlinear_dict_scores["nb"],
'NT Naive Bayes (Poisson-gaussian)': notears_poisson_nonlinear_dict_scores["nb_g"],
'NT Naive Bayes (Poisson-multinomial)': notears_poisson_nonlinear_dict_scores["nb_m"],
'NT Naive Bayes (Poisson-complement)': notears_poisson_nonlinear_dict_scores["nb_c"],
'POMEGRANATE Naive Bayes (Greedy-bernoulli)': pomegranate_greedy_nonlinear_dict_scores["nb"],
'POMEGRANATE Naive Bayes (Greedy-gaussian)': pomegranate_greedy_nonlinear_dict_scores["nb_g"],
'POMEGRANATE Naive Bayes (Greedy-multinomial)': pomegranate_greedy_nonlinear_dict_scores[
"nb_m"],
'POMEGRANATE Naive Bayes (Greedy-complement)': pomegranate_greedy_nonlinear_dict_scores[
"nb_c"],
'POMEGRANATE Naive Bayes (Exact-bernoulli)': pomegranate_exact_nonlinear_dict_scores["nb"],
'POMEGRANATE Naive Bayes (Exact-gaussian)': pomegranate_exact_nonlinear_dict_scores["nb_g"],
'POMEGRANATE Naive Bayes (Exact-multinomial)': pomegranate_exact_nonlinear_dict_scores["nb_m"],
'POMEGRANATE Naive Bayes (Exact-complement)': pomegranate_exact_nonlinear_dict_scores["nb_c"],
'PGMPY Naive Bayes (HC-bernoulli)': pgmpy_hc_nonlinear_dict_scores["nb"],
'PGMPY Naive Bayes (HC-gaussian)': pgmpy_hc_nonlinear_dict_scores["nb_g"],
'PGMPY Naive Bayes (HC-multinomial)': pgmpy_hc_nonlinear_dict_scores["nb_m"],
'PGMPY Naive Bayes (HC-complement)': pgmpy_hc_nonlinear_dict_scores["nb_c"],
'PGMPY Naive Bayes (MMHC-bernoulli)': pgmpy_mmhc_nonlinear_dict_scores["nb"],
'PGMPY Naive Bayes (MMHC-gaussian)': pgmpy_mmhc_nonlinear_dict_scores["nb_g"],
'PGMPY Naive Bayes (MMHC-multinomial)': pgmpy_mmhc_nonlinear_dict_scores["nb_m"],
'PGMPY Naive Bayes (MMHC-complement)': pgmpy_mmhc_nonlinear_dict_scores["nb_c"],
'PGMPY Naive Bayes (TREE-bernoulli)': pgmpy_tree_nonlinear_dict_scores["nb"],
'PGMPY Naive Bayes (TREE-gaussian)': pgmpy_tree_nonlinear_dict_scores["nb_g"],
'PGMPY Naive Bayes (TREE-multinomial)': pgmpy_tree_nonlinear_dict_scores["nb_m"],
'PGMPY Naive Bayes (TREE-complement)': pgmpy_tree_nonlinear_dict_scores["nb_c"],
'BN Support Vector Machine (HC-sigmoid)': bnlearn_nonlinear_dict_scores["svm"],
'BN Support Vector Machine (HC-polynomial)': bnlearn_nonlinear_dict_scores["svm_po"],
'BN Support Vector Machine (HC-rbf)': bnlearn_nonlinear_dict_scores["svm_r"],
'BN Support Vector Machine (TABU-sigmoid)': bnlearn_tabu_nonlinear_dict_scores["svm"],
'BN Support Vector Machine (TABU-polynomial)': bnlearn_tabu_nonlinear_dict_scores["svm_po"],
'BN Support Vector Machine (TABU-rbf)': bnlearn_tabu_nonlinear_dict_scores["svm_r"],
#'BN Support Vector Machine (PC-sigmoid)': bnlearn_pc_nonlinear_dict_scores["svm"],
#'BN Support Vector Machine (PC-polynomial)': bnlearn_pc_nonlinear_dict_scores["svm_po"],
#'BN Support Vector Machine (PC-rbf)': bnlearn_pc_nonlinear_dict_scores["svm_r"],
'BN Support Vector Machine (MMHC-sigmoid)': bnlearn_mmhc_nonlinear_dict_scores["svm"],
'BN Support Vector Machine (MMHC-polynomial)': bnlearn_mmhc_nonlinear_dict_scores["svm_po"],
'BN Support Vector Machine (MMHC-rbf)': bnlearn_mmhc_nonlinear_dict_scores["svm_r"],
'BN Support Vector Machine (RSMAX2-sigmoid)': bnlearn_rsmax2_nonlinear_dict_scores["svm"],
'BN Support Vector Machine (RSMAX2-polynomial)': bnlearn_rsmax2_nonlinear_dict_scores[
"svm_po"],
'BN Support Vector Machine (RSMAX2-rbf)': bnlearn_rsmax2_nonlinear_dict_scores["svm_r"],
'BN Support Vector Machine (H2PC-sigmoid)': bnlearn_h2pc_nonlinear_dict_scores["svm"],
'BN Support Vector Machine (H2PC-polynomial)': bnlearn_h2pc_nonlinear_dict_scores["svm_po"],
'BN Support Vector Machine (H2PC-rbf)': bnlearn_h2pc_nonlinear_dict_scores["svm_r"],
'NT Support Vector Machine (Logistic-sigmoid)': notears_nonlinear_dict_scores["svm"],
'NT Support Vector Machine (Logistic-polynomial)': notears_nonlinear_dict_scores["svm_po"],
'NT Support Vector Machine (Logistic-rbf)': notears_nonlinear_dict_scores["svm_r"],
'NT Support Vector Machine (L2-sigmoid)': notears_l2_nonlinear_dict_scores["svm"],
'NT Support Vector Machine (L2-polynomial)': notears_l2_nonlinear_dict_scores["svm_po"],
'NT Support Vector Machine (L2-rbf)': notears_l2_nonlinear_dict_scores["svm_r"],
'NT Support Vector Machine (Poisson-sigmoid)': notears_poisson_nonlinear_dict_scores["svm"],
'NT Support Vector Machine (Poisson-polynomial)': notears_poisson_nonlinear_dict_scores[
"svm_po"],
'NT Support Vector Machine (Poisson-rbf)': notears_poisson_nonlinear_dict_scores["svm_r"],
'POMEGRANATE Support Vector Machine (Exact-sigmoid)': pomegranate_exact_nonlinear_dict_scores["svm"],
'POMEGRANATE Support Vector Machine (Exact-polynomial)': pomegranate_exact_nonlinear_dict_scores["svm_po"],
'POMEGRANATE Support Vector Machine (Exact-rbf)': pomegranate_exact_nonlinear_dict_scores["svm_r"],
'POMEGRANATE Support Vector Machine (Greedy-sigmoid)': pomegranate_greedy_nonlinear_dict_scores["svm"],
'POMEGRANATE Support Vector Machine (Greedy-polynomial)': pomegranate_greedy_nonlinear_dict_scores["svm_po"],
'POMEGRANATE Support Vector Machine (Greedy-rbf)': pomegranate_greedy_nonlinear_dict_scores["svm_r"],
'PGMPY Support Vector Machine (HC-sigmoid)': pgmpy_hc_nonlinear_dict_scores["svm"],
'PGMPY Support Vector Machine (HC-polynomial)': pgmpy_hc_nonlinear_dict_scores["svm_po"],
'PGMPY Support Vector Machine (HC-rbf)': pgmpy_hc_nonlinear_dict_scores["svm_r"],
'PGMPY Support Vector Machine (MMHC-sigmoid)': pgmpy_mmhc_nonlinear_dict_scores["svm"],
'PGMPY Support Vector Machine (MMHC-polynomial)': pgmpy_mmhc_nonlinear_dict_scores["svm_po"],
'PGMPY Support Vector Machine (MMHC-rbf)': pgmpy_mmhc_nonlinear_dict_scores["svm_r"],
'PGMPY Support Vector Machine (TREE-sigmoid)': pgmpy_tree_nonlinear_dict_scores["svm"],
'PGMPY Support Vector Machine (TREE-polynomial)': pgmpy_tree_nonlinear_dict_scores["svm_po"],
'PGMPY Support Vector Machine (TREE-rbf)': pgmpy_tree_nonlinear_dict_scores["svm_r"],
'BN K Nearest Neighbor (HC-weight)': bnlearn_nonlinear_dict_scores["knn"],
'BN K Nearest Neighbor (HC-distance)': bnlearn_nonlinear_dict_scores["knn_d"],
'BN K Nearest Neighbor (TABU-weight)': bnlearn_tabu_nonlinear_dict_scores["knn"],
'BN K Nearest Neighbor (TABU-distance)': bnlearn_tabu_nonlinear_dict_scores["knn_d"],
#'BN K Nearest Neighbor (PC-weight)': bnlearn_pc_nonlinear_dict_scores["knn"],
#'BN K Nearest Neighbor (PC-distance)': bnlearn_pc_nonlinear_dict_scores["knn_d"],
'BN K Nearest Neighbor (MMHC-weight)': bnlearn_mmhc_nonlinear_dict_scores["knn"],
'BN K Nearest Neighbor (MMHC-distance)': bnlearn_mmhc_nonlinear_dict_scores["knn_d"],
'BN K Nearest Neighbor (RSMAX2-weight)': bnlearn_rsmax2_nonlinear_dict_scores["knn"],
'BN K Nearest Neighbor (RSMAX2-distance)': bnlearn_rsmax2_nonlinear_dict_scores["knn_d"],
'BN K Nearest Neighbor (H2PC-weight)': bnlearn_h2pc_nonlinear_dict_scores["knn"],
'BN K Nearest Neighbor (H2PC-distance)': bnlearn_h2pc_nonlinear_dict_scores["knn_d"],
'NT K Nearest Neighbor (Logistic-weight)': notears_nonlinear_dict_scores["knn"],
'NT K Nearest Neighbor (Logistic-distance)': notears_nonlinear_dict_scores["knn_d"],
'NT K Nearest Neighbor (L2-weight)': notears_l2_nonlinear_dict_scores["knn"],
'NT K Nearest Neighbor (L2-distance)': notears_l2_nonlinear_dict_scores["knn_d"],
'NT K Nearest Neighbor (Poisson-weight)': notears_poisson_nonlinear_dict_scores["knn"],
'NT K Nearest Neighbor (Poisson-distance)': notears_poisson_nonlinear_dict_scores["knn_d"],
'POMEGRANATE K Nearest Neighbor (Exact-weight)': pomegranate_exact_nonlinear_dict_scores[
"knn"],
'POMEGRANATE K Nearest Neighbor (Exact-distance)': pomegranate_exact_nonlinear_dict_scores[
"knn_d"],
'POMEGRANATE K Nearest Neighbor (Greedy-weight)': pomegranate_greedy_nonlinear_dict_scores[
"knn"],
'POMEGRANATE K Nearest Neighbor (Greedy-distance)': pomegranate_greedy_nonlinear_dict_scores["knn_d"],
'PGMPY K Nearest Neighbor (HC-weight)': pgmpy_hc_nonlinear_dict_scores["knn"],
'PGMPY K Nearest Neighbor (HC-distance)': pgmpy_hc_nonlinear_dict_scores["knn_d"],
'PGMPY K Nearest Neighbor (MMHC-weight)': pgmpy_mmhc_nonlinear_dict_scores["knn"],
'PGMPY K Nearest Neighbor (MMHC-distance)': pgmpy_mmhc_nonlinear_dict_scores["knn_d"],
'PGMPY K Nearest Neighbor (TREE-weight)': pgmpy_tree_nonlinear_dict_scores["knn"],
'PGMPY K Nearest Neighbor (TREE-distance)': pgmpy_tree_nonlinear_dict_scores["knn_d"]}
top_learned_nonlinear = max(sim_nonlinear_workflows, key=sim_nonlinear_workflows.get)
print("Learned world - Nonlinear problem, Prediction: "+ top_learned_nonlinear + " (" + str(sim_nonlinear_workflows[top_learned_nonlinear]) + ")")
real_sparse_workflows = {'Decision Tree (gini)': real_sparse_dt_scores,
'Decision Tree (entropy)': real_sparse_dt_entropy_scores,
'Random Forest (gini)': real_sparse_rf_scores,
'Random Forest (entropy)': real_sparse_rf_entropy_scores,
'Logistic Regression (none)': real_sparse_lr_scores,
'Logistic Regression (l1)': real_sparse_lr_l1_scores,
'Logistic Regression (l2)': real_sparse_lr_l2_scores,
'Logistic Regression (elasticnet)': real_sparse_lr_elastic_scores,
'Naive Bayes (bernoulli)': real_sparse_gb_scores,
'Naive Bayes (multinomial)': real_sparse_gb_multi_scores,
'Naive Bayes (gaussian)': real_sparse_gb_gaussian_scores,
'Naive Bayes (complement)': real_sparse_gb_complement_scores,
'Support Vector Machine (sigmoid)': real_sparse_svm_scores,
'Support Vector Machine (polynomial)': real_sparse_svm_poly_scores,
'Support Vector Machine (rbf)': real_sparse_svm_rbf_scores,
'K Nearest Neighbor (uniform)': real_sparse_knn_scores,
'K Nearest Neighbor (distance)': real_sparse_knn_distance_scores}
top_real_sparse = max(real_sparse_workflows, key=real_sparse_workflows.get)
print("Real world - Sparse problem, Prediction: "+ top_real_sparse + " (" + str(real_sparse_workflows[top_real_sparse]) + ")")
sim_sparse_workflows = {'BN Decision Tree (HC-gini)': bnlearn_sparse_dict_scores["dt"],
'BN Decision Tree (HC-entropy)': bnlearn_sparse_dict_scores["dt_e"],
'BN Decision Tree (TABU-gini)': bnlearn_tabu_sparse_dict_scores["dt"],
'BN Decision Tree (TABU-entropy)': bnlearn_tabu_sparse_dict_scores["dt_e"],
#'BN Decision Tree (PC-gini)': bnlearn_pc_sparse_dict_scores["dt"],
#'BN Decision Tree (PC-entropy)': bnlearn_pc_sparse_dict_scores["dt_e"],
'BN Decision Tree (MMHC-gini)': bnlearn_mmhc_sparse_dict_scores["dt"],
'BN Decision Tree (MMHC-entropy)': bnlearn_mmhc_sparse_dict_scores["dt_e"],
'BN Decision Tree (RSMAX2-gini)': bnlearn_rsmax2_sparse_dict_scores["dt"],
'BN Decision Tree (RSMAX2-entropy)': bnlearn_rsmax2_sparse_dict_scores["dt_e"],
'BN Decision Tree (H2PC-gini)': bnlearn_h2pc_sparse_dict_scores["dt"],
'BN Decision Tree (H2PC-entropy)': bnlearn_h2pc_sparse_dict_scores["dt_e"],
'NT Decision Tree (Logistic-gini)': notears_sparse_dict_scores["dt"],
'NT Decision Tree (Logistic-entropy)': notears_sparse_dict_scores["dt_e"],
'NT Decision Tree (L2-gini)': notears_l2_sparse_dict_scores["dt"],
'NT Decision Tree (L2-entropy)': notears_l2_sparse_dict_scores["dt_e"],
'NT Decision Tree (Poisson-gini)': notears_poisson_sparse_dict_scores["dt"],
'NT Decision Tree (Poisson-entropy)': notears_poisson_sparse_dict_scores["dt_e"],
'POMEGRANATE Decision Tree (Exact-gini)': pomegranate_exact_sparse_dict_scores["dt"],
'POMEGRANATE Decision Tree (Exact-entropy)': pomegranate_exact_sparse_dict_scores["dt_e"],
'POMEGRANATE Decision Tree (Greedy-gini)': pomegranate_greedy_sparse_dict_scores["dt"],
'POMEGRANATE Decision Tree (Greedy-entropy)': pomegranate_greedy_sparse_dict_scores["dt_e"],
'PGMPY Decision Tree (HC-gini)': pgmpy_hc_sparse_dict_scores["dt"],
'PGMPY Decision Tree (HC-entropy)': pgmpy_hc_sparse_dict_scores["dt_e"],
'PGMPY Decision Tree (MMHC-gini)': pgmpy_mmhc_sparse_dict_scores["dt"],
'PGMPY Decision Tree (MMHC-entropy)': pgmpy_mmhc_sparse_dict_scores["dt_e"],
'PGMPY Decision Tree (TREE-gini)': pgmpy_tree_sparse_dict_scores["dt"],
'PGMPY Decision Tree (TREE-entropy)': pgmpy_tree_sparse_dict_scores["dt_e"],
'BN Random Forest (HC-gini)': bnlearn_sparse_dict_scores["rf"],
'BN Random Forest (HC-entropy)': bnlearn_sparse_dict_scores["rf_e"],
'BN Random Forest (TABU-gini)': bnlearn_tabu_sparse_dict_scores["rf"],
'BN Random Forest (TABU-entropy)': bnlearn_tabu_sparse_dict_scores["rf_e"],
#'BN Random Forest (PC-gini)': bnlearn_pc_sparse_dict_scores["rf"],
#'BN Random Forest (PC-entropy)': bnlearn_pc_sparse_dict_scores["rf_e"],
'BN Random Forest (MMHC-gini)': bnlearn_mmhc_sparse_dict_scores["rf"],
'BN Random Forest (MMHC-entropy)': bnlearn_mmhc_sparse_dict_scores["rf_e"],
'BN Random Forest (RSMAX2-gini)': bnlearn_rsmax2_sparse_dict_scores["rf"],
'BN Random Forest (RSMAX2-entropy)': bnlearn_rsmax2_sparse_dict_scores["rf_e"],
'BN Random Forest (H2PC-gini)': bnlearn_h2pc_sparse_dict_scores["rf"],
'BN Random Forest (H2PC-entropy)': bnlearn_h2pc_sparse_dict_scores["rf_e"],
'NT Random Forest (Logistic-gini)': notears_sparse_dict_scores["rf"],
'NT Random Forest (Logistic-entropy)': notears_sparse_dict_scores["rf_e"],
'NT Random Forest (L2-gini)': notears_l2_sparse_dict_scores["rf"],
'NT Random Forest (L2-entropy)': notears_l2_sparse_dict_scores["rf_e"],
'NT Random Forest (Poisson-gini)': notears_poisson_sparse_dict_scores["rf"],
'NT Random Forest (Poisson-entropy)': notears_poisson_sparse_dict_scores["rf_e"],
'POMEGRANATE Random Forest (Exact-gini)': pomegranate_exact_sparse_dict_scores["rf"],
'POMEGRANATE Random Forest (Exact-entropy)': pomegranate_exact_sparse_dict_scores["rf_e"],
'POMEGRANATE Random Forest (Greedy-gini)': pomegranate_greedy_sparse_dict_scores["rf"],
'POMEGRANATE Random Forest (Greedy-entropy)': pomegranate_greedy_sparse_dict_scores["rf_e"],
'PGMPY Random Forest (HC-gini)': pgmpy_hc_sparse_dict_scores["rf"],
'PGMPY Random Forest (HC-entropy)': pgmpy_hc_sparse_dict_scores["rf_e"],
'PGMPY Random Forest (MMHC-gini)': pgmpy_mmhc_sparse_dict_scores["rf"],
'PGMPY Random Forest (MMHC-entropy)': pgmpy_mmhc_sparse_dict_scores["rf_e"],
'PGMPY Random Forest (TREE-gini)': pgmpy_tree_sparse_dict_scores["rf"],
'PGMPY Random Forest (TREE-entropy)': pgmpy_tree_sparse_dict_scores["rf_e"],
'BN Logistic Regression (HC-none)': bnlearn_sparse_dict_scores["lr"],
'BN Logistic Regression (HC-l1)': bnlearn_sparse_dict_scores["lr_l1"],
'BN Logistic Regression (HC-l2)': bnlearn_sparse_dict_scores["lr_l2"],
'BN Logistic Regression (HC-elastic)': bnlearn_sparse_dict_scores["lr_e"],
'BN Logistic Regression (TABU-none)': bnlearn_tabu_sparse_dict_scores["lr"],
'BN Logistic Regression (TABU-l1)': bnlearn_tabu_sparse_dict_scores["lr_l1"],
'BN Logistic Regression (TABU-l2)': bnlearn_tabu_sparse_dict_scores["lr_l2"],
'BN Logistic Regression (TABU-elastic)': bnlearn_tabu_sparse_dict_scores["lr_e"],
#'BN Logistic Regression (PC-none)': bnlearn_pc_sparse_dict_scores["lr"],
#'BN Logistic Regression (PC-l1)': bnlearn_pc_sparse_dict_scores["lr_l1"],
#'BN Logistic Regression (PC-l2)': bnlearn_pc_sparse_dict_scores["lr_l2"],
#'BN Logistic Regression (PC-elastic)': bnlearn_pc_sparse_dict_scores["lr_e"],
'BN Logistic Regression (MMHC-none)': bnlearn_mmhc_sparse_dict_scores["lr"],
'BN Logistic Regression (MMHC-l1)': bnlearn_mmhc_sparse_dict_scores["lr_l1"],
'BN Logistic Regression (MMHC-l2)': bnlearn_mmhc_sparse_dict_scores["lr_l2"],
'BN Logistic Regression (MMHC-elastic)': bnlearn_mmhc_sparse_dict_scores["lr_e"],
'BN Logistic Regression (RSMAX2-none)': bnlearn_rsmax2_sparse_dict_scores["lr"],
'BN Logistic Regression (RSMAX2-l1)': bnlearn_rsmax2_sparse_dict_scores["lr_l1"],
'BN Logistic Regression (RSMAX2-l2)': bnlearn_rsmax2_sparse_dict_scores["lr_l2"],
'BN Logistic Regression (RSMAX2-elastic)': bnlearn_rsmax2_sparse_dict_scores["lr_e"],
'BN Logistic Regression (H2PC-none)': bnlearn_h2pc_sparse_dict_scores["lr"],
'BN Logistic Regression (H2PC-l1)': bnlearn_h2pc_sparse_dict_scores["lr_l1"],
'BN Logistic Regression (H2PC-l2)': bnlearn_h2pc_sparse_dict_scores["lr_l2"],
'BN Logistic Regression (H2PC-elastic)': bnlearn_h2pc_sparse_dict_scores["lr_e"],
'POMEGRANATE Logistic Regression (Exact-none)': pomegranate_exact_sparse_dict_scores["lr"],
'POMEGRANATE Logistic Regression (Exact-l1)': pomegranate_exact_sparse_dict_scores["lr_l1"],
'POMEGRANATE Logistic Regression (Exact-l2)': pomegranate_exact_sparse_dict_scores["lr_l2"],
'POMEGRANATE Logistic Regression (Exact-elastic)': pomegranate_exact_sparse_dict_scores[
"lr_e"],
'POMEGRANATE Logistic Regression (Greedy-none)': pomegranate_greedy_sparse_dict_scores[
"lr"],
'POMEGRANATE Logistic Regression (Greedy-l1)': pomegranate_greedy_sparse_dict_scores[
"lr_l1"],
'POMEGRANATE Logistic Regression (Greedy-l2)': pomegranate_greedy_sparse_dict_scores[
"lr_l2"],
'POMEGRANATE Logistic Regression (Greedy-elastic)': pomegranate_greedy_sparse_dict_scores["lr_e"],
'PGMPY Logistic Regression (HC-none)': pgmpy_hc_sparse_dict_scores["lr"],
'PGMPY Logistic Regression (HC-l1)': pgmpy_hc_sparse_dict_scores["lr_l1"],
'PGMPY Logistic Regression (HC-l2)': pgmpy_hc_sparse_dict_scores["lr_l2"],
'PGMPY Logistic Regression (HC-elastic)': pgmpy_hc_sparse_dict_scores["lr_e"],
'PGMPY Logistic Regression (TREE-none)': pgmpy_tree_sparse_dict_scores["lr"],
'PGMPY Logistic Regression (TREE-l1)': pgmpy_tree_sparse_dict_scores["lr_l1"],
'PGMPY Logistic Regression (TREE-l2)': pgmpy_tree_sparse_dict_scores["lr_l2"],
'PGMPY Logistic Regression (TREE-elastic)': pgmpy_tree_sparse_dict_scores["lr_e"],
'PGMPY Logistic Regression (MMHC-none)': pgmpy_mmhc_sparse_dict_scores["lr"],
'PGMPY Logistic Regression (MMHC-l1)': pgmpy_mmhc_sparse_dict_scores["lr_l1"],
'PGMPY Logistic Regression (MMHC-l2)': pgmpy_mmhc_sparse_dict_scores["lr_l2"],
'PGMPY Logistic Regression (MMHC-elastic)': pgmpy_mmhc_sparse_dict_scores["lr_e"],
'NT Logistic Regression (Logistic-none)': notears_sparse_dict_scores["lr"],
'NT Logistic Regression (Logistic-l1)': notears_sparse_dict_scores["lr_l1"],
'NT Logistic Regression (Logistic-l2)': notears_sparse_dict_scores["lr_l2"],
'NT Logistic Regression (Logistic-elastic)': notears_sparse_dict_scores["lr_e"],
'NT Logistic Regression (L2-none)': notears_l2_sparse_dict_scores["lr"],
'NT Logistic Regression (L2-l1)': notears_l2_sparse_dict_scores["lr_l1"],
'NT Logistic Regression (L2-l2)': notears_l2_sparse_dict_scores["lr_l2"],
'NT Logistic Regression (L2-elastic)': notears_l2_sparse_dict_scores["lr_e"],
'NT Logistic Regression (Poisson-none)': notears_poisson_sparse_dict_scores["lr"],
'NT Logistic Regression (Poisson-l1)': notears_poisson_sparse_dict_scores["lr_l1"],
'NT Logistic Regression (Poisson-l2)': notears_poisson_sparse_dict_scores["lr_l2"],
'NT Logistic Regression (Poisson-elastic)': notears_poisson_sparse_dict_scores["lr_e"],
'BN Naive Bayes (HC-bernoulli)': bnlearn_sparse_dict_scores["nb"],
'BN Naive Bayes (HC-gaussian)': bnlearn_sparse_dict_scores["nb_g"],
'BN Naive Bayes (HC-multinomial)': bnlearn_sparse_dict_scores["nb_m"],
'BN Naive Bayes (HC-complement)': bnlearn_sparse_dict_scores["nb_c"],
'BN Naive Bayes (TABU-bernoulli)': bnlearn_tabu_sparse_dict_scores["nb"],
'BN Naive Bayes (TABU-gaussian)': bnlearn_tabu_sparse_dict_scores["nb_g"],
'BN Naive Bayes (TABU-multinomial)': bnlearn_tabu_sparse_dict_scores["nb_m"],
'BN Naive Bayes (TABU-complement)': bnlearn_tabu_sparse_dict_scores["nb_c"],
#'BN Naive Bayes (PC-bernoulli)': bnlearn_pc_sparse_dict_scores["nb"],
#'BN Naive Bayes (PC-gaussian)': bnlearn_pc_sparse_dict_scores["nb_g"],
#'BN Naive Bayes (PC-multinomial)': bnlearn_pc_sparse_dict_scores["nb_m"],
#'BN Naive Bayes (PC-complement)': bnlearn_pc_sparse_dict_scores["nb_c"],
'BN Naive Bayes (MMHC-bernoulli)': bnlearn_mmhc_sparse_dict_scores["nb"],
'BN Naive Bayes (MMHC-gaussian)': bnlearn_mmhc_sparse_dict_scores["nb_g"],
'BN Naive Bayes (MMHC-multinomial)': bnlearn_mmhc_sparse_dict_scores["nb_m"],
'BN Naive Bayes (MMHC-complement)': bnlearn_mmhc_sparse_dict_scores["nb_c"],
'BN Naive Bayes (RSMAX2-bernoulli)': bnlearn_rsmax2_sparse_dict_scores["nb"],
'BN Naive Bayes (RSMAX2-gaussian)': bnlearn_rsmax2_sparse_dict_scores["nb_g"],
'BN Naive Bayes (RSMAX2-multinomial)': bnlearn_rsmax2_sparse_dict_scores["nb_m"],
'BN Naive Bayes (RSMAX2-complement)': bnlearn_rsmax2_sparse_dict_scores["nb_c"],
'BN Naive Bayes (H2PC-bernoulli)': bnlearn_h2pc_sparse_dict_scores["nb"],
'BN Naive Bayes (H2PC-gaussian)': bnlearn_h2pc_sparse_dict_scores["nb_g"],
'BN Naive Bayes (H2PC-multinomial)': bnlearn_h2pc_sparse_dict_scores["nb_m"],
'BN Naive Bayes (H2PC-complement)': bnlearn_h2pc_sparse_dict_scores["nb_c"],
'NT Naive Bayes (Logistic-bernoulli)': notears_sparse_dict_scores["nb"],
'NT Naive Bayes (Logistic-gaussian)': notears_sparse_dict_scores["nb_g"],
'NT Naive Bayes (Logistic-multinomial)': notears_sparse_dict_scores["nb_m"],
'NT Naive Bayes (Logistic-complement)': notears_sparse_dict_scores["nb_c"],
'NT Naive Bayes (L2-bernoulli)': notears_l2_sparse_dict_scores["nb"],
'NT Naive Bayes (L2-gaussian)': notears_l2_sparse_dict_scores["nb_g"],
'NT Naive Bayes (L2-multinomial)': notears_l2_sparse_dict_scores["nb_m"],
'NT Naive Bayes (L2-complement)': notears_l2_sparse_dict_scores["nb_c"],
'NT Naive Bayes (Poisson-bernoulli)': notears_poisson_sparse_dict_scores["nb"],
'NT Naive Bayes (Poisson-gaussian)': notears_poisson_sparse_dict_scores["nb_g"],
'NT Naive Bayes (Poisson-multinomial)': notears_poisson_sparse_dict_scores["nb_m"],
'NT Naive Bayes (Poisson-complement)': notears_poisson_sparse_dict_scores["nb_c"],
'POMEGRANATE Naive Bayes (Greedy-bernoulli)': pomegranate_greedy_sparse_dict_scores["nb"],
'POMEGRANATE Naive Bayes (Greedy-gaussian)': pomegranate_greedy_sparse_dict_scores["nb_g"],
'POMEGRANATE Naive Bayes (Greedy-multinomial)': pomegranate_greedy_sparse_dict_scores[
"nb_m"],
'POMEGRANATE Naive Bayes (Greedy-complement)': pomegranate_greedy_sparse_dict_scores[
"nb_c"],
'POMEGRANATE Naive Bayes (Exact-bernoulli)': pomegranate_exact_sparse_dict_scores["nb"],
'POMEGRANATE Naive Bayes (Exact-gaussian)': pomegranate_exact_sparse_dict_scores["nb_g"],
'POMEGRANATE Naive Bayes (Exact-multinomial)': pomegranate_exact_sparse_dict_scores["nb_m"],
'POMEGRANATE Naive Bayes (Exact-complement)': pomegranate_exact_sparse_dict_scores["nb_c"],
'PGMPY Naive Bayes (HC-bernoulli)': pgmpy_hc_sparse_dict_scores["nb"],
'PGMPY Naive Bayes (HC-gaussian)': pgmpy_hc_sparse_dict_scores["nb_g"],
'PGMPY Naive Bayes (HC-multinomial)': pgmpy_hc_sparse_dict_scores["nb_m"],
'PGMPY Naive Bayes (HC-complement)': pgmpy_hc_sparse_dict_scores["nb_c"],
'PGMPY Naive Bayes (MMHC-bernoulli)': pgmpy_mmhc_sparse_dict_scores["nb"],
'PGMPY Naive Bayes (MMHC-gaussian)': pgmpy_mmhc_sparse_dict_scores["nb_g"],
'PGMPY Naive Bayes (MMHC-multinomial)': pgmpy_mmhc_sparse_dict_scores["nb_m"],
'PGMPY Naive Bayes (MMHC-complement)': pgmpy_mmhc_sparse_dict_scores["nb_c"],
'PGMPY Naive Bayes (TREE-bernoulli)': pgmpy_tree_sparse_dict_scores["nb"],
'PGMPY Naive Bayes (TREE-gaussian)': pgmpy_tree_sparse_dict_scores["nb_g"],
'PGMPY Naive Bayes (TREE-multinomial)': pgmpy_tree_sparse_dict_scores["nb_m"],
'PGMPY Naive Bayes (TREE-complement)': pgmpy_tree_sparse_dict_scores["nb_c"],
'BN Support Vector Machine (HC-sigmoid)': bnlearn_sparse_dict_scores["svm"],
'BN Support Vector Machine (HC-polynomial)': bnlearn_sparse_dict_scores["svm_po"],
'BN Support Vector Machine (HC-rbf)': bnlearn_sparse_dict_scores["svm_r"],
'BN Support Vector Machine (TABU-sigmoid)': bnlearn_tabu_sparse_dict_scores["svm"],
'BN Support Vector Machine (TABU-polynomial)': bnlearn_tabu_sparse_dict_scores["svm_po"],
'BN Support Vector Machine (TABU-rbf)': bnlearn_tabu_sparse_dict_scores["svm_r"],
#'BN Support Vector Machine (PC-sigmoid)': bnlearn_pc_sparse_dict_scores["svm"],
#'BN Support Vector Machine (PC-polynomial)': bnlearn_pc_sparse_dict_scores["svm_po"],
#'BN Support Vector Machine (PC-rbf)': bnlearn_pc_sparse_dict_scores["svm_r"],
'BN Support Vector Machine (MMHC-sigmoid)': bnlearn_mmhc_sparse_dict_scores["svm"],
'BN Support Vector Machine (MMHC-polynomial)': bnlearn_mmhc_sparse_dict_scores["svm_po"],
'BN Support Vector Machine (MMHC-rbf)': bnlearn_mmhc_sparse_dict_scores["svm_r"],
'BN Support Vector Machine (RSMAX2-sigmoid)': bnlearn_rsmax2_sparse_dict_scores["svm"],
'BN Support Vector Machine (RSMAX2-polynomial)': bnlearn_rsmax2_sparse_dict_scores[
"svm_po"],
'BN Support Vector Machine (RSMAX2-rbf)': bnlearn_rsmax2_sparse_dict_scores["svm_r"],
'BN Support Vector Machine (H2PC-sigmoid)': bnlearn_h2pc_sparse_dict_scores["svm"],
'BN Support Vector Machine (H2PC-polynomial)': bnlearn_h2pc_sparse_dict_scores["svm_po"],
'BN Support Vector Machine (H2PC-rbf)': bnlearn_h2pc_sparse_dict_scores["svm_r"],
'NT Support Vector Machine (Logistic-sigmoid)': notears_sparse_dict_scores["svm"],
'NT Support Vector Machine (Logistic-polynomial)': notears_sparse_dict_scores["svm_po"],
'NT Support Vector Machine (Logistic-rbf)': notears_sparse_dict_scores["svm_r"],
'NT Support Vector Machine (L2-sigmoid)': notears_l2_sparse_dict_scores["svm"],
'NT Support Vector Machine (L2-polynomial)': notears_l2_sparse_dict_scores["svm_po"],
'NT Support Vector Machine (L2-rbf)': notears_l2_sparse_dict_scores["svm_r"],
'NT Support Vector Machine (Poisson-sigmoid)': notears_poisson_sparse_dict_scores["svm"],
'NT Support Vector Machine (Poisson-polynomial)': notears_poisson_sparse_dict_scores[
"svm_po"],
'NT Support Vector Machine (Poisson-rbf)': notears_poisson_sparse_dict_scores["svm_r"],
'POMEGRANATE Support Vector Machine (Exact-sigmoid)': pomegranate_exact_sparse_dict_scores["svm"],
'POMEGRANATE Support Vector Machine (Exact-polynomial)': pomegranate_exact_sparse_dict_scores["svm_po"],
'POMEGRANATE Support Vector Machine (Exact-rbf)': pomegranate_exact_sparse_dict_scores["svm_r"],
'POMEGRANATE Support Vector Machine (Greedy-sigmoid)': pomegranate_greedy_sparse_dict_scores["svm"],
'POMEGRANATE Support Vector Machine (Greedy-polynomial)': pomegranate_greedy_sparse_dict_scores["svm_po"],
'POMEGRANATE Support Vector Machine (Greedy-rbf)': pomegranate_greedy_sparse_dict_scores["svm_r"],
'PGMPY Support Vector Machine (HC-sigmoid)': pgmpy_hc_sparse_dict_scores["svm"],
'PGMPY Support Vector Machine (HC-polynomial)': pgmpy_hc_sparse_dict_scores["svm_po"],
'PGMPY Support Vector Machine (HC-rbf)': pgmpy_hc_sparse_dict_scores["svm_r"],
'PGMPY Support Vector Machine (MMHC-sigmoid)': pgmpy_mmhc_sparse_dict_scores["svm"],
'PGMPY Support Vector Machine (MMHC-polynomial)': pgmpy_mmhc_sparse_dict_scores["svm_po"],
'PGMPY Support Vector Machine (MMHC-rbf)': pgmpy_mmhc_sparse_dict_scores["svm_r"],
'PGMPY Support Vector Machine (TREE-sigmoid)': pgmpy_tree_sparse_dict_scores["svm"],
'PGMPY Support Vector Machine (TREE-polynomial)': pgmpy_tree_sparse_dict_scores["svm_po"],
'PGMPY Support Vector Machine (TREE-rbf)': pgmpy_tree_sparse_dict_scores["svm_r"],
'BN K Nearest Neighbor (HC-weight)': bnlearn_sparse_dict_scores["knn"],
'BN K Nearest Neighbor (HC-distance)': bnlearn_sparse_dict_scores["knn_d"],
'BN K Nearest Neighbor (TABU-weight)': bnlearn_tabu_sparse_dict_scores["knn"],
'BN K Nearest Neighbor (TABU-distance)': bnlearn_tabu_sparse_dict_scores["knn_d"],
#'BN K Nearest Neighbor (PC-weight)': bnlearn_pc_sparse_dict_scores["knn"],
#'BN K Nearest Neighbor (PC-distance)': bnlearn_pc_sparse_dict_scores["knn_d"],
'BN K Nearest Neighbor (MMHC-weight)': bnlearn_mmhc_sparse_dict_scores["knn"],
'BN K Nearest Neighbor (MMHC-distance)': bnlearn_mmhc_sparse_dict_scores["knn_d"],
'BN K Nearest Neighbor (RSMAX2-weight)': bnlearn_rsmax2_sparse_dict_scores["knn"],
'BN K Nearest Neighbor (RSMAX2-distance)': bnlearn_rsmax2_sparse_dict_scores["knn_d"],
'BN K Nearest Neighbor (H2PC-weight)': bnlearn_h2pc_sparse_dict_scores["knn"],
'BN K Nearest Neighbor (H2PC-distance)': bnlearn_h2pc_sparse_dict_scores["knn_d"],
'NT K Nearest Neighbor (Logistic-weight)': notears_sparse_dict_scores["knn"],
'NT K Nearest Neighbor (Logistic-distance)': notears_sparse_dict_scores["knn_d"],
'NT K Nearest Neighbor (L2-weight)': notears_l2_sparse_dict_scores["knn"],
'NT K Nearest Neighbor (L2-distance)': notears_l2_sparse_dict_scores["knn_d"],
'NT K Nearest Neighbor (Poisson-weight)': notears_poisson_sparse_dict_scores["knn"],
'NT K Nearest Neighbor (Poisson-distance)': notears_poisson_sparse_dict_scores["knn_d"],
'POMEGRANATE K Nearest Neighbor (Exact-weight)': pomegranate_exact_sparse_dict_scores[
"knn"],
'POMEGRANATE K Nearest Neighbor (Exact-distance)': pomegranate_exact_sparse_dict_scores[
"knn_d"],
'POMEGRANATE K Nearest Neighbor (Greedy-weight)': pomegranate_greedy_sparse_dict_scores[
"knn"],
'POMEGRANATE K Nearest Neighbor (Greedy-distance)': pomegranate_greedy_sparse_dict_scores["knn_d"],
'PGMPY K Nearest Neighbor (HC-weight)': pgmpy_hc_sparse_dict_scores["knn"],
'PGMPY K Nearest Neighbor (HC-distance)': pgmpy_hc_sparse_dict_scores["knn_d"],
'PGMPY K Nearest Neighbor (MMHC-weight)': pgmpy_mmhc_sparse_dict_scores["knn"],
'PGMPY K Nearest Neighbor (MMHC-distance)': pgmpy_mmhc_sparse_dict_scores["knn_d"],
'PGMPY K Nearest Neighbor (TREE-weight)': pgmpy_tree_sparse_dict_scores["knn"],
'PGMPY K Nearest Neighbor (TREE-distance)': pgmpy_tree_sparse_dict_scores["knn_d"]}
top_learned_sparse = max(sim_sparse_workflows, key=sim_sparse_workflows.get)
print("Learned world - Sparse problem, Prediction: "+ top_learned_sparse + " (" + str(sim_sparse_workflows[top_learned_sparse]) + ")")
real_dimension_workflows = {'Decision Tree (gini)': real_dimension_dt_scores,
'Decision Tree (entropy)': real_dimension_dt_entropy_scores,
'Random Forest (gini)': real_dimension_rf_scores,
'Random Forest (entropy)': real_dimension_rf_entropy_scores,
'Logistic Regression (none)': real_dimension_lr_scores,
'Logistic Regression (l1)': real_dimension_lr_l1_scores,
'Logistic Regression (l2)': real_dimension_lr_l2_scores,
'Logistic Regression (elasticnet)': real_dimension_lr_elastic_scores,
'Naive Bayes (bernoulli)': real_dimension_gb_scores,
'Naive Bayes (multinomial)': real_dimension_gb_multi_scores,
'Naive Bayes (gaussian)': real_dimension_gb_gaussian_scores,
'Naive Bayes (complement)': real_dimension_gb_complement_scores,
'Support Vector Machine (sigmoid)': real_dimension_svm_scores,
'Support Vector Machine (polynomial)': real_dimension_svm_poly_scores,
'Support Vector Machine (rbf)': real_dimension_svm_rbf_scores,
'K Nearest Neighbor (uniform)': real_dimension_knn_scores,
'K Nearest Neighbor (distance)': real_dimension_knn_distance_scores}
top_real_dimension = max(real_dimension_workflows, key=real_dimension_workflows.get)
print("Real world - Dimensional problem, Prediction: "+ top_real_dimension + " (" + str(real_dimension_workflows[top_real_dimension]) + ")")
sim_dimension_workflows = {'BN Decision Tree (HC-gini)': bnlearn_dimension_dict_scores["dt"],
'BN Decision Tree (HC-entropy)': bnlearn_dimension_dict_scores["dt_e"],
'BN Decision Tree (TABU-gini)': bnlearn_tabu_dimension_dict_scores["dt"],
'BN Decision Tree (TABU-entropy)': bnlearn_tabu_dimension_dict_scores["dt_e"],
#'BN Decision Tree (PC-gini)': bnlearn_pc_dimension_dict_scores["dt"],
#'BN Decision Tree (PC-entropy)': bnlearn_pc_dimension_dict_scores["dt_e"],
'BN Decision Tree (MMHC-gini)': bnlearn_mmhc_dimension_dict_scores["dt"],
'BN Decision Tree (MMHC-entropy)': bnlearn_mmhc_dimension_dict_scores["dt_e"],
'BN Decision Tree (RSMAX2-gini)': bnlearn_rsmax2_dimension_dict_scores["dt"],
'BN Decision Tree (RSMAX2-entropy)': bnlearn_rsmax2_dimension_dict_scores["dt_e"],
'BN Decision Tree (H2PC-gini)': bnlearn_h2pc_dimension_dict_scores["dt"],
'BN Decision Tree (H2PC-entropy)': bnlearn_h2pc_dimension_dict_scores["dt_e"],
'NT Decision Tree (Logistic-gini)': notears_dimension_dict_scores["dt"],
'NT Decision Tree (Logistic-entropy)': notears_dimension_dict_scores["dt_e"],
'NT Decision Tree (L2-gini)': notears_l2_dimension_dict_scores["dt"],
'NT Decision Tree (L2-entropy)': notears_l2_dimension_dict_scores["dt_e"],
'NT Decision Tree (Poisson-gini)': notears_poisson_dimension_dict_scores["dt"],
'NT Decision Tree (Poisson-entropy)': notears_poisson_dimension_dict_scores["dt_e"],
'POMEGRANATE Decision Tree (Exact-gini)': pomegranate_exact_dimension_dict_scores["dt"],
'POMEGRANATE Decision Tree (Exact-entropy)': pomegranate_exact_dimension_dict_scores["dt_e"],
'POMEGRANATE Decision Tree (Greedy-gini)': pomegranate_greedy_dimension_dict_scores["dt"],
'POMEGRANATE Decision Tree (Greedy-entropy)': pomegranate_greedy_dimension_dict_scores["dt_e"],
'PGMPY Decision Tree (HC-gini)': pgmpy_hc_dimension_dict_scores["dt"],
'PGMPY Decision Tree (HC-entropy)': pgmpy_hc_dimension_dict_scores["dt_e"],
'PGMPY Decision Tree (MMHC-gini)': pgmpy_mmhc_dimension_dict_scores["dt"],
'PGMPY Decision Tree (MMHC-entropy)': pgmpy_mmhc_dimension_dict_scores["dt_e"],
'PGMPY Decision Tree (TREE-gini)': pgmpy_tree_dimension_dict_scores["dt"],
'PGMPY Decision Tree (TREE-entropy)': pgmpy_tree_dimension_dict_scores["dt_e"],
'BN Random Forest (HC-gini)': bnlearn_dimension_dict_scores["rf"],
'BN Random Forest (HC-entropy)': bnlearn_dimension_dict_scores["rf_e"],
'BN Random Forest (TABU-gini)': bnlearn_tabu_dimension_dict_scores["rf"],
'BN Random Forest (TABU-entropy)': bnlearn_tabu_dimension_dict_scores["rf_e"],
#'BN Random Forest (PC-gini)': bnlearn_pc_dimension_dict_scores["rf"],
#'BN Random Forest (PC-entropy)': bnlearn_pc_dimension_dict_scores["rf_e"],
'BN Random Forest (MMHC-gini)': bnlearn_mmhc_dimension_dict_scores["rf"],
'BN Random Forest (MMHC-entropy)': bnlearn_mmhc_dimension_dict_scores["rf_e"],
'BN Random Forest (RSMAX2-gini)': bnlearn_rsmax2_dimension_dict_scores["rf"],
'BN Random Forest (RSMAX2-entropy)': bnlearn_rsmax2_dimension_dict_scores["rf_e"],
'BN Random Forest (H2PC-gini)': bnlearn_h2pc_dimension_dict_scores["rf"],
'BN Random Forest (H2PC-entropy)': bnlearn_h2pc_dimension_dict_scores["rf_e"],
'NT Random Forest (Logistic-gini)': notears_dimension_dict_scores["rf"],
'NT Random Forest (Logistic-entropy)': notears_dimension_dict_scores["rf_e"],
'NT Random Forest (L2-gini)': notears_l2_dimension_dict_scores["rf"],
'NT Random Forest (L2-entropy)': notears_l2_dimension_dict_scores["rf_e"],
'NT Random Forest (Poisson-gini)': notears_poisson_dimension_dict_scores["rf"],
'NT Random Forest (Poisson-entropy)': notears_poisson_dimension_dict_scores["rf_e"],
'POMEGRANATE Random Forest (Exact-gini)': pomegranate_exact_dimension_dict_scores["rf"],
'POMEGRANATE Random Forest (Exact-entropy)': pomegranate_exact_dimension_dict_scores["rf_e"],
'POMEGRANATE Random Forest (Greedy-gini)': pomegranate_greedy_dimension_dict_scores["rf"],
'POMEGRANATE Random Forest (Greedy-entropy)': pomegranate_greedy_dimension_dict_scores["rf_e"],
'PGMPY Random Forest (HC-gini)': pgmpy_hc_dimension_dict_scores["rf"],
'PGMPY Random Forest (HC-entropy)': pgmpy_hc_dimension_dict_scores["rf_e"],
'PGMPY Random Forest (MMHC-gini)': pgmpy_mmhc_dimension_dict_scores["rf"],
'PGMPY Random Forest (MMHC-entropy)': pgmpy_mmhc_dimension_dict_scores["rf_e"],
'PGMPY Random Forest (TREE-gini)': pgmpy_tree_dimension_dict_scores["rf"],
'PGMPY Random Forest (TREE-entropy)': pgmpy_tree_dimension_dict_scores["rf_e"],
'BN Logistic Regression (HC-none)': bnlearn_dimension_dict_scores["lr"],
'BN Logistic Regression (HC-l1)': bnlearn_dimension_dict_scores["lr_l1"],
'BN Logistic Regression (HC-l2)': bnlearn_dimension_dict_scores["lr_l2"],
'BN Logistic Regression (HC-elastic)': bnlearn_dimension_dict_scores["lr_e"],
'BN Logistic Regression (TABU-none)': bnlearn_tabu_dimension_dict_scores["lr"],
'BN Logistic Regression (TABU-l1)': bnlearn_tabu_dimension_dict_scores["lr_l1"],
'BN Logistic Regression (TABU-l2)': bnlearn_tabu_dimension_dict_scores["lr_l2"],
'BN Logistic Regression (TABU-elastic)': bnlearn_tabu_dimension_dict_scores["lr_e"],
#'BN Logistic Regression (PC-none)': bnlearn_pc_dimension_dict_scores["lr"],
#'BN Logistic Regression (PC-l1)': bnlearn_pc_dimension_dict_scores["lr_l1"],
#'BN Logistic Regression (PC-l2)': bnlearn_pc_dimension_dict_scores["lr_l2"],
#'BN Logistic Regression (PC-elastic)': bnlearn_pc_dimension_dict_scores["lr_e"],
'BN Logistic Regression (MMHC-none)': bnlearn_mmhc_dimension_dict_scores["lr"],
'BN Logistic Regression (MMHC-l1)': bnlearn_mmhc_dimension_dict_scores["lr_l1"],
'BN Logistic Regression (MMHC-l2)': bnlearn_mmhc_dimension_dict_scores["lr_l2"],
'BN Logistic Regression (MMHC-elastic)': bnlearn_mmhc_dimension_dict_scores["lr_e"],
'BN Logistic Regression (RSMAX2-none)': bnlearn_rsmax2_dimension_dict_scores["lr"],
'BN Logistic Regression (RSMAX2-l1)': bnlearn_rsmax2_dimension_dict_scores["lr_l1"],
'BN Logistic Regression (RSMAX2-l2)': bnlearn_rsmax2_dimension_dict_scores["lr_l2"],
'BN Logistic Regression (RSMAX2-elastic)': bnlearn_rsmax2_dimension_dict_scores["lr_e"],
'BN Logistic Regression (H2PC-none)': bnlearn_h2pc_dimension_dict_scores["lr"],
'BN Logistic Regression (H2PC-l1)': bnlearn_h2pc_dimension_dict_scores["lr_l1"],
'BN Logistic Regression (H2PC-l2)': bnlearn_h2pc_dimension_dict_scores["lr_l2"],
'BN Logistic Regression (H2PC-elastic)': bnlearn_h2pc_dimension_dict_scores["lr_e"],
'POMEGRANATE Logistic Regression (Exact-none)': pomegranate_exact_dimension_dict_scores["lr"],
'POMEGRANATE Logistic Regression (Exact-l1)': pomegranate_exact_dimension_dict_scores["lr_l1"],
'POMEGRANATE Logistic Regression (Exact-l2)': pomegranate_exact_dimension_dict_scores["lr_l2"],
'POMEGRANATE Logistic Regression (Exact-elastic)': pomegranate_exact_dimension_dict_scores[
"lr_e"],
'POMEGRANATE Logistic Regression (Greedy-none)': pomegranate_greedy_dimension_dict_scores[
"lr"],
'POMEGRANATE Logistic Regression (Greedy-l1)': pomegranate_greedy_dimension_dict_scores[
"lr_l1"],
'POMEGRANATE Logistic Regression (Greedy-l2)': pomegranate_greedy_dimension_dict_scores[
"lr_l2"],
'POMEGRANATE Logistic Regression (Greedy-elastic)': pomegranate_greedy_dimension_dict_scores["lr_e"],
'PGMPY Logistic Regression (HC-none)': pgmpy_hc_dimension_dict_scores["lr"],
'PGMPY Logistic Regression (HC-l1)': pgmpy_hc_dimension_dict_scores["lr_l1"],
'PGMPY Logistic Regression (HC-l2)': pgmpy_hc_dimension_dict_scores["lr_l2"],
'PGMPY Logistic Regression (HC-elastic)': pgmpy_hc_dimension_dict_scores["lr_e"],
'PGMPY Logistic Regression (TREE-none)': pgmpy_tree_dimension_dict_scores["lr"],
'PGMPY Logistic Regression (TREE-l1)': pgmpy_tree_dimension_dict_scores["lr_l1"],
'PGMPY Logistic Regression (TREE-l2)': pgmpy_tree_dimension_dict_scores["lr_l2"],
'PGMPY Logistic Regression (TREE-elastic)': pgmpy_tree_dimension_dict_scores["lr_e"],
'PGMPY Logistic Regression (MMHC-none)': pgmpy_mmhc_dimension_dict_scores["lr"],
'PGMPY Logistic Regression (MMHC-l1)': pgmpy_mmhc_dimension_dict_scores["lr_l1"],
'PGMPY Logistic Regression (MMHC-l2)': pgmpy_mmhc_dimension_dict_scores["lr_l2"],
'PGMPY Logistic Regression (MMHC-elastic)': pgmpy_mmhc_dimension_dict_scores["lr_e"],
'NT Logistic Regression (Logistic-none)': notears_dimension_dict_scores["lr"],
'NT Logistic Regression (Logistic-l1)': notears_dimension_dict_scores["lr_l1"],
'NT Logistic Regression (Logistic-l2)': notears_dimension_dict_scores["lr_l2"],
'NT Logistic Regression (Logistic-elastic)': notears_dimension_dict_scores["lr_e"],
'NT Logistic Regression (L2-none)': notears_l2_dimension_dict_scores["lr"],
'NT Logistic Regression (L2-l1)': notears_l2_dimension_dict_scores["lr_l1"],
'NT Logistic Regression (L2-l2)': notears_l2_dimension_dict_scores["lr_l2"],
'NT Logistic Regression (L2-elastic)': notears_l2_dimension_dict_scores["lr_e"],
'NT Logistic Regression (Poisson-none)': notears_poisson_dimension_dict_scores["lr"],
'NT Logistic Regression (Poisson-l1)': notears_poisson_dimension_dict_scores["lr_l1"],
'NT Logistic Regression (Poisson-l2)': notears_poisson_dimension_dict_scores["lr_l2"],
'NT Logistic Regression (Poisson-elastic)': notears_poisson_dimension_dict_scores["lr_e"],
'BN Naive Bayes (HC-bernoulli)': bnlearn_dimension_dict_scores["nb"],
'BN Naive Bayes (HC-gaussian)': bnlearn_dimension_dict_scores["nb_g"],
'BN Naive Bayes (HC-multinomial)': bnlearn_dimension_dict_scores["nb_m"],
'BN Naive Bayes (HC-complement)': bnlearn_dimension_dict_scores["nb_c"],
'BN Naive Bayes (TABU-bernoulli)': bnlearn_tabu_dimension_dict_scores["nb"],
'BN Naive Bayes (TABU-gaussian)': bnlearn_tabu_dimension_dict_scores["nb_g"],
'BN Naive Bayes (TABU-multinomial)': bnlearn_tabu_dimension_dict_scores["nb_m"],
'BN Naive Bayes (TABU-complement)': bnlearn_tabu_dimension_dict_scores["nb_c"],
#'BN Naive Bayes (PC-bernoulli)': bnlearn_pc_dimension_dict_scores["nb"],
#'BN Naive Bayes (PC-gaussian)': bnlearn_pc_dimension_dict_scores["nb_g"],
#'BN Naive Bayes (PC-multinomial)': bnlearn_pc_dimension_dict_scores["nb_m"],
#'BN Naive Bayes (PC-complement)': bnlearn_pc_dimension_dict_scores["nb_c"],
'BN Naive Bayes (MMHC-bernoulli)': bnlearn_mmhc_dimension_dict_scores["nb"],
'BN Naive Bayes (MMHC-gaussian)': bnlearn_mmhc_dimension_dict_scores["nb_g"],
'BN Naive Bayes (MMHC-multinomial)': bnlearn_mmhc_dimension_dict_scores["nb_m"],
'BN Naive Bayes (MMHC-complement)': bnlearn_mmhc_dimension_dict_scores["nb_c"],
'BN Naive Bayes (RSMAX2-bernoulli)': bnlearn_rsmax2_dimension_dict_scores["nb"],
'BN Naive Bayes (RSMAX2-gaussian)': bnlearn_rsmax2_dimension_dict_scores["nb_g"],
'BN Naive Bayes (RSMAX2-multinomial)': bnlearn_rsmax2_dimension_dict_scores["nb_m"],
'BN Naive Bayes (RSMAX2-complement)': bnlearn_rsmax2_dimension_dict_scores["nb_c"],
'BN Naive Bayes (H2PC-bernoulli)': bnlearn_h2pc_dimension_dict_scores["nb"],
'BN Naive Bayes (H2PC-gaussian)': bnlearn_h2pc_dimension_dict_scores["nb_g"],
'BN Naive Bayes (H2PC-multinomial)': bnlearn_h2pc_dimension_dict_scores["nb_m"],
'BN Naive Bayes (H2PC-complement)': bnlearn_h2pc_dimension_dict_scores["nb_c"],
'NT Naive Bayes (Logistic-bernoulli)': notears_dimension_dict_scores["nb"],
'NT Naive Bayes (Logistic-gaussian)': notears_dimension_dict_scores["nb_g"],
'NT Naive Bayes (Logistic-multinomial)': notears_dimension_dict_scores["nb_m"],
'NT Naive Bayes (Logistic-complement)': notears_dimension_dict_scores["nb_c"],
'NT Naive Bayes (L2-bernoulli)': notears_l2_dimension_dict_scores["nb"],
'NT Naive Bayes (L2-gaussian)': notears_l2_dimension_dict_scores["nb_g"],
'NT Naive Bayes (L2-multinomial)': notears_l2_dimension_dict_scores["nb_m"],
'NT Naive Bayes (L2-complement)': notears_l2_dimension_dict_scores["nb_c"],
'NT Naive Bayes (Poisson-bernoulli)': notears_poisson_dimension_dict_scores["nb"],
'NT Naive Bayes (Poisson-gaussian)': notears_poisson_dimension_dict_scores["nb_g"],
'NT Naive Bayes (Poisson-multinomial)': notears_poisson_dimension_dict_scores["nb_m"],
'NT Naive Bayes (Poisson-complement)': notears_poisson_dimension_dict_scores["nb_c"],
'POMEGRANATE Naive Bayes (Greedy-bernoulli)': pomegranate_greedy_dimension_dict_scores["nb"],
'POMEGRANATE Naive Bayes (Greedy-gaussian)': pomegranate_greedy_dimension_dict_scores["nb_g"],
'POMEGRANATE Naive Bayes (Greedy-multinomial)': pomegranate_greedy_dimension_dict_scores[
"nb_m"],
'POMEGRANATE Naive Bayes (Greedy-complement)': pomegranate_greedy_dimension_dict_scores[
"nb_c"],
'POMEGRANATE Naive Bayes (Exact-bernoulli)': pomegranate_exact_dimension_dict_scores["nb"],
'POMEGRANATE Naive Bayes (Exact-gaussian)': pomegranate_exact_dimension_dict_scores["nb_g"],
'POMEGRANATE Naive Bayes (Exact-multinomial)': pomegranate_exact_dimension_dict_scores["nb_m"],
'POMEGRANATE Naive Bayes (Exact-complement)': pomegranate_exact_dimension_dict_scores["nb_c"],
'PGMPY Naive Bayes (HC-bernoulli)': pgmpy_hc_dimension_dict_scores["nb"],
'PGMPY Naive Bayes (HC-gaussian)': pgmpy_hc_dimension_dict_scores["nb_g"],
'PGMPY Naive Bayes (HC-multinomial)': pgmpy_hc_dimension_dict_scores["nb_m"],
'PGMPY Naive Bayes (HC-complement)': pgmpy_hc_dimension_dict_scores["nb_c"],
'PGMPY Naive Bayes (MMHC-bernoulli)': pgmpy_mmhc_dimension_dict_scores["nb"],
'PGMPY Naive Bayes (MMHC-gaussian)': pgmpy_mmhc_dimension_dict_scores["nb_g"],
'PGMPY Naive Bayes (MMHC-multinomial)': pgmpy_mmhc_dimension_dict_scores["nb_m"],
'PGMPY Naive Bayes (MMHC-complement)': pgmpy_mmhc_dimension_dict_scores["nb_c"],
'PGMPY Naive Bayes (TREE-bernoulli)': pgmpy_tree_dimension_dict_scores["nb"],
'PGMPY Naive Bayes (TREE-gaussian)': pgmpy_tree_dimension_dict_scores["nb_g"],
'PGMPY Naive Bayes (TREE-multinomial)': pgmpy_tree_dimension_dict_scores["nb_m"],
'PGMPY Naive Bayes (TREE-complement)': pgmpy_tree_dimension_dict_scores["nb_c"],
'BN Support Vector Machine (HC-sigmoid)': bnlearn_dimension_dict_scores["svm"],
'BN Support Vector Machine (HC-polynomial)': bnlearn_dimension_dict_scores["svm_po"],
'BN Support Vector Machine (HC-rbf)': bnlearn_dimension_dict_scores["svm_r"],
'BN Support Vector Machine (TABU-sigmoid)': bnlearn_tabu_dimension_dict_scores["svm"],
'BN Support Vector Machine (TABU-polynomial)': bnlearn_tabu_dimension_dict_scores["svm_po"],
'BN Support Vector Machine (TABU-rbf)': bnlearn_tabu_dimension_dict_scores["svm_r"],
#'BN Support Vector Machine (PC-sigmoid)': bnlearn_pc_dimension_dict_scores["svm"],
#'BN Support Vector Machine (PC-polynomial)': bnlearn_pc_dimension_dict_scores["svm_po"],
#'BN Support Vector Machine (PC-rbf)': bnlearn_pc_dimension_dict_scores["svm_r"],
'BN Support Vector Machine (MMHC-sigmoid)': bnlearn_mmhc_dimension_dict_scores["svm"],
'BN Support Vector Machine (MMHC-polynomial)': bnlearn_mmhc_dimension_dict_scores["svm_po"],
'BN Support Vector Machine (MMHC-rbf)': bnlearn_mmhc_dimension_dict_scores["svm_r"],
'BN Support Vector Machine (RSMAX2-sigmoid)': bnlearn_rsmax2_dimension_dict_scores["svm"],
'BN Support Vector Machine (RSMAX2-polynomial)': bnlearn_rsmax2_dimension_dict_scores[
"svm_po"],
'BN Support Vector Machine (RSMAX2-rbf)': bnlearn_rsmax2_dimension_dict_scores["svm_r"],
'BN Support Vector Machine (H2PC-sigmoid)': bnlearn_h2pc_dimension_dict_scores["svm"],
'BN Support Vector Machine (H2PC-polynomial)': bnlearn_h2pc_dimension_dict_scores["svm_po"],
'BN Support Vector Machine (H2PC-rbf)': bnlearn_h2pc_dimension_dict_scores["svm_r"],
'NT Support Vector Machine (Logistic-sigmoid)': notears_dimension_dict_scores["svm"],
'NT Support Vector Machine (Logistic-polynomial)': notears_dimension_dict_scores["svm_po"],
'NT Support Vector Machine (Logistic-rbf)': notears_dimension_dict_scores["svm_r"],
'NT Support Vector Machine (L2-sigmoid)': notears_l2_dimension_dict_scores["svm"],
'NT Support Vector Machine (L2-polynomial)': notears_l2_dimension_dict_scores["svm_po"],
'NT Support Vector Machine (L2-rbf)': notears_l2_dimension_dict_scores["svm_r"],
'NT Support Vector Machine (Poisson-sigmoid)': notears_poisson_dimension_dict_scores["svm"],
'NT Support Vector Machine (Poisson-polynomial)': notears_poisson_dimension_dict_scores[
"svm_po"],
'NT Support Vector Machine (Poisson-rbf)': notears_poisson_dimension_dict_scores["svm_r"],
'POMEGRANATE Support Vector Machine (Exact-sigmoid)': pomegranate_exact_dimension_dict_scores["svm"],
'POMEGRANATE Support Vector Machine (Exact-polynomial)': pomegranate_exact_dimension_dict_scores["svm_po"],
'POMEGRANATE Support Vector Machine (Exact-rbf)': pomegranate_exact_dimension_dict_scores["svm_r"],
'POMEGRANATE Support Vector Machine (Greedy-sigmoid)': pomegranate_greedy_dimension_dict_scores["svm"],
'POMEGRANATE Support Vector Machine (Greedy-polynomial)': pomegranate_greedy_dimension_dict_scores["svm_po"],
'POMEGRANATE Support Vector Machine (Greedy-rbf)': pomegranate_greedy_dimension_dict_scores["svm_r"],
'PGMPY Support Vector Machine (HC-sigmoid)': pgmpy_hc_dimension_dict_scores["svm"],
'PGMPY Support Vector Machine (HC-polynomial)': pgmpy_hc_dimension_dict_scores["svm_po"],
'PGMPY Support Vector Machine (HC-rbf)': pgmpy_hc_dimension_dict_scores["svm_r"],
'PGMPY Support Vector Machine (MMHC-sigmoid)': pgmpy_mmhc_dimension_dict_scores["svm"],
'PGMPY Support Vector Machine (MMHC-polynomial)': pgmpy_mmhc_dimension_dict_scores["svm_po"],
'PGMPY Support Vector Machine (MMHC-rbf)': pgmpy_mmhc_dimension_dict_scores["svm_r"],
'PGMPY Support Vector Machine (TREE-sigmoid)': pgmpy_tree_dimension_dict_scores["svm"],
'PGMPY Support Vector Machine (TREE-polynomial)': pgmpy_tree_dimension_dict_scores["svm_po"],
'PGMPY Support Vector Machine (TREE-rbf)': pgmpy_tree_dimension_dict_scores["svm_r"],
'BN K Nearest Neighbor (HC-weight)': bnlearn_dimension_dict_scores["knn"],
'BN K Nearest Neighbor (HC-distance)': bnlearn_dimension_dict_scores["knn_d"],
'BN K Nearest Neighbor (TABU-weight)': bnlearn_tabu_dimension_dict_scores["knn"],
'BN K Nearest Neighbor (TABU-distance)': bnlearn_tabu_dimension_dict_scores["knn_d"],
#'BN K Nearest Neighbor (PC-weight)': bnlearn_pc_dimension_dict_scores["knn"],
#'BN K Nearest Neighbor (PC-distance)': bnlearn_pc_dimension_dict_scores["knn_d"],
'BN K Nearest Neighbor (MMHC-weight)': bnlearn_mmhc_dimension_dict_scores["knn"],
'BN K Nearest Neighbor (MMHC-distance)': bnlearn_mmhc_dimension_dict_scores["knn_d"],
'BN K Nearest Neighbor (RSMAX2-weight)': bnlearn_rsmax2_dimension_dict_scores["knn"],
'BN K Nearest Neighbor (RSMAX2-distance)': bnlearn_rsmax2_dimension_dict_scores["knn_d"],
'BN K Nearest Neighbor (H2PC-weight)': bnlearn_h2pc_dimension_dict_scores["knn"],
'BN K Nearest Neighbor (H2PC-distance)': bnlearn_h2pc_dimension_dict_scores["knn_d"],
'NT K Nearest Neighbor (Logistic-weight)': notears_dimension_dict_scores["knn"],
'NT K Nearest Neighbor (Logistic-distance)': notears_dimension_dict_scores["knn_d"],
'NT K Nearest Neighbor (L2-weight)': notears_l2_dimension_dict_scores["knn"],
'NT K Nearest Neighbor (L2-distance)': notears_l2_dimension_dict_scores["knn_d"],
'NT K Nearest Neighbor (Poisson-weight)': notears_poisson_dimension_dict_scores["knn"],
'NT K Nearest Neighbor (Poisson-distance)': notears_poisson_dimension_dict_scores["knn_d"],
'POMEGRANATE K Nearest Neighbor (Exact-weight)': pomegranate_exact_dimension_dict_scores[
"knn"],
'POMEGRANATE K Nearest Neighbor (Exact-distance)': pomegranate_exact_dimension_dict_scores[
"knn_d"],
'POMEGRANATE K Nearest Neighbor (Greedy-weight)': pomegranate_greedy_dimension_dict_scores[
"knn"],
'POMEGRANATE K Nearest Neighbor (Greedy-distance)': pomegranate_greedy_dimension_dict_scores["knn_d"],
'PGMPY K Nearest Neighbor (HC-weight)': pgmpy_hc_dimension_dict_scores["knn"],
'PGMPY K Nearest Neighbor (HC-distance)': pgmpy_hc_dimension_dict_scores["knn_d"],
'PGMPY K Nearest Neighbor (MMHC-weight)': pgmpy_mmhc_dimension_dict_scores["knn"],
'PGMPY K Nearest Neighbor (MMHC-distance)': pgmpy_mmhc_dimension_dict_scores["knn_d"],
'PGMPY K Nearest Neighbor (TREE-weight)': pgmpy_tree_dimension_dict_scores["knn"],
'PGMPY K Nearest Neighbor (TREE-distance)': pgmpy_tree_dimension_dict_scores["knn_d"]}
top_learned_dimension = max(sim_dimension_workflows, key=sim_dimension_workflows.get)
print("Learned world - Dimensional problem, Prediction: "+ top_learned_dimension + " (" + str(sim_dimension_workflows[top_learned_dimension]) + ")")
real_experiment_summary = pd.read_csv("real_experiments_summary.csv")
real_experiment_summary
learned_experiment_summary = pd.read_csv("simulation_experiments_summary.csv")
learned_experiment_summary
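# Note: the bare `real_experiment_summary` / `learned_experiment_summary` expressions
# above only render the DataFrames when evaluated in a notebook cell; in a plain
# script they would need to be wrapped in print(...) to show any output.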
prediction_real_learned()
| 104.418583
| 18,922
| 0.681068
| 55,255
| 440,542
| 4.962338
| 0.006823
| 0.153833
| 0.068973
| 0.021138
| 0.982815
| 0.972377
| 0.941735
| 0.743977
| 0.603266
| 0.511609
| 0
| 0.014729
| 0.193686
| 440,542
| 4,218
| 18,923
| 104.443338
| 0.757181
| 0.13356
| 0
| 0.19639
| 0
| 0
| 0.206952
| 0.002612
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001915
| false
| 0
| 0.022429
| 0
| 0.024617
| 0.003829
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
baf753f06ad3c6f961914a82b698ca4f45e00a5a
| 104
|
py
|
Python
|
backend/app/crud/__init__.py
|
djangbahevans/peg-case-study
|
a0559f86e91ab7caff1cd730d580fa61625306ce
|
[
"MIT"
] | 2
|
2022-03-27T17:19:09.000Z
|
2022-03-27T17:21:02.000Z
|
backend/app/crud/__init__.py
|
djangbahevans/peg-case-study
|
a0559f86e91ab7caff1cd730d580fa61625306ce
|
[
"MIT"
] | null | null | null |
backend/app/crud/__init__.py
|
djangbahevans/peg-case-study
|
a0559f86e91ab7caff1cd730d580fa61625306ce
|
[
"MIT"
] | null | null | null |
from .crud_user import user
from .crud_reservation import reservation
from .crud_payment import payment
| 26
| 41
| 0.855769
| 15
| 104
| 5.733333
| 0.4
| 0.27907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 104
| 3
| 42
| 34.666667
| 0.934783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2439c8397d30c77696d22aeda814112b93823892
| 107
|
py
|
Python
|
models/__init__.py
|
Shashank-Holla/motleyNet
|
05a8c758f650a90f5f53e51bb89909fdc1b735f4
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
Shashank-Holla/motleyNet
|
05a8c758f650a90f5f53e51bb89909fdc1b735f4
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
Shashank-Holla/motleyNet
|
05a8c758f650a90f5f53e51bb89909fdc1b735f4
|
[
"MIT"
] | null | null | null |
from .cifar_model import *
from .mnist_model import *
from .resnet import *
from .custom_resnet import *
| 26.75
| 28
| 0.757009
| 15
| 107
| 5.2
| 0.466667
| 0.384615
| 0.384615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168224
| 107
| 4
| 28
| 26.75
| 0.876404
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
24503550d22d89067f22e17b6ceb87a6522f464b
| 156
|
py
|
Python
|
sortingComplexity/__main__.py
|
kenburke/sortingAlgorithms
|
cfc7835c5fc0df6a3836d9d12f1071776ee3c472
|
[
"Apache-2.0"
] | null | null | null |
sortingComplexity/__main__.py
|
kenburke/sortingAlgorithms
|
cfc7835c5fc0df6a3836d9d12f1071776ee3c472
|
[
"Apache-2.0"
] | null | null | null |
sortingComplexity/__main__.py
|
kenburke/sortingAlgorithms
|
cfc7835c5fc0df6a3836d9d12f1071776ee3c472
|
[
"Apache-2.0"
] | null | null | null |
# this file is only called when the package is called from the command
# line
from .run import basic_test, complexity_test
basic_test()
complexity_test()
| 19.5
| 70
| 0.788462
| 25
| 156
| 4.76
| 0.64
| 0.151261
| 0.319328
| 0.386555
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.160256
| 156
| 7
| 71
| 22.285714
| 0.908397
| 0.467949
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
24641be835070ae1f0cb45af5dec30f3d1aef27c
| 16,790
|
py
|
Python
|
commands3.py
|
ShravanPY/pybot-timmy
|
2df9c53979c726a3b9b5768cdec53e90d5c5b624
|
[
"BSD-3-Clause"
] | null | null | null |
commands3.py
|
ShravanPY/pybot-timmy
|
2df9c53979c726a3b9b5768cdec53e90d5c5b624
|
[
"BSD-3-Clause"
] | null | null | null |
commands3.py
|
ShravanPY/pybot-timmy
|
2df9c53979c726a3b9b5768cdec53e90d5c5b624
|
[
"BSD-3-Clause"
] | null | null | null |
import discord
from discord.ext import commands
import asyncio
import time
class ModerationCommands(commands.Cog):
def __init__(self, client):
self.client = client
self.spamDetect = {}  # per-guild anti-spam flags; 'guild' is undefined in __init__, so start with an empty dict
# Anti-Spam detection (beta)
@client.event
async def on_message(message):
if self.spamDetect.get(message.guild.id):  # 'guild' is undefined here; use the message's guild id
counter = 0
channel = message.channel
with open('AntiSpam.txt', "r+") as file:
for line in file:
if line.strip("\n") == str(message.author.id):
counter += 1
file.writelines(f'{str(message.author.id)}\n')
if counter > 6:
muted_role = discord.utils.get(message.guild.roles, name="Muted")
if not muted_role:
muted_role = await message.guild.create_role(name="Muted")
for channel in message.guild.channels:
await channel.set_permissions(muted_role, speak=False, send_messages=False,
read_message_history=True, read_messages=False)
if message.author.guild_permissions.administrator:
return None
else:
await message.author.add_roles(muted_role, reason='Spam')
embed = discord.Embed(color=discord.Color.blue())
embed.add_field(name=f'{message.author} has been warned.', value=f'Moderator: Timmy',
inline=False)
embed.add_field(name=f'Reason:', value='Spam detected.', inline=True)
embed.add_field(name=f'Punishment:', value='Muted for 10 seconds', inline=True)
await channel.send(embed=embed)
await asyncio.sleep(10)
await message.author.remove_roles(muted_role)
embed = discord.Embed(color=discord.Color.blue())
embed.add_field(name=f'{message.author} has been unmuted.', value=f'Moderator: Timmy',
inline=False)
await channel.send(embed=embed)
await client.process_commands(message)
# Kick command
@commands.command(aliases=['as'])
async def antispam(self, ctx, mode=None):
guild = ctx.message.author.guild
if ctx.message.author.guild_permissions.ban_members or ctx.message.author.guild_permissions.kick_members or ctx.message.author.guild_permissions.administrator:
if mode == 'on':
if self.spamDetect.get(guild.id, True):
embed = discord.Embed(color=discord.Color.red())
embed.add_field(name='Anti-spam is already ON', value='**To turn it off, say [//antispam off].**',
inline=False)
await ctx.send(embed=embed)
else:
self.spamDetect[guild.id] = True
embed = discord.Embed(color=discord.Color.red())
embed.add_field(name='Anti-spam ON', value='**Will START checking for spam this session.**',
inline=False)
await ctx.send(embed=embed)
elif mode == 'off':
if not self.spamDetect.get(guild.id, True):
embed = discord.Embed(color=discord.Color.red())
embed.add_field(name='Anti-spam is already OFF', value='**To turn it on, say [//antispam on].**',
inline=False)
await ctx.send(embed=embed)
else:
self.spamDetect[guild.id] = False
embed = discord.Embed(color=discord.Color.red())
embed.add_field(name='Anti-spam OFF', value='**Will STOP checking for spam this session.**',
inline=False)
await ctx.send(embed=embed)
else:
if self.spamDetect.get(guild.id, True):
embed = discord.Embed(color=discord.Color.red())
embed.add_field(name='Anti-spam is currently ON', value='**To turn it off, say [//antispam off].**',
inline=False)
await ctx.send(embed=embed)
else:
embed = discord.Embed(color=discord.Color.red())
embed.add_field(name='Anti-spam is currently OFF', value='**To turn it on, say [//antispam on].**',
inline=False)
await ctx.send(embed=embed)
else:
embed = discord.Embed(color=discord.Color.red())
embed.add_field(name='Uh oh...', value='**You cannot use this command.**', inline=False)
await ctx.send(embed=embed)
# Kick command
@commands.command(aliases=['k'])
async def kick(self, ctx, user: discord.Member, *, reason=None):
if not ctx.message.author.guild_permissions.administrator:
if user.guild_permissions.ban_members or user.guild_permissions.kick_members or user.guild_permissions.administrator:
embed = discord.Embed(color=discord.Color.red())
embed.add_field(name='Uh oh...', value='**You cannot kick this member because they have \
additional/similar moderation abilities compared to you.**',
inline=False)
await ctx.send(embed=embed)
elif ctx.message.author.guild_permissions.kick_members or ctx.message.author.guild_permissions.administrator:
if reason is None:
embed = discord.Embed(color=discord.Color.red())
embed.add_field(name='Uh oh...', value='**Please provide a reason.**', inline=False)
await ctx.send(embed=embed)
else:
try:
await ctx.guild.kick(user=user, reason=reason)
embed = discord.Embed(color=discord.Color.blue())
embed.add_field(name=f'{user} has been kicked.', value=f'Moderator: {ctx.message.author}'
f'\nReason: {reason}', inline=False)
await ctx.send(embed=embed)
except discord.errors.Forbidden:
embed = discord.Embed(color=discord.Color.red())
embed.add_field(name='Uh oh...', value='**Make sure that my role is higher than \
the role of the member you are kicking.**', inline=False)
await ctx.send(embed=embed)
else:
embed = discord.Embed(color=discord.Color.red())
embed.add_field(name='Uh oh...', value='**You cannot use this command.**', inline=False)
await ctx.send(embed=embed)
# Ban command
@commands.command(aliases=['b'])
async def ban(self, ctx, user: discord.Member, *, reason=None):
if not ctx.message.author.guild_permissions.administrator:
if user.guild_permissions.ban_members or user.guild_permissions.kick_members or user.guild_permissions.administrator:
embed = discord.Embed(color=discord.Color.red())
embed.add_field(name='Uh oh...', value='**You cannot ban this member because they have \
additional/similar moderation abilities compared to you.**',
inline=False)
await ctx.send(embed=embed)
elif ctx.message.author.guild_permissions.ban_members or ctx.message.author.guild_permissions.administrator:
if reason is None:
embed = discord.Embed(color=discord.Color.red())
embed.add_field(name='Uh oh...', value='**Please provide a reason.**', inline=False)
await ctx.send(embed=embed)
else:
try:
await ctx.guild.ban(user=user, reason=reason)
embed = discord.Embed(color=discord.Color.blue())
embed.add_field(name=f'{user} has been banned.', value=f'Moderator: {ctx.message.author}'
f'\nReason: {reason}', inline=False)
await ctx.send(embed=embed)
except discord.errors.Forbidden:
embed = discord.Embed(color=discord.Color.red())
embed.add_field(name='Uh oh...', value='**Make sure that my role is higher than \
the role of the member you are banning.**', inline=False)
await ctx.send(embed=embed)
else:
embed = discord.Embed(color=discord.Color.red())
embed.add_field(name='Uh oh...', value='**You cannot use this command.**', inline=False)
await ctx.send(embed=embed)
# Unban command (expects the member as "name#discriminator")
@commands.command(aliases=['ub'])
async def unban(self, ctx, *, member):
if ctx.message.author.guild_permissions.ban_members:
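# Search the guild's ban list for an entry matching the given name#discriminator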
banned_users = await ctx.guild.bans()
member_name, member_discriminator = member.split('#')
for ban_entry in banned_users:
user = ban_entry.user
if (user.name, user.discriminator) == (member_name, member_discriminator):
await ctx.guild.unban(user)
embed = discord.Embed(color=discord.Color.blue())
embed.add_field(name=f'{user} has been unbanned.', value=f'Moderator: {ctx.message.author}',
inline=False)
await ctx.send(embed=embed)
break
else:
embed = discord.Embed(color=discord.Color.red())
embed.add_field(name='Uh oh...', value='**You cannot use this command.**', inline=False)
await ctx.send(embed=embed)
# Mute command
@commands.command(aliases=['m'])
async def mute(self, ctx, user: discord.Member, *, reason=None):
if not ctx.message.author.guild_permissions.administrator:
if user.guild_permissions.ban_members or user.guild_permissions.kick_members:
embed = discord.Embed(color=discord.Color.red())
embed.add_field(name='Uh oh...', value='**You cannot mute this member because they have \
additional/similar moderation abilities compared to you.**',
inline=False)
await ctx.send(embed=embed)
elif ctx.message.author.guild_permissions.ban_members or ctx.message.author.guild_permissions.kick_members or ctx.message.author.guild_permissions.administrator:
if reason is None:
embed = discord.Embed(color=discord.Color.red())
embed.add_field(name='Uh oh...', value='**Please provide a reason.**', inline=False)
await ctx.send(embed=embed)
else:
try:
muted_role = discord.utils.get(ctx.guild.roles, name="Muted")
if not muted_role:
muted_role = await ctx.guild.create_role(name="Muted")
for channel in ctx.guild.channels:
await channel.set_permissions(muted_role, speak=False, send_messages=False,
read_message_history=True, read_messages=False)
await user.add_roles(muted_role, reason=reason)
embed = discord.Embed(color=discord.Color.blue())
embed.add_field(name=f'{user} has been muted.', value=f'Moderator: {ctx.message.author}'
f'\nReason: {reason}', inline=False)
await ctx.send(embed=embed)
except discord.errors.Forbidden:
embed = discord.Embed(color=discord.Color.red())
embed.add_field(name='Uh oh...', value='**Make sure that my role is higher than \
the role of the member you are muting.**', inline=False)
await ctx.send(embed=embed)
else:
embed = discord.Embed(color=discord.Color.red())
embed.add_field(name='Uh oh...', value='**You cannot use this command.**', inline=False)
await ctx.send(embed=embed)
# Unmute command
@commands.command(aliases=['um'])
async def unmute(self, ctx, *, user: discord.Member):
if ctx.message.author.guild_permissions.ban_members:
muted_role = discord.utils.get(ctx.guild.roles, name="Muted")
await user.remove_roles(muted_role)
embed = discord.Embed(color=discord.Color.blue())
embed.add_field(name=f'{user} has been unmuted.', value=f'Moderator: {ctx.message.author}',
inline=False)
await ctx.send(embed=embed)
else:
embed = discord.Embed(color=discord.Color.red())
embed.add_field(name='Uh oh...', value='**You cannot use this command.**', inline=False)
await ctx.send(embed=embed)
# Tempmute command (time goes before reason)
@commands.command(aliases=['tm'])
async def tempmute(self, ctx, user: discord.Member, time: int = None, *, reason=None):
if not ctx.message.author.guild_permissions.administrator:
if user.guild_permissions.ban_members or user.guild_permissions.kick_members or user.guild_permissions.administrator:
embed = discord.Embed(color=discord.Color.red())
embed.add_field(name='Uh oh.', value='**You cannot mute this member because they have \
additional/similar moderation abilities compared to you.**',
inline=False)
await ctx.send(embed=embed)
elif ctx.message.author.guild_permissions.ban_members or ctx.message.author.guild_permissions.kick_members or ctx.message.author.guild_permissions.administrator:
if reason is None:
embed = discord.Embed(color=discord.Color.red())
embed.add_field(name='Uh oh...', value='**Please provide a reason.**', inline=False)
await ctx.send(embed=embed)
elif time is None:
embed = discord.Embed(color=discord.Color.red())
embed.add_field(name='Uh oh...', value='**Please provide the number of seconds.**', inline=False)
await ctx.send(embed=embed)
else:
try:
muted_role = discord.utils.get(ctx.guild.roles, name="Muted")
if not muted_role:
muted_role = await ctx.guild.create_role(name="Muted")
for channel in ctx.guild.channels:
await channel.set_permissions(muted_role, speak=False, send_messages=False,
read_message_history=True, read_messages=False)
await user.add_roles(muted_role, reason=reason)
embed = discord.Embed(color=discord.Color.blue())
embed.add_field(name=f'{user} has been muted.', value=f'Moderator: {ctx.message.author}',
inline=False)
embed.add_field(name=f'Reason:', value=f'{reason}', inline=True)
embed.add_field(name=f'Time:', value=f'{time}', inline=True)
await ctx.send(embed=embed)
await asyncio.sleep(time)
await user.remove_roles(muted_role)
embed = discord.Embed(color=discord.Color.blue())
embed.add_field(name=f'{user} has been unmuted.', value=f'Moderator: {ctx.message.author}',
inline=False)
await ctx.send(embed=embed)
except discord.errors.Forbidden:
embed = discord.Embed(color=discord.Color.red())
embed.add_field(name='Uh oh...', value='**Make sure that my role is higher than \
the role of the member you are muting.**', inline=False)
await ctx.send(embed=embed)
else:
embed = discord.Embed(color=discord.Color.red())
embed.add_field(name='Uh oh...', value='**You cannot use this command.**', inline=False)
await ctx.send(embed=embed)
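# Entry point called by discord.py when this extension is loaded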
def setup(client):
client.add_cog(ModerationCommands(client))
| 58.501742
| 169
| 0.552055
| 1,868
| 16,790
| 4.883833
| 0.093683
| 0.034199
| 0.055574
| 0.072673
| 0.860682
| 0.832511
| 0.814644
| 0.808506
| 0.803025
| 0.783295
| 0
| 0.000629
| 0.337641
| 16,790
| 286
| 170
| 58.706294
| 0.81971
| 0.009589
| 0
| 0.671756
| 0
| 0
| 0.097353
| 0.001564
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007634
| false
| 0
| 0.015267
| 0
| 0.030534
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|